From fe829285a6f26b27866f1ca67bb9865258583197 Mon Sep 17 00:00:00 2001 From: Ilia Denisov Date: Sun, 26 Apr 2026 20:34:39 +0200 Subject: [PATCH] feat: use postgres --- ARCHITECTURE.md | 173 +- PG_PLAN.md | 920 ++++++++ authsession/README.md | 6 +- authsession/docs/redis-config.md | 88 + authsession/docs/runbook.md | 27 +- authsession/docs/runtime.md | 21 +- authsession/gateway_compatibility_test.go | 28 +- authsession/go.mod | 22 +- authsession/go.sum | 56 +- .../adapters/redis/challengestore/store.go | 71 +- .../redis/challengestore/store_test.go | 101 +- authsession/internal/adapters/redis/client.go | 56 + .../adapters/redis/configprovider/store.go | 70 +- .../redis/configprovider/store_test.go | 107 +- .../redis/projectionpublisher/publisher.go | 69 +- .../projectionpublisher/publisher_test.go | 112 +- .../redis/sendemailcodeabuse/protector.go | 70 +- .../sendemailcodeabuse/protector_test.go | 93 +- .../adapters/redis/sessionstore/store.go | 70 +- .../adapters/redis/sessionstore/store_test.go | 102 +- authsession/internal/app/runtime.go | 87 +- authsession/internal/app/runtime_test.go | 14 +- authsession/internal/config/config.go | 64 +- authsession/internal/config/config_test.go | 93 +- authsession/production_hardening_test.go | 27 +- ...service_real_runtime_compatibility_test.go | 273 --- client/go.mod | 4 +- client/go.sum | 4 +- game/go.mod | 8 +- game/go.sum | 10 +- gateway/.env.example | 12 +- gateway/README.md | 47 +- gateway/cmd/gateway/main.go | 111 +- gateway/cmd/gateway/main_test.go | 30 +- gateway/docs/redis-config.md | 109 + gateway/docs/runbook.md | 31 +- gateway/go.mod | 21 +- gateway/go.sum | 35 +- gateway/internal/config/config.go | 94 +- gateway/internal/config/config_test.go | 242 ++- gateway/internal/events/client_subscriber.go | 78 +- .../internal/events/client_subscriber_test.go | 13 +- gateway/internal/events/subscriber.go | 86 +- gateway/internal/events/subscriber_test.go | 21 +- gateway/internal/redisclient/client.go | 55 + 
gateway/internal/replay/redis.go | 60 +- gateway/internal/replay/redis_test.go | 130 +- gateway/internal/session/redis.go | 60 +- gateway/internal/session/redis_test.go | 88 +- gateway/internal/telemetry/runtime.go | 21 + geoprofile/PLAN.md | 144 ++ geoprofile/README.md | 72 + go.work | 4 + go.work.sum | 63 +- integration/authsessionmail/harness_test.go | 24 +- integration/authsessionuser/harness_test.go | 16 +- .../gatewayauthsession/harness_test.go | 8 +- .../gatewayauthsessionmail/harness_test.go | 31 +- .../gatewayauthsessionuser/harness_test.go | 20 +- .../gateway_authsession_user_mail_test.go | 44 +- integration/gatewayuser/harness_test.go | 16 +- integration/go.mod | 30 +- integration/go.sum | 79 +- .../internal/harness/authsessionservice.go | 13 + .../internal/harness/gatewayservice.go | 12 + integration/internal/harness/lobbyservice.go | 51 + integration/internal/harness/mailservice.go | 51 + .../internal/harness/notificationservice.go | 55 + .../internal/harness/postgres_container.go | 241 +++ .../harness/postgres_container_test.go | 138 ++ integration/internal/harness/userservice.go | 51 + .../lobby_notification_test.go | 46 +- integration/lobbyuser/lobby_user_test.go | 32 +- .../notification_gateway_test.go | 42 +- .../notification_mail_test.go | 67 +- .../notification_user_test.go | 124 +- lobby/Makefile | 10 + lobby/README.md | 111 +- lobby/cmd/jetgen/main.go | 236 ++ lobby/docs/examples.md | 46 +- lobby/docs/postgres-migration.md | 386 ++++ lobby/docs/runbook.md | 65 +- lobby/docs/runtime.md | 30 +- lobby/go.mod | 44 +- lobby/go.sum | 276 ++- .../postgres/applicationstore/store.go | 310 +++ .../postgres/applicationstore/store_test.go | 194 ++ .../adapters/postgres/gamestore/codecs.go | 94 + .../adapters/postgres/gamestore/store.go | 610 ++++++ .../adapters/postgres/gamestore/store_test.go | 338 +++ .../postgres/internal/pgtest/pgtest.go | 208 ++ .../adapters/postgres/internal/sqlx/sqlx.go | 96 + .../adapters/postgres/invitestore/store.go | 348 +++ 
.../postgres/invitestore/store_test.go | 199 ++ .../postgres/jet/lobby/model/applications.go | 22 + .../postgres/jet/lobby/model/games.go | 34 + .../jet/lobby/model/goose_db_version.go | 19 + .../postgres/jet/lobby/model/invites.go | 24 + .../postgres/jet/lobby/model/memberships.go | 23 + .../postgres/jet/lobby/model/race_names.go | 20 + .../postgres/jet/lobby/table/applications.go | 96 + .../postgres/jet/lobby/table/games.go | 132 ++ .../jet/lobby/table/goose_db_version.go | 87 + .../postgres/jet/lobby/table/invites.go | 102 + .../postgres/jet/lobby/table/memberships.go | 99 + .../postgres/jet/lobby/table/race_names.go | 102 + .../jet/lobby/table/table_use_schema.go | 19 + .../postgres/membershipstore/store.go | 346 +++ .../postgres/membershipstore/store_test.go | 213 ++ .../postgres/migrations/00001_init.sql | 169 ++ .../postgres/migrations/migrations.go | 19 + .../postgres/racenamedir/directory.go | 1039 +++++++++ .../postgres/racenamedir/directory_test.go | 193 ++ .../adapters/redisstate/applicationstore.go | 277 --- .../redisstate/applicationstore_test.go | 360 ---- lobby/internal/adapters/redisstate/codecs.go | 172 -- .../adapters/redisstate/codecs_application.go | 73 - .../adapters/redisstate/codecs_invite.go | 77 - .../adapters/redisstate/codecs_membership.go | 75 - .../adapters/redisstate/codecs_racename.go | 111 - lobby/internal/adapters/redisstate/doc.go | 17 +- .../internal/adapters/redisstate/gamestore.go | 454 ---- .../adapters/redisstate/gamestore_test.go | 557 ----- .../adapters/redisstate/invitestore.go | 284 --- .../adapters/redisstate/invitestore_test.go | 363 ---- .../internal/adapters/redisstate/keyspace.go | 177 +- .../redisstate/keyspace_test_helpers_test.go | 10 + .../adapters/redisstate/membershipstore.go | 317 --- .../redisstate/membershipstore_test.go | 299 --- .../adapters/redisstate/racenamedir.go | 1101 ---------- .../adapters/redisstate/racenamedir_lua.go | 52 - .../adapters/redisstate/racenamedir_test.go | 244 --- 
lobby/internal/app/bootstrap.go | 60 +- lobby/internal/app/bootstrap_test.go | 28 +- lobby/internal/app/runtime.go | 28 +- lobby/internal/app/runtime_smoke_test.go | 159 -- lobby/internal/app/runtime_test.go | 151 -- lobby/internal/app/wiring.go | 60 +- lobby/internal/config/config.go | 102 +- lobby/internal/config/config_test.go | 138 +- lobby/internal/config/env.go | 18 +- lobby/internal/ports/racenamedirtest/suite.go | 39 +- mail/Makefile | 10 + mail/README.md | 137 +- mail/cmd/jetgen/main.go | 236 ++ mail/docs/README.md | 1 + mail/docs/examples.md | 8 +- mail/docs/flows.md | 41 +- mail/docs/postgres-migration.md | 236 ++ mail/docs/runbook.md | 35 +- mail/docs/runtime.md | 44 +- mail/go.mod | 41 +- mail/go.sum | 270 ++- .../postgres/jet/mail/model/attempts.go | 23 + .../postgres/jet/mail/model/dead_letters.go | 21 + .../postgres/jet/mail/model/deliveries.go | 41 + .../jet/mail/model/delivery_payloads.go | 13 + .../jet/mail/model/delivery_recipients.go | 15 + .../jet/mail/model/goose_db_version.go | 19 + .../jet/mail/model/malformed_commands.go | 23 + .../postgres/jet/mail/table/attempts.go | 99 + .../postgres/jet/mail/table/dead_letters.go | 93 + .../postgres/jet/mail/table/deliveries.go | 153 ++ .../jet/mail/table/delivery_payloads.go | 81 + .../jet/mail/table/delivery_recipients.go | 87 + .../jet/mail/table/goose_db_version.go | 87 + .../jet/mail/table/malformed_commands.go | 99 + .../jet/mail/table/table_use_schema.go | 20 + .../postgres/mailstore/attempt_execution.go | 354 +++ .../postgres/mailstore/auth_acceptance.go | 63 + .../adapters/postgres/mailstore/codecs.go | 176 ++ .../adapters/postgres/mailstore/deliveries.go | 806 +++++++ .../postgres/mailstore/generic_acceptance.go | 87 + .../postgres/mailstore/harness_test.go | 202 ++ .../adapters/postgres/mailstore/helpers.go | 64 + .../postgres/mailstore/malformed_command.go | 148 ++ .../adapters/postgres/mailstore/operator.go | 306 +++ .../adapters/postgres/mailstore/render.go | 101 + 
.../adapters/postgres/mailstore/store.go | 119 + .../adapters/postgres/mailstore/store_test.go | 586 +++++ .../postgres/migrations/00001_init.sql | 134 ++ .../postgres/migrations/migrations.go | 19 + .../adapters/redisstate/atomic_writer.go | 501 ----- .../adapters/redisstate/atomic_writer_test.go | 429 ---- .../redisstate/attempt_execution_store.go | 502 ----- .../attempt_execution_store_test.go | 301 --- .../redisstate/auth_acceptance_store.go | 117 - .../redisstate/auth_acceptance_store_test.go | 117 - mail/internal/adapters/redisstate/codecs.go | 697 ------ .../adapters/redisstate/codecs_test.go | 124 -- mail/internal/adapters/redisstate/errors.go | 12 - .../adapters/redisstate/fixtures_test.go | 201 -- .../redisstate/generic_acceptance_store.go | 148 -- .../generic_acceptance_store_test.go | 145 -- .../adapters/redisstate/index_cleaner.go | 118 - .../adapters/redisstate/index_cleaner_test.go | 112 - mail/internal/adapters/redisstate/keyspace.go | 161 +- .../adapters/redisstate/keyspace_test.go | 73 +- .../redisstate/malformed_command_store.go | 111 - .../adapters/redisstate/offset_codec.go | 40 + .../adapters/redisstate/operator_store.go | 532 ----- .../redisstate/operator_store_test.go | 346 --- .../adapters/redisstate/render_store.go | 74 - mail/internal/app/app.go | 5 + mail/internal/app/bootstrap.go | 36 +- mail/internal/app/runtime.go | 141 +- mail/internal/app/runtime_pgharness_test.go | 208 ++ mail/internal/app/runtime_smoke_test.go | 4 +- mail/internal/app/runtime_stage14_test.go | 32 +- mail/internal/app/runtime_test.go | 49 +- mail/internal/config/config.go | 205 +- mail/internal/config/config_test.go | 134 +- mail/internal/config/env.go | 39 +- mail/internal/config/validation.go | 10 +- mail/internal/worker/attempt_worker_test.go | 347 --- mail/internal/worker/cleanup_worker.go | 73 - mail/internal/worker/command_consumer.go | 12 +- mail/internal/worker/command_consumer_test.go | 391 ---- mail/internal/worker/sqlretention.go | 162 ++ 
notification/Makefile | 10 + notification/README.md | 149 +- notification/cmd/jetgen/main.go | 236 ++ notification/contract_asyncapi_test.go | 1 + notification/docs/examples.md | 4 +- notification/docs/postgres-migration.md | 265 +++ notification/docs/runbook.md | 25 +- notification/docs/runtime.md | 27 +- notification/go.mod | 9 + .../jet/notification/model/dead_letters.go | 25 + .../notification/model/goose_db_version.go | 19 + .../notification/model/malformed_intents.go | 23 + .../jet/notification/model/records.go | 29 + .../postgres/jet/notification/model/routes.go | 33 + .../jet/notification/table/dead_letters.go | 105 + .../notification/table/goose_db_version.go | 87 + .../notification/table/malformed_intents.go | 99 + .../jet/notification/table/records.go | 117 + .../postgres/jet/notification/table/routes.go | 129 ++ .../notification/table/table_use_schema.go | 18 + .../postgres/migrations/00001_init.sql | 105 + .../postgres/migrations/migrations.go | 19 + .../postgres/notificationstore/acceptance.go | 118 + .../postgres/notificationstore/codecs.go | 65 + .../notificationstore/dead_letters.go | 61 + .../notificationstore/harness_test.go | 200 ++ .../postgres/notificationstore/helpers.go | 68 + .../notificationstore/malformed_intents.go | 131 ++ .../postgres/notificationstore/records.go | 223 ++ .../postgres/notificationstore/retention.go | 67 + .../postgres/notificationstore/routes.go | 248 +++ .../postgres/notificationstore/scheduler.go | 262 +++ .../postgres/notificationstore/store.go | 126 ++ .../postgres/notificationstore/store_test.go | 567 +++++ .../adapters/postgres/routepublisher/store.go | 86 + .../internal/adapters/redis/client.go | 27 +- .../adapters/redisstate/acceptance_store.go | 140 -- .../redisstate/acceptance_store_test.go | 311 --- .../adapters/redisstate/atomic_writer.go | 157 -- .../internal/adapters/redisstate/codecs.go | 516 +---- .../internal/adapters/redisstate/errors.go | 14 +- .../internal/adapters/redisstate/keyspace.go | 82 +- 
.../adapters/redisstate/lease_store.go | 108 + .../redisstate/malformed_intent_store.go | 59 - .../adapters/redisstate/route_state_store.go | 657 ------ .../redisstate/route_state_store_test.go | 465 ---- notification/internal/app/runtime.go | 100 +- .../internal/app/runtime_smoke_test.go | 72 - notification/internal/app/runtime_test.go | 581 ----- notification/internal/config/config.go | 494 ++--- notification/internal/config/config_test.go | 183 +- notification/internal/config/env.go | 262 +++ .../internal/service/routestate/types.go | 254 +++ .../internal/worker/email_publisher.go | 41 +- .../internal/worker/email_publisher_test.go | 232 -- .../internal/worker/intent_consumer_test.go | 422 ---- .../internal/worker/push_publisher.go | 46 +- .../internal/worker/push_publisher_test.go | 318 --- notification/internal/worker/sqlretention.go | 161 ++ .../internal/worker/stream_publisher.go | 18 + notification/redis_state_contract_test.go | 69 +- pkg/geoip/go.mod | 2 +- pkg/geoip/go.sum | 3 +- pkg/notificationintent/go.mod | 2 +- pkg/notificationintent/go.sum | 2 +- pkg/postgres/config.go | 196 ++ pkg/postgres/config_test.go | 198 ++ pkg/postgres/go.mod | 72 + pkg/postgres/go.sum | 185 ++ pkg/postgres/health.go | 30 + pkg/postgres/migrate.go | 53 + pkg/postgres/open.go | 136 ++ pkg/postgres/otel.go | 38 + pkg/postgres/postgres_test.go | 115 + .../testdata/migrations/00001_smoke.sql | 8 + pkg/redisconn/client.go | 43 + pkg/redisconn/config.go | 187 ++ pkg/redisconn/go.mod | 28 + pkg/redisconn/go.sum | 47 + pkg/redisconn/health.go | 31 + pkg/redisconn/otel.go | 75 + pkg/redisconn/redisconn_test.go | 258 +++ pkg/util/go.mod | 2 +- pkg/util/go.sum | 2 +- user/Makefile | 10 + user/README.md | 56 + user/cmd/jetgen/main.go | 236 ++ user/docs/README.md | 7 + user/docs/postgres-migration.md | 206 ++ user/docs/runbook.md | 40 +- user/docs/runtime.md | 88 +- user/go.mod | 73 +- user/go.sum | 299 ++- .../postgres/jet/user/model/accounts.go | 25 + 
.../postgres/jet/user/model/blocked_emails.go | 21 + .../jet/user/model/entitlement_records.go | 29 + .../jet/user/model/entitlement_snapshots.go | 25 + .../jet/user/model/goose_db_version.go | 19 + .../postgres/jet/user/model/limit_active.go | 15 + .../postgres/jet/user/model/limit_records.go | 28 + .../jet/user/model/sanction_active.go | 14 + .../jet/user/model/sanction_records.go | 28 + .../postgres/jet/user/table/accounts.go | 105 + .../postgres/jet/user/table/blocked_emails.go | 93 + .../jet/user/table/entitlement_records.go | 117 + .../jet/user/table/entitlement_snapshots.go | 105 + .../jet/user/table/goose_db_version.go | 87 + .../postgres/jet/user/table/limit_active.go | 87 + .../postgres/jet/user/table/limit_records.go | 114 + .../jet/user/table/sanction_active.go | 84 + .../jet/user/table/sanction_records.go | 114 + .../jet/user/table/table_use_schema.go | 22 + .../postgres/migrations/00001_init.sql | 169 ++ .../postgres/migrations/migrations.go | 19 + .../adapters/postgres/userstore/accounts.go | 375 ++++ .../postgres/userstore/auth_directory.go | 280 +++ .../postgres/userstore/blocked_emails.go | 175 ++ .../postgres/userstore/entitlement_store.go | 729 +++++++ .../postgres/userstore/harness_test.go | 203 ++ .../adapters/postgres/userstore/helpers.go | 149 ++ .../adapters/postgres/userstore/list_store.go | 160 ++ .../adapters/postgres/userstore/page_token.go | 198 ++ .../postgres/userstore/policy_store.go | 870 ++++++++ .../adapters/postgres/userstore/store.go | 138 ++ .../adapters/postgres/userstore/store_test.go | 656 ++++++ .../adapters/redis/domainevents/publisher.go | 57 +- .../redis/domainevents/publisher_test.go | 7 +- .../redis/lifecycleevents/publisher.go | 54 +- .../redis/lifecycleevents/publisher_test.go | 16 +- .../adapters/redis/userstore/admin_index.go | 227 -- .../redis/userstore/admin_index_test.go | 58 - .../redis/userstore/admin_list_test.go | 445 ---- .../redis/userstore/entitlement_store.go | 752 ------- 
.../adapters/redis/userstore/list_store.go | 137 -- .../adapters/redis/userstore/policy_store.go | 445 ---- .../adapters/redis/userstore/store.go | 1908 ----------------- .../adapters/redis/userstore/store_test.go | 879 -------- user/internal/adapters/redisstate/keyspace.go | 193 -- .../adapters/redisstate/keyspace_test.go | 57 - .../adapters/redisstate/page_token.go | 191 -- .../adapters/redisstate/page_token_test.go | 70 - user/internal/app/runtime.go | 124 +- user/internal/config/config.go | 169 +- user/internal/config/config_test.go | 153 +- .../service/lobbyeligibility/service_test.go | 89 +- user/runtime_contract_test.go | 82 +- 365 files changed, 29223 insertions(+), 24049 deletions(-) create mode 100644 PG_PLAN.md create mode 100644 authsession/docs/redis-config.md create mode 100644 authsession/internal/adapters/redis/client.go delete mode 100644 authsession/user_service_real_runtime_compatibility_test.go create mode 100644 gateway/docs/redis-config.md create mode 100644 gateway/internal/redisclient/client.go create mode 100644 integration/internal/harness/authsessionservice.go create mode 100644 integration/internal/harness/gatewayservice.go create mode 100644 integration/internal/harness/lobbyservice.go create mode 100644 integration/internal/harness/mailservice.go create mode 100644 integration/internal/harness/notificationservice.go create mode 100644 integration/internal/harness/postgres_container.go create mode 100644 integration/internal/harness/postgres_container_test.go create mode 100644 integration/internal/harness/userservice.go create mode 100644 lobby/Makefile create mode 100644 lobby/cmd/jetgen/main.go create mode 100644 lobby/docs/postgres-migration.md create mode 100644 lobby/internal/adapters/postgres/applicationstore/store.go create mode 100644 lobby/internal/adapters/postgres/applicationstore/store_test.go create mode 100644 lobby/internal/adapters/postgres/gamestore/codecs.go create mode 100644 
lobby/internal/adapters/postgres/gamestore/store.go create mode 100644 lobby/internal/adapters/postgres/gamestore/store_test.go create mode 100644 lobby/internal/adapters/postgres/internal/pgtest/pgtest.go create mode 100644 lobby/internal/adapters/postgres/internal/sqlx/sqlx.go create mode 100644 lobby/internal/adapters/postgres/invitestore/store.go create mode 100644 lobby/internal/adapters/postgres/invitestore/store_test.go create mode 100644 lobby/internal/adapters/postgres/jet/lobby/model/applications.go create mode 100644 lobby/internal/adapters/postgres/jet/lobby/model/games.go create mode 100644 lobby/internal/adapters/postgres/jet/lobby/model/goose_db_version.go create mode 100644 lobby/internal/adapters/postgres/jet/lobby/model/invites.go create mode 100644 lobby/internal/adapters/postgres/jet/lobby/model/memberships.go create mode 100644 lobby/internal/adapters/postgres/jet/lobby/model/race_names.go create mode 100644 lobby/internal/adapters/postgres/jet/lobby/table/applications.go create mode 100644 lobby/internal/adapters/postgres/jet/lobby/table/games.go create mode 100644 lobby/internal/adapters/postgres/jet/lobby/table/goose_db_version.go create mode 100644 lobby/internal/adapters/postgres/jet/lobby/table/invites.go create mode 100644 lobby/internal/adapters/postgres/jet/lobby/table/memberships.go create mode 100644 lobby/internal/adapters/postgres/jet/lobby/table/race_names.go create mode 100644 lobby/internal/adapters/postgres/jet/lobby/table/table_use_schema.go create mode 100644 lobby/internal/adapters/postgres/membershipstore/store.go create mode 100644 lobby/internal/adapters/postgres/membershipstore/store_test.go create mode 100644 lobby/internal/adapters/postgres/migrations/00001_init.sql create mode 100644 lobby/internal/adapters/postgres/migrations/migrations.go create mode 100644 lobby/internal/adapters/postgres/racenamedir/directory.go create mode 100644 lobby/internal/adapters/postgres/racenamedir/directory_test.go delete mode 100644 
lobby/internal/adapters/redisstate/applicationstore.go delete mode 100644 lobby/internal/adapters/redisstate/applicationstore_test.go delete mode 100644 lobby/internal/adapters/redisstate/codecs.go delete mode 100644 lobby/internal/adapters/redisstate/codecs_application.go delete mode 100644 lobby/internal/adapters/redisstate/codecs_invite.go delete mode 100644 lobby/internal/adapters/redisstate/codecs_membership.go delete mode 100644 lobby/internal/adapters/redisstate/codecs_racename.go delete mode 100644 lobby/internal/adapters/redisstate/gamestore.go delete mode 100644 lobby/internal/adapters/redisstate/gamestore_test.go delete mode 100644 lobby/internal/adapters/redisstate/invitestore.go delete mode 100644 lobby/internal/adapters/redisstate/invitestore_test.go create mode 100644 lobby/internal/adapters/redisstate/keyspace_test_helpers_test.go delete mode 100644 lobby/internal/adapters/redisstate/membershipstore.go delete mode 100644 lobby/internal/adapters/redisstate/membershipstore_test.go delete mode 100644 lobby/internal/adapters/redisstate/racenamedir.go delete mode 100644 lobby/internal/adapters/redisstate/racenamedir_lua.go delete mode 100644 lobby/internal/adapters/redisstate/racenamedir_test.go delete mode 100644 lobby/internal/app/runtime_smoke_test.go delete mode 100644 lobby/internal/app/runtime_test.go create mode 100644 mail/Makefile create mode 100644 mail/cmd/jetgen/main.go create mode 100644 mail/docs/postgres-migration.md create mode 100644 mail/internal/adapters/postgres/jet/mail/model/attempts.go create mode 100644 mail/internal/adapters/postgres/jet/mail/model/dead_letters.go create mode 100644 mail/internal/adapters/postgres/jet/mail/model/deliveries.go create mode 100644 mail/internal/adapters/postgres/jet/mail/model/delivery_payloads.go create mode 100644 mail/internal/adapters/postgres/jet/mail/model/delivery_recipients.go create mode 100644 mail/internal/adapters/postgres/jet/mail/model/goose_db_version.go create mode 100644 
mail/internal/adapters/postgres/jet/mail/model/malformed_commands.go create mode 100644 mail/internal/adapters/postgres/jet/mail/table/attempts.go create mode 100644 mail/internal/adapters/postgres/jet/mail/table/dead_letters.go create mode 100644 mail/internal/adapters/postgres/jet/mail/table/deliveries.go create mode 100644 mail/internal/adapters/postgres/jet/mail/table/delivery_payloads.go create mode 100644 mail/internal/adapters/postgres/jet/mail/table/delivery_recipients.go create mode 100644 mail/internal/adapters/postgres/jet/mail/table/goose_db_version.go create mode 100644 mail/internal/adapters/postgres/jet/mail/table/malformed_commands.go create mode 100644 mail/internal/adapters/postgres/jet/mail/table/table_use_schema.go create mode 100644 mail/internal/adapters/postgres/mailstore/attempt_execution.go create mode 100644 mail/internal/adapters/postgres/mailstore/auth_acceptance.go create mode 100644 mail/internal/adapters/postgres/mailstore/codecs.go create mode 100644 mail/internal/adapters/postgres/mailstore/deliveries.go create mode 100644 mail/internal/adapters/postgres/mailstore/generic_acceptance.go create mode 100644 mail/internal/adapters/postgres/mailstore/harness_test.go create mode 100644 mail/internal/adapters/postgres/mailstore/helpers.go create mode 100644 mail/internal/adapters/postgres/mailstore/malformed_command.go create mode 100644 mail/internal/adapters/postgres/mailstore/operator.go create mode 100644 mail/internal/adapters/postgres/mailstore/render.go create mode 100644 mail/internal/adapters/postgres/mailstore/store.go create mode 100644 mail/internal/adapters/postgres/mailstore/store_test.go create mode 100644 mail/internal/adapters/postgres/migrations/00001_init.sql create mode 100644 mail/internal/adapters/postgres/migrations/migrations.go delete mode 100644 mail/internal/adapters/redisstate/atomic_writer.go delete mode 100644 mail/internal/adapters/redisstate/atomic_writer_test.go delete mode 100644 
mail/internal/adapters/redisstate/attempt_execution_store.go delete mode 100644 mail/internal/adapters/redisstate/attempt_execution_store_test.go delete mode 100644 mail/internal/adapters/redisstate/auth_acceptance_store.go delete mode 100644 mail/internal/adapters/redisstate/auth_acceptance_store_test.go delete mode 100644 mail/internal/adapters/redisstate/codecs.go delete mode 100644 mail/internal/adapters/redisstate/codecs_test.go delete mode 100644 mail/internal/adapters/redisstate/errors.go delete mode 100644 mail/internal/adapters/redisstate/fixtures_test.go delete mode 100644 mail/internal/adapters/redisstate/generic_acceptance_store.go delete mode 100644 mail/internal/adapters/redisstate/generic_acceptance_store_test.go delete mode 100644 mail/internal/adapters/redisstate/index_cleaner.go delete mode 100644 mail/internal/adapters/redisstate/index_cleaner_test.go delete mode 100644 mail/internal/adapters/redisstate/malformed_command_store.go create mode 100644 mail/internal/adapters/redisstate/offset_codec.go delete mode 100644 mail/internal/adapters/redisstate/operator_store.go delete mode 100644 mail/internal/adapters/redisstate/operator_store_test.go delete mode 100644 mail/internal/adapters/redisstate/render_store.go create mode 100644 mail/internal/app/runtime_pgharness_test.go delete mode 100644 mail/internal/worker/attempt_worker_test.go delete mode 100644 mail/internal/worker/cleanup_worker.go delete mode 100644 mail/internal/worker/command_consumer_test.go create mode 100644 mail/internal/worker/sqlretention.go create mode 100644 notification/Makefile create mode 100644 notification/cmd/jetgen/main.go create mode 100644 notification/docs/postgres-migration.md create mode 100644 notification/internal/adapters/postgres/jet/notification/model/dead_letters.go create mode 100644 notification/internal/adapters/postgres/jet/notification/model/goose_db_version.go create mode 100644 
notification/internal/adapters/postgres/jet/notification/model/malformed_intents.go create mode 100644 notification/internal/adapters/postgres/jet/notification/model/records.go create mode 100644 notification/internal/adapters/postgres/jet/notification/model/routes.go create mode 100644 notification/internal/adapters/postgres/jet/notification/table/dead_letters.go create mode 100644 notification/internal/adapters/postgres/jet/notification/table/goose_db_version.go create mode 100644 notification/internal/adapters/postgres/jet/notification/table/malformed_intents.go create mode 100644 notification/internal/adapters/postgres/jet/notification/table/records.go create mode 100644 notification/internal/adapters/postgres/jet/notification/table/routes.go create mode 100644 notification/internal/adapters/postgres/jet/notification/table/table_use_schema.go create mode 100644 notification/internal/adapters/postgres/migrations/00001_init.sql create mode 100644 notification/internal/adapters/postgres/migrations/migrations.go create mode 100644 notification/internal/adapters/postgres/notificationstore/acceptance.go create mode 100644 notification/internal/adapters/postgres/notificationstore/codecs.go create mode 100644 notification/internal/adapters/postgres/notificationstore/dead_letters.go create mode 100644 notification/internal/adapters/postgres/notificationstore/harness_test.go create mode 100644 notification/internal/adapters/postgres/notificationstore/helpers.go create mode 100644 notification/internal/adapters/postgres/notificationstore/malformed_intents.go create mode 100644 notification/internal/adapters/postgres/notificationstore/records.go create mode 100644 notification/internal/adapters/postgres/notificationstore/retention.go create mode 100644 notification/internal/adapters/postgres/notificationstore/routes.go create mode 100644 notification/internal/adapters/postgres/notificationstore/scheduler.go create mode 100644 
notification/internal/adapters/postgres/notificationstore/store.go create mode 100644 notification/internal/adapters/postgres/notificationstore/store_test.go create mode 100644 notification/internal/adapters/postgres/routepublisher/store.go delete mode 100644 notification/internal/adapters/redisstate/acceptance_store.go delete mode 100644 notification/internal/adapters/redisstate/acceptance_store_test.go delete mode 100644 notification/internal/adapters/redisstate/atomic_writer.go create mode 100644 notification/internal/adapters/redisstate/lease_store.go delete mode 100644 notification/internal/adapters/redisstate/malformed_intent_store.go delete mode 100644 notification/internal/adapters/redisstate/route_state_store.go delete mode 100644 notification/internal/adapters/redisstate/route_state_store_test.go delete mode 100644 notification/internal/app/runtime_smoke_test.go delete mode 100644 notification/internal/app/runtime_test.go create mode 100644 notification/internal/config/env.go create mode 100644 notification/internal/service/routestate/types.go delete mode 100644 notification/internal/worker/email_publisher_test.go delete mode 100644 notification/internal/worker/intent_consumer_test.go delete mode 100644 notification/internal/worker/push_publisher_test.go create mode 100644 notification/internal/worker/sqlretention.go create mode 100644 notification/internal/worker/stream_publisher.go create mode 100644 pkg/postgres/config.go create mode 100644 pkg/postgres/config_test.go create mode 100644 pkg/postgres/go.mod create mode 100644 pkg/postgres/go.sum create mode 100644 pkg/postgres/health.go create mode 100644 pkg/postgres/migrate.go create mode 100644 pkg/postgres/open.go create mode 100644 pkg/postgres/otel.go create mode 100644 pkg/postgres/postgres_test.go create mode 100644 pkg/postgres/testdata/migrations/00001_smoke.sql create mode 100644 pkg/redisconn/client.go create mode 100644 pkg/redisconn/config.go create mode 100644 pkg/redisconn/go.mod create 
mode 100644 pkg/redisconn/go.sum create mode 100644 pkg/redisconn/health.go create mode 100644 pkg/redisconn/otel.go create mode 100644 pkg/redisconn/redisconn_test.go create mode 100644 user/Makefile create mode 100644 user/cmd/jetgen/main.go create mode 100644 user/docs/postgres-migration.md create mode 100644 user/internal/adapters/postgres/jet/user/model/accounts.go create mode 100644 user/internal/adapters/postgres/jet/user/model/blocked_emails.go create mode 100644 user/internal/adapters/postgres/jet/user/model/entitlement_records.go create mode 100644 user/internal/adapters/postgres/jet/user/model/entitlement_snapshots.go create mode 100644 user/internal/adapters/postgres/jet/user/model/goose_db_version.go create mode 100644 user/internal/adapters/postgres/jet/user/model/limit_active.go create mode 100644 user/internal/adapters/postgres/jet/user/model/limit_records.go create mode 100644 user/internal/adapters/postgres/jet/user/model/sanction_active.go create mode 100644 user/internal/adapters/postgres/jet/user/model/sanction_records.go create mode 100644 user/internal/adapters/postgres/jet/user/table/accounts.go create mode 100644 user/internal/adapters/postgres/jet/user/table/blocked_emails.go create mode 100644 user/internal/adapters/postgres/jet/user/table/entitlement_records.go create mode 100644 user/internal/adapters/postgres/jet/user/table/entitlement_snapshots.go create mode 100644 user/internal/adapters/postgres/jet/user/table/goose_db_version.go create mode 100644 user/internal/adapters/postgres/jet/user/table/limit_active.go create mode 100644 user/internal/adapters/postgres/jet/user/table/limit_records.go create mode 100644 user/internal/adapters/postgres/jet/user/table/sanction_active.go create mode 100644 user/internal/adapters/postgres/jet/user/table/sanction_records.go create mode 100644 user/internal/adapters/postgres/jet/user/table/table_use_schema.go create mode 100644 user/internal/adapters/postgres/migrations/00001_init.sql create mode 
100644 user/internal/adapters/postgres/migrations/migrations.go create mode 100644 user/internal/adapters/postgres/userstore/accounts.go create mode 100644 user/internal/adapters/postgres/userstore/auth_directory.go create mode 100644 user/internal/adapters/postgres/userstore/blocked_emails.go create mode 100644 user/internal/adapters/postgres/userstore/entitlement_store.go create mode 100644 user/internal/adapters/postgres/userstore/harness_test.go create mode 100644 user/internal/adapters/postgres/userstore/helpers.go create mode 100644 user/internal/adapters/postgres/userstore/list_store.go create mode 100644 user/internal/adapters/postgres/userstore/page_token.go create mode 100644 user/internal/adapters/postgres/userstore/policy_store.go create mode 100644 user/internal/adapters/postgres/userstore/store.go create mode 100644 user/internal/adapters/postgres/userstore/store_test.go delete mode 100644 user/internal/adapters/redis/userstore/admin_index.go delete mode 100644 user/internal/adapters/redis/userstore/admin_index_test.go delete mode 100644 user/internal/adapters/redis/userstore/admin_list_test.go delete mode 100644 user/internal/adapters/redis/userstore/entitlement_store.go delete mode 100644 user/internal/adapters/redis/userstore/list_store.go delete mode 100644 user/internal/adapters/redis/userstore/policy_store.go delete mode 100644 user/internal/adapters/redis/userstore/store.go delete mode 100644 user/internal/adapters/redis/userstore/store_test.go delete mode 100644 user/internal/adapters/redisstate/keyspace.go delete mode 100644 user/internal/adapters/redisstate/keyspace_test.go delete mode 100644 user/internal/adapters/redisstate/page_token.go delete mode 100644 user/internal/adapters/redisstate/page_token_test.go diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md index fe739e2..c75b162 100644 --- a/ARCHITECTURE.md +++ b/ARCHITECTURE.md @@ -37,7 +37,7 @@ Core product properties: * in-place upgrade of a running game is allowed only as a patch update 
within the same semver major/minor line; * player commands are turn-bound and are accepted only before the next scheduled turn generation cutoff. -The current v1 platform uses Redis as the main data store and Redis Streams as the internal event bus. +The platform stores durable business state in PostgreSQL (one shared database, schema per service) and uses Redis with Redis Streams for ephemeral state, caches, and the internal event bus. The backend split, library stack, and staged migration plan live in [`PG_PLAN.md`](PG_PLAN.md) and the [Persistence Backends](#persistence-backends) section below. ## Main Principles @@ -124,7 +124,8 @@ flowchart LR Mail["Mail Service"] Geo["Geo Profile Service"] Billing["Billing Service\nfuture"] - Redis["Redis\nKV + Streams"] + Redis["Redis\nCache, Streams, Leases"] + Postgres["PostgreSQL\nDurable Business State"] Telemetry["Telemetry"] Client --> Gateway @@ -162,6 +163,13 @@ flowchart LR Notify --> Redis Runtime --> Redis + + Mail --> Redis + User --> Postgres + Mail --> Postgres + Notify --> Postgres + Lobby --> Postgres + Billing --> User Telemetry --- Gateway Telemetry --- Auth @@ -332,8 +340,10 @@ For auth callers, a successful result means the request was durably accepted into the mail-delivery pipeline or intentionally suppressed; it does not require that the external SMTP exchange already completed before the response is returned. -Stable service-local delivery rules, retry semantics, and Redis-backed -processing details belong in [`mail/README.md`](mail/README.md), not in the +Stable service-local delivery rules, retry semantics, and storage details +(PostgreSQL for the durable delivery record, attempt history, dead letters, +and audit; Redis for the inbound `mail:delivery_commands` stream and its +consumer offset) belong in [`mail/README.md`](mail/README.md), not in the root architecture document. ## 5. [Geo Profile Service](geoprofile/README.md) @@ -490,7 +500,7 @@ service-layer logic. 
RND owns three levels of state per name: -- **registered** — platform-unique permanent names owned by one regular user. +* **registered** — platform-unique permanent names owned by one regular user. A registered name cannot be transferred, released, or renamed; the only path back to availability is `permanent_block` or `DeleteUser` on the owning account. The number of registered names a user can hold is bounded by the @@ -498,13 +508,13 @@ RND owns three levels of state per name: snapshot): `free=1`, `paid_monthly=2`, `paid_yearly=6`, `paid_lifetime=unlimited`. Tariff downgrade never revokes existing registrations; it only constrains new ones. -- **reservation** — per-game binding created when a participant joins a game +* **reservation** — per-game binding created when a participant joins a game through application approval or invite redeem. The reservation key is `(game_id, canonical_key)`. One user may hold the same name simultaneously across multiple active games. A reservation survives until the game finishes, then either becomes a `pending_registration` (see below) or is released. -- **pending_registration** — a reservation that survived a capable finish and +* **pending_registration** — a reservation that survived a capable finish and is now waiting up to 30 days for the owner to upgrade it into a registered name via `lobby.race_name.register`. Expiration releases the binding. @@ -807,25 +817,143 @@ The main example is `Lobby -> Game Master`: * synchronous for critical registration/update after successful start; * asynchronous for secondary propagation and denormalized status fan-out. -## Redis as Data and Event Infrastructure +## Persistence Backends -Redis is the initial shared infrastructure for: +The platform splits durable state across two backends. 
-* main persistent data of services where no SQL backend is yet introduced; -* gateway session cache backing data; -* replay reservation store for gateway; -* session lifecycle projection; -* internal event bus using Redis Streams; -* notification-intent ingress through `notification:intents`; -* notification fan-out; -* runtime job completion events; -* lobby/game-master propagation events; -* geo auxiliary events. +PostgreSQL is the source of truth for table-shaped business state: -Redis Streams are therefore the platform event bus in v1. +* user identity, profile settings, tariffs/entitlements, sanctions, limits, + and the blocked-email registry; +* mail deliveries, attempt history, dead letters, payloads, and + malformed-command audit; +* notification records, route materialisations, dead letters, and + malformed-intent audit; +* lobby games, applications, invites, memberships, and the race-name + registry (registered/reservation/pending tiers); +* idempotency records, expressed as `UNIQUE` constraints on the durable + table — not as a separate kv; +* retry scheduling state, expressed as a `next_attempt_at` column on the + durable table and worked off via `SELECT ... FOR UPDATE SKIP LOCKED`. -This is an accepted trade-off for simpler early infrastructure. -Service boundaries must still stay storage-agnostic where future SQL migration is expected, especially in `Auth / Session Service`. +Redis is the source of truth for ephemeral and runtime-coordination state: + +* the platform event bus implemented as Redis Streams (`user:domain_events`, + `user:lifecycle_events`, `gm:lobby_events`, `runtime:job_results`, + `notification:intents`, `gateway:client-events`, `mail:delivery_commands`); +* stream consumer offsets; +* gateway session cache, replay reservations, rate-limit counters, and + short-lived runtime locks/leases (e.g. 
notification `route_leases`); +* `Auth / Session Service` challenges and active session tokens, which are + TTL-bounded and where loss is recoverable by re-authentication; +* lobby per-game runtime aggregates that are deleted at game finish + (`game_turn_stats`, `gap_activated_at`, capability evaluation marker). + +### Database topology + +* Single PostgreSQL database `galaxy`. +* Schema per service: `user`, `mail`, `notification`, `lobby`. Reserved for + future use: `geoprofile`. Not allocated unless needed: `gateway`, + `authsession`. +* Each service connects with its own PostgreSQL role whose grants are + restricted to its own schema (defense-in-depth). +* Authentication is username + password only. `sslmode=disable`. No client + certificates and no SCRAM channel binding. +* Each service connects to one primary plus zero-or-more read-only + replicas. Only the primary is used in this iteration; the replica pool + is wired but receives no traffic. Future read-routing is a non-breaking + change. + +### Redis topology + +* Each service connects to one master plus zero-or-more replicas. +* All connections require a password. `USERNAME`/ACL is not used. TLS is + off. +* Only the master is used in this iteration; the replica list is wired but + unused. Failover/read routing is added later without a config break. +* The legacy env vars `*_REDIS_TLS_ENABLED` and `*_REDIS_USERNAME` are + removed without a backward-compat shim. + +### Library stack and migration discipline + +* Driver: `github.com/jackc/pgx/v5`, exposed as `*sql.DB` via + `github.com/jackc/pgx/v5/stdlib` so it is consumable by query builders + written against `database/sql`. +* Query layer: `github.com/go-jet/jet/v2` (PostgreSQL dialect). Generated + code lives under each service `internal/adapters/postgres/jet/`, + regenerated by a per-service `make jet` target (testcontainers + goose + + jet) and committed to the repo so consumers don't need Docker just to + build. 
+* Migrations: `github.com/pressly/goose/v3` library API. Migration files + are embedded via `//go:embed *.sql`, applied at service startup before + any listener opens; the service exits non-zero on failure. Files are + forward-only, sequence-numbered, and use the standard `-- +goose Up` / + `-- +goose Down` markers. +* Single-init policy during pre-launch development: each PG-backed + service ships exactly one migration file, `00001_init.sql`, that + represents the full current schema. New tables, columns, and indexes + are added by editing that file directly rather than by appending + `00002_*.sql`, `00003_*.sql`, etc. The trade-off is intentional — + schema clarity beats migration-history granularity while no production + database exists. Once the platform reaches its first production + deploy, future schema evolution switches to additive sequence-numbered + migrations. +* Test infrastructure: `github.com/testcontainers/testcontainers-go` plus + the `modules/postgres` submodule for unit tests and for `make jet`. + +Per-service decision records that capture schema and adapter choices live +at `galaxy//docs/postgres-migration.md`. + +### Timestamp handling + +Every time-valued column in every Galaxy schema is `timestamptz`. The +adapter layer is responsible for ensuring that all `time.Time` values +crossing the SQL boundary carry `time.UTC` as their location. + +* **Writes.** Every `time.Time` parameter bound through `database/sql` + (`ExecContext`, `QueryContext`, `QueryRowContext`) is normalised with + `.UTC()` at the binding site. Optional `*time.Time` columns are bound + through a shared helper (`nullableTime` or equivalent per adapter) that + returns `value.UTC()` when non-nil and SQL `NULL` otherwise. Helper + bindings of `cutoff`, `now`, etc. (retention, schedulers) follow the + same rule even when the input was already produced via + `clock.Now().UTC()` — defensive `.UTC()` calls are intentional and + cheap. 
+* **Reads.** Every `time.Time` scanned out of PostgreSQL is re-wrapped + with `.UTC()` (directly or via a small helper that mirrors + `nullableTime` for the read path) before it leaves the adapter. The + domain layer therefore never observes a `time.Time` whose location is + anything other than `time.UTC`. +* **Why.** PostgreSQL stores `timestamptz` as UTC at rest, but the Go + driver returns scanned values in `time.Local`. Mixing locations across + the boundary produces inequalities in tests, drift in JSON output, and + comparison bugs against pointer fields. The defensive `.UTC()` rule on + both sides removes that class of bug entirely. + +### Configuration + +For each service `` ∈ { `USERSERVICE`, `MAIL`, `NOTIFICATION`, +`LOBBY`, `GATEWAY`, `AUTHSESSION` }, the Redis connection accepts: + +* `_REDIS_MASTER_ADDR` (required) +* `_REDIS_REPLICA_ADDRS` (optional, comma-separated) +* `_REDIS_PASSWORD` (required) +* `_REDIS_DB`, `_REDIS_OPERATION_TIMEOUT` + +For PG-backed services (`USERSERVICE`, `MAIL`, `NOTIFICATION`, `LOBBY`) +the Postgres connection accepts: + +* `_POSTGRES_PRIMARY_DSN` (required; + `postgres://:@:5432/galaxy?search_path=&sslmode=disable`) +* `_POSTGRES_REPLICA_DSNS` (optional, comma-separated) +* `_POSTGRES_OPERATION_TIMEOUT`, `_POSTGRES_MAX_OPEN_CONNS`, + `_POSTGRES_MAX_IDLE_CONNS`, `_POSTGRES_CONN_MAX_LIFETIME` + +Stream- and key-shape env vars (`*_REDIS_DOMAIN_EVENTS_STREAM`, +`*_REDIS_LIFECYCLE_EVENTS_STREAM`, `*_REDIS_KEYSPACE_PREFIX`, +`MAIL_REDIS_COMMAND_STREAM`, `NOTIFICATION_INTENTS_STREAM`, etc.) keep +their current names and semantics — they describe stream/key shapes, not +connection topology. ## Main End-to-End Flows @@ -1122,7 +1250,6 @@ The architecture intentionally does not try to solve all future concerns now. 
Current non-goals: -* a separate global SQL storage layer in v1; * a separate policy engine; * automatic billing integration in v1; * automatic match balancing in v1; diff --git a/PG_PLAN.md b/PG_PLAN.md new file mode 100644 index 0000000..d5fac13 --- /dev/null +++ b/PG_PLAN.md @@ -0,0 +1,920 @@ +# PostgreSQL Migration Plan + +This plan has been already implemented and stays here for historical reasons. + +It should NOT be threated as source of truth for service functionality. + +## Context + +The Galaxy Game project currently uses Redis as the only persistence backend +across all implemented services (`user`, `mail`, `notification`, `lobby`, +`gateway`, `authsession`). Redis serves both kinds of state: ephemeral and +runtime-coordination state (where it shines — Streams, caches, replay keys, +runtime queues, session caches, leases) and table-shaped business state where +it is a poor fit (durable user accounts, entitlements/sanctions, mail audit +records, notification routes/idempotency, lobby memberships and invites). +Replication and standby for Redis are not configured anywhere. There is no +SQL/migration tooling in the repo at all. + +We migrate to a Redis + PostgreSQL split where each backend owns the data it +serves best. PostgreSQL becomes the source of truth for table-shaped business +state, gives us ACID transactions, mature physical/logical replication, and +backup/restore via `pg_dump` and WAL archiving. Redis remains the source of +truth for streams, pub/sub, caches, leases, replay keys, rate limits, session +caches, runtime queues, and stream consumer offsets. + +The plan migrates only services already implemented and explicitly excludes +`galaxy/game`. It targets steady-state architecture rules first (one +authoritative document, `ARCHITECTURE.md`), then walks each service end to end +— code, tests, service-local README/docs, and integration suites — so that no +intermediate commit leaves docs and code in conflict. 
+ +## Confirmed decisions (with project owner) + +1. **Documentation strategy**: `ARCHITECTURE.md` is updated as the very first + stage with the architecture-wide rules. Each per-service README and per- + service `docs/` change inside that service's own stage, paired with code + and tests. This keeps `ARCHITECTURE.md` ≡ policy, README ≡ current state, + and ensures any commit can be checked out without code/doc divergence. +2. **Service scope**: full migration of durable storage to PostgreSQL for + `user`, `mail`, `notification`, `lobby`. Only Redis configuration refactor + (master/replica + mandatory password, drop `TLS_ENABLED` / `USERNAME`) for + `gateway` and `authsession` — these services intentionally stay Redis- + only. `geoprofile` has no implementation; its `PLAN.md` and `README.md` + absorb the new persistence rules so future implementation follows them. +3. **Idempotency and retry-schedule placement**: idempotency records and + retry schedule queues live in PostgreSQL on the same table as the durable + record they protect (`(producer, idempotency_key)` UNIQUE on `records`, + `next_attempt_at` column on `deliveries` / `routes`). One source of truth, + no dual-write hazard between PG and Redis ZSETs. +4. **Stack**: `github.com/jackc/pgx/v5` driver, exposed as `*sql.DB` via + `github.com/jackc/pgx/v5/stdlib`. `github.com/go-jet/jet/v2` for + type-safe query building + code generation, generated against a + testcontainers PostgreSQL instance with migrations applied (Makefile + target per service). `github.com/pressly/goose/v3` library API for + embedded migrations applied at service startup; the `goose` CLI may be + used for local development and rollback investigations but is not in the + service binary path. +5. **Code**: all postgres queries must use pre-generated code with `jet` and + appropriate builders rather than raw SQL queries, unless this usage cannot + achive the goal of businness-scenario due to lack of `go-jet` functionality. 
+ +## Architectural rules (target steady-state) + +These rules land in `ARCHITECTURE.md` in Stage 0 and govern every subsequent +service stage. + +### Backend assignment + +PostgreSQL is the source of truth for: + +- Domain entities with table-shaped business state (`accounts`, + `entitlement_records`, `sanction_records`, `limit_records`, + `blocked_emails`, `deliveries`, `attempts`, `dead_letters`, + `malformed_commands`, `notification_records`, `notification_routes`, + `games`, `applications`, `invites`, `memberships`, `race_names`). +- Idempotency records (UNIQUE constraint on the durable table, not a + separate kv). +- Retry scheduling state (`next_attempt_at` column + supporting index on the + durable table). +- Audit history records that must outlive any Redis snapshot. + +Redis is the source of truth for: + +- Redis Streams used as the event bus (`user:domain_events`, + `user:lifecycle_events`, `gm:lobby_events`, `runtime:job_results`, + `notification:intents`, `gateway:client-events`, `mail:delivery_commands`). +- Stream consumer offsets (small runtime coordination state, rebuildable). +- Caches and projections (gateway session cache). +- Replay reservation keys. +- Rate limit counters. +- Runtime coordination locks/leases (e.g. notification `route_leases`). +- Authentication challenge state and active session tokens (TTL-bounded; loss + is recoverable by re-authentication). +- Ephemeral per-game runtime aggregates that are deleted at game finish + (lobby `game_turn_stats`, `gap_activated_at`, capability evaluation + marker). + +### Database topology + +- Single PostgreSQL database `galaxy`. +- Schema-per-service: `user`, `mail`, `notification`, `lobby`. Reserved for + later: `geoprofile`. Not allocated unless needed: `gateway`, `authsession`. +- Per-service PostgreSQL role with grants restricted to its own schema + (defense-in-depth, simple to express in the initial migration). +- Authentication: username + password only. `sslmode=disable`. 
No client + certificates, no SCRAM channel binding, no custom auth plugins. +- Each service connects to one primary plus zero-or-more read-only replicas. + In this iteration only the primary is used; the replica pool is wired but + receives no traffic. Future read-routing is non-breaking. + +### Redis topology + +- Each service connects to one master Redis plus zero-or-more replica Redis + hosts. +- All connections use a mandatory password. `USERNAME`/ACL not used. TLS off. +- In this iteration only the master is used; the replica list is wired but + unused — non-breaking switch later when the app starts routing reads. +- Existing env vars `*_REDIS_TLS_ENABLED`, `*_REDIS_USERNAME` are removed + (hard rename; no backward-compat shim — fresh project, no production + deploys to migrate). + +### Library stack + +- Driver: `github.com/jackc/pgx/v5` (modern, actively maintained), exposed + to `database/sql` via `github.com/jackc/pgx/v5/stdlib` so go-jet's + `qrm.Queryable` interface is satisfied without changes. +- Query layer: `github.com/go-jet/jet/v2` (PostgreSQL dialect). Generated + code lives under each service `internal/adapters/postgres/jet/`, + regenerated via a `make jet` target and committed to the repo. +- Migrations: `github.com/pressly/goose/v3` library API; migration files + embedded via `//go:embed *.sql`; applied at startup, before opening any + HTTP/gRPC listener; non-zero exit on failure. +- Test infrastructure: `github.com/testcontainers/testcontainers-go` plus + the `modules/postgres` submodule; the same setup is reused by `make jet` + to host a transient instance for jet codegen. + +### Migration discipline + +- Forward-only sequence-numbered files: `00001_init.sql`, `00002_*.sql`, … +- Lowercase snake_case names; goose `-- +goose Up` / `-- +goose Down` + markers; statements that need transaction-wrapping use + `-- +goose StatementBegin` / `-- +goose StatementEnd`. +- Migrations apply at service startup; service exits non-zero on failure. 
+- Per-service decision record at `galaxy//docs/postgres-migration.md` + captures schema decisions and any non-trivial deviation from the rules. + +### Per-service code organisation + +```text +galaxy// + internal/ + adapters/ + postgres/ + migrations/ # *.sql files + migrations.go (//go:embed) + jet/ # generated; commit-checked + / # adapter implementations matching internal/ports + config/ + config.go # adds Postgres + new Redis schema + Makefile # `jet` target: testcontainers + goose + jet +``` + +### Test patterns + +- Per-service unit tests against a real PostgreSQL via + `testcontainers-go`; replace the corresponding miniredis test path where + storage moved to PG. +- Shared port-test suites (e.g. `lobby/internal/ports/racenamedirtest/`) + gain a Postgres harness; they remain backend-agnostic in shape. +- `integration/internal/harness/postgres_container.go` is added; integration + suites that need PG declare it next to their existing Redis container. +- Stub adapters (`*stub/`) are kept where the in-memory port is useful for + tests that don't need a real backend. Redis adapters that previously + implemented these ports are removed (no dead code). + +### Configuration env vars (target) + +For each service `` ∈ { `USERSERVICE`, `MAIL`, `NOTIFICATION`, `LOBBY`, +`GATEWAY`, `AUTHSESSION` }: + +- `_REDIS_MASTER_ADDR` (required) +- `_REDIS_REPLICA_ADDRS` (optional, comma-separated; default empty) +- `_REDIS_PASSWORD` (required) +- `_REDIS_DB` (default 0) +- `_REDIS_OPERATION_TIMEOUT` (default 250ms) + +For PG-backed services (`USERSERVICE`, `MAIL`, `NOTIFICATION`, `LOBBY`): + +- `_POSTGRES_PRIMARY_DSN` (required; + e.g. 
`postgres://userservice:secret@postgres:5432/galaxy?search_path=user&sslmode=disable`) +- `_POSTGRES_REPLICA_DSNS` (optional, comma-separated) +- `_POSTGRES_OPERATION_TIMEOUT` (default 1s) +- `_POSTGRES_MAX_OPEN_CONNS` (default 25) +- `_POSTGRES_MAX_IDLE_CONNS` (default 5) +- `_POSTGRES_CONN_MAX_LIFETIME` (default 30m) + +DSN sets `search_path=` so unqualified table references resolve into +the service-owned schema; `sslmode=disable` is set explicitly per the +"no TLS" requirement. + +Service-prefix-specific stream/keyspace env vars (`*_REDIS_DOMAIN_EVENTS_STREAM`, +`*_REDIS_LIFECYCLE_EVENTS_STREAM`, `*_REDIS_KEYSPACE_PREFIX`, +`MAIL_REDIS_COMMAND_STREAM`, etc.) keep their current names and semantics — +they describe stream/key shapes, not connection topology. + +--- + +## Stages + +Each stage is independently executable and shippable. + +### ~~Stage 0~~ — Architecture-wide rules and PG_PLAN.md materialisation + +This stage is implemented. + +**Goal**: land the steady-state rules in `ARCHITECTURE.md` and place +`PG_PLAN.md` at the project root so subsequent `/stage-implementation` +invocations have an authoritative reference. + +**Actions**: + +1. Write the contents of this plan file to `/Users/id/src/go/galaxy/PG_PLAN.md`. +2. Add a new section to `ARCHITECTURE.md` (e.g. `§9 Persistence Backends`) + capturing every rule under the *Architectural rules* heading above: + backend assignment, database/Redis topology, library stack, migration + discipline, code organisation, test patterns, env-var conventions. +3. Add a short *Migration Window* sub-section to `ARCHITECTURE.md` noting + that until all `PG_PLAN.md` stages complete, each service's `README.md` + continues to describe its actual current state — this caveat is removed + in Stage 9. +4. Adjust `ARCHITECTURE.md §8` (publisher rules) so cross-references + distinguish "Redis Stream" (event bus, stays Redis) from "PG-backed + table" (durable record). 
+ +**Files (modified / new)**: + +- `/Users/id/src/go/galaxy/PG_PLAN.md` — new +- `/Users/id/src/go/galaxy/ARCHITECTURE.md` — modified + +**Out of scope**: zero service code, zero per-service README/docs, zero +`go.mod` changes, zero new dependencies in service modules. + +**Verification**: + +- `git diff --stat` reports two paths only: `PG_PLAN.md`, `ARCHITECTURE.md`. +- `ARCHITECTURE.md` reads coherently end to end, with the new section + cross-referenced from §8 and from any other place that today says + "Redis is the v1 backend". +- Manual: read `PG_PLAN.md` top to bottom, confirm every architectural + decision matches the section in `ARCHITECTURE.md`. + +--- + +### ~~Stage 1~~ — Shared infrastructure packages (`pkg/postgres`, `pkg/redisconn`) + +This stage is implemented. + +**Goal**: provide one canonical helper each for Postgres and Redis so per- +service stages don't reinvent connection/migration wiring. No service +consumes them yet. + +**Files (new)**: + +- `pkg/postgres/config.go` — `Config` struct (PrimaryDSN, ReplicaDSNs, + OperationTimeout, MaxOpenConns, MaxIdleConns, ConnMaxLifetime); helper + `LoadFromEnv(prefix string) (Config, error)` that reads + `_POSTGRES_*`. +- `pkg/postgres/open.go` — `OpenPrimary(ctx, cfg) (*sql.DB, error)` and + `OpenReplicas(ctx, cfg) ([]*sql.DB, error)` using + `pgx.ConnConfig` → `stdlib.OpenDB(...)`; configures pool sizes and + per-statement context timeout. +- `pkg/postgres/migrate.go` — `RunMigrations(ctx context.Context, db *sql.DB, + fs embed.FS) error` wrapping `goose.SetBaseFS(fs)` + `goose.UpContext`. +- `pkg/postgres/otel.go` — `Instrument(db *sql.DB, telemetry telemetry.Runtime)` + applying `otelsql.RegisterDBStatsMetrics` and statement spans. +- `pkg/postgres/postgres_test.go` — testcontainers-backed smoke test: + open primary, run a one-line migration, insert/select. 
+- `pkg/redisconn/config.go` — `Config` struct (MasterAddr, ReplicaAddrs, + Password, DB, OperationTimeout); helper `LoadFromEnv(prefix string) + (Config, error)` that reads `_REDIS_*` (the new shape only; + rejects deprecated TLS/USERNAME vars with a clear error). +- `pkg/redisconn/client.go` — `NewMasterClient(cfg) *redis.Client` and + `NewReplicaClients(cfg) []*redis.Client` (latter returns nil/empty when + replicas not configured). +- `pkg/redisconn/otel.go` — `Instrument(client *redis.Client, + telemetry telemetry.Runtime)` applying `redisotel.InstrumentTracing` / + `InstrumentMetrics`. +- `pkg/redisconn/redisconn_test.go` — miniredis-backed config and master + client tests. + +**Files (touched)**: + +- `pkg/go.mod` — add `github.com/jackc/pgx/v5`, + `github.com/jackc/pgx/v5/stdlib`, `github.com/pressly/goose/v3`, + `github.com/testcontainers/testcontainers-go/modules/postgres`, + `github.com/XSAM/otelsql` (for db instrumentation; alternative: + `go.nhat.io/otelsql` — pick one in implementation). +- `go.work` — confirm `pkg/` is registered (already is). + +**Verification**: + +- `cd /Users/id/src/go/galaxy/pkg && go test ./postgres/... ./redisconn/...` + passes locally with Docker available. +- `go vet ./...` clean. + +--- + +### ~~Stage 2~~ — Integration test harness extension + +This stage is implemented. + +**Goal**: extend `integration/internal/harness/` with a Postgres container +helper and a service-bootstrap helper that builds the per-service DSN with +the right `search_path`. All existing integration suites stay green. + +**Files (new)**: + +- `integration/internal/harness/postgres_container.go` — + `StartPostgresContainer(t testing.TB) *PostgresRuntime`. The runtime + exposes `BaseDSN()`, `DSNForSchema(schema, role string) string`, and + `EnsureRoleAndSchema(ctx, schema, role, password string) error` so each + test can prepare an isolated schema for the service it is booting. +- `integration/internal/harness/postgres_container_test.go` — smoke test. 
+ +**Files (touched)**: + +- `integration/internal/harness/binary.go` — extend `Process`/launch + helpers with `WithPostgres(rt *PostgresRuntime, schema, role string)` + that injects the right `_POSTGRES_PRIMARY_DSN`. (Existing API already + takes `env map[string]string`; this is a thin wrapper.) +- `integration/go.mod` — add the testcontainers Postgres module. + +**Out of scope**: no integration suite is yet wired to Postgres; each +service stage wires in its suites. + +**Verification**: + +- `cd integration && go test ./internal/harness/...` passes. +- `cd integration && go test ./...` still green for all existing suites + (Redis-only services remain Redis-only). + +--- + +### ~~Stage 3~~ — User Service migration (pilot) + +**Goal**: replace User Service's Redis durable storage with PostgreSQL. The +two Redis Streams (`user:domain_events`, `user:lifecycle_events`) remain on +Redis. This stage is the pilot; subsequent service stages copy its shape. + +**Schema (`user` schema)**: + +- `accounts` (user_id PK, email UNIQUE, user_name UNIQUE, display_name, + preferred_language, time_zone, declared_country, created_at, updated_at, + deleted_at). +- `blocked_emails` (email PK, reason_code, blocked_at, actor_type, actor_id, + resolved_user_id). +- `entitlement_records` (record_id PK, user_id FK, plan_code, is_paid, + starts_at, ends_at, source, actor_type, actor_id, reason_code, + updated_at). +- `entitlement_snapshots` (user_id PK FK → accounts, …current effective + values mirroring Redis snapshot shape). +- `sanction_records` (record_id PK, user_id FK, sanction_code, scope, + reason_code, actor_type, actor_id, applied_at, expires_at, removed_at, + removed_by_type, removed_by_id, removed_reason_code). +- `sanction_active` (user_id, sanction_code, record_id) PRIMARY KEY + (user_id, sanction_code). +- `limit_records`, `limit_active` — analogous to sanctions. 
+- Indexes: `accounts(created_at DESC, user_id DESC)` for newest-first + pagination; `accounts(declared_country)`; + `entitlement_snapshots(plan_code, is_paid)`; + `entitlement_snapshots(ends_at) WHERE is_paid AND ends_at IS NOT NULL`; + `sanction_active(sanction_code)`; `limit_active(limit_code)`. Eligibility + flags become computed predicates on these columns. + +**Files (new)**: + +- `galaxy/user/internal/adapters/postgres/migrations/00001_init.sql` — + full schema with grants (`GRANT USAGE ON SCHEMA user TO userservice; + GRANT … ON ALL TABLES …;`). +- `galaxy/user/internal/adapters/postgres/migrations/migrations.go` — + `//go:embed *.sql` and a `Migrations() embed.FS` accessor. +- `galaxy/user/internal/adapters/postgres/jet/...` — generated code + (commit-checked). +- `galaxy/user/internal/adapters/postgres/userstore/store.go` — Postgres + implementation of `ports.UserAccountStore` and `ports.AuthDirectoryStore`. +- `galaxy/user/internal/adapters/postgres/userstore/entitlement_store.go` — + Postgres implementation of `EntitlementSnapshotStore` and + `EntitlementHistoryStore`. +- `galaxy/user/internal/adapters/postgres/userstore/policy_store.go` — + Postgres implementation of `SanctionStore` and `LimitStore`. +- `galaxy/user/internal/adapters/postgres/userstore/list_store.go` — + Postgres implementation of `UserListStore` (pagination + filters + expressed as SQL). +- `galaxy/user/internal/adapters/postgres/userstore/store_test.go` and + siblings — testcontainers-backed unit tests covering the same matrix the + current Redis tests cover. +- `galaxy/user/Makefile` — `jet` target. +- `galaxy/user/docs/postgres-migration.md` — decision record (schema + shape, why we keep `entitlement_snapshots` denormalised, eligibility + expressed as SQL predicates, schema role grants). + +**Files (touched)**: + +- `galaxy/user/internal/config/config.go` — add Postgres config; refactor + Redis config to master/replica/password (drop `TLS_ENABLED`, `USERNAME`). 
+- `galaxy/user/internal/config/config_test.go` — update to new env shape. +- `galaxy/user/internal/app/runtime.go` — open Postgres pool, run + migrations on startup before listeners open, wire postgres adapters + into services. Redis client now serves only the two stream publishers. +- `galaxy/user/README.md` — replace "Redis-backed user state" with the + new persistence model, update env-var section. +- `galaxy/user/docs/runbook.md`, `galaxy/user/docs/runtime.md`, + `galaxy/user/docs/examples.md` — update storage references and + config sections. +- `galaxy/user/go.mod` — add `github.com/jackc/pgx/v5{,/stdlib}`, + `github.com/pressly/goose/v3`, `github.com/go-jet/jet/v2`, + `github.com/testcontainers/testcontainers-go/modules/postgres`. Use + `pkg/postgres`, `pkg/redisconn`. + +**Files (deleted)**: + +- `galaxy/user/internal/adapters/redis/userstore/` — entire directory. +- The portions of `galaxy/user/internal/adapters/redisstate/keyspace.go` + that defined account/entitlement/sanction/limit/index keys (keep only + what `domainevents` and `lifecycleevents` publishers still require — if + none, delete the file outright). + +**Files retained on Redis**: + +- `galaxy/user/internal/adapters/redis/domainevents/publisher.go`. +- `galaxy/user/internal/adapters/redis/lifecycleevents/publisher.go`. + +**Touched integration suites** (each gets a Postgres container in addition +to the existing Redis one): + +- `integration/authsessionuser/` +- `integration/gatewayauthsessionuser/` +- `integration/gatewayauthsessionusermail/` +- `integration/notificationuser/` +- `integration/lobbyuser/` + +**Verification**: + +- `cd galaxy/user && make jet && go test ./...` (Docker needed). +- `cd integration && go test ./authsessionuser/... ./gatewayauthsessionuser/... ./gatewayauthsessionusermail/... ./notificationuser/... ./lobbyuser/...` +- Manual smoke against a `docker-compose` stack (PG + Redis with + passwords) using flows from `galaxy/user/docs/examples.md`. 
+
+---
+
+### ~~Stage 4~~ — Mail Service migration
+
+This stage is implemented.
+
+**Goal**: move durable mail storage (deliveries, attempts, dead letters,
+malformed commands, payloads, idempotency, attempt schedule) into
+PostgreSQL. Keep Redis only for the inbound `mail:delivery_commands`
+stream and its consumer offset.
+
+**Schema (`mail` schema)**:
+
+- `deliveries` (delivery_id PK, source, status, recipient_envelope JSONB,
+  subject, text_body, html_body, payload_mode, template_id,
+  idempotency_source, idempotency_key, locale_fallback_used,
+  next_attempt_at, attempt_count, max_attempts, created_at, updated_at).
+  - INDEX (status, next_attempt_at) for the scheduler.
+  - UNIQUE (idempotency_source, idempotency_key) — the idempotency record
+    IS this row (no separate kv).
+  - INDEX (created_at DESC) for operator listings; INDEX on status, source,
+    template_id, recipient as needed.
+- `attempts` (delivery_id FK, attempt_no, status, provider_summary,
+  scheduled_for_ms, started_at_ms, completed_at_ms, PRIMARY KEY
+  (delivery_id, attempt_no)).
+- `dead_letters` (delivery_id PK FK, final_attempt_count, max_attempts,
+  failure_classification, failure_message, created_at_ms).
+- `delivery_payloads` (delivery_id PK FK, template_variables JSONB).
+- `malformed_commands` (stream_entry_id PK, failure_code, failure_message,
+  raw_fields JSONB, recorded_at_ms; INDEX recorded_at_ms).
+
+**Files**: mirror Stage 3 (postgres adapter package, migrations, jet
+codegen, Makefile, decision record, removal of corresponding
+`internal/adapters/redisstate/*` files for migrated entities, retention
+of stream offset and consumer wiring on Redis).
+
+**Worker change**: the mail attempt scheduler loop replaces
+`ZRANGEBYSCORE` over `mail:attempt_schedule` with
+`SELECT … FROM deliveries WHERE status IN ('queued','retry_pending') AND next_attempt_at <= now() ORDER BY next_attempt_at LIMIT N FOR UPDATE SKIP LOCKED`.
+ +**Files (deleted)**: + +- `galaxy/mail/internal/adapters/redisstate/auth_acceptance_store.go` +- `galaxy/mail/internal/adapters/redisstate/generic_acceptance_store.go` +- `galaxy/mail/internal/adapters/redisstate/attempt_execution_store.go` +- `galaxy/mail/internal/adapters/redisstate/operator_store.go` +- `galaxy/mail/internal/adapters/redisstate/malformed_command_store.go` +- `galaxy/mail/internal/adapters/redisstate/render_store.go` +- The portions of `galaxy/mail/internal/adapters/redisstate/keyspace.go` + no longer used (`mail:attempt_schedule`, `mail:idempotency:*`, all + delivery/attempt/dead-letter/index keys). + +**Files retained on Redis**: + +- `galaxy/mail/internal/adapters/redisstate/stream_offset_store.go` (offset + for `mail:delivery_commands` consumer). +- The command stream consumer wiring itself. + +**Touched integration suites**: + +- `integration/authsessionmail/` +- `integration/gatewayauthsessionmail/` +- `integration/gatewayauthsessionusermail/` +- `integration/notificationmail/` + +**Verification**: per Stage 3 pattern; plus end-to-end smoke that pushes +a delivery through retry_pending → provider_accepted using the SMTP stub. + +--- + +### ~~Stage 5~~ — Notification Service migration + +This stage is implemented. + +**Goal**: move durable notification storage (records, routes, idempotency, +dead letters, malformed intents) into PostgreSQL. Keep Redis for the +inbound `notification:intents` stream, the outbound `gateway:client-events` +stream, the outbound `mail:delivery_commands` stream, the corresponding +stream offsets, and the short-lived per-route lease (`route_leases:*`). + +**Schema (`notification` schema)**: + +- `records` (notification_id PK, notification_type, producer, audience_kind, + recipient_user_ids JSONB, payload JSONB, idempotency_key, + request_fingerprint, request_id, trace_id, occurred_at_ms, + accepted_at_ms, updated_at_ms). + - UNIQUE (producer, idempotency_key) — idempotency record IS this row. 
+- `routes` (notification_id, route_id, channel, recipient_ref, status, + attempt_count, max_attempts, next_attempt_at_ms, resolved_email, + resolved_locale, last_error_classification, last_error_message, + last_error_at_ms, created_at_ms, updated_at_ms, published_at_ms, + dead_lettered_at_ms, skipped_at_ms, PRIMARY KEY + (notification_id, route_id)). + - INDEX (status, next_attempt_at_ms) for the scheduler. +- `dead_letters` (notification_id, route_id PK FK, channel, recipient_ref, + final_attempt_count, max_attempts, failure_classification, + failure_message, recovery_hint, created_at_ms). +- `malformed_intents` (stream_entry_id PK, notification_type, producer, + idempotency_key, failure_code, failure_message, raw_fields JSONB, + recorded_at_ms). + +**Worker change**: route publisher selects work via the same +`FOR UPDATE SKIP LOCKED` pattern as Mail. The Redis lease is still used +as a short-lived, per-process exclusivity hint atop the SQL claim. + +**Files (deleted)**: + +- `galaxy/notification/internal/adapters/redisstate/acceptance_store.go` +- `galaxy/notification/internal/adapters/redisstate/route_state_store.go` +- `galaxy/notification/internal/adapters/redisstate/malformed_intent_store.go` +- The portions of + `galaxy/notification/internal/adapters/redisstate/keyspace.go` no longer + used (records, routes, idempotency, dead_letters, malformed_intents). + +**Files retained on Redis**: + +- `galaxy/notification/internal/adapters/redisstate/stream_offset_store.go`. +- Route lease key generator (still under `redisstate/`, narrowed to leases + only). +- All stream consumer/publisher wiring. + +**Touched integration suites**: + +- `integration/notificationgateway/` +- `integration/notificationmail/` +- `integration/notificationuser/` + +--- + +### ~~Stage 6A~~ — Lobby Service: core enrollment entities + +**Goal**: move `Game`, `Application`, `Invite`, `Membership` records and +their indexes into PostgreSQL. 
RaceNameDirectory, GameTurnStats, +GapActivation, EvaluationGuard, StreamOffset remain on Redis until later +sub-stages. + +**Schema (`lobby` schema, partial)**: + +- `games` (game_id PK, owner_id, kind ('public'|'private'), status, + created_at, updated_at, runtime_snapshot JSONB, runtime_binding JSONB, + …other denormalised game settings). + - INDEX (status, created_at). + - INDEX (owner_id) WHERE kind = 'private'. +- `applications` (application_id PK, game_id FK, user_id, status, + canonical_key, submitted_at, decided_at). + - PARTIAL UNIQUE INDEX (user_id, game_id) WHERE status = 'active' — + enforces the single-active constraint at the DB level (replaces + `lobby:user_game_application:*:*`). + - INDEX (game_id), INDEX (user_id). +- `invites` (invite_id PK, game_id FK, inviter_id, invitee_id, race_name, + status, created_at, expires_at, decided_at). + - INDEX (game_id), INDEX (invitee_id), INDEX (inviter_id). + - INDEX (status, expires_at) for any expiration scanner if needed. +- `memberships` (membership_id PK, game_id FK, user_id, status, joined_at, + canonical_key, …). + - INDEX (game_id), INDEX (user_id). + +**Files (new)**: + +- `galaxy/lobby/internal/adapters/postgres/migrations/00001_core_entities.sql`. +- `galaxy/lobby/internal/adapters/postgres/migrations/migrations.go`. +- `galaxy/lobby/internal/adapters/postgres/jet/...`. +- `galaxy/lobby/internal/adapters/postgres/gamestore/store.go`. +- `galaxy/lobby/internal/adapters/postgres/applicationstore/store.go`. +- `galaxy/lobby/internal/adapters/postgres/invitestore/store.go`. +- `galaxy/lobby/internal/adapters/postgres/membershipstore/store.go`. +- Test files for each store using the existing test patterns. +- `galaxy/lobby/Makefile` (`jet` target). +- `galaxy/lobby/docs/postgres-migration.md` (decision record covering + this sub-stage and what is intentionally left for 6B/6C). 
+ +**Files (touched)**: + +- `galaxy/lobby/internal/config/config.go` — add Postgres config; refactor + Redis config to the new shape. +- `galaxy/lobby/internal/app/runtime.go` — open Postgres pool, run + migrations on startup, wire core PG-backed stores into services. + RaceNameDirectory and stats/guard stores still wired to Redis until 6B/6C. +- `galaxy/lobby/README.md` and `galaxy/lobby/docs/runbook.md` — updated + to describe core entities on PG, RND/stats still on Redis until 6B/6C. + +**Files (deleted)**: + +- `galaxy/lobby/internal/adapters/redisstate/gamestore.go`, + `applicationstore.go`, `invitestore.go`, `membershipstore.go`. +- The corresponding sections of `redisstate/keyspace.go`. + +**Stub adapters retained**: `gamestub/`, `applicationstub/`, `invitestub/`, +`membershipstub/` stay — they are pure in-memory ports useful for tests +that don't need real PG. + +**Touched integration suites**: + +- `integration/lobbyuser/` +- `integration/lobbynotification/` + +**Verification**: per Stage 3 pattern; plus the existing lobby HTTP +contract tests against the public/internal ports. + +--- + +### ~~Stage 6B~~ — Lobby Service: RaceNameDirectory + +This stage is implemented. + +**Goal**: replace the Lua-backed Redis `RaceNameDirectory` with a PG +implementation that preserves the two-tier model (registered / reservation / +pending_registration) and atomic registration semantics via SQL +transactions and (where required) advisory locks. + +**Schema (additions to `lobby` schema)**: + +- `race_names` (canonical_key PK, holder_user_id, binding_kind ('registered' + | 'reserved' | 'pending_registration'), source_game_id, eligible_until_ms, + registered_at_ms, reserved_at_ms). + - INDEX (holder_user_id) for `ListRegistered`/`ListReservations`/ + `ListPendingRegistrations` queries. + - PARTIAL INDEX (eligible_until_ms) WHERE binding_kind = + 'pending_registration' for the expiration scanner. 
+  - The confusable-pair policy is enforced at write time inside
+    `BEGIN … COMMIT` transactions; `Reserve`/`Register`/
+    `MarkPendingRegistration` use `SELECT … FOR UPDATE` on the canonical
+    keys involved (or PG advisory locks keyed by `hashtext(canonical_key)`)
+    to serialise concurrent attempts.
+
+**Files (new)**:
+
+- `galaxy/lobby/internal/adapters/postgres/migrations/00002_race_names.sql`.
+- `galaxy/lobby/internal/adapters/postgres/racenamedir/directory.go` —
+  Postgres implementation of `ports.RaceNameDirectory`.
+- `galaxy/lobby/internal/adapters/postgres/racenamedir/directory_test.go`
+  — runs the existing shared suite at
+  `galaxy/lobby/internal/ports/racenamedirtest/suite.go`.
+
+**Files (touched)**:
+
+- `galaxy/lobby/internal/app/runtime.go` — wire PG RND.
+- `galaxy/lobby/internal/ports/racenamedirtest/suite.go` — only
+  shape-preserving updates if the suite assumed Redis-only behaviour
+  (e.g. SCAN-based list ordering).
+- `galaxy/lobby/README.md`, `galaxy/lobby/docs/runbook.md` — RND now PG-
+  backed; canonical_lookup cache no longer needed (PG indexed lookup is
+  fast enough; remove the Redis cache key from `redisstate/keyspace.go`).
+
+**Files (deleted)**:
+
+- `galaxy/lobby/internal/adapters/redisstate/racenamedir.go` and the
+  embedded Lua scripts.
+
+**Files retained**:
+
+- `galaxy/lobby/internal/adapters/racenamestub/` stays (useful for unit
+  tests that don't need PG).
+
+**Worker change**: the pending-registration expiration worker switches
+from `ZRANGEBYSCORE` on `lobby:race_names:pending_index` to
+`SELECT … FROM race_names WHERE binding_kind='pending_registration' AND eligible_until_ms <= (extract(epoch from now()) * 1000)`.
+
+**Verification**: shared port suite (`racenamedirtest`) green against PG
+adapter; lobby unit tests green; `integration/lobbyuser/`,
+`integration/lobbynotification/` green.
+
+---
+
+### ~~Stage 6C~~ — Lobby Service: workers, ephemeral stores, cleanup
+
+This stage is implemented.
+
+**Goal**: finish the lobby migration.
Confirm what stays Redis-only, +update workers that touch both backends, drop dead Redis adapters. + +**Stays on Redis (per architectural rules)**: + +- `GameTurnStatsStore` — ephemeral per-game aggregate, deleted at game + finish, rebuildable from GM events. +- `EvaluationGuardStore` — ephemeral marker. +- `GapActivationStore` — short-lived gap-window timestamp cache. +- `StreamOffsetStore` — runtime coordination per the architectural rule. +- All stream consumers and publishers (`gm:lobby_events`, + `runtime:job_results`, `user:lifecycle_events`, `notification:intents`). + +This is documented in `galaxy/lobby/docs/postgres-migration.md`. + +**Files (touched)**: + +- `galaxy/lobby/internal/worker/gmevents/consumer.go` — write durable + updates via PG-backed `GameStore`. +- `galaxy/lobby/internal/worker/runtimejobresult/consumer.go` — same. +- `galaxy/lobby/internal/adapters/userlifecycle/consumer.go` (and the + worker that drives it) — RND release, membership/application/invite + cascade all flow through PG. +- `galaxy/lobby/internal/worker/pendingregistration/worker.go` — PG-based + scan, no Redis ZSET. +- `galaxy/lobby/internal/worker/enrollmentautomation/worker.go` — uses PG + `GameStore.GetByStatus("enrollment_open")`. +- `galaxy/lobby/internal/adapters/redisstate/keyspace.go` — pruned to the + remaining Redis keys (turn stats, gap activation, evaluation guard, + stream offsets, lifecycle stream consumer state). +- `galaxy/lobby/README.md`, `galaxy/lobby/docs/runtime.md`, + `galaxy/lobby/docs/runbook.md`, `galaxy/lobby/docs/examples.md` — + finalised storage descriptions. + +**Files (deleted)**: + +- Anything left in `galaxy/lobby/internal/adapters/redisstate/` whose + only consumer was a port now PG-backed (see 6A/6B deletions). + +**Verification**: + +- All previously-green lobby unit tests pass with PG-backed adapters. +- `integration/lobbyuser/`, `integration/lobbynotification/` pass. 
+- `grep -rn "redisstate" galaxy/lobby/internal/` returns only the keys + intentionally retained on Redis. + +--- + +### ~~Stage 7~~ — Gateway and Auth/Session: Redis configuration refactor + +This stage is implemented. + +**Goal**: apply the new Redis configuration shape (master/replica/password, +drop TLS/USERNAME) to Gateway and Auth/Session. No PG migration; these +services intentionally stay Redis-only. + +**Files (touched)**: + +- `galaxy/gateway/internal/config/config.go` — switch `RedisConfig` + fields to the `pkg/redisconn.Config` shape; update the three + prefixes: `GATEWAY_SESSION_CACHE_REDIS_*`, `GATEWAY_REPLAY_REDIS_*`, + `GATEWAY_SESSION_EVENTS_REDIS_*`. Drop `TLS_ENABLED`, `USERNAME`. +- `galaxy/gateway/internal/session/redis.go`, + `galaxy/gateway/internal/replay/redis.go`, + `galaxy/gateway/internal/events/subscriber.go` — adopt new client + constructor via `pkg/redisconn`. +- `galaxy/gateway/internal/config/config_test.go`, + `galaxy/gateway/internal/session/redis_test.go`, + `galaxy/gateway/internal/replay/redis_test.go` — updated to new env shape. +- `galaxy/authsession/internal/config/config.go` — same pattern; drop + TLS, USERNAME. +- `galaxy/authsession/internal/adapters/redis/sessionstore/store.go`, + `challengestore/store.go`, `projectionpublisher/publisher.go`, + `sendemailcodeabuse/protector.go`, `configprovider/store.go` — adopt + new client. +- `galaxy/authsession/internal/config/config_test.go` — updated. +- `galaxy/gateway/README.md`, `galaxy/authsession/README.md`, + `galaxy/gateway/docs/runbook.md`, `galaxy/authsession/docs/runbook.md` + — note that Redis-only is intentional and reference the `ARCHITECTURE.md` + rule on TTL-bounded auth state. + +**No deletions of business logic**; only env-var refactor and adapter +plumbing through `pkg/redisconn`. 
+
+**Touched integration suites**:
+
+- `integration/gatewayauthsession/`
+- `integration/authsession/`
+- (every suite that boots gateway or authsession picks up the new env vars
+  via the harness; confirm none still pass `*_REDIS_TLS_ENABLED`).
+
+**Verification**:
+
+- `cd galaxy/gateway && go test ./...`
+- `cd galaxy/authsession && go test ./...`
+- `cd integration && go test ./gatewayauthsession/... ./authsession/...`
+
+---
+
+### ~~Stage 8~~ — GeoProfile: documentation only
+
+**Goal**: ensure the GeoProfile plan and README reflect the new
+persistence rules so its future implementation follows them. No code
+exists yet.
+
+**Files (touched)**:
+
+- `galaxy/geoprofile/PLAN.md` — add a stage referencing `pkg/postgres`
+  and `pkg/redisconn`; specify that observed-country aggregates,
+  declared_country history and review records will live in a `geoprofile`
+  schema, while ephemeral per-session signals (if any) stay on Redis.
+- `galaxy/geoprofile/README.md` — note ownership of the `geoprofile`
+  schema and the stack choices.
+
+**No code change**.
+
+---
+
+### ~~Stage 9~~ — Final sweep
+
+**Goal**: confirm no dead Redis adapter code, no orphaned stub, no
+broken doc reference. Remove the *Migration Window* caveat from
+`ARCHITECTURE.md` once all stages are done.
+
+**Activities**:
+
+- Walk every PG-backed service: `grep -rn "redis" galaxy/*/internal/adapters/`
+  and verify every match belongs to a still-active stream/cache/runtime
+  use case.
+- Walk integration suites: confirm each one provisions only the
+  containers it actually needs; no stale env vars.
+- Update `ARCHITECTURE.md` to drop the *Migration Window* sub-section.
+- Combine sequences of migration `.sql` files into a single first file.
+  Rewrite the SQL, not just concatenate the files.
+  The reason is that the project is still in a development state, so all
+  schema updates can go directly into the single first file of the relevant
+  migrations. This should be represented in `ARCHITECTURE.md` as well.
+- One round of `go test ./...` in every module plus
+  `cd integration && go test ./...`.
+
+**Verification**:
+
+- All tests pass in every module.
+- No file matches `// TODO.*postgres` or `// TODO.*migrate`.
+- `git grep -nE 'REDIS_(TLS_ENABLED|USERNAME)'` returns nothing under
+  `galaxy/` (these env vars are fully retired).
+
+---
+
+## Verification strategy (whole project)
+
+After each stage:
+
+- `cd /Users/id/src/go/galaxy/pkg && go test ./...`
+- `cd /Users/id/src/go/galaxy/ && go test ./...`
+  (with Docker available for testcontainers).
+- `cd /Users/id/src/go/galaxy/integration && go test ./...`
+- Manual smoke against a `docker-compose` stack (PG + Redis, both with
+  passwords) using the example flows in each service's `docs/examples.md`.
+
+After Stage 9:
+
+- `cd /Users/id/src/go/galaxy/integration && go test ./...` end to end
+  against real PG + real Redis.
+- Confirm `git grep -nE 'REDIS_(TLS_ENABLED|USERNAME)'` returns nothing
+  under `galaxy/`.
+- Confirm `git grep -nE 'TODO.*(postgres|migrate)'` returns nothing.
+
+## Out of scope
+
+- `galaxy/game` — explicitly excluded by the project owner.
+- Production deployment manifests (Helm/k8s) — local `docker-compose` is
+  enough for development.
+- Backup/restore tooling configuration — `pg_dump` and WAL archiving are
+  available out of the box; operational setup is not part of this plan.
+- Sentinel/Cluster Redis topology code paths — config exposes replica
+  addresses for future use; no failover routing implemented yet.
+- Read-traffic routing to PG replicas — config exposes
+  `*_POSTGRES_REPLICA_DSNS` for future use; no routing implemented yet.
+- `golangci-lint` config addition — not part of this migration.
+- CI pipeline — no `.github/workflows/` exists; not added by this plan.
+
+## Risks and notes
+
+- **`go-jet` codegen requires a live database**.
The `make jet` target + per service uses `testcontainers-go` to bring up a transient PG, applies + the same goose migrations the service applies at startup, then runs + `jet -dsn=… -path=internal/adapters/postgres/jet`. Generated code is + committed; consumers don't need Docker just to build. +- **Schema-per-service vs single-DB cross-service joins**: there are no + cross-schema joins in this plan. Each service reads only its own schema; + cross-service data flows go via Redis Streams (event bus) or HTTP + contracts (User Service is queried by Lobby for eligibility) — same as + today. The DB-level role grants enforce this. +- **Pending registration expiration worker**: under Redis it scanned a + global ZSET; under PG it does an indexed scan. The partial index on + `eligible_until_ms WHERE binding_kind='pending_registration'` keeps the + scan cheap. +- **Idempotency under crash**: with idempotency expressed as a UNIQUE + constraint on the durable record, recovery is "the row either exists or + it doesn't" — no Redis-loss window where duplicates can sneak through. +- **lib/pq vs pgx (revisit)**: confirmed pgx/v5 + jet via stdlib adapter. + The `make jet` target will pass `-source=postgres` to jet (the dialect + is independent of which Go driver runs the queries at runtime). +- **No backward-compat shim for env vars**: `*_REDIS_TLS_ENABLED` and + `*_REDIS_USERNAME` are retired in one cut. Any external dev environment + that sets these will start failing fast at startup with a clear error + emitted by `pkg/redisconn.LoadFromEnv`. diff --git a/authsession/README.md b/authsession/README.md index 7f90f82..1ab142c 100644 --- a/authsession/README.md +++ b/authsession/README.md @@ -9,7 +9,11 @@ Startup requires: -- one reachable Redis deployment configured by `AUTHSESSION_REDIS_ADDR` +- one reachable Redis master configured by `AUTHSESSION_REDIS_MASTER_ADDR` + with mandatory `AUTHSESSION_REDIS_PASSWORD`. 
The connection topology + follows the project-wide rules in `ARCHITECTURE.md §Persistence Backends` + (one master plus zero-or-more replicas, no TLS, no Redis ACL username); + see also `docs/redis-config.md`. That Redis deployment is used for: diff --git a/authsession/docs/redis-config.md b/authsession/docs/redis-config.md new file mode 100644 index 0000000..3117a75 --- /dev/null +++ b/authsession/docs/redis-config.md @@ -0,0 +1,88 @@ +# Decision: Redis configuration shape + +PG_PLAN.md §7. Captures the standing rules adopted by Auth/Session Service +when it joined the project-wide Redis topology defined in +`ARCHITECTURE.md §Persistence Backends`. + +## Context + +Auth/Session Service intentionally stays Redis-only. All authsession state +is TTL-bounded and recoverable from a fresh login flow: + +- challenge records expire with the login window; +- device-session records expire with their session TTL; +- gateway projection cache keys are write-through reflections of the + source-of-truth session record; +- the gateway-session-events stream is consumed lazily by the gateway and + trimmed by `MAXLEN ~`; +- the resend-throttle protector is purely TTL-driven. + +Stage 7 brought authsession in line with the steady-state rules established +in Stage 0: every Galaxy service uses one master plus zero-or-more replicas +with a mandatory password, no TLS, and no Redis ACL username; the connection +is configured by the shared `pkg/redisconn` helper. + +## Decisions + +### One shared `*redis.Client` owned by the runtime + +`internal/app/runtime.go` constructs a single `*redis.Client` via +`internal/adapters/redis.NewClient`, attaches OpenTelemetry tracing and +metrics via `internal/adapters/redis.InstrumentClient`, performs one bounded +`PING` via `internal/adapters/redis.Ping`, and registers `client.Close` for +shutdown. The challenge store, session store, config provider, projection +publisher and resend-throttle protector all receive this same client. 
+
+Adapters no longer build or own a Redis client. Their `Config` structs hold
+only namespace and per-adapter timeout settings (no Addr/Username/Password/
+DB/TLSEnabled). Adapter constructors take `(*redis.Client, Config)`.
+
+### One env-var prefix per service
+
+Connection topology is loaded from a single
+`AUTHSESSION_REDIS_*` group via `redisconn.LoadFromEnv("AUTHSESSION")`:
+
+- `AUTHSESSION_REDIS_MASTER_ADDR` (required)
+- `AUTHSESSION_REDIS_REPLICA_ADDRS` (optional, comma-separated; currently
+  unused, reserved for future read-routing)
+- `AUTHSESSION_REDIS_PASSWORD` (required)
+- `AUTHSESSION_REDIS_DB` (default `0`)
+- `AUTHSESSION_REDIS_OPERATION_TIMEOUT` (default `250ms`)
+
+The per-adapter namespace and stream env vars (`*_KEY_PREFIX`,
+`*_STREAM`, `*_STREAM_MAX_LEN`) keep their existing names and semantics —
+they describe key shape, not connection topology.
+
+### Retired env vars (hard removal)
+
+- `AUTHSESSION_REDIS_ADDR` — replaced by `AUTHSESSION_REDIS_MASTER_ADDR`.
+- `AUTHSESSION_REDIS_USERNAME` — Redis ACL not used.
+- `AUTHSESSION_REDIS_TLS_ENABLED` — TLS disabled by policy.
+
+Not retired: `AUTHSESSION_REDIS_OPERATION_TIMEOUT` keeps its name (it now
+lives in `redisconn.Config`).
+
+`pkg/redisconn.LoadFromEnv` rejects `AUTHSESSION_REDIS_TLS_ENABLED` and
+`AUTHSESSION_REDIS_USERNAME` at startup with a clear error pointing to
+`ARCHITECTURE.md §Persistence Backends`. There is no backward-compatibility
+shim; this is consistent with the project-wide rule that the migration
+window has no production deploys to preserve.
+
+### Telemetry
+
+`redisconn.Instrument` wires `redisotel.InstrumentTracing` (with
+`WithDBStatement(false)`) and `redisotel.InstrumentMetrics`. This is the
+first authsession release that emits Redis tracing and connection-pool
+metrics; downstream dashboards will start populating without further
+changes.
+
+## Consequences
+
+- Test code that previously constructed a Redis client per adapter must now
+  construct one client and pass it to every adapter under test (see the
+  pattern in `internal/adapters/redis/*/store_test.go`).
+- Operators must set `AUTHSESSION_REDIS_PASSWORD`. A passwordless local
+  Redis is still acceptable as long as a placeholder password is supplied
+  to the binary; Redis without `requirepass` accepts AUTH unconditionally.
+- The integration test harness passes `AUTHSESSION_REDIS_PASSWORD =
+  "integration"` alongside `AUTHSESSION_REDIS_MASTER_ADDR` (see
+  `integration/internal/harness/authsessionservice.go`).
diff --git a/authsession/docs/runbook.md b/authsession/docs/runbook.md
index 50082e4..3975d86 100644
--- a/authsession/docs/runbook.md
+++ b/authsession/docs/runbook.md
@@ -7,10 +7,16 @@ verification, shutdown, and common authsession incidents.
 
 Before starting the process, confirm:
 
-- `AUTHSESSION_REDIS_ADDR` points to the Redis deployment used for authsession
-  source-of-truth data, resend throttling, and gateway projection
-- the configured Redis ACL, DB, TLS, and key-prefix settings match the target
-  environment
+- `AUTHSESSION_REDIS_MASTER_ADDR` and `AUTHSESSION_REDIS_PASSWORD` point to the
+  Redis deployment used for authsession source-of-truth data, resend
+  throttling, and gateway projection. Optional read replicas may be listed in
+  `AUTHSESSION_REDIS_REPLICA_ADDRS` (currently unused; reserved for future
+  read-routing).
+- the configured Redis DB and key-prefix settings match the target environment.
+  Per `ARCHITECTURE.md §Persistence Backends`, Redis traffic is
+  password-protected and TLS is disabled by policy; the deprecated
+  `AUTHSESSION_REDIS_TLS_ENABLED` and `AUTHSESSION_REDIS_USERNAME` variables
+  are no longer accepted and cause a hard fail at startup.
- if `AUTHSESSION_USER_SERVICE_MODE=rest`, both `AUTHSESSION_USER_SERVICE_BASE_URL` and `AUTHSESSION_USER_SERVICE_REQUEST_TIMEOUT` are configured @@ -21,15 +27,10 @@ Before starting the process, confirm: - `gateway:session:` cache key prefix - `gateway:session_events` stream name -At startup the process performs bounded `PING` checks for: - -- challenge store -- session store -- config provider -- gateway projection publisher -- resend-throttle protector - -Startup fails fast if any of those checks fail. +At startup the process performs one bounded `PING` against the shared Redis +client used by every adapter (challenge store, session store, config provider, +gateway projection publisher, resend-throttle protector). Startup fails fast +if the ping fails. Expected listener state after a healthy start: diff --git a/authsession/docs/runtime.md b/authsession/docs/runtime.md index 05b321b..f3ee402 100644 --- a/authsession/docs/runtime.md +++ b/authsession/docs/runtime.md @@ -101,7 +101,8 @@ gateway-facing projection namespaces as a derived integration view. Required for all process starts: -- `AUTHSESSION_REDIS_ADDR` +- `AUTHSESSION_REDIS_MASTER_ADDR` +- `AUTHSESSION_REDIS_PASSWORD` Core process config: @@ -124,13 +125,23 @@ Internal HTTP config: - `AUTHSESSION_INTERNAL_HTTP_IDLE_TIMEOUT` - `AUTHSESSION_INTERNAL_HTTP_REQUEST_TIMEOUT` -Redis connectivity and namespace config: +Redis connection topology (managed by `pkg/redisconn`, +see `ARCHITECTURE.md §Persistence Backends`): -- `AUTHSESSION_REDIS_USERNAME` -- `AUTHSESSION_REDIS_PASSWORD` +- `AUTHSESSION_REDIS_MASTER_ADDR` (required) +- `AUTHSESSION_REDIS_REPLICA_ADDRS` (optional, comma-separated; reserved for + future read-routing — currently unused) +- `AUTHSESSION_REDIS_PASSWORD` (required) - `AUTHSESSION_REDIS_DB` -- `AUTHSESSION_REDIS_TLS_ENABLED` - `AUTHSESSION_REDIS_OPERATION_TIMEOUT` + +> Removed: `AUTHSESSION_REDIS_ADDR`, `AUTHSESSION_REDIS_USERNAME`, +> `AUTHSESSION_REDIS_TLS_ENABLED`. 
`pkg/redisconn.LoadFromEnv` rejects the +> deprecated `*_REDIS_TLS_ENABLED` and `*_REDIS_USERNAME` variables at +> startup; see `docs/redis-config.md` for the rationale. + +Redis namespace and stream config: + - `AUTHSESSION_REDIS_CHALLENGE_KEY_PREFIX` - `AUTHSESSION_REDIS_SESSION_KEY_PREFIX` - `AUTHSESSION_REDIS_USER_SESSIONS_KEY_PREFIX` diff --git a/authsession/gateway_compatibility_test.go b/authsession/gateway_compatibility_test.go index 2f8a854..2e9a2ca 100644 --- a/authsession/gateway_compatibility_test.go +++ b/authsession/gateway_compatibility_test.go @@ -292,53 +292,33 @@ func newGatewayCompatibilityHarness(t *testing.T, options gatewayCompatibilityOp redisServer.Set(gatewayCompatibilitySessionLimitKey, strconv.Itoa(*options.SessionLimit)) } - challengeStore, err := challengestore.New(challengestore.Config{ - Addr: redisServer.Addr(), - DB: 0, + challengeStore, err := challengestore.New(redisClient, challengestore.Config{ KeyPrefix: gatewayCompatibilityChallengeKeyPrefix, OperationTimeout: 250 * time.Millisecond, }) require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, challengeStore.Close()) - }) - sessionStore, err := sessionstore.New(sessionstore.Config{ - Addr: redisServer.Addr(), - DB: 0, + sessionStore, err := sessionstore.New(redisClient, sessionstore.Config{ SessionKeyPrefix: gatewayCompatibilitySessionKeyPrefix, UserSessionsKeyPrefix: gatewayCompatibilityUserSessionsKeyPrefix, UserActiveSessionsKeyPrefix: gatewayCompatibilityUserActiveKeyPrefix, OperationTimeout: 250 * time.Millisecond, }) require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, sessionStore.Close()) - }) - configStore, err := configprovider.New(configprovider.Config{ - Addr: redisServer.Addr(), - DB: 0, + configStore, err := configprovider.New(redisClient, configprovider.Config{ SessionLimitKey: gatewayCompatibilitySessionLimitKey, OperationTimeout: 250 * time.Millisecond, }) require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, configStore.Close()) 
- }) - publisher, err := projectionpublisher.New(projectionpublisher.Config{ - Addr: redisServer.Addr(), - DB: 0, + publisher, err := projectionpublisher.New(redisClient, projectionpublisher.Config{ SessionCacheKeyPrefix: gatewayCompatibilitySessionCacheKeyPrefix, SessionEventsStream: gatewayCompatibilitySessionEventsStream, StreamMaxLen: gatewayCompatibilityStreamMaxLen, OperationTimeout: 250 * time.Millisecond, }) require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, publisher.Close()) - }) userDirectory := &userservice.StubDirectory{} if options.SeedBlockedEmail { diff --git a/authsession/go.mod b/authsession/go.mod index 6a1b07b..f09789c 100644 --- a/authsession/go.mod +++ b/authsession/go.mod @@ -1,8 +1,9 @@ module galaxy/authsession -go 1.26.0 +go 1.26.1 require ( + galaxy/redisconn v0.0.0-00010101000000-000000000000 github.com/alicebob/miniredis/v2 v2.37.0 github.com/getkin/kin-openapi v0.135.0 github.com/gin-gonic/gin v1.12.0 @@ -21,7 +22,7 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.43.0 go.opentelemetry.io/otel/trace v1.43.0 go.uber.org/zap v1.27.1 - golang.org/x/crypto v0.49.0 + golang.org/x/crypto v0.50.0 golang.org/x/text v0.36.0 ) @@ -52,7 +53,7 @@ require ( github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-isatty v0.0.21 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect @@ -72,13 +73,20 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect go.opentelemetry.io/proto/otlp v1.10.0 // indirect go.uber.org/atomic v1.11.0 // indirect - go.uber.org/multierr v1.10.0 // indirect + go.uber.org/multierr v1.11.0 // indirect golang.org/x/arch v0.25.0 // indirect - golang.org/x/net v0.52.0 // 
indirect - golang.org/x/sys v0.42.0 // indirect + golang.org/x/net v0.53.0 // indirect + golang.org/x/sys v0.43.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 // indirect google.golang.org/grpc v1.80.0 // indirect google.golang.org/protobuf v1.36.11 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) + +require ( + github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 // indirect + github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 // indirect +) + +replace galaxy/redisconn => ../pkg/redisconn diff --git a/authsession/go.sum b/authsession/go.sum index faf28ad..b69b933 100644 --- a/authsession/go.sum +++ b/authsession/go.sum @@ -5,9 +5,11 @@ github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdb github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/bytedance/gopkg v0.1.4 h1:oZnQwnX82KAIWb7033bEwtxvTqXcYMxDBaQxo5JJHWM= +github.com/bytedance/gopkg v0.1.4/go.mod h1:v1zWfPm21Fb+OsyXN2VAHdL6TBb2L88anLQgdyje6R4= github.com/bytedance/sonic v1.15.0 h1:/PXeWFaR5ElNcVE84U0dOHjiMHQOwNIx3K4ymzh/uSE= github.com/bytedance/sonic v1.15.0/go.mod h1:tFkWrPz0/CUCLEF4ri4UkHekCIcdnkqXw9VduqpJh0k= github.com/bytedance/sonic/loader v0.5.1 h1:Ygpfa9zwRCCKSlrp5bBP/b/Xzc3VxsAW+5NIYXrOOpI= +github.com/bytedance/sonic/loader v0.5.1/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo= github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= @@ -17,12 +19,15 @@ github.com/cloudwego/base64x v0.1.6/go.mod 
h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gE github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/gabriel-vasile/mimetype v1.4.13 h1:46nXokslUBsAJE/wMsp5gtO500a4F3Nkz9Ufpk2AcUM= github.com/gabriel-vasile/mimetype v1.4.13/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= github.com/getkin/kin-openapi v0.135.0 h1:751SjYfbiwqukYuVjwYEIKNfrSwS5YpA7DZnKSwQgtg= +github.com/getkin/kin-openapi v0.135.0/go.mod h1:6dd5FJl6RdX4usBtFBaQhk9q62Yb2J0Mk5IhUO/QqFI= github.com/gin-contrib/sse v1.1.1 h1:uGYpNwTacv5R68bSGMapo62iLTRa9l5zxGCps4hK6ko= +github.com/gin-contrib/sse v1.1.1/go.mod h1:QXzuVkA0YO7o/gun03UI1Q+FTI8ZV/n5t03kIQAI89s= github.com/gin-gonic/gin v1.12.0 h1:b3YAbrZtnf8N//yjKeU2+MQsh2mY5htkZidOM7O0wG8= github.com/gin-gonic/gin v1.12.0/go.mod h1:VxccKfsSllpKshkBWgVgRniFFAzFb9csfngsqANjnLc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -41,9 +46,11 @@ github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/Nu github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.30.2 h1:JiFIMtSSHb2/XBUbWM4i/MpeQm9ZK2xqPNk8vgvu5JQ= +github.com/go-playground/validator/v10 v10.30.2/go.mod h1:mAf2pIOVXjTEBrwUMGKkCWKKPs9NheYGabeB04txQSc= 
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/goccy/go-json v0.10.6 h1:p8HrPJzOakx/mn/bQtjgNjdTcN+/S6FcG2CTtQOrHVU= +github.com/goccy/go-json v0.10.6/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/goccy/go-yaml v1.19.2 h1:PmFC1S6h8ljIz6gMRBopkjP1TVT7xuwrButHID66PoM= github.com/goccy/go-yaml v1.19.2/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= @@ -69,8 +76,8 @@ github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.21 h1:xYae+lCNBP7QuW4PUnNG61ffM4hVIfm+zUzDuSzYLGs= +github.com/mattn/go-isatty v0.0.21/go.mod h1:ZXfXG4SQHsB/w3ZeOYbR0PrPwLy+n6xiMrJlRFqopa4= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -79,16 +86,24 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= 
github.com/oasdiff/yaml v0.0.9 h1:zQOvd2UKoozsSsAknnWoDJlSK4lC0mpmjfDsfqNwX48= +github.com/oasdiff/yaml v0.0.9/go.mod h1:8lvhgJG4xiKPj3HN5lDow4jZHPlx1i7dIwzkdAo6oAM= github.com/oasdiff/yaml3 v0.0.9 h1:rWPrKccrdUm8J0F3sGuU+fuh9+1K/RdJlWF7O/9yw2g= +github.com/oasdiff/yaml3 v0.0.9/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o= github.com/pelletier/go-toml/v2 v2.3.0 h1:k59bC/lIZREW0/iVaQR8nDHxVq8OVlIzYCOJf421CaM= +github.com/pelletier/go-toml/v2 v2.3.0/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8= github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII= github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw= github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU= +github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 h1:QY4nmPHLFAJjtT5O4OMUEOxP8WVaRNOFpcbmxT2NLZU= +github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0/go.mod h1:WH8cY/0fT41Bsf341qzo8v4nx0GCE8FykAA23IVbVmo= +github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 h1:2dKdoEYBJ0CZCLPiCdvvc7luz3DPwY6hKdzjL6m1eHE= +github.com/redis/go-redis/extra/redisotel/v9 v9.18.0/go.mod h1:WzkrVG9ro9BwCQD0eJOWn6AGL4Z1CleGflM45w1hu10= github.com/redis/go-redis/v9 v9.18.0 h1:pMkxYPkEbMPwRdenAzUNyFNrDgHx9U+DrBabWNfSRQs= github.com/redis/go-redis/v9 v9.18.0/go.mod h1:k3ufPphLU5YXwNTUcCRXGxUoF1fqxnhFQmscfkCoDA0= github.com/rogpeppe/go-internal 
v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= @@ -119,19 +134,33 @@ go.mongodb.org/mongo-driver/v2 v2.5.0/go.mod h1:yOI9kBsufol30iFsl1slpdq1I0eHPzyb go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.68.0 h1:5FXSL2s6afUC1bzNzl1iedZZ8yqR7GOhbCoEXtyeK6Q= +go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.68.0/go.mod h1:MdHW7tLtkeGJnR4TyOrnd5D0zUGZQB1l84uHCe8hRpE= go.opentelemetry.io/contrib/propagators/b3 v1.43.0 h1:CETqV3QLLPTy5yNrqyMr41VnAOOD4lsRved7n4QG00A= +go.opentelemetry.io/contrib/propagators/b3 v1.43.0/go.mod h1:Q4mCiCdziYzpNR0g+6UqVotAlCDZdzz6L8jwY4knOrw= go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= +go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.43.0 h1:8UQVDcZxOJLtX6gxtDt3vY2WTgvZqMQRzjsqiIHQdkc= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.43.0/go.mod h1:2lmweYCiHYpEjQ/lSJBYhj9jP1zvCvQW4BqL9dnT7FQ= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0 h1:w1K+pCJoPpQifuVpsKamUdn9U0zM3xUziVOqsGksUrY= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.43.0/go.mod h1:HBy4BjzgVE8139ieRI75oXm3EcDN+6GhD88JT1Kjvxg= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 h1:88Y4s2C8oTui1LGM6bTWkw0ICGcOLCAI5l6zsD1j20k= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0/go.mod h1:Vl1/iaggsuRlrHf/hfPJPvVag77kKyvrLeD10kpMl+A= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.43.0 h1:RAE+JPfvEmvy+0LzyUA25/SGawPwIUbZ6u0Wug54sLc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.43.0/go.mod h1:AGmbycVGEsRx9mXMZ75CsOyhSP6MFIcj/6dnG+vhVjk= 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 h1:3iZJKlCZufyRzPzlQhUIWVmfltrXuGyfjREgGP3UUjc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0/go.mod h1:/G+nUPfhq2e+qiXMGxMwumDrP5jtzU+mWN7/sjT2rak= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.43.0 h1:TC+BewnDpeiAmcscXbGMfxkO+mwYUwE/VySwvw88PfA= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.43.0/go.mod h1:J/ZyF4vfPwsSr9xJSPyQ4LqtcTPULFR64KwTikGLe+A= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0 h1:mS47AX77OtFfKG4vtp+84kuGSFZHTyxtXIN269vChY0= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.43.0/go.mod h1:PJnsC41lAGncJlPUniSwM81gc80GkgWJWr3cu2nKEtU= go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM= +go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY= go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg= +go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg= go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw= +go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A= go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A= +go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g= go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -140,25 +169,26 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= go.uber.org/mock v0.6.0/go.mod 
h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= -go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= -go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/arch v0.25.0 h1:qnk6Ksugpi5Bz32947rkUgDt9/s5qvqDPl/gBKdMJLE= -golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= -golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= -golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= -golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= -golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/arch v0.25.0/go.mod h1:0X+GdSIP+kL5wPmpK7sdkEVTt2XoYP0cSjQSbZBwOi8= +golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= +golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA= +golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= +golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= gonum.org/v1/gonum v0.17.0/go.mod 
h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA= google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 h1:m8qni9SQFH0tJc1X0vmnpw/0t+AImlSvp30sEupozUg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 h1:XF8+t6QQiS0o9ArVan/HW8Q7cycNPGsJf6GA2nXxYAg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM= google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= diff --git a/authsession/internal/adapters/redis/challengestore/store.go b/authsession/internal/adapters/redis/challengestore/store.go index a195408..4b68795 100644 --- a/authsession/internal/adapters/redis/challengestore/store.go +++ b/authsession/internal/adapters/redis/challengestore/store.go @@ -5,7 +5,6 @@ package challengestore import ( "bytes" "context" - "crypto/tls" "encoding/base64" "encoding/json" "errors" @@ -26,23 +25,10 @@ const expirationGracePeriod = 5 * time.Minute const defaultPreferredLanguage = "en" -// Config configures one Redis-backed challenge store instance. +// Config configures one Redis-backed challenge store instance. The store does +// not own its Redis client; the runtime supplies a shared client constructed +// via `pkg/redisconn`. type Config struct { - // Addr is the Redis network address in host:port form. - Addr string - - // Username is the optional Redis ACL username. 
- Username string - - // Password is the optional Redis ACL password. - Password string - - // DB is the Redis logical database index. - DB int - - // TLSEnabled enables TLS with a conservative minimum protocol version. - TLSEnabled bool - // KeyPrefix is the namespace prefix applied to every challenge key. KeyPrefix string @@ -74,13 +60,11 @@ type redisRecord struct { ConfirmedAt *string `json:"confirmed_at,omitempty"` } -// New constructs a Redis-backed challenge store from cfg. -func New(cfg Config) (*Store, error) { - if strings.TrimSpace(cfg.Addr) == "" { - return nil, errors.New("new redis challenge store: redis addr must not be empty") - } - if cfg.DB < 0 { - return nil, errors.New("new redis challenge store: redis db must not be negative") +// New constructs a Redis-backed challenge store that uses client and applies +// the namespace and timeout settings from cfg. +func New(client *redis.Client, cfg Config) (*Store, error) { + if client == nil { + return nil, errors.New("new redis challenge store: nil redis client") } if strings.TrimSpace(cfg.KeyPrefix) == "" { return nil, errors.New("new redis challenge store: redis key prefix must not be empty") @@ -89,50 +73,13 @@ func New(cfg Config) (*Store, error) { return nil, errors.New("new redis challenge store: operation timeout must be positive") } - options := &redis.Options{ - Addr: cfg.Addr, - Username: cfg.Username, - Password: cfg.Password, - DB: cfg.DB, - Protocol: 2, - DisableIdentity: true, - } - if cfg.TLSEnabled { - options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } - return &Store{ - client: redis.NewClient(options), + client: client, keyPrefix: cfg.KeyPrefix, operationTimeout: cfg.OperationTimeout, }, nil } -// Close releases the underlying Redis client resources. 
-func (s *Store) Close() error { - if s == nil || s.client == nil { - return nil - } - - return s.client.Close() -} - -// Ping verifies that the configured Redis backend is reachable within the -// adapter operation timeout budget. -func (s *Store) Ping(ctx context.Context) error { - operationCtx, cancel, err := s.operationContext(ctx, "ping redis challenge store") - if err != nil { - return err - } - defer cancel() - - if err := s.client.Ping(operationCtx).Err(); err != nil { - return fmt.Errorf("ping redis challenge store: %w", err) - } - - return nil -} - // Get returns the stored challenge for challengeID. func (s *Store) Get(ctx context.Context, challengeID common.ChallengeID) (challenge.Challenge, error) { if err := challengeID.Validate(); err != nil { diff --git a/authsession/internal/adapters/redis/challengestore/store_test.go b/authsession/internal/adapters/redis/challengestore/store_test.go index b1729ba..806aa8d 100644 --- a/authsession/internal/adapters/redis/challengestore/store_test.go +++ b/authsession/internal/adapters/redis/challengestore/store_test.go @@ -13,10 +13,26 @@ import ( "galaxy/authsession/internal/ports" "github.com/alicebob/miniredis/v2" + "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func newRedisClient(t *testing.T, server *miniredis.Miniredis) *redis.Client { + t.Helper() + + client := redis.NewClient(&redis.Options{ + Addr: server.Addr(), + Protocol: 2, + DisableIdentity: true, + }) + t.Cleanup(func() { + assert.NoError(t, client.Close()) + }) + + return client +} + func TestStoreContract(t *testing.T) { t.Parallel() @@ -32,64 +48,44 @@ func TestNew(t *testing.T) { t.Parallel() server := miniredis.RunT(t) + client := newRedisClient(t, server) tests := []struct { name string + client *redis.Client cfg Config wantErr string }{ { - name: "valid config", - cfg: Config{ - Addr: server.Addr(), - DB: 2, - KeyPrefix: "authsession:challenge:", - OperationTimeout: 250 * 
time.Millisecond, - }, + name: "valid config", + client: client, + cfg: Config{KeyPrefix: "authsession:challenge:", OperationTimeout: 250 * time.Millisecond}, }, { - name: "empty addr", - cfg: Config{ - KeyPrefix: "authsession:challenge:", - OperationTimeout: 250 * time.Millisecond, - }, - wantErr: "redis addr must not be empty", + name: "nil client", + client: nil, + cfg: Config{KeyPrefix: "authsession:challenge:", OperationTimeout: 250 * time.Millisecond}, + wantErr: "nil redis client", }, { - name: "negative db", - cfg: Config{ - Addr: server.Addr(), - DB: -1, - KeyPrefix: "authsession:challenge:", - OperationTimeout: 250 * time.Millisecond, - }, - wantErr: "redis db must not be negative", - }, - { - name: "empty key prefix", - cfg: Config{ - Addr: server.Addr(), - OperationTimeout: 250 * time.Millisecond, - }, + name: "empty key prefix", + client: client, + cfg: Config{OperationTimeout: 250 * time.Millisecond}, wantErr: "redis key prefix must not be empty", }, { - name: "non-positive operation timeout", - cfg: Config{ - Addr: server.Addr(), - KeyPrefix: "authsession:challenge:", - }, + name: "non-positive operation timeout", + client: client, + cfg: Config{KeyPrefix: "authsession:challenge:"}, wantErr: "operation timeout must be positive", }, } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() - store, err := New(tt.cfg) + store, err := New(tt.client, tt.cfg) if tt.wantErr != "" { require.Error(t, err) assert.ErrorContains(t, err, tt.wantErr) @@ -97,22 +93,11 @@ func TestNew(t *testing.T) { } require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, store.Close()) - }) + require.NotNil(t, store) }) } } -func TestStorePing(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - store := newTestStore(t, server, Config{}) - - require.NoError(t, store.Ping(context.Background())) -} - func TestStoreCreateAndGet(t *testing.T) { t.Parallel() @@ -429,9 +414,6 @@ func TestStoreCompareAndSwap(t *testing.T) { func 
newTestStore(t *testing.T, server *miniredis.Miniredis, cfg Config) *Store { t.Helper() - if cfg.Addr == "" { - cfg.Addr = server.Addr() - } if cfg.KeyPrefix == "" { cfg.KeyPrefix = "authsession:challenge:" } @@ -439,13 +421,9 @@ func newTestStore(t *testing.T, server *miniredis.Miniredis, cfg Config) *Store cfg.OperationTimeout = 250 * time.Millisecond } - store, err := New(cfg) + store, err := New(newRedisClient(t, server), cfg) require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, store.Close()) - }) - return store } @@ -540,17 +518,6 @@ func mustMarshalJSON(t *testing.T, value any) string { return string(payload) } -func TestStorePingNilContext(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - store := newTestStore(t, server, Config{}) - - err := store.Ping(nil) - require.Error(t, err) - assert.ErrorContains(t, err, "nil context") -} - func TestStoreGetNilContext(t *testing.T) { t.Parallel() diff --git a/authsession/internal/adapters/redis/client.go b/authsession/internal/adapters/redis/client.go new file mode 100644 index 0000000..06a0c6f --- /dev/null +++ b/authsession/internal/adapters/redis/client.go @@ -0,0 +1,56 @@ +// Package redisadapter provides the Redis client helpers used by Auth/Session +// Service runtime wiring. The helpers wrap `pkg/redisconn` so the runtime +// keeps the same construction surface as the other Galaxy services. +package redisadapter + +import ( + "context" + "fmt" + + "galaxy/authsession/internal/config" + "galaxy/authsession/internal/telemetry" + "galaxy/redisconn" + + "github.com/redis/go-redis/v9" +) + +// NewClient constructs one Redis client from cfg using the shared +// `pkg/redisconn` helper, which enforces the master/replica/password env-var +// shape. +func NewClient(cfg config.RedisConfig) *redis.Client { + return redisconn.NewMasterClient(cfg.Conn) +} + +// InstrumentClient attaches Redis tracing and metrics exporters to client +// when telemetryRuntime is available. 
+func InstrumentClient(client *redis.Client, telemetryRuntime *telemetry.Runtime) error { + if client == nil { + return fmt.Errorf("instrument redis client: nil client") + } + if telemetryRuntime == nil { + return nil + } + + return redisconn.Instrument( + client, + redisconn.WithTracerProvider(telemetryRuntime.TracerProvider()), + redisconn.WithMeterProvider(telemetryRuntime.MeterProvider()), + ) +} + +// Ping performs the startup Redis connectivity check bounded by +// cfg.Conn.OperationTimeout. +func Ping(ctx context.Context, cfg config.RedisConfig, client *redis.Client) error { + if client == nil { + return fmt.Errorf("ping redis: nil client") + } + + pingCtx, cancel := context.WithTimeout(ctx, cfg.Conn.OperationTimeout) + defer cancel() + + if err := client.Ping(pingCtx).Err(); err != nil { + return fmt.Errorf("ping redis: %w", err) + } + + return nil +} diff --git a/authsession/internal/adapters/redis/configprovider/store.go b/authsession/internal/adapters/redis/configprovider/store.go index 7f0c096..7e66915 100644 --- a/authsession/internal/adapters/redis/configprovider/store.go +++ b/authsession/internal/adapters/redis/configprovider/store.go @@ -4,7 +4,6 @@ package configprovider import ( "context" - "crypto/tls" "errors" "fmt" "strconv" @@ -16,23 +15,10 @@ import ( "github.com/redis/go-redis/v9" ) -// Config configures one Redis-backed config provider instance. +// Config configures one Redis-backed config provider instance. The store does +// not own its Redis client; the runtime supplies a shared client constructed +// via `pkg/redisconn`. type Config struct { - // Addr is the Redis network address in host:port form. - Addr string - - // Username is the optional Redis ACL username. - Username string - - // Password is the optional Redis ACL password. - Password string - - // DB is the Redis logical database index. - DB int - - // TLSEnabled enables TLS with a conservative minimum protocol version. 
- TLSEnabled bool - // SessionLimitKey identifies the single Redis string key that stores the // active-session-limit configuration value. SessionLimitKey string @@ -48,63 +34,25 @@ type Store struct { operationTimeout time.Duration } -// New constructs a Redis-backed config provider from cfg. -func New(cfg Config) (*Store, error) { +// New constructs a Redis-backed config provider that uses client and applies +// the namespace and timeout settings from cfg. +func New(client *redis.Client, cfg Config) (*Store, error) { switch { - case strings.TrimSpace(cfg.Addr) == "": - return nil, errors.New("new redis config provider: redis addr must not be empty") - case cfg.DB < 0: - return nil, errors.New("new redis config provider: redis db must not be negative") + case client == nil: + return nil, errors.New("new redis config provider: nil redis client") case strings.TrimSpace(cfg.SessionLimitKey) == "": return nil, errors.New("new redis config provider: session limit key must not be empty") case cfg.OperationTimeout <= 0: return nil, errors.New("new redis config provider: operation timeout must be positive") } - options := &redis.Options{ - Addr: cfg.Addr, - Username: cfg.Username, - Password: cfg.Password, - DB: cfg.DB, - Protocol: 2, - DisableIdentity: true, - } - if cfg.TLSEnabled { - options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } - return &Store{ - client: redis.NewClient(options), + client: client, sessionLimitKey: cfg.SessionLimitKey, operationTimeout: cfg.OperationTimeout, }, nil } -// Close releases the underlying Redis client resources. -func (s *Store) Close() error { - if s == nil || s.client == nil { - return nil - } - - return s.client.Close() -} - -// Ping verifies that the configured Redis backend is reachable within the -// adapter operation timeout budget. 
-func (s *Store) Ping(ctx context.Context) error { - operationCtx, cancel, err := s.operationContext(ctx, "ping redis config provider") - if err != nil { - return err - } - defer cancel() - - if err := s.client.Ping(operationCtx).Err(); err != nil { - return fmt.Errorf("ping redis config provider: %w", err) - } - - return nil -} - // LoadSessionLimit returns the current active-session-limit configuration. // Missing or invalid Redis values are treated as “limit absent” by policy. func (s *Store) LoadSessionLimit(ctx context.Context) (ports.SessionLimitConfig, error) { diff --git a/authsession/internal/adapters/redis/configprovider/store_test.go b/authsession/internal/adapters/redis/configprovider/store_test.go index fe88ac6..7037db4 100644 --- a/authsession/internal/adapters/redis/configprovider/store_test.go +++ b/authsession/internal/adapters/redis/configprovider/store_test.go @@ -10,10 +10,26 @@ import ( "galaxy/authsession/internal/ports" "github.com/alicebob/miniredis/v2" + "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func newRedisClient(t *testing.T, server *miniredis.Miniredis) *redis.Client { + t.Helper() + + client := redis.NewClient(&redis.Options{ + Addr: server.Addr(), + Protocol: 2, + DisableIdentity: true, + }) + t.Cleanup(func() { + assert.NoError(t, client.Close()) + }) + + return client +} + func TestStoreContract(t *testing.T) { t.Parallel() @@ -41,64 +57,40 @@ func TestNew(t *testing.T) { t.Parallel() server := miniredis.RunT(t) + client := newRedisClient(t, server) + + validCfg := Config{ + SessionLimitKey: "authsession:config:active-session-limit", + OperationTimeout: 250 * time.Millisecond, + } tests := []struct { name string + client *redis.Client cfg Config wantErr string }{ + {name: "valid config", client: client, cfg: validCfg}, + {name: "nil client", client: nil, cfg: validCfg, wantErr: "nil redis client"}, { - name: "valid config", - cfg: Config{ - Addr: server.Addr(), - DB: 
2, - SessionLimitKey: "authsession:config:active-session-limit", - OperationTimeout: 250 * time.Millisecond, - }, - }, - { - name: "empty addr", - cfg: Config{ - SessionLimitKey: "authsession:config:active-session-limit", - OperationTimeout: 250 * time.Millisecond, - }, - wantErr: "redis addr must not be empty", - }, - { - name: "negative db", - cfg: Config{ - Addr: server.Addr(), - DB: -1, - SessionLimitKey: "authsession:config:active-session-limit", - OperationTimeout: 250 * time.Millisecond, - }, - wantErr: "redis db must not be negative", - }, - { - name: "empty session limit key", - cfg: Config{ - Addr: server.Addr(), - OperationTimeout: 250 * time.Millisecond, - }, + name: "empty session limit key", + client: client, + cfg: Config{OperationTimeout: 250 * time.Millisecond}, wantErr: "session limit key must not be empty", }, { - name: "non positive timeout", - cfg: Config{ - Addr: server.Addr(), - SessionLimitKey: "authsession:config:active-session-limit", - }, + name: "non positive timeout", + client: client, + cfg: Config{SessionLimitKey: "authsession:config:active-session-limit"}, wantErr: "operation timeout must be positive", }, } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() - store, err := New(tt.cfg) + store, err := New(tt.client, tt.cfg) if tt.wantErr != "" { require.Error(t, err) assert.ErrorContains(t, err, tt.wantErr) @@ -106,22 +98,11 @@ func TestNew(t *testing.T) { } require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, store.Close()) - }) + require.NotNil(t, store) }) } } -func TestStorePing(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - store := newTestStore(t, server, Config{}) - - require.NoError(t, store.Ping(context.Background())) -} - func TestStoreLoadSessionLimit(t *testing.T) { t.Parallel() @@ -201,8 +182,6 @@ func TestStoreLoadSessionLimit(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -242,23 +221,9 @@ 
func TestStoreLoadSessionLimitNilContext(t *testing.T) { assert.ErrorContains(t, err, "nil context") } -func TestStorePingNilContext(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - store := newTestStore(t, server, Config{}) - - err := store.Ping(nil) - require.Error(t, err) - assert.ErrorContains(t, err, "nil context") -} - func newTestStore(t *testing.T, server *miniredis.Miniredis, cfg Config) *Store { t.Helper() - if cfg.Addr == "" { - cfg.Addr = server.Addr() - } if cfg.SessionLimitKey == "" { cfg.SessionLimitKey = "authsession:config:active-session-limit" } @@ -266,13 +231,9 @@ func newTestStore(t *testing.T, server *miniredis.Miniredis, cfg Config) *Store cfg.OperationTimeout = 250 * time.Millisecond } - store, err := New(cfg) + store, err := New(newRedisClient(t, server), cfg) require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, store.Close()) - }) - return store } diff --git a/authsession/internal/adapters/redis/projectionpublisher/publisher.go b/authsession/internal/adapters/redis/projectionpublisher/publisher.go index 7897de0..9f28888 100644 --- a/authsession/internal/adapters/redis/projectionpublisher/publisher.go +++ b/authsession/internal/adapters/redis/projectionpublisher/publisher.go @@ -5,7 +5,6 @@ package projectionpublisher import ( "context" - "crypto/tls" "encoding/json" "errors" "fmt" @@ -19,22 +18,9 @@ import ( ) // Config configures one Redis-backed gateway session projection publisher. +// The publisher does not own its Redis client; the runtime supplies a shared +// client constructed via `pkg/redisconn`. type Config struct { - // Addr is the Redis network address in host:port form. - Addr string - - // Username is the optional Redis ACL username. - Username string - - // Password is the optional Redis ACL password. - Password string - - // DB is the Redis logical database index. - DB int - - // TLSEnabled enables TLS with a conservative minimum protocol version. 
- TLSEnabled bool - // SessionCacheKeyPrefix is the namespace prefix applied to gateway session // cache keys. The raw device session identifier is appended directly. SessionCacheKeyPrefix string @@ -68,14 +54,12 @@ type cacheRecord struct { RevokedAtMS *int64 `json:"revoked_at_ms,omitempty"` } -// New constructs a Redis-backed gateway session projection publisher from -// cfg. -func New(cfg Config) (*Publisher, error) { +// New constructs a Redis-backed gateway session projection publisher that +// uses client and applies the namespace and timeout settings from cfg. +func New(client *redis.Client, cfg Config) (*Publisher, error) { switch { - case strings.TrimSpace(cfg.Addr) == "": - return nil, errors.New("new redis projection publisher: redis addr must not be empty") - case cfg.DB < 0: - return nil, errors.New("new redis projection publisher: redis db must not be negative") + case client == nil: + return nil, errors.New("new redis projection publisher: nil redis client") case strings.TrimSpace(cfg.SessionCacheKeyPrefix) == "": return nil, errors.New("new redis projection publisher: session cache key prefix must not be empty") case strings.TrimSpace(cfg.SessionEventsStream) == "": @@ -86,20 +70,8 @@ func New(cfg Config) (*Publisher, error) { return nil, errors.New("new redis projection publisher: operation timeout must be positive") } - options := &redis.Options{ - Addr: cfg.Addr, - Username: cfg.Username, - Password: cfg.Password, - DB: cfg.DB, - Protocol: 2, - DisableIdentity: true, - } - if cfg.TLSEnabled { - options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } - return &Publisher{ - client: redis.NewClient(options), + client: client, sessionCacheKeyPrefix: cfg.SessionCacheKeyPrefix, sessionEventsStream: cfg.SessionEventsStream, streamMaxLen: cfg.StreamMaxLen, @@ -107,31 +79,6 @@ func New(cfg Config) (*Publisher, error) { }, nil } -// Close releases the underlying Redis client resources. 
-func (p *Publisher) Close() error { - if p == nil || p.client == nil { - return nil - } - - return p.client.Close() -} - -// Ping verifies that the configured Redis backend is reachable within the -// adapter operation timeout budget. -func (p *Publisher) Ping(ctx context.Context) error { - operationCtx, cancel, err := p.operationContext(ctx, "ping redis projection publisher") - if err != nil { - return err - } - defer cancel() - - if err := p.client.Ping(operationCtx).Err(); err != nil { - return fmt.Errorf("ping redis projection publisher: %w", err) - } - - return nil -} - // PublishSession writes one gateway-compatible session snapshot into the // gateway cache namespace and appends the same snapshot to the gateway session // event stream within one Redis transaction. diff --git a/authsession/internal/adapters/redis/projectionpublisher/publisher_test.go b/authsession/internal/adapters/redis/projectionpublisher/publisher_test.go index 5b3db20..e36f60c 100644 --- a/authsession/internal/adapters/redis/projectionpublisher/publisher_test.go +++ b/authsession/internal/adapters/redis/projectionpublisher/publisher_test.go @@ -15,57 +15,51 @@ import ( "galaxy/authsession/internal/domain/gatewayprojection" "github.com/alicebob/miniredis/v2" + "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func newRedisClient(t *testing.T, server *miniredis.Miniredis) *redis.Client { + t.Helper() + + client := redis.NewClient(&redis.Options{ + Addr: server.Addr(), + Protocol: 2, + DisableIdentity: true, + }) + t.Cleanup(func() { + assert.NoError(t, client.Close()) + }) + + return client +} + func TestNew(t *testing.T) { t.Parallel() server := miniredis.RunT(t) + client := newRedisClient(t, server) + + validCfg := Config{ + SessionCacheKeyPrefix: "gateway:session:", + SessionEventsStream: "gateway:session_events", + StreamMaxLen: 1024, + OperationTimeout: 250 * time.Millisecond, + } tests := []struct { name string + client 
*redis.Client cfg Config wantErr string }{ + {name: "valid config", client: client, cfg: validCfg}, + {name: "nil client", client: nil, cfg: validCfg, wantErr: "nil redis client"}, { - name: "valid config", + name: "empty session cache key prefix", + client: client, cfg: Config{ - Addr: server.Addr(), - DB: 3, - SessionCacheKeyPrefix: "gateway:session:", - SessionEventsStream: "gateway:session_events", - StreamMaxLen: 1024, - OperationTimeout: 250 * time.Millisecond, - }, - }, - { - name: "empty addr", - cfg: Config{ - SessionCacheKeyPrefix: "gateway:session:", - SessionEventsStream: "gateway:session_events", - StreamMaxLen: 1024, - OperationTimeout: 250 * time.Millisecond, - }, - wantErr: "redis addr must not be empty", - }, - { - name: "negative db", - cfg: Config{ - Addr: server.Addr(), - DB: -1, - SessionCacheKeyPrefix: "gateway:session:", - SessionEventsStream: "gateway:session_events", - StreamMaxLen: 1024, - OperationTimeout: 250 * time.Millisecond, - }, - wantErr: "redis db must not be negative", - }, - { - name: "empty session cache key prefix", - cfg: Config{ - Addr: server.Addr(), SessionEventsStream: "gateway:session_events", StreamMaxLen: 1024, OperationTimeout: 250 * time.Millisecond, @@ -73,9 +67,9 @@ func TestNew(t *testing.T) { wantErr: "session cache key prefix must not be empty", }, { - name: "empty session events stream", + name: "empty session events stream", + client: client, cfg: Config{ - Addr: server.Addr(), SessionCacheKeyPrefix: "gateway:session:", StreamMaxLen: 1024, OperationTimeout: 250 * time.Millisecond, @@ -83,9 +77,9 @@ func TestNew(t *testing.T) { wantErr: "session events stream must not be empty", }, { - name: "non positive stream max len", + name: "non positive stream max len", + client: client, cfg: Config{ - Addr: server.Addr(), SessionCacheKeyPrefix: "gateway:session:", SessionEventsStream: "gateway:session_events", OperationTimeout: 250 * time.Millisecond, @@ -93,9 +87,9 @@ func TestNew(t *testing.T) { wantErr: "stream max 
len must be positive", }, { - name: "non positive timeout", + name: "non positive timeout", + client: client, cfg: Config{ - Addr: server.Addr(), SessionCacheKeyPrefix: "gateway:session:", SessionEventsStream: "gateway:session_events", StreamMaxLen: 1024, @@ -105,12 +99,10 @@ func TestNew(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() - publisher, err := New(tt.cfg) + publisher, err := New(tt.client, tt.cfg) if tt.wantErr != "" { require.Error(t, err) assert.ErrorContains(t, err, tt.wantErr) @@ -118,22 +110,11 @@ func TestNew(t *testing.T) { } require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, publisher.Close()) - }) + require.NotNil(t, publisher) }) } } -func TestPublisherPing(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - publisher := newTestPublisher(t, server, Config{}) - - require.NoError(t, publisher.Ping(context.Background())) -} - func TestPublisherPublishSessionActive(t *testing.T) { t.Parallel() @@ -331,23 +312,9 @@ func TestPublisherPublishSessionBackendFailure(t *testing.T) { assert.ErrorContains(t, err, "publish session projection") } -func TestPublisherPingNilContext(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - publisher := newTestPublisher(t, server, Config{}) - - err := publisher.Ping(nil) - require.Error(t, err) - assert.ErrorContains(t, err, "nil context") -} - func newTestPublisher(t *testing.T, server *miniredis.Miniredis, cfg Config) *Publisher { t.Helper() - if cfg.Addr == "" { - cfg.Addr = server.Addr() - } if cfg.SessionCacheKeyPrefix == "" { cfg.SessionCacheKeyPrefix = "gateway:session:" } @@ -361,11 +328,8 @@ func newTestPublisher(t *testing.T, server *miniredis.Miniredis, cfg Config) *Pu cfg.OperationTimeout = 250 * time.Millisecond } - publisher, err := New(cfg) + publisher, err := New(newRedisClient(t, server), cfg) require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, publisher.Close()) - }) return publisher } diff 
--git a/authsession/internal/adapters/redis/sendemailcodeabuse/protector.go b/authsession/internal/adapters/redis/sendemailcodeabuse/protector.go index ff52de0..7242b7b 100644 --- a/authsession/internal/adapters/redis/sendemailcodeabuse/protector.go +++ b/authsession/internal/adapters/redis/sendemailcodeabuse/protector.go @@ -4,7 +4,6 @@ package sendemailcodeabuse import ( "context" - "crypto/tls" "encoding/base64" "errors" "fmt" @@ -18,23 +17,10 @@ import ( "github.com/redis/go-redis/v9" ) -// Config configures one Redis-backed send-email-code abuse protector. +// Config configures one Redis-backed send-email-code abuse protector. The +// protector does not own its Redis client; the runtime supplies a shared +// client constructed via `pkg/redisconn`. type Config struct { - // Addr is the Redis network address in host:port form. - Addr string - - // Username is the optional Redis ACL username. - Username string - - // Password is the optional Redis ACL password. - Password string - - // DB is the Redis logical database index. - DB int - - // TLSEnabled enables TLS with a conservative minimum protocol version. - TLSEnabled bool - // KeyPrefix is the namespace prefix applied to every resend-throttle key. KeyPrefix string @@ -50,63 +36,25 @@ type Protector struct { operationTimeout time.Duration } -// New constructs a Redis-backed resend-throttle protector from cfg. -func New(cfg Config) (*Protector, error) { +// New constructs a Redis-backed resend-throttle protector that uses client +// and applies the namespace and timeout settings from cfg. 
+func New(client *redis.Client, cfg Config) (*Protector, error) { switch { - case strings.TrimSpace(cfg.Addr) == "": - return nil, errors.New("new redis send email code abuse protector: redis addr must not be empty") - case cfg.DB < 0: - return nil, errors.New("new redis send email code abuse protector: redis db must not be negative") + case client == nil: + return nil, errors.New("new redis send email code abuse protector: nil redis client") case strings.TrimSpace(cfg.KeyPrefix) == "": return nil, errors.New("new redis send email code abuse protector: redis key prefix must not be empty") case cfg.OperationTimeout <= 0: return nil, errors.New("new redis send email code abuse protector: operation timeout must be positive") } - options := &redis.Options{ - Addr: cfg.Addr, - Username: cfg.Username, - Password: cfg.Password, - DB: cfg.DB, - Protocol: 2, - DisableIdentity: true, - } - if cfg.TLSEnabled { - options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } - return &Protector{ - client: redis.NewClient(options), + client: client, keyPrefix: cfg.KeyPrefix, operationTimeout: cfg.OperationTimeout, }, nil } -// Close releases the underlying Redis client resources. -func (p *Protector) Close() error { - if p == nil || p.client == nil { - return nil - } - - return p.client.Close() -} - -// Ping verifies that the configured Redis backend is reachable within the -// adapter operation timeout budget. -func (p *Protector) Ping(ctx context.Context) error { - operationCtx, cancel, err := p.operationContext(ctx, "ping redis send email code abuse protector") - if err != nil { - return err - } - defer cancel() - - if err := p.client.Ping(operationCtx).Err(); err != nil { - return fmt.Errorf("ping redis send email code abuse protector: %w", err) - } - - return nil -} - // CheckAndReserve applies the fixed resend cooldown using one TTL key per // normalized e-mail address. 
func (p *Protector) CheckAndReserve(ctx context.Context, input ports.SendEmailCodeAbuseInput) (ports.SendEmailCodeAbuseResult, error) { diff --git a/authsession/internal/adapters/redis/sendemailcodeabuse/protector_test.go b/authsession/internal/adapters/redis/sendemailcodeabuse/protector_test.go index c791dca..89bcc81 100644 --- a/authsession/internal/adapters/redis/sendemailcodeabuse/protector_test.go +++ b/authsession/internal/adapters/redis/sendemailcodeabuse/protector_test.go @@ -10,72 +10,64 @@ import ( "galaxy/authsession/internal/ports" "github.com/alicebob/miniredis/v2" + "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func newRedisClient(t *testing.T, server *miniredis.Miniredis) *redis.Client { + t.Helper() + + client := redis.NewClient(&redis.Options{ + Addr: server.Addr(), + Protocol: 2, + DisableIdentity: true, + }) + t.Cleanup(func() { + assert.NoError(t, client.Close()) + }) + + return client +} + func TestNew(t *testing.T) { t.Parallel() server := miniredis.RunT(t) + client := newRedisClient(t, server) + + validCfg := Config{ + KeyPrefix: "authsession:send-email-code-throttle:", + OperationTimeout: 250 * time.Millisecond, + } tests := []struct { name string + client *redis.Client cfg Config wantErr string }{ + {name: "valid config", client: client, cfg: validCfg}, + {name: "nil client", client: nil, cfg: validCfg, wantErr: "nil redis client"}, { - name: "valid config", - cfg: Config{ - Addr: server.Addr(), - DB: 1, - KeyPrefix: "authsession:send-email-code-throttle:", - OperationTimeout: 250 * time.Millisecond, - }, - }, - { - name: "empty addr", - cfg: Config{ - KeyPrefix: "authsession:send-email-code-throttle:", - OperationTimeout: 250 * time.Millisecond, - }, - wantErr: "redis addr must not be empty", - }, - { - name: "negative db", - cfg: Config{ - Addr: server.Addr(), - DB: -1, - KeyPrefix: "authsession:send-email-code-throttle:", - OperationTimeout: 250 * time.Millisecond, - }, - 
wantErr: "redis db must not be negative", - }, - { - name: "empty key prefix", - cfg: Config{ - Addr: server.Addr(), - OperationTimeout: 250 * time.Millisecond, - }, + name: "empty key prefix", + client: client, + cfg: Config{OperationTimeout: 250 * time.Millisecond}, wantErr: "redis key prefix must not be empty", }, { - name: "non-positive timeout", - cfg: Config{ - Addr: server.Addr(), - KeyPrefix: "authsession:send-email-code-throttle:", - }, + name: "non-positive timeout", + client: client, + cfg: Config{KeyPrefix: "authsession:send-email-code-throttle:"}, wantErr: "operation timeout must be positive", }, } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() - protector, err := New(tt.cfg) + protector, err := New(tt.client, tt.cfg) if tt.wantErr != "" { require.Error(t, err) assert.ErrorContains(t, err, tt.wantErr) @@ -83,22 +75,11 @@ func TestNew(t *testing.T) { } require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, protector.Close()) - }) + require.NotNil(t, protector) }) } } -func TestProtectorPing(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - protector := newTestProtector(t, server, Config{}) - - require.NoError(t, protector.Ping(context.Background())) -} - func TestProtectorCheckAndReserve(t *testing.T) { t.Parallel() @@ -156,9 +137,6 @@ func TestProtectorNilContext(t *testing.T) { func newTestProtector(t *testing.T, server *miniredis.Miniredis, cfg Config) *Protector { t.Helper() - if cfg.Addr == "" { - cfg.Addr = server.Addr() - } if cfg.KeyPrefix == "" { cfg.KeyPrefix = "authsession:send-email-code-throttle:" } @@ -166,11 +144,8 @@ func newTestProtector(t *testing.T, server *miniredis.Miniredis, cfg Config) *Pr cfg.OperationTimeout = 250 * time.Millisecond } - protector, err := New(cfg) + protector, err := New(newRedisClient(t, server), cfg) require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, protector.Close()) - }) return protector } diff --git 
a/authsession/internal/adapters/redis/sessionstore/store.go b/authsession/internal/adapters/redis/sessionstore/store.go index 7827e42..61e0c66 100644 --- a/authsession/internal/adapters/redis/sessionstore/store.go +++ b/authsession/internal/adapters/redis/sessionstore/store.go @@ -5,7 +5,6 @@ package sessionstore import ( "bytes" "context" - "crypto/tls" "encoding/base64" "encoding/json" "errors" @@ -24,23 +23,10 @@ import ( const mutationRetryLimit = 3 -// Config configures one Redis-backed session store instance. +// Config configures one Redis-backed session store instance. The store does +// not own its Redis client; the runtime supplies a shared client constructed +// via `pkg/redisconn`. type Config struct { - // Addr is the Redis network address in host:port form. - Addr string - - // Username is the optional Redis ACL username. - Username string - - // Password is the optional Redis ACL password. - Password string - - // DB is the Redis logical database index. - DB int - - // TLSEnabled enables TLS with a conservative minimum protocol version. - TLSEnabled bool - // SessionKeyPrefix is the namespace prefix applied to primary session keys. SessionKeyPrefix string @@ -78,13 +64,12 @@ type redisRecord struct { RevokeActorID string `json:"revoke_actor_id,omitempty"` } -// New constructs a Redis-backed session store from cfg. -func New(cfg Config) (*Store, error) { +// New constructs a Redis-backed session store that uses client and applies +// the namespace and timeout settings from cfg. 
+func New(client *redis.Client, cfg Config) (*Store, error) { switch { - case strings.TrimSpace(cfg.Addr) == "": - return nil, errors.New("new redis session store: redis addr must not be empty") - case cfg.DB < 0: - return nil, errors.New("new redis session store: redis db must not be negative") + case client == nil: + return nil, errors.New("new redis session store: nil redis client") case strings.TrimSpace(cfg.SessionKeyPrefix) == "": return nil, errors.New("new redis session store: session key prefix must not be empty") case strings.TrimSpace(cfg.UserSessionsKeyPrefix) == "": @@ -95,20 +80,8 @@ func New(cfg Config) (*Store, error) { return nil, errors.New("new redis session store: operation timeout must be positive") } - options := &redis.Options{ - Addr: cfg.Addr, - Username: cfg.Username, - Password: cfg.Password, - DB: cfg.DB, - Protocol: 2, - DisableIdentity: true, - } - if cfg.TLSEnabled { - options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } - return &Store{ - client: redis.NewClient(options), + client: client, sessionKeyPrefix: cfg.SessionKeyPrefix, userSessionsKeyPrefix: cfg.UserSessionsKeyPrefix, userActiveSessionsKeyPrefix: cfg.UserActiveSessionsKeyPrefix, @@ -116,31 +89,6 @@ func New(cfg Config) (*Store, error) { }, nil } -// Close releases the underlying Redis client resources. -func (s *Store) Close() error { - if s == nil || s.client == nil { - return nil - } - - return s.client.Close() -} - -// Ping verifies that the configured Redis backend is reachable within the -// adapter operation timeout budget. -func (s *Store) Ping(ctx context.Context) error { - operationCtx, cancel, err := s.operationContext(ctx, "ping redis session store") - if err != nil { - return err - } - defer cancel() - - if err := s.client.Ping(operationCtx).Err(); err != nil { - return fmt.Errorf("ping redis session store: %w", err) - } - - return nil -} - // Get returns the stored session for deviceSessionID. 
func (s *Store) Get(ctx context.Context, deviceSessionID common.DeviceSessionID) (devicesession.Session, error) { if err := deviceSessionID.Validate(); err != nil { diff --git a/authsession/internal/adapters/redis/sessionstore/store_test.go b/authsession/internal/adapters/redis/sessionstore/store_test.go index a7c2661..d151c39 100644 --- a/authsession/internal/adapters/redis/sessionstore/store_test.go +++ b/authsession/internal/adapters/redis/sessionstore/store_test.go @@ -13,10 +13,26 @@ import ( "galaxy/authsession/internal/ports" "github.com/alicebob/miniredis/v2" + "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func newRedisClient(t *testing.T, server *miniredis.Miniredis) *redis.Client { + t.Helper() + + client := redis.NewClient(&redis.Options{ + Addr: server.Addr(), + Protocol: 2, + DisableIdentity: true, + }) + t.Cleanup(func() { + assert.NoError(t, client.Close()) + }) + + return client +} + func TestStoreContract(t *testing.T) { t.Parallel() @@ -32,49 +48,27 @@ func TestNew(t *testing.T) { t.Parallel() server := miniredis.RunT(t) + client := newRedisClient(t, server) + + validCfg := Config{ + SessionKeyPrefix: "authsession:session:", + UserSessionsKeyPrefix: "authsession:user-sessions:", + UserActiveSessionsKeyPrefix: "authsession:user-active-sessions:", + OperationTimeout: 250 * time.Millisecond, + } tests := []struct { name string + client *redis.Client cfg Config wantErr string }{ + {name: "valid config", client: client, cfg: validCfg}, + {name: "nil client", client: nil, cfg: validCfg, wantErr: "nil redis client"}, { - name: "valid config", + name: "empty session prefix", + client: client, cfg: Config{ - Addr: server.Addr(), - DB: 1, - SessionKeyPrefix: "authsession:session:", - UserSessionsKeyPrefix: "authsession:user-sessions:", - UserActiveSessionsKeyPrefix: "authsession:user-active-sessions:", - OperationTimeout: 250 * time.Millisecond, - }, - }, - { - name: "empty addr", - cfg: Config{ 
- SessionKeyPrefix: "authsession:session:", - UserSessionsKeyPrefix: "authsession:user-sessions:", - UserActiveSessionsKeyPrefix: "authsession:user-active-sessions:", - OperationTimeout: 250 * time.Millisecond, - }, - wantErr: "redis addr must not be empty", - }, - { - name: "negative db", - cfg: Config{ - Addr: server.Addr(), - DB: -1, - SessionKeyPrefix: "authsession:session:", - UserSessionsKeyPrefix: "authsession:user-sessions:", - UserActiveSessionsKeyPrefix: "authsession:user-active-sessions:", - OperationTimeout: 250 * time.Millisecond, - }, - wantErr: "redis db must not be negative", - }, - { - name: "empty session prefix", - cfg: Config{ - Addr: server.Addr(), UserSessionsKeyPrefix: "authsession:user-sessions:", UserActiveSessionsKeyPrefix: "authsession:user-active-sessions:", OperationTimeout: 250 * time.Millisecond, @@ -82,9 +76,9 @@ func TestNew(t *testing.T) { wantErr: "session key prefix must not be empty", }, { - name: "empty all sessions prefix", + name: "empty all sessions prefix", + client: client, cfg: Config{ - Addr: server.Addr(), SessionKeyPrefix: "authsession:session:", UserActiveSessionsKeyPrefix: "authsession:user-active-sessions:", OperationTimeout: 250 * time.Millisecond, @@ -92,9 +86,9 @@ func TestNew(t *testing.T) { wantErr: "user sessions key prefix must not be empty", }, { - name: "empty active sessions prefix", + name: "empty active sessions prefix", + client: client, cfg: Config{ - Addr: server.Addr(), SessionKeyPrefix: "authsession:session:", UserSessionsKeyPrefix: "authsession:user-sessions:", OperationTimeout: 250 * time.Millisecond, @@ -102,9 +96,9 @@ func TestNew(t *testing.T) { wantErr: "user active sessions key prefix must not be empty", }, { - name: "non positive timeout", + name: "non positive timeout", + client: client, cfg: Config{ - Addr: server.Addr(), SessionKeyPrefix: "authsession:session:", UserSessionsKeyPrefix: "authsession:user-sessions:", UserActiveSessionsKeyPrefix: "authsession:user-active-sessions:", @@ 
-114,12 +108,10 @@ func TestNew(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() - store, err := New(tt.cfg) + store, err := New(tt.client, tt.cfg) if tt.wantErr != "" { require.Error(t, err) assert.ErrorContains(t, err, tt.wantErr) @@ -127,22 +119,11 @@ func TestNew(t *testing.T) { } require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, store.Close()) - }) + require.NotNil(t, store) }) } } -func TestStorePing(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - store := newTestStore(t, server, Config{}) - - require.NoError(t, store.Ping(context.Background())) -} - func TestStoreCreateAndGetActive(t *testing.T) { t.Parallel() @@ -558,9 +539,6 @@ func TestStoreRevokeAllByUserIDDetectsCorruptActiveIndex(t *testing.T) { func newTestStore(t *testing.T, server *miniredis.Miniredis, cfg Config) *Store { t.Helper() - if cfg.Addr == "" { - cfg.Addr = server.Addr() - } if cfg.SessionKeyPrefix == "" { cfg.SessionKeyPrefix = "authsession:session:" } @@ -574,13 +552,9 @@ func newTestStore(t *testing.T, server *miniredis.Miniredis, cfg Config) *Store cfg.OperationTimeout = 250 * time.Millisecond } - store, err := New(cfg) + store, err := New(newRedisClient(t, server), cfg) require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, store.Close()) - }) - return store } diff --git a/authsession/internal/app/runtime.go b/authsession/internal/app/runtime.go index dd6f10f..78efb82 100644 --- a/authsession/internal/app/runtime.go +++ b/authsession/internal/app/runtime.go @@ -7,6 +7,7 @@ import ( "galaxy/authsession/internal/adapters/local" "galaxy/authsession/internal/adapters/mail" + redisadapter "galaxy/authsession/internal/adapters/redis" "galaxy/authsession/internal/adapters/redis/challengestore" "galaxy/authsession/internal/adapters/redis/configprovider" "galaxy/authsession/internal/adapters/redis/projectionpublisher" @@ -26,17 +27,10 @@ import ( 
"galaxy/authsession/internal/service/sendemailcode" "galaxy/authsession/internal/telemetry" + "github.com/redis/go-redis/v9" "go.uber.org/zap" ) -type pinger interface { - Ping(context.Context) error -} - -type closer interface { - Close() error -} - // Runtime owns the runnable authsession application plus the adapter cleanup // functions that must run after the process stops. type Runtime struct { @@ -65,91 +59,64 @@ func NewRuntime(ctx context.Context, cfg config.Config, logger *zap.Logger, tele return nil, errors.Join(err, runtime.Close()) } - challengeStore, err := challengestore.New(challengestore.Config{ - Addr: cfg.Redis.Addr, - Username: cfg.Redis.Username, - Password: cfg.Redis.Password, - DB: cfg.Redis.DB, - TLSEnabled: cfg.Redis.TLSEnabled, + redisClient := redisadapter.NewClient(cfg.Redis) + if err := redisadapter.InstrumentClient(redisClient, telemetryRuntime); err != nil { + return cleanupOnError(fmt.Errorf("new authsession runtime: %w", err)) + } + runtime.cleanupFns = append(runtime.cleanupFns, func() error { + err := redisClient.Close() + if errors.Is(err, redis.ErrClosed) { + return nil + } + return err + }) + if err := redisadapter.Ping(ctx, cfg.Redis, redisClient); err != nil { + return cleanupOnError(fmt.Errorf("new authsession runtime: %w", err)) + } + + challengeStore, err := challengestore.New(redisClient, challengestore.Config{ KeyPrefix: cfg.Redis.ChallengeKeyPrefix, - OperationTimeout: cfg.Redis.OperationTimeout, + OperationTimeout: cfg.Redis.Conn.OperationTimeout, }) if err != nil { return cleanupOnError(fmt.Errorf("new authsession runtime: challenge store: %w", err)) } - runtime.cleanupFns = append(runtime.cleanupFns, challengeStore.Close) - sessionStore, err := sessionstore.New(sessionstore.Config{ - Addr: cfg.Redis.Addr, - Username: cfg.Redis.Username, - Password: cfg.Redis.Password, - DB: cfg.Redis.DB, - TLSEnabled: cfg.Redis.TLSEnabled, + sessionStore, err := sessionstore.New(redisClient, sessionstore.Config{ SessionKeyPrefix: 
cfg.Redis.SessionKeyPrefix, UserSessionsKeyPrefix: cfg.Redis.UserSessionsKeyPrefix, UserActiveSessionsKeyPrefix: cfg.Redis.UserActiveSessionsKeyPrefix, - OperationTimeout: cfg.Redis.OperationTimeout, + OperationTimeout: cfg.Redis.Conn.OperationTimeout, }) if err != nil { return cleanupOnError(fmt.Errorf("new authsession runtime: session store: %w", err)) } - runtime.cleanupFns = append(runtime.cleanupFns, sessionStore.Close) - configStore, err := configprovider.New(configprovider.Config{ - Addr: cfg.Redis.Addr, - Username: cfg.Redis.Username, - Password: cfg.Redis.Password, - DB: cfg.Redis.DB, - TLSEnabled: cfg.Redis.TLSEnabled, + configStore, err := configprovider.New(redisClient, configprovider.Config{ SessionLimitKey: cfg.Redis.SessionLimitKey, - OperationTimeout: cfg.Redis.OperationTimeout, + OperationTimeout: cfg.Redis.Conn.OperationTimeout, }) if err != nil { return cleanupOnError(fmt.Errorf("new authsession runtime: config provider: %w", err)) } - runtime.cleanupFns = append(runtime.cleanupFns, configStore.Close) - publisher, err := projectionpublisher.New(projectionpublisher.Config{ - Addr: cfg.Redis.Addr, - Username: cfg.Redis.Username, - Password: cfg.Redis.Password, - DB: cfg.Redis.DB, - TLSEnabled: cfg.Redis.TLSEnabled, + publisher, err := projectionpublisher.New(redisClient, projectionpublisher.Config{ SessionCacheKeyPrefix: cfg.Redis.GatewaySessionCacheKeyPrefix, SessionEventsStream: cfg.Redis.GatewaySessionEventsStream, StreamMaxLen: cfg.Redis.GatewaySessionEventsStreamMaxLen, - OperationTimeout: cfg.Redis.OperationTimeout, + OperationTimeout: cfg.Redis.Conn.OperationTimeout, }) if err != nil { return cleanupOnError(fmt.Errorf("new authsession runtime: projection publisher: %w", err)) } - runtime.cleanupFns = append(runtime.cleanupFns, publisher.Close) - abuseProtector, err := sendemailcodeabuse.New(sendemailcodeabuse.Config{ - Addr: cfg.Redis.Addr, - Username: cfg.Redis.Username, - Password: cfg.Redis.Password, - DB: cfg.Redis.DB, - TLSEnabled: 
cfg.Redis.TLSEnabled, + abuseProtector, err := sendemailcodeabuse.New(redisClient, sendemailcodeabuse.Config{ KeyPrefix: cfg.Redis.SendEmailCodeThrottleKeyPrefix, - OperationTimeout: cfg.Redis.OperationTimeout, + OperationTimeout: cfg.Redis.Conn.OperationTimeout, }) if err != nil { return cleanupOnError(fmt.Errorf("new authsession runtime: send email code abuse protector: %w", err)) } - runtime.cleanupFns = append(runtime.cleanupFns, abuseProtector.Close) - - for name, dependency := range map[string]pinger{ - "challenge store": challengeStore, - "session store": sessionStore, - "config provider": configStore, - "projection publisher": publisher, - "send email code abuse protector": abuseProtector, - } { - if err := dependency.Ping(ctx); err != nil { - return cleanupOnError(fmt.Errorf("new authsession runtime: ping %s: %w", name, err)) - } - } clock := local.Clock{} idGenerator := local.IDGenerator{} diff --git a/authsession/internal/app/runtime_test.go b/authsession/internal/app/runtime_test.go index 0c8050c..14a17f0 100644 --- a/authsession/internal/app/runtime_test.go +++ b/authsession/internal/app/runtime_test.go @@ -26,7 +26,8 @@ func TestNewRuntimeStartsAndStopsHTTPServers(t *testing.T) { redisServer := miniredis.RunT(t) cfg := config.DefaultConfig() - cfg.Redis.Addr = redisServer.Addr() + cfg.Redis.Conn.MasterAddr = redisServer.Addr() + cfg.Redis.Conn.Password = "integration" cfg.PublicHTTP.Addr = mustFreeAddr(t) cfg.InternalHTTP.Addr = mustFreeAddr(t) cfg.ShutdownTimeout = 10 * time.Second @@ -69,7 +70,8 @@ func TestNewRuntimeUsesRESTUserDirectoryWhenConfigured(t *testing.T) { defer userService.Close() cfg := config.DefaultConfig() - cfg.Redis.Addr = redisServer.Addr() + cfg.Redis.Conn.MasterAddr = redisServer.Addr() + cfg.Redis.Conn.Password = "integration" cfg.PublicHTTP.Addr = mustFreeAddr(t) cfg.InternalHTTP.Addr = mustFreeAddr(t) cfg.UserService.Mode = "rest" @@ -116,7 +118,8 @@ func TestNewRuntimeUsesRESTMailSenderWhenConfigured(t *testing.T) { defer 
mailService.Close() cfg := config.DefaultConfig() - cfg.Redis.Addr = redisServer.Addr() + cfg.Redis.Conn.MasterAddr = redisServer.Addr() + cfg.Redis.Conn.Password = "integration" cfg.PublicHTTP.Addr = mustFreeAddr(t) cfg.InternalHTTP.Addr = mustFreeAddr(t) cfg.MailService.Mode = "rest" @@ -152,12 +155,13 @@ func TestNewRuntimeFailsFastWhenRedisPingChecksFail(t *testing.T) { t.Parallel() cfg := config.DefaultConfig() - cfg.Redis.Addr = mustFreeAddr(t) + cfg.Redis.Conn.MasterAddr = mustFreeAddr(t) + cfg.Redis.Conn.Password = "integration" runtime, err := NewRuntime(context.Background(), cfg, zap.NewNop(), nil) require.Nil(t, runtime) require.Error(t, err) - assert.ErrorContains(t, err, "new authsession runtime: ping") + assert.ErrorContains(t, err, "ping redis") } func mustFreeAddr(t *testing.T) string { diff --git a/authsession/internal/config/config.go b/authsession/internal/config/config.go index 8b65385..b24c1bf 100644 --- a/authsession/internal/config/config.go +++ b/authsession/internal/config/config.go @@ -11,10 +11,13 @@ import ( "galaxy/authsession/internal/api/internalhttp" "galaxy/authsession/internal/api/publichttp" + "galaxy/redisconn" "go.uber.org/zap/zapcore" ) +const authsessionRedisEnvPrefix = "AUTHSESSION" + const ( shutdownTimeoutEnvVar = "AUTHSESSION_SHUTDOWN_TIMEOUT" logLevelEnvVar = "AUTHSESSION_LOG_LEVEL" @@ -31,13 +34,6 @@ const ( internalHTTPIdleTimeoutEnvVar = "AUTHSESSION_INTERNAL_HTTP_IDLE_TIMEOUT" internalHTTPRequestTimeoutEnvVar = "AUTHSESSION_INTERNAL_HTTP_REQUEST_TIMEOUT" - redisAddrEnvVar = "AUTHSESSION_REDIS_ADDR" - redisUsernameEnvVar = "AUTHSESSION_REDIS_USERNAME" - redisPasswordEnvVar = "AUTHSESSION_REDIS_PASSWORD" - redisDBEnvVar = "AUTHSESSION_REDIS_DB" - redisTLSEnabledEnvVar = "AUTHSESSION_REDIS_TLS_ENABLED" - redisOperationTimeoutEnvVar = "AUTHSESSION_REDIS_OPERATION_TIMEOUT" - redisChallengeKeyPrefixEnvVar = "AUTHSESSION_REDIS_CHALLENGE_KEY_PREFIX" redisSessionKeyPrefixEnvVar = "AUTHSESSION_REDIS_SESSION_KEY_PREFIX" 
redisUserSessionsKeyPrefixEnvVar = "AUTHSESSION_REDIS_USER_SESSIONS_KEY_PREFIX" @@ -67,8 +63,6 @@ const ( defaultShutdownTimeout = 5 * time.Second defaultLogLevel = "info" - defaultRedisDB = 0 - defaultRedisOperationTimeout = 250 * time.Millisecond defaultChallengeKeyPrefix = "authsession:challenge:" defaultSessionKeyPrefix = "authsession:session:" defaultUserSessionsKeyPrefix = "authsession:user-sessions:" @@ -128,23 +122,10 @@ type LoggingConfig struct { // RedisConfig configures the Redis-backed authsession adapters. type RedisConfig struct { - // Addr is the shared Redis address used by the authsession adapters. - Addr string - - // Username is the optional Redis ACL username. - Username string - - // Password is the optional Redis ACL password. - Password string - - // DB is the Redis logical database index. - DB int - - // TLSEnabled configures whether Redis connections use TLS. - TLSEnabled bool - - // OperationTimeout bounds each adapter Redis round trip. - OperationTimeout time.Duration + // Conn carries the master/replica/password connection topology shared by + // every authsession Redis adapter, sourced from the AUTHSESSION_REDIS_* + // environment variables managed by `pkg/redisconn`. + Conn redisconn.Config // ChallengeKeyPrefix namespaces the challenge source-of-truth records. 
ChallengeKeyPrefix string @@ -248,8 +229,7 @@ func DefaultConfig() Config { PublicHTTP: publichttp.DefaultConfig(), InternalHTTP: internalhttp.DefaultConfig(), Redis: RedisConfig{ - DB: defaultRedisDB, - OperationTimeout: defaultRedisOperationTimeout, + Conn: redisconn.DefaultConfig(), ChallengeKeyPrefix: defaultChallengeKeyPrefix, SessionKeyPrefix: defaultSessionKeyPrefix, UserSessionsKeyPrefix: defaultUserSessionsKeyPrefix, @@ -329,21 +309,11 @@ func LoadFromEnv() (Config, error) { return Config{}, fmt.Errorf("load authsession config: %w", err) } - cfg.Redis.Addr = loadStringEnvWithDefault(redisAddrEnvVar, cfg.Redis.Addr) - cfg.Redis.Username = os.Getenv(redisUsernameEnvVar) - cfg.Redis.Password = os.Getenv(redisPasswordEnvVar) - cfg.Redis.DB, err = loadIntEnvWithDefault(redisDBEnvVar, cfg.Redis.DB) - if err != nil { - return Config{}, fmt.Errorf("load authsession config: %w", err) - } - cfg.Redis.TLSEnabled, err = loadBoolEnvWithDefault(redisTLSEnabledEnvVar, cfg.Redis.TLSEnabled) - if err != nil { - return Config{}, fmt.Errorf("load authsession config: %w", err) - } - cfg.Redis.OperationTimeout, err = loadDurationEnvWithDefault(redisOperationTimeoutEnvVar, cfg.Redis.OperationTimeout) + redisConn, err := redisconn.LoadFromEnv(authsessionRedisEnvPrefix) if err != nil { return Config{}, fmt.Errorf("load authsession config: %w", err) } + cfg.Redis.Conn = redisConn cfg.Redis.ChallengeKeyPrefix = loadStringEnvWithDefault(redisChallengeKeyPrefixEnvVar, cfg.Redis.ChallengeKeyPrefix) cfg.Redis.SessionKeyPrefix = loadStringEnvWithDefault(redisSessionKeyPrefixEnvVar, cfg.Redis.SessionKeyPrefix) cfg.Redis.UserSessionsKeyPrefix = loadStringEnvWithDefault(redisUserSessionsKeyPrefixEnvVar, cfg.Redis.UserSessionsKeyPrefix) @@ -404,15 +374,13 @@ func LoadFromEnv() (Config, error) { // Validate reports whether cfg contains a consistent authsession process // configuration. 
func (cfg Config) Validate() error { - switch { - case cfg.ShutdownTimeout <= 0: + if cfg.ShutdownTimeout <= 0 { return fmt.Errorf("load authsession config: %s must be positive", shutdownTimeoutEnvVar) - case strings.TrimSpace(cfg.Redis.Addr) == "": - return fmt.Errorf("load authsession config: %s must not be empty", redisAddrEnvVar) - case cfg.Redis.DB < 0: - return fmt.Errorf("load authsession config: %s must not be negative", redisDBEnvVar) - case cfg.Redis.OperationTimeout <= 0: - return fmt.Errorf("load authsession config: %s must be positive", redisOperationTimeoutEnvVar) + } + if err := cfg.Redis.Conn.Validate(); err != nil { + return fmt.Errorf("load authsession config: redis: %w", err) + } + switch { case strings.TrimSpace(cfg.Redis.ChallengeKeyPrefix) == "": return fmt.Errorf("load authsession config: %s must not be empty", redisChallengeKeyPrefixEnvVar) case strings.TrimSpace(cfg.Redis.SessionKeyPrefix) == "": diff --git a/authsession/internal/config/config_test.go b/authsession/internal/config/config_test.go index aafe427..2863b74 100644 --- a/authsession/internal/config/config_test.go +++ b/authsession/internal/config/config_test.go @@ -8,8 +8,24 @@ import ( "github.com/stretchr/testify/require" ) +const ( + testRedisMasterAddrEnvVar = "AUTHSESSION_REDIS_MASTER_ADDR" + testRedisPasswordEnvVar = "AUTHSESSION_REDIS_PASSWORD" + testRedisReplicaEnvVar = "AUTHSESSION_REDIS_REPLICA_ADDRS" + testRedisDBEnvVar = "AUTHSESSION_REDIS_DB" + testRedisOpTimeoutEnvVar = "AUTHSESSION_REDIS_OPERATION_TIMEOUT" + testRedisTLSEnabledEnvVar = "AUTHSESSION_REDIS_TLS_ENABLED" + testRedisUsernameEnvVar = "AUTHSESSION_REDIS_USERNAME" +) + +func setRequiredRedisEnv(t *testing.T) { + t.Helper() + t.Setenv(testRedisMasterAddrEnvVar, "127.0.0.1:6379") + t.Setenv(testRedisPasswordEnvVar, "secret") +} + func TestLoadFromEnvUsesDefaults(t *testing.T) { - t.Setenv(redisAddrEnvVar, "127.0.0.1:6379") + setRequiredRedisEnv(t) cfg, err := LoadFromEnv() require.NoError(t, err) @@ -19,9 
+35,11 @@ func TestLoadFromEnvUsesDefaults(t *testing.T) { assert.Equal(t, defaults.Logging.Level, cfg.Logging.Level) assert.Equal(t, defaults.PublicHTTP, cfg.PublicHTTP) assert.Equal(t, defaults.InternalHTTP, cfg.InternalHTTP) - assert.Equal(t, "127.0.0.1:6379", cfg.Redis.Addr) - assert.Equal(t, defaults.Redis.DB, cfg.Redis.DB) - assert.Equal(t, defaults.Redis.OperationTimeout, cfg.Redis.OperationTimeout) + assert.Equal(t, "127.0.0.1:6379", cfg.Redis.Conn.MasterAddr) + assert.Equal(t, "secret", cfg.Redis.Conn.Password) + assert.Equal(t, defaults.Redis.Conn.DB, cfg.Redis.Conn.DB) + assert.Equal(t, defaults.Redis.Conn.OperationTimeout, cfg.Redis.Conn.OperationTimeout) + assert.Empty(t, cfg.Redis.Conn.ReplicaAddrs) assert.Equal(t, defaults.UserService, cfg.UserService) assert.Equal(t, defaults.MailService, cfg.MailService) assert.Equal(t, defaults.Telemetry.ServiceName, cfg.Telemetry.ServiceName) @@ -36,12 +54,11 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) { t.Setenv(logLevelEnvVar, "debug") t.Setenv(publicHTTPAddrEnvVar, "127.0.0.1:18080") t.Setenv(internalHTTPAddrEnvVar, "127.0.0.1:18081") - t.Setenv(redisAddrEnvVar, "127.0.0.1:6380") - t.Setenv(redisUsernameEnvVar, "alice") - t.Setenv(redisPasswordEnvVar, "secret") - t.Setenv(redisDBEnvVar, "3") - t.Setenv(redisTLSEnabledEnvVar, "true") - t.Setenv(redisOperationTimeoutEnvVar, "750ms") + t.Setenv(testRedisMasterAddrEnvVar, "127.0.0.1:6380") + t.Setenv(testRedisPasswordEnvVar, "secret") + t.Setenv(testRedisReplicaEnvVar, "127.0.0.1:6381,127.0.0.1:6382") + t.Setenv(testRedisDBEnvVar, "3") + t.Setenv(testRedisOpTimeoutEnvVar, "750ms") t.Setenv(userServiceModeEnvVar, "rest") t.Setenv(userServiceBaseURLEnvVar, "http://127.0.0.1:19090") t.Setenv(userServiceRequestTimeoutEnvVar, "900ms") @@ -62,12 +79,11 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) { assert.Equal(t, "debug", cfg.Logging.Level) assert.Equal(t, "127.0.0.1:18080", cfg.PublicHTTP.Addr) assert.Equal(t, "127.0.0.1:18081", 
cfg.InternalHTTP.Addr) - assert.Equal(t, "127.0.0.1:6380", cfg.Redis.Addr) - assert.Equal(t, "alice", cfg.Redis.Username) - assert.Equal(t, "secret", cfg.Redis.Password) - assert.Equal(t, 3, cfg.Redis.DB) - assert.True(t, cfg.Redis.TLSEnabled) - assert.Equal(t, 750*time.Millisecond, cfg.Redis.OperationTimeout) + assert.Equal(t, "127.0.0.1:6380", cfg.Redis.Conn.MasterAddr) + assert.Equal(t, "secret", cfg.Redis.Conn.Password) + assert.Equal(t, []string{"127.0.0.1:6381", "127.0.0.1:6382"}, cfg.Redis.Conn.ReplicaAddrs) + assert.Equal(t, 3, cfg.Redis.Conn.DB) + assert.Equal(t, 750*time.Millisecond, cfg.Redis.Conn.OperationTimeout) assert.Equal(t, UserServiceConfig{ Mode: "rest", BaseURL: "http://127.0.0.1:19090", @@ -104,10 +120,8 @@ func TestLoadFromEnvRejectsInvalidValues(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Setenv(redisAddrEnvVar, "127.0.0.1:6379") + setRequiredRedisEnv(t) t.Setenv(tt.envName, tt.envVal) if tt.envName == otelExporterOTLPTracesProtocolEnvVar { t.Setenv(otelTracesExporterEnvVar, "otlp") @@ -121,7 +135,7 @@ func TestLoadFromEnvRejectsInvalidValues(t *testing.T) { } func TestLoadFromEnvRejectsInvalidRESTUserServiceConfiguration(t *testing.T) { - t.Setenv(redisAddrEnvVar, "127.0.0.1:6379") + setRequiredRedisEnv(t) t.Setenv(userServiceModeEnvVar, "rest") t.Run("missing base url", func(t *testing.T) { @@ -141,7 +155,7 @@ func TestLoadFromEnvRejectsInvalidRESTUserServiceConfiguration(t *testing.T) { } func TestLoadFromEnvRejectsInvalidRESTMailServiceConfiguration(t *testing.T) { - t.Setenv(redisAddrEnvVar, "127.0.0.1:6379") + setRequiredRedisEnv(t) t.Setenv(mailServiceModeEnvVar, "rest") t.Run("missing base url", func(t *testing.T) { @@ -159,3 +173,40 @@ func TestLoadFromEnvRejectsInvalidRESTMailServiceConfiguration(t *testing.T) { assert.Contains(t, err.Error(), mailServiceRequestTimeoutEnvVar) }) } + +func TestLoadFromEnvRejectsDeprecatedRedisVars(t *testing.T) { + tests := []struct { + name 
string + envName string + }{ + {name: "tls enabled deprecated", envName: testRedisTLSEnabledEnvVar}, + {name: "username deprecated", envName: testRedisUsernameEnvVar}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + setRequiredRedisEnv(t) + t.Setenv(tt.envName, "true") + + _, err := LoadFromEnv() + require.Error(t, err) + assert.Contains(t, err.Error(), tt.envName) + }) + } +} + +func TestLoadFromEnvRequiresRedisMasterAddr(t *testing.T) { + t.Setenv(testRedisPasswordEnvVar, "secret") + + _, err := LoadFromEnv() + require.Error(t, err) + assert.Contains(t, err.Error(), testRedisMasterAddrEnvVar) +} + +func TestLoadFromEnvRequiresRedisPassword(t *testing.T) { + t.Setenv(testRedisMasterAddrEnvVar, "127.0.0.1:6379") + + _, err := LoadFromEnv() + require.Error(t, err) + assert.Contains(t, err.Error(), testRedisPasswordEnvVar) +} diff --git a/authsession/production_hardening_test.go b/authsession/production_hardening_test.go index 833f955..87306bc 100644 --- a/authsession/production_hardening_test.go +++ b/authsession/production_hardening_test.go @@ -231,17 +231,13 @@ func newHardeningApp(t *testing.T, env *hardeningEnvironment, options hardeningA env.redisServer.Set(gatewayCompatibilitySessionLimitKey, strconv.Itoa(*options.SessionLimit)) } - challengeStore, err := challengestore.New(challengestore.Config{ - Addr: env.redisAddr, - DB: 0, + challengeStore, err := challengestore.New(env.redisClient, challengestore.Config{ KeyPrefix: gatewayCompatibilityChallengeKeyPrefix, OperationTimeout: 250 * time.Millisecond, }) require.NoError(t, err) - redisSessionStore, err := sessionstore.New(sessionstore.Config{ - Addr: env.redisAddr, - DB: 0, + redisSessionStore, err := sessionstore.New(env.redisClient, sessionstore.Config{ SessionKeyPrefix: gatewayCompatibilitySessionKeyPrefix, UserSessionsKeyPrefix: gatewayCompatibilityUserSessionsKeyPrefix, UserActiveSessionsKeyPrefix: gatewayCompatibilityUserActiveKeyPrefix, @@ -249,17 +245,13 @@ func 
newHardeningApp(t *testing.T, env *hardeningEnvironment, options hardeningA }) require.NoError(t, err) - configStore, err := configprovider.New(configprovider.Config{ - Addr: env.redisAddr, - DB: 0, + configStore, err := configprovider.New(env.redisClient, configprovider.Config{ SessionLimitKey: gatewayCompatibilitySessionLimitKey, OperationTimeout: 250 * time.Millisecond, }) require.NoError(t, err) - redisPublisher, err := projectionpublisher.New(projectionpublisher.Config{ - Addr: env.redisAddr, - DB: 0, + redisPublisher, err := projectionpublisher.New(env.redisClient, projectionpublisher.Config{ SessionCacheKeyPrefix: gatewayCompatibilitySessionCacheKeyPrefix, SessionEventsStream: gatewayCompatibilitySessionEventsStream, StreamMaxLen: gatewayCompatibilityStreamMaxLen, @@ -373,10 +365,6 @@ func newHardeningApp(t *testing.T, env *hardeningEnvironment, options hardeningA app.closeFn = func() { stopPublic() stopInternal() - assert.NoError(t, challengeStore.Close()) - assert.NoError(t, redisSessionStore.Close()) - assert.NoError(t, configStore.Close()) - assert.NoError(t, redisPublisher.Close()) } t.Cleanup(func() { app.Close() @@ -678,18 +666,13 @@ func TestProductionHardeningDuplicatePublishKeepsGatewayCacheCanonical(t *testin t.Parallel() env := newHardeningEnvironment(t) - publisher, err := projectionpublisher.New(projectionpublisher.Config{ - Addr: env.redisAddr, - DB: 0, + publisher, err := projectionpublisher.New(env.redisClient, projectionpublisher.Config{ SessionCacheKeyPrefix: gatewayCompatibilitySessionCacheKeyPrefix, SessionEventsStream: gatewayCompatibilitySessionEventsStream, StreamMaxLen: gatewayCompatibilityStreamMaxLen, OperationTimeout: 250 * time.Millisecond, }) require.NoError(t, err) - defer func() { - assert.NoError(t, publisher.Close()) - }() snapshot := gatewayprojection.Snapshot{ DeviceSessionID: common.DeviceSessionID("device-session-1"), diff --git a/authsession/user_service_real_runtime_compatibility_test.go 
b/authsession/user_service_real_runtime_compatibility_test.go deleted file mode 100644 index 78ad37d..0000000 --- a/authsession/user_service_real_runtime_compatibility_test.go +++ /dev/null @@ -1,273 +0,0 @@ -package authsession - -import ( - "bytes" - "context" - "fmt" - "io" - "net" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "syscall" - "testing" - "time" - - "galaxy/authsession/internal/adapters/userservice" - "galaxy/authsession/internal/domain/common" - "galaxy/authsession/internal/domain/userresolution" - "galaxy/authsession/internal/ports" - - "github.com/alicebob/miniredis/v2" -) - -func TestUserServiceRESTClientWorksAgainstRealUserServiceRuntime(t *testing.T) { - redisServer := miniredis.RunT(t) - internalAddr := freeTCPAddress(t) - binaryPath := buildUserServiceBinary(t) - process := startUserServiceProcess(t, binaryPath, map[string]string{ - "USERSERVICE_INTERNAL_HTTP_ADDR": internalAddr, - "USERSERVICE_REDIS_ADDR": redisServer.Addr(), - }) - waitForTCP(t, process, internalAddr) - - client, err := userservice.NewRESTClient(userservice.Config{ - BaseURL: "http://" + internalAddr, - RequestTimeout: 500 * time.Millisecond, - }) - if err != nil { - t.Fatalf("NewRESTClient() error = %v, want nil", err) - } - t.Cleanup(func() { - _ = client.Close() - }) - - creatableEmail := common.Email("pilot@example.com") - - resolution, err := client.ResolveByEmail(context.Background(), creatableEmail) - if err != nil { - t.Fatalf("ResolveByEmail(creatable) error = %v, want nil", err) - } - if got, want := resolution.Kind, userresolution.KindCreatable; got != want { - t.Fatalf("ResolveByEmail(creatable).Kind = %q, want %q", got, want) - } - - created, err := client.EnsureUserByEmail(context.Background(), ports.EnsureUserInput{ - Email: creatableEmail, - RegistrationContext: &ports.RegistrationContext{ - PreferredLanguage: "en", - TimeZone: "Europe/Kaliningrad", - }, - }) - if err != nil { - t.Fatalf("EnsureUserByEmail(created) error = %v, want nil", 
err) - } - if got, want := created.Outcome, ports.EnsureUserOutcomeCreated; got != want { - t.Fatalf("EnsureUserByEmail(created).Outcome = %q, want %q", got, want) - } - if created.UserID.IsZero() { - t.Fatalf("EnsureUserByEmail(created).UserID = zero, want non-zero") - } - - existing, err := client.ResolveByEmail(context.Background(), creatableEmail) - if err != nil { - t.Fatalf("ResolveByEmail(existing) error = %v, want nil", err) - } - if got, want := existing.Kind, userresolution.KindExisting; got != want { - t.Fatalf("ResolveByEmail(existing).Kind = %q, want %q", got, want) - } - if got, want := existing.UserID, created.UserID; got != want { - t.Fatalf("ResolveByEmail(existing).UserID = %q, want %q", got, want) - } - - exists, err := client.ExistsByUserID(context.Background(), created.UserID) - if err != nil { - t.Fatalf("ExistsByUserID(existing) error = %v, want nil", err) - } - if !exists { - t.Fatalf("ExistsByUserID(existing) = false, want true") - } - - blocked, err := client.BlockByUserID(context.Background(), ports.BlockUserByIDInput{ - UserID: created.UserID, - ReasonCode: userresolution.BlockReasonCode("policy_blocked"), - }) - if err != nil { - t.Fatalf("BlockByUserID() error = %v, want nil", err) - } - if got, want := blocked.Outcome, ports.BlockUserOutcomeBlocked; got != want { - t.Fatalf("BlockByUserID().Outcome = %q, want %q", got, want) - } - if got, want := blocked.UserID, created.UserID; got != want { - t.Fatalf("BlockByUserID().UserID = %q, want %q", got, want) - } - - repeated, err := client.BlockByEmail(context.Background(), ports.BlockUserByEmailInput{ - Email: creatableEmail, - ReasonCode: userresolution.BlockReasonCode("policy_blocked"), - }) - if err != nil { - t.Fatalf("BlockByEmail(repeated) error = %v, want nil", err) - } - if got, want := repeated.Outcome, ports.BlockUserOutcomeAlreadyBlocked; got != want { - t.Fatalf("BlockByEmail(repeated).Outcome = %q, want %q", got, want) - } - if got, want := repeated.UserID, created.UserID; got 
!= want { - t.Fatalf("BlockByEmail(repeated).UserID = %q, want %q", got, want) - } - - blockedResolution, err := client.ResolveByEmail(context.Background(), creatableEmail) - if err != nil { - t.Fatalf("ResolveByEmail(blocked) error = %v, want nil", err) - } - if got, want := blockedResolution.Kind, userresolution.KindBlocked; got != want { - t.Fatalf("ResolveByEmail(blocked).Kind = %q, want %q", got, want) - } - if got, want := blockedResolution.BlockReasonCode, userresolution.BlockReasonCode("policy_blocked"); got != want { - t.Fatalf("ResolveByEmail(blocked).BlockReasonCode = %q, want %q", got, want) - } -} - -type userServiceProcess struct { - cmd *exec.Cmd - doneCh chan struct{} - logs bytes.Buffer -} - -func startUserServiceProcess(t *testing.T, binaryPath string, env map[string]string) *userServiceProcess { - t.Helper() - - cmd := exec.Command(binaryPath) - cmd.Env = mergeEnvironment(os.Environ(), env) - - process := &userServiceProcess{ - cmd: cmd, - doneCh: make(chan struct{}), - } - cmd.Stdout = &process.logs - cmd.Stderr = &process.logs - - if err := cmd.Start(); err != nil { - t.Fatalf("start user service process: %v", err) - } - - go func() { - _ = cmd.Wait() - close(process.doneCh) - }() - - t.Cleanup(func() { - stopUserServiceProcess(t, process) - if t.Failed() { - t.Logf("userservice logs:\n%s", process.logs.String()) - } - }) - - return process -} - -func stopUserServiceProcess(t *testing.T, process *userServiceProcess) { - t.Helper() - - if process == nil || process.cmd == nil || process.cmd.Process == nil { - return - } - - select { - case <-process.doneCh: - return - default: - } - - _ = process.cmd.Process.Signal(syscall.SIGTERM) - - select { - case <-process.doneCh: - case <-time.After(5 * time.Second): - _ = process.cmd.Process.Kill() - <-process.doneCh - } -} - -func waitForTCP(t *testing.T, process *userServiceProcess, address string) { - t.Helper() - - deadline := time.Now().Add(10 * time.Second) - for time.Now().Before(deadline) { - 
select { - case <-process.doneCh: - t.Fatalf("userservice exited before %s became reachable\n%s", address, process.logs.String()) - default: - } - - conn, err := net.DialTimeout("tcp", address, 100*time.Millisecond) - if err == nil { - _ = conn.Close() - return - } - - time.Sleep(25 * time.Millisecond) - } - - t.Fatalf("userservice did not become reachable at %s\n%s", address, process.logs.String()) -} - -func freeTCPAddress(t *testing.T) string { - t.Helper() - - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("reserve free TCP address: %v", err) - } - defer listener.Close() - - return listener.Addr().String() -} - -func buildUserServiceBinary(t *testing.T) string { - t.Helper() - - outputPath := filepath.Join(t.TempDir(), "userservice") - cmd := exec.Command("go", "build", "-o", outputPath, "./user/cmd/userservice") - cmd.Dir = repositoryRoot(t) - output, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("build userservice binary: %v\n%s", err, output) - } - - return outputPath -} - -func repositoryRoot(t *testing.T) string { - t.Helper() - - _, file, _, ok := runtime.Caller(0) - if !ok { - t.Fatal("resolve repository root: runtime caller unavailable") - } - - return filepath.Clean(filepath.Join(filepath.Dir(file), "..")) -} - -func mergeEnvironment(base []string, overrides map[string]string) []string { - values := make(map[string]string, len(base)+len(overrides)) - for _, entry := range base { - name, value, ok := strings.Cut(entry, "=") - if ok { - values[name] = value - } - } - for name, value := range overrides { - values[name] = value - } - - merged := make([]string, 0, len(values)) - for name, value := range values { - merged = append(merged, fmt.Sprintf("%s=%s", name, value)) - } - return merged -} - -var _ io.Writer = (*bytes.Buffer)(nil) diff --git a/client/go.mod b/client/go.mod index 458b5e5..6e95bbc 100644 --- a/client/go.mod +++ b/client/go.mod @@ -38,8 +38,8 @@ require ( github.com/srwiley/rasterx 
v0.0.0-20220730225603-2ab79fcdd4ef // indirect github.com/yuin/goldmark v1.7.16 // indirect golang.org/x/image v0.36.0 // indirect - golang.org/x/net v0.52.0 // indirect - golang.org/x/sys v0.42.0 // indirect + golang.org/x/net v0.53.0 // indirect + golang.org/x/sys v0.43.0 // indirect golang.org/x/text v0.36.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/client/go.sum b/client/go.sum index f9915af..cc38db8 100644 --- a/client/go.sum +++ b/client/go.sum @@ -72,8 +72,8 @@ go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/image v0.36.0 h1:Iknbfm1afbgtwPTmHnS2gTM/6PPZfH+z2EFuOkSbqwc= golang.org/x/image v0.36.0/go.mod h1:YsWD2TyyGKiIX1kZlu9QfKIsQ4nAAK9bdgdrIsE7xy4= -golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= -golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= +golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/game/go.mod b/game/go.mod index 97ff1e2..309b22f 100644 --- a/game/go.mod +++ b/game/go.mod @@ -24,7 +24,7 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-isatty v0.0.21 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pelletier/go-toml/v2 v2.3.0 // indirect 
@@ -36,9 +36,9 @@ require ( github.com/ugorji/go/codec v1.3.1 // indirect go.mongodb.org/mongo-driver/v2 v2.5.0 // indirect golang.org/x/arch v0.25.0 // indirect - golang.org/x/crypto v0.49.0 // indirect - golang.org/x/net v0.52.0 // indirect - golang.org/x/sys v0.42.0 // indirect + golang.org/x/crypto v0.50.0 // indirect + golang.org/x/net v0.53.0 // indirect + golang.org/x/sys v0.43.0 // indirect golang.org/x/text v0.36.0 // indirect google.golang.org/protobuf v1.36.11 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/game/go.sum b/game/go.sum index d498415..f7dc1e8 100644 --- a/game/go.sum +++ b/game/go.sum @@ -36,8 +36,7 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.21 h1:xYae+lCNBP7QuW4PUnNG61ffM4hVIfm+zUzDuSzYLGs= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -70,10 +69,9 @@ go.mongodb.org/mongo-driver/v2 v2.5.0 h1:yXUhImUjjAInNcpTcAlPHiT7bIXhshCTL3jVBkF go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= golang.org/x/arch v0.25.0 h1:qnk6Ksugpi5Bz32947rkUgDt9/s5qvqDPl/gBKdMJLE= -golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= 
-golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= +golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= diff --git a/gateway/.env.example b/gateway/.env.example index 9d493ef..f4051b3 100644 --- a/gateway/.env.example +++ b/gateway/.env.example @@ -1,5 +1,6 @@ # Required startup settings. -GATEWAY_SESSION_CACHE_REDIS_ADDR=127.0.0.1:6379 +GATEWAY_REDIS_MASTER_ADDR=127.0.0.1:6379 +GATEWAY_REDIS_PASSWORD=changeme GATEWAY_SESSION_EVENTS_REDIS_STREAM=gateway:session_events GATEWAY_CLIENT_EVENTS_REDIS_STREAM=gateway:client_events GATEWAY_RESPONSE_SIGNER_PRIVATE_KEY_PEM_PATH=./secrets/response-signer.pem @@ -11,11 +12,14 @@ GATEWAY_AUTHENTICATED_GRPC_ADDR=127.0.0.1:9090 # Optional admin listener. # GATEWAY_ADMIN_HTTP_ADDR=127.0.0.1:9091 -# Optional Redis tuning. -# GATEWAY_SESSION_CACHE_REDIS_DB=0 +# Optional Redis tuning. The legacy GATEWAY_REDIS_TLS_ENABLED and +# GATEWAY_REDIS_USERNAME variables are no longer accepted; see +# docs/redis-config.md. +# GATEWAY_REDIS_REPLICA_ADDRS=127.0.0.1:6479,127.0.0.1:6480 +# GATEWAY_REDIS_DB=0 +# GATEWAY_REDIS_OPERATION_TIMEOUT=250ms # GATEWAY_SESSION_CACHE_REDIS_KEY_PREFIX=gateway:session: # GATEWAY_REPLAY_REDIS_KEY_PREFIX=gateway:replay: -# GATEWAY_SESSION_CACHE_REDIS_TLS_ENABLED=false # Optional public-auth integration. Without a configured Auth / Session Service # base URL the routes stay mounted and return 503 service_unavailable. 
diff --git a/gateway/README.md b/gateway/README.md index 6c8e2f0..08901cd 100644 --- a/gateway/README.md +++ b/gateway/README.md @@ -13,7 +13,8 @@ Required startup environment variables: -- `GATEWAY_SESSION_CACHE_REDIS_ADDR` +- `GATEWAY_REDIS_MASTER_ADDR` +- `GATEWAY_REDIS_PASSWORD` - `GATEWAY_SESSION_EVENTS_REDIS_STREAM` - `GATEWAY_CLIENT_EVENTS_REDIS_STREAM` - `GATEWAY_RESPONSE_SIGNER_PRIVATE_KEY_PEM_PATH` @@ -609,23 +610,45 @@ eviction policy. Session lifecycle events are the authoritative mechanism for keeping the hot path current, while Redis fallback remains the safety net for cold misses and process restarts. -The Redis fallback implementation uses `go-redis/v9`. -`cmd/gateway` requires the Redis fallback backend during startup, issues a -bounded `PING`, and refuses to start when Redis is misconfigured or -unavailable. +The Redis fallback implementation uses `go-redis/v9`. `cmd/gateway` opens one +shared `*redis.Client` via `pkg/redisconn` (instrumented with OpenTelemetry +tracing and metrics), issues a single bounded `PING` on startup, and refuses +to start when Redis is misconfigured or unavailable. The session cache, +replay store, session-events subscriber, and client-events subscriber all +use that shared client. See `docs/redis-config.md` for the rationale behind +the shape and the project-wide rules in +`ARCHITECTURE.md §Persistence Backends`. 
-Required environment variable: +Required Redis connection variables: -- `GATEWAY_SESSION_CACHE_REDIS_ADDR` +- `GATEWAY_REDIS_MASTER_ADDR` +- `GATEWAY_REDIS_PASSWORD` -Optional environment variables: +Optional Redis connection variables: + +- `GATEWAY_REDIS_REPLICA_ADDRS` (comma-separated; reserved for future + read-routing — currently unused) +- `GATEWAY_REDIS_DB` with default `0` +- `GATEWAY_REDIS_OPERATION_TIMEOUT` with default `250ms` + +> Removed: `GATEWAY_SESSION_CACHE_REDIS_ADDR`, +> `GATEWAY_SESSION_CACHE_REDIS_USERNAME`, +> `GATEWAY_SESSION_CACHE_REDIS_PASSWORD`, +> `GATEWAY_SESSION_CACHE_REDIS_DB`, +> `GATEWAY_SESSION_CACHE_REDIS_TLS_ENABLED`. `pkg/redisconn.LoadFromEnv` +> rejects the deprecated `GATEWAY_REDIS_TLS_ENABLED` and +> `GATEWAY_REDIS_USERNAME` variables at startup. + +Per-subsystem Redis behavior variables (namespace, stream, timeouts): -- `GATEWAY_SESSION_CACHE_REDIS_USERNAME` -- `GATEWAY_SESSION_CACHE_REDIS_PASSWORD` -- `GATEWAY_SESSION_CACHE_REDIS_DB` with default `0` - `GATEWAY_SESSION_CACHE_REDIS_KEY_PREFIX` with default `gateway:session:` - `GATEWAY_SESSION_CACHE_REDIS_LOOKUP_TIMEOUT` with default `250ms` -- `GATEWAY_SESSION_CACHE_REDIS_TLS_ENABLED` with default `false` +- `GATEWAY_REPLAY_REDIS_KEY_PREFIX` with default `gateway:replay:` +- `GATEWAY_REPLAY_REDIS_RESERVE_TIMEOUT` with default `250ms` +- `GATEWAY_SESSION_EVENTS_REDIS_STREAM` +- `GATEWAY_SESSION_EVENTS_REDIS_READ_BLOCK_TIMEOUT` with default `1s` +- `GATEWAY_CLIENT_EVENTS_REDIS_STREAM` +- `GATEWAY_CLIENT_EVENTS_REDIS_READ_BLOCK_TIMEOUT` with default `1s` The Redis key format is: diff --git a/gateway/cmd/gateway/main.go b/gateway/cmd/gateway/main.go index 0398724..ef3b396 100644 --- a/gateway/cmd/gateway/main.go +++ b/gateway/cmd/gateway/main.go @@ -18,11 +18,13 @@ import ( "galaxy/gateway/internal/grpcapi" "galaxy/gateway/internal/logging" "galaxy/gateway/internal/push" + "galaxy/gateway/internal/redisclient" "galaxy/gateway/internal/replay" "galaxy/gateway/internal/restapi" 
"galaxy/gateway/internal/session" "galaxy/gateway/internal/telemetry" + "github.com/redis/go-redis/v9" "go.uber.org/zap" ) @@ -132,112 +134,83 @@ func newAuthenticatedGRPCDependencies(ctx context.Context, cfg config.Config, lo return grpcapi.ServerDependencies{}, nil, nil, fmt.Errorf("build authenticated grpc dependencies: load response signer: %w", err) } - fallbackSessionCache, err := session.NewRedisCache(cfg.SessionCacheRedis) - if err != nil { - return grpcapi.ServerDependencies{}, nil, nil, fmt.Errorf("build authenticated grpc dependencies: %w", err) - } - - replayStore, err := replay.NewRedisStore(cfg.SessionCacheRedis, cfg.ReplayRedis) - if err != nil { - closeErr := fallbackSessionCache.Close() + redisClient := redisclient.NewClient(cfg.Redis) + if err := redisclient.InstrumentClient(redisClient, telemetryRuntime); err != nil { + closeErr := redisClient.Close() return grpcapi.ServerDependencies{}, nil, nil, errors.Join( fmt.Errorf("build authenticated grpc dependencies: %w", err), closeErr, ) } + closeRedisClient := func() error { + err := redisClient.Close() + if errors.Is(err, redis.ErrClosed) { + return nil + } + return err + } + if err := redisclient.Ping(ctx, cfg.Redis, redisClient); err != nil { + closeErr := closeRedisClient() + return grpcapi.ServerDependencies{}, nil, nil, errors.Join( + fmt.Errorf("build authenticated grpc dependencies: %w", err), + closeErr, + ) + } + + fallbackSessionCache, err := session.NewRedisCache(redisClient, cfg.SessionCacheRedis) + if err != nil { + return grpcapi.ServerDependencies{}, nil, nil, errors.Join( + fmt.Errorf("build authenticated grpc dependencies: %w", err), + closeRedisClient(), + ) + } + + replayStore, err := replay.NewRedisStore(redisClient, cfg.ReplayRedis) + if err != nil { + return grpcapi.ServerDependencies{}, nil, nil, errors.Join( + fmt.Errorf("build authenticated grpc dependencies: %w", err), + closeRedisClient(), + ) + } localSessionCache := session.NewMemoryCache() sessionCache, err := 
session.NewReadThroughCache(localSessionCache, fallbackSessionCache) if err != nil { - closeErr := errors.Join( - fallbackSessionCache.Close(), - replayStore.Close(), - ) return grpcapi.ServerDependencies{}, nil, nil, errors.Join( fmt.Errorf("build authenticated grpc dependencies: %w", err), - closeErr, + closeRedisClient(), ) } pushHub := push.NewHubWithObserver(0, telemetry.NewPushObserver(telemetryRuntime)) - sessionSubscriber, err := events.NewRedisSessionSubscriberWithObservability(cfg.SessionCacheRedis, cfg.SessionEventsRedis, localSessionCache, pushHub, logger, telemetryRuntime) + sessionSubscriber, err := events.NewRedisSessionSubscriberWithObservability(redisClient, cfg.SessionCacheRedis, cfg.SessionEventsRedis, localSessionCache, pushHub, logger, telemetryRuntime) if err != nil { - closeErr := errors.Join( - fallbackSessionCache.Close(), - replayStore.Close(), - ) return grpcapi.ServerDependencies{}, nil, nil, errors.Join( fmt.Errorf("build authenticated grpc dependencies: %w", err), - closeErr, + closeRedisClient(), ) } - clientEventSubscriber, err := events.NewRedisClientEventSubscriberWithObservability(cfg.SessionCacheRedis, cfg.ClientEventsRedis, pushHub, logger, telemetryRuntime) + clientEventSubscriber, err := events.NewRedisClientEventSubscriberWithObservability(redisClient, cfg.SessionCacheRedis, cfg.ClientEventsRedis, pushHub, logger, telemetryRuntime) if err != nil { - closeErr := errors.Join( - fallbackSessionCache.Close(), - replayStore.Close(), - sessionSubscriber.Close(), - ) return grpcapi.ServerDependencies{}, nil, nil, errors.Join( fmt.Errorf("build authenticated grpc dependencies: %w", err), - closeErr, + closeRedisClient(), ) } userRoutes, closeUserServiceRoutes, err := userservice.NewRoutes(cfg.UserService.BaseURL) if err != nil { - closeErr := errors.Join( - fallbackSessionCache.Close(), - replayStore.Close(), - sessionSubscriber.Close(), - clientEventSubscriber.Close(), - ) return grpcapi.ServerDependencies{}, nil, nil, errors.Join( 
fmt.Errorf("build authenticated grpc dependencies: user service routes: %w", err), - closeErr, + closeRedisClient(), ) } cleanup := func() error { return errors.Join( - fallbackSessionCache.Close(), - replayStore.Close(), - sessionSubscriber.Close(), - clientEventSubscriber.Close(), closeUserServiceRoutes(), - ) - } - - if err := fallbackSessionCache.Ping(ctx); err != nil { - closeErr := cleanup() - return grpcapi.ServerDependencies{}, nil, nil, errors.Join( - fmt.Errorf("build authenticated grpc dependencies: %w", err), - closeErr, - ) - } - - if err := replayStore.Ping(ctx); err != nil { - closeErr := cleanup() - return grpcapi.ServerDependencies{}, nil, nil, errors.Join( - fmt.Errorf("build authenticated grpc dependencies: %w", err), - closeErr, - ) - } - - if err := sessionSubscriber.Ping(ctx); err != nil { - closeErr := cleanup() - return grpcapi.ServerDependencies{}, nil, nil, errors.Join( - fmt.Errorf("build authenticated grpc dependencies: %w", err), - closeErr, - ) - } - - if err := clientEventSubscriber.Ping(ctx); err != nil { - closeErr := cleanup() - return grpcapi.ServerDependencies{}, nil, nil, errors.Join( - fmt.Errorf("build authenticated grpc dependencies: %w", err), - closeErr, + closeRedisClient(), ) } diff --git a/gateway/cmd/gateway/main_test.go b/gateway/cmd/gateway/main_test.go index e005d20..c54c9cb 100644 --- a/gateway/cmd/gateway/main_test.go +++ b/gateway/cmd/gateway/main_test.go @@ -15,6 +15,7 @@ import ( "galaxy/gateway/internal/config" "galaxy/gateway/internal/restapi" + "galaxy/redisconn" "github.com/alicebob/miniredis/v2" "github.com/stretchr/testify/assert" @@ -22,6 +23,16 @@ import ( "go.uber.org/zap" ) +func testRedisConn(masterAddr string, opTimeout time.Duration) redisconn.Config { + cfg := redisconn.DefaultConfig() + cfg.MasterAddr = masterAddr + cfg.Password = "integration" + if opTimeout > 0 { + cfg.OperationTimeout = opTimeout + } + return cfg +} + func TestNewPublicRESTDependencies(t *testing.T) { t.Parallel() @@ -102,8 
+113,8 @@ func TestNewAuthenticatedGRPCDependencies(t *testing.T) { { name: "success", cfg: config.Config{ + Redis: testRedisConn(server.Addr(), 250*time.Millisecond), SessionCacheRedis: config.SessionCacheRedisConfig{ - Addr: server.Addr(), KeyPrefix: "gateway:session:", LookupTimeout: 250 * time.Millisecond, }, @@ -125,8 +136,9 @@ func TestNewAuthenticatedGRPCDependencies(t *testing.T) { }, }, { - name: "invalid redis config", + name: "invalid session cache key prefix", cfg: config.Config{ + Redis: testRedisConn(server.Addr(), 250*time.Millisecond), SessionCacheRedis: config.SessionCacheRedisConfig{ LookupTimeout: 250 * time.Millisecond, }, @@ -146,13 +158,13 @@ func TestNewAuthenticatedGRPCDependencies(t *testing.T) { PrivateKeyPEMPath: responseSignerPEMPath, }, }, - wantErr: "redis addr must not be empty", + wantErr: "redis key prefix must not be empty", }, { name: "startup ping failure", cfg: config.Config{ + Redis: testRedisConn(unusedTCPAddr(t), 100*time.Millisecond), SessionCacheRedis: config.SessionCacheRedisConfig{ - Addr: unusedTCPAddr(t), KeyPrefix: "gateway:session:", LookupTimeout: 100 * time.Millisecond, }, @@ -172,13 +184,13 @@ func TestNewAuthenticatedGRPCDependencies(t *testing.T) { PrivateKeyPEMPath: responseSignerPEMPath, }, }, - wantErr: "ping redis session cache", + wantErr: "ping redis", }, { name: "invalid replay config", cfg: config.Config{ + Redis: testRedisConn(server.Addr(), 250*time.Millisecond), SessionCacheRedis: config.SessionCacheRedisConfig{ - Addr: server.Addr(), KeyPrefix: "gateway:session:", LookupTimeout: 250 * time.Millisecond, }, @@ -202,8 +214,8 @@ func TestNewAuthenticatedGRPCDependencies(t *testing.T) { { name: "invalid client event config", cfg: config.Config{ + Redis: testRedisConn(server.Addr(), 250*time.Millisecond), SessionCacheRedis: config.SessionCacheRedisConfig{ - Addr: server.Addr(), KeyPrefix: "gateway:session:", LookupTimeout: 250 * time.Millisecond, }, @@ -227,8 +239,8 @@ func 
TestNewAuthenticatedGRPCDependencies(t *testing.T) { { name: "missing response signer path", cfg: config.Config{ + Redis: testRedisConn(server.Addr(), 250*time.Millisecond), SessionCacheRedis: config.SessionCacheRedisConfig{ - Addr: server.Addr(), KeyPrefix: "gateway:session:", LookupTimeout: 250 * time.Millisecond, }, @@ -250,8 +262,8 @@ func TestNewAuthenticatedGRPCDependencies(t *testing.T) { { name: "invalid response signer pem", cfg: config.Config{ + Redis: testRedisConn(server.Addr(), 250*time.Millisecond), SessionCacheRedis: config.SessionCacheRedisConfig{ - Addr: server.Addr(), KeyPrefix: "gateway:session:", LookupTimeout: 250 * time.Millisecond, }, diff --git a/gateway/docs/redis-config.md b/gateway/docs/redis-config.md new file mode 100644 index 0000000..99c9e61 --- /dev/null +++ b/gateway/docs/redis-config.md @@ -0,0 +1,109 @@ +# Decision: Redis configuration shape + +PG_PLAN.md §7. Captures the standing rules adopted by Edge Gateway when it +joined the project-wide Redis topology defined in +`ARCHITECTURE.md §Persistence Backends`. + +## Context + +Gateway intentionally stays Redis-only. All gateway state Redis serves is +TTL-bounded or runtime-coordination state: + +- the session cache is a read-through projection of authsession's + source-of-truth session records (rebuildable via re-authentication); +- the replay store is a short-lived `SETNX` reservation namespace per + authenticated request (`GATEWAY_REPLAY_REDIS_RESERVE_TIMEOUT`); +- the session-events stream is a runtime fan-out of session lifecycle + updates; +- the client-events stream is a runtime push fan-out. + +Stage 7 brought gateway in line with the steady-state rules established in +Stage 0: every Galaxy service uses one master plus zero-or-more replicas +with a mandatory password, no TLS, and no Redis ACL username; the connection +is configured by the shared `pkg/redisconn` helper. 
+ +## Decisions + +### One shared `*redis.Client` owned by the runtime + +`cmd/gateway/main.go` constructs a single `*redis.Client` via +`internal/redisclient.NewClient`, attaches OpenTelemetry tracing and metrics +via `internal/redisclient.InstrumentClient`, performs one bounded `PING` +via `internal/redisclient.Ping`, and registers `client.Close` for shutdown. +The session cache, replay store, session-events subscriber, and +client-events subscriber all receive this same client. + +Adapters no longer build or own a Redis client. Their `Config` structs hold +only behavior settings (key prefix, stream name, per-subsystem timeouts). +Adapter constructors take `(*redis.Client, …)`. The stream subscribers' +`Close`/`Shutdown` methods became no-ops; the runtime's context cancellation +unblocks the `XRead` loop and the runtime closes the shared client. + +### One env-var prefix for the connection + +Connection topology is loaded from a single `GATEWAY_REDIS_*` group via +`redisconn.LoadFromEnv("GATEWAY")`: + +- `GATEWAY_REDIS_MASTER_ADDR` (required) +- `GATEWAY_REDIS_REPLICA_ADDRS` (optional, comma-separated; currently + unused, reserved for future read-routing) +- `GATEWAY_REDIS_PASSWORD` (required) +- `GATEWAY_REDIS_DB` (default `0`) +- `GATEWAY_REDIS_OPERATION_TIMEOUT` (default `250ms`) + +Per-subsystem behavior env vars keep their existing prefixes — they do not +describe connection topology, only namespace and timing: + +- `GATEWAY_SESSION_CACHE_REDIS_KEY_PREFIX`, + `GATEWAY_SESSION_CACHE_REDIS_LOOKUP_TIMEOUT` +- `GATEWAY_REPLAY_REDIS_KEY_PREFIX`, + `GATEWAY_REPLAY_REDIS_RESERVE_TIMEOUT` +- `GATEWAY_SESSION_EVENTS_REDIS_STREAM`, + `GATEWAY_SESSION_EVENTS_REDIS_READ_BLOCK_TIMEOUT` +- `GATEWAY_CLIENT_EVENTS_REDIS_STREAM`, + `GATEWAY_CLIENT_EVENTS_REDIS_READ_BLOCK_TIMEOUT` + +### Retired env vars (hard removal) + +The following variables are no longer read or honored: + +- `GATEWAY_SESSION_CACHE_REDIS_ADDR` — replaced by + `GATEWAY_REDIS_MASTER_ADDR`. 
+- `GATEWAY_SESSION_CACHE_REDIS_USERNAME` — Redis ACL not used. +- `GATEWAY_SESSION_CACHE_REDIS_PASSWORD` — replaced by + `GATEWAY_REDIS_PASSWORD`. +- `GATEWAY_SESSION_CACHE_REDIS_DB` — replaced by `GATEWAY_REDIS_DB`. +- `GATEWAY_SESSION_CACHE_REDIS_TLS_ENABLED` — TLS disabled by policy. + +`pkg/redisconn.LoadFromEnv` rejects `GATEWAY_REDIS_TLS_ENABLED` and +`GATEWAY_REDIS_USERNAME` at startup with a clear error pointing to +`ARCHITECTURE.md §Persistence Backends`. + +> **Compound legacy prefixes (`GATEWAY_SESSION_CACHE_REDIS_USERNAME` etc.) +> are not actively rejected.** `pkg/redisconn`'s deprecated-env detector +> only watches the canonical `GATEWAY_REDIS_*` form. The compound legacy +> vars become silently inert. The architecture rule explicitly accepts this +> ("no backward-compat shim — fresh project, no production deploys to +> migrate"); operators upgrading should remove the variables from their +> deployment manifests. + +### Telemetry + +`redisconn.Instrument` wires `redisotel.InstrumentTracing` (with +`WithDBStatement(false)`) and `redisotel.InstrumentMetrics`. This is the +first gateway release that emits Redis tracing and connection-pool metrics; +downstream dashboards will start populating without further changes. + +## Consequences + +- Gateway test code that previously constructed a Redis client per adapter + must now construct one client and pass it to every adapter under test + (see `internal/session/redis_test.go`, `internal/replay/redis_test.go`, + `internal/events/subscriber_test.go`, + `internal/events/client_subscriber_test.go`). +- Operators must set `GATEWAY_REDIS_PASSWORD`. A passwordless local Redis + is still acceptable as long as a placeholder password is supplied to the + binary; Redis without `requirepass` accepts AUTH unconditionally. +- The integration test harness passes `GATEWAY_REDIS_PASSWORD = + "integration"` alongside `GATEWAY_REDIS_MASTER_ADDR` (see + `integration/internal/harness/gatewayservice.go`). 
diff --git a/gateway/docs/runbook.md b/gateway/docs/runbook.md index beae48c..287a5f4 100644 --- a/gateway/docs/runbook.md +++ b/gateway/docs/runbook.md @@ -7,25 +7,28 @@ readiness, shutdown, and push or revoke incidents. Before starting the process, confirm: -- `GATEWAY_SESSION_CACHE_REDIS_ADDR` points to the Redis deployment used for - session lookup and both internal event streams. +- `GATEWAY_REDIS_MASTER_ADDR` and `GATEWAY_REDIS_PASSWORD` point to the Redis + deployment used for session lookup, replay reservations, session-events + consumption, and client-events fan-out. Optional read replicas may be + listed in `GATEWAY_REDIS_REPLICA_ADDRS` (currently unused; reserved for + future read-routing). - `GATEWAY_SESSION_EVENTS_REDIS_STREAM` and - `GATEWAY_CLIENT_EVENTS_REDIS_STREAM` reference existing Redis Stream keys or - the names publishers will use. + `GATEWAY_CLIENT_EVENTS_REDIS_STREAM` reference existing Redis Stream keys + or the names publishers will use. - `GATEWAY_RESPONSE_SIGNER_PRIVATE_KEY_PEM_PATH` points to a readable PKCS#8 PEM-encoded Ed25519 private key. -- the configured Redis ACL, DB, TLS, and key-prefix settings match the target - environment. +- the configured Redis DB and key-prefix settings match the target + environment. Per `ARCHITECTURE.md §Persistence Backends`, Redis traffic is + password-protected and TLS is disabled by policy; the deprecated + `GATEWAY_REDIS_TLS_ENABLED` and `GATEWAY_REDIS_USERNAME` variables are no + longer accepted and cause a hard fail at startup. -At startup the process performs bounded `PING` checks for: +At startup the process opens one shared `*redis.Client` (instrumented via +OpenTelemetry tracing and metrics) and performs one bounded `PING`. The +session cache, replay store, session-events subscriber, and client-events +subscriber all use that client. -- the Redis-backed session cache adapter; -- the replay store; -- the session event subscriber; -- the client event subscriber. 
- -Startup fails fast if any of those checks fail or if the signer key cannot be -loaded. +Startup fails fast if the ping fails or if the signer key cannot be loaded. Expected listener state after a healthy start: diff --git a/gateway/go.mod b/gateway/go.mod index ca06495..e5aebf9 100644 --- a/gateway/go.mod +++ b/gateway/go.mod @@ -1,10 +1,11 @@ module galaxy/gateway -go 1.26.0 +go 1.26.1 require ( buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1 buf.build/go/protovalidate v1.1.3 + galaxy/redisconn v0.0.0-00010101000000-000000000000 github.com/alicebob/miniredis/v2 v2.37.0 github.com/getkin/kin-openapi v0.135.0 github.com/gin-gonic/gin v1.12.0 @@ -61,7 +62,7 @@ require ( github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-isatty v0.0.21 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect @@ -77,6 +78,8 @@ require ( github.com/prometheus/procfs v0.20.1 // indirect github.com/quic-go/qpack v0.6.0 // indirect github.com/quic-go/quic-go v0.59.0 // indirect + github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 // indirect + github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.3.1 // indirect github.com/woodsbury/decimal128 v1.3.0 // indirect @@ -86,14 +89,16 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect go.opentelemetry.io/proto/otlp v1.10.0 // indirect go.uber.org/atomic v1.11.0 // indirect - go.uber.org/multierr v1.10.0 // indirect + go.uber.org/multierr v1.11.0 // indirect go.yaml.in/yaml/v2 v2.4.4 // indirect golang.org/x/arch v0.25.0 // indirect - golang.org/x/crypto 
v0.49.0 // indirect - golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 // indirect - golang.org/x/net v0.52.0 // indirect - golang.org/x/sys v0.42.0 // indirect + golang.org/x/crypto v0.50.0 // indirect + golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f // indirect + golang.org/x/net v0.53.0 // indirect + golang.org/x/sys v0.43.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) + +replace galaxy/redisconn => ../pkg/redisconn diff --git a/gateway/go.sum b/gateway/go.sum index 6db306d..aca342f 100644 --- a/gateway/go.sum +++ b/gateway/go.sum @@ -83,6 +83,7 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE= +github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ= github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -95,8 +96,8 @@ github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 
-github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.21 h1:xYae+lCNBP7QuW4PUnNG61ffM4hVIfm+zUzDuSzYLGs= +github.com/mattn/go-isatty v0.0.21/go.mod h1:ZXfXG4SQHsB/w3ZeOYbR0PrPwLy+n6xiMrJlRFqopa4= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -131,6 +132,10 @@ github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8= github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII= github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw= github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU= +github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 h1:QY4nmPHLFAJjtT5O4OMUEOxP8WVaRNOFpcbmxT2NLZU= +github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0/go.mod h1:WH8cY/0fT41Bsf341qzo8v4nx0GCE8FykAA23IVbVmo= +github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 h1:2dKdoEYBJ0CZCLPiCdvvc7luz3DPwY6hKdzjL6m1eHE= +github.com/redis/go-redis/extra/redisotel/v9 v9.18.0/go.mod h1:WzkrVG9ro9BwCQD0eJOWn6AGL4Z1CleGflM45w1hu10= github.com/redis/go-redis/v9 v9.18.0 h1:pMkxYPkEbMPwRdenAzUNyFNrDgHx9U+DrBabWNfSRQs= github.com/redis/go-redis/v9 v9.18.0/go.mod h1:k3ufPphLU5YXwNTUcCRXGxUoF1fqxnhFQmscfkCoDA0= github.com/rodaine/protogofakeit v0.1.1 h1:ZKouljuRM3A+TArppfBqnH8tGZHOwM/pjvtXe9DaXH8= @@ -196,8 +201,8 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= 
-go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= -go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.yaml.in/yaml/v2 v2.4.4 h1:tuyd0P+2Ont/d6e2rl3be67goVK4R6deVxCUX5vyPaQ= @@ -206,24 +211,24 @@ go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/arch v0.25.0 h1:qnk6Ksugpi5Bz32947rkUgDt9/s5qvqDPl/gBKdMJLE= golang.org/x/arch v0.25.0/go.mod h1:0X+GdSIP+kL5wPmpK7sdkEVTt2XoYP0cSjQSbZBwOi8= -golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= -golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= -golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 h1:SbTAbRFnd5kjQXbczszQ0hdk3ctwYf3qBNH9jIsGclE= -golang.org/x/exp v0.0.0-20250813145105-42675adae3e6/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4= -golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= -golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= -golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= +golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f h1:W3F4c+6OLc6H2lb//N1q4WpJkhzJCK5J6kUi1NTVXfM= +golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f/go.mod 
h1:J1xhfL/vlindoeF/aINzNzt2Bket5bjo9sdOYzOsU80= +golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA= +golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= +golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= golang.org/x/time v0.15.0 h1:bbrp8t3bGUeFOx08pvsMYRTCVSMk89u4tKbNOZbp88U= golang.org/x/time v0.15.0/go.mod h1:Y4YMaQmXwGQZoFaVFk4YpCt4FLQMYKZe9oeV/f4MSno= gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA= google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 h1:m8qni9SQFH0tJc1X0vmnpw/0t+AImlSvp30sEupozUg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 h1:XF8+t6QQiS0o9ArVan/HW8Q7cycNPGsJf6GA2nXxYAg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM= google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= diff --git a/gateway/internal/config/config.go b/gateway/internal/config/config.go index ef4a062..b9e4681 100644 --- 
a/gateway/internal/config/config.go +++ b/gateway/internal/config/config.go @@ -9,8 +9,12 @@ import ( "strconv" "strings" "time" + + "galaxy/redisconn" ) +const gatewayRedisEnvPrefix = "GATEWAY" + const ( // shutdownTimeoutEnvVar names the environment variable that controls the // maximum time granted to each component shutdown call. @@ -143,35 +147,14 @@ const ( // rate-limit burst. authenticatedGRPCMessageClassRateLimitBurstEnvVar = "GATEWAY_AUTHENTICATED_GRPC_ANTI_ABUSE_MESSAGE_CLASS_RATE_LIMIT_BURST" - // sessionCacheRedisAddrEnvVar names the environment variable that configures - // the Redis address used for SessionCache lookups. - sessionCacheRedisAddrEnvVar = "GATEWAY_SESSION_CACHE_REDIS_ADDR" - - // sessionCacheRedisUsernameEnvVar names the environment variable that - // configures the Redis username used for SessionCache lookups. - sessionCacheRedisUsernameEnvVar = "GATEWAY_SESSION_CACHE_REDIS_USERNAME" - - // sessionCacheRedisPasswordEnvVar names the environment variable that - // configures the Redis password used for SessionCache lookups. - sessionCacheRedisPasswordEnvVar = "GATEWAY_SESSION_CACHE_REDIS_PASSWORD" - - // sessionCacheRedisDBEnvVar names the environment variable that configures - // the Redis logical database used for SessionCache lookups. - sessionCacheRedisDBEnvVar = "GATEWAY_SESSION_CACHE_REDIS_DB" - // sessionCacheRedisKeyPrefixEnvVar names the environment variable that // configures the Redis key prefix used for SessionCache records. sessionCacheRedisKeyPrefixEnvVar = "GATEWAY_SESSION_CACHE_REDIS_KEY_PREFIX" // sessionCacheRedisLookupTimeoutEnvVar names the environment variable that - // configures the timeout used for SessionCache Redis lookups and startup - // connectivity checks. + // configures the timeout used for SessionCache Redis lookups. 
sessionCacheRedisLookupTimeoutEnvVar = "GATEWAY_SESSION_CACHE_REDIS_LOOKUP_TIMEOUT" - // sessionCacheRedisTLSEnabledEnvVar names the environment variable that - // configures whether SessionCache Redis connections use TLS. - sessionCacheRedisTLSEnabledEnvVar = "GATEWAY_SESSION_CACHE_REDIS_TLS_ENABLED" - // replayRedisKeyPrefixEnvVar names the environment variable that configures // the Redis key prefix used for authenticated replay reservations. replayRedisKeyPrefixEnvVar = "GATEWAY_REPLAY_REDIS_KEY_PREFIX" @@ -333,7 +316,6 @@ const ( defaultAuthenticatedGRPCMessageClassRateLimitRequests = 60 defaultAuthenticatedGRPCMessageClassRateLimitBurst = 20 - defaultSessionCacheRedisDB = 0 defaultSessionCacheRedisKeyPrefix = "gateway:session:" defaultSessionCacheRedisLookupTimeout = 250 * time.Millisecond @@ -535,29 +517,16 @@ type AuthenticatedGRPCConfig struct { AntiAbuse AuthenticatedGRPCAntiAbuseConfig } -// SessionCacheRedisConfig describes the Redis connection used for authenticated -// SessionCache lookups. +// SessionCacheRedisConfig describes the namespace and timeout used for +// authenticated SessionCache lookups. Connection topology is shared with the +// other Redis-backed gateway components and lives on Config.Redis (see +// `pkg/redisconn`). type SessionCacheRedisConfig struct { - // Addr is the Redis endpoint used for SessionCache requests. - Addr string - - // Username is the optional Redis ACL username used for authentication. - Username string - - // Password is the optional Redis password used for authentication. - Password string - - // DB is the Redis logical database number used for SessionCache keys. - DB int - // KeyPrefix is prepended to every SessionCache Redis key. KeyPrefix string // LookupTimeout bounds individual SessionCache Redis operations. LookupTimeout time.Duration - - // TLSEnabled reports whether SessionCache Redis connections should use TLS. 
- TLSEnabled bool } // ReplayRedisConfig describes the Redis namespace and timeout used for @@ -635,6 +604,11 @@ type Config struct { // AuthenticatedGRPC configures the authenticated gRPC listener. AuthenticatedGRPC AuthenticatedGRPCConfig + // Redis carries the master/replica/password connection topology shared by + // every gateway Redis component, sourced from the GATEWAY_REDIS_* + // environment variables managed by `pkg/redisconn`. + Redis redisconn.Config + // SessionCacheRedis configures the Redis-backed authenticated SessionCache. SessionCacheRedis SessionCacheRedisConfig @@ -759,12 +733,10 @@ func DefaultLoggingConfig() LoggingConfig { return LoggingConfig{Level: defaultLogLevel} } -// DefaultSessionCacheRedisConfig returns the default optional settings for the -// Redis-backed authenticated SessionCache. Addr remains empty and must be -// supplied explicitly. +// DefaultSessionCacheRedisConfig returns the default optional namespace and +// timeout settings for the Redis-backed authenticated SessionCache. 
func DefaultSessionCacheRedisConfig() SessionCacheRedisConfig { return SessionCacheRedisConfig{ - DB: defaultSessionCacheRedisDB, KeyPrefix: defaultSessionCacheRedisKeyPrefix, LookupTimeout: defaultSessionCacheRedisLookupTimeout, } @@ -827,6 +799,7 @@ func LoadFromEnv() (Config, error) { UserService: DefaultUserServiceConfig(), AdminHTTP: DefaultAdminHTTPConfig(), AuthenticatedGRPC: DefaultAuthenticatedGRPCConfig(), + Redis: redisconn.DefaultConfig(), SessionCacheRedis: DefaultSessionCacheRedisConfig(), ReplayRedis: DefaultReplayRedisConfig(), SessionEventsRedis: DefaultSessionEventsRedisConfig(), @@ -977,26 +950,11 @@ func LoadFromEnv() (Config, error) { } cfg.AuthenticatedGRPC.AntiAbuse.MessageClass = messageClassRateLimit - rawSessionCacheRedisAddr, ok := os.LookupEnv(sessionCacheRedisAddrEnvVar) - if ok { - cfg.SessionCacheRedis.Addr = rawSessionCacheRedisAddr - } - - rawSessionCacheRedisUsername, ok := os.LookupEnv(sessionCacheRedisUsernameEnvVar) - if ok { - cfg.SessionCacheRedis.Username = rawSessionCacheRedisUsername - } - - rawSessionCacheRedisPassword, ok := os.LookupEnv(sessionCacheRedisPasswordEnvVar) - if ok { - cfg.SessionCacheRedis.Password = rawSessionCacheRedisPassword - } - - sessionCacheRedisDB, err := loadIntEnvWithDefault(sessionCacheRedisDBEnvVar, cfg.SessionCacheRedis.DB) + redisConn, err := redisconn.LoadFromEnv(gatewayRedisEnvPrefix) if err != nil { return Config{}, err } - cfg.SessionCacheRedis.DB = sessionCacheRedisDB + cfg.Redis = redisConn rawSessionCacheRedisKeyPrefix, ok := os.LookupEnv(sessionCacheRedisKeyPrefixEnvVar) if ok { @@ -1009,12 +967,6 @@ func LoadFromEnv() (Config, error) { } cfg.SessionCacheRedis.LookupTimeout = sessionCacheRedisLookupTimeout - sessionCacheRedisTLSEnabled, err := loadBoolEnvWithDefault(sessionCacheRedisTLSEnabledEnvVar, cfg.SessionCacheRedis.TLSEnabled) - if err != nil { - return Config{}, err - } - cfg.SessionCacheRedis.TLSEnabled = sessionCacheRedisTLSEnabled - rawReplayRedisKeyPrefix, ok := 
os.LookupEnv(replayRedisKeyPrefixEnvVar) if ok { cfg.ReplayRedis.KeyPrefix = rawReplayRedisKeyPrefix @@ -1222,11 +1174,11 @@ func LoadFromEnv() (Config, error) { ); err != nil { return Config{}, err } - if strings.TrimSpace(cfg.SessionCacheRedis.Addr) == "" { - return Config{}, fmt.Errorf("load gateway config: %s must not be empty", sessionCacheRedisAddrEnvVar) + if err := cfg.Redis.Validate(); err != nil { + return Config{}, fmt.Errorf("load gateway config: redis: %w", err) } - if cfg.SessionCacheRedis.DB < 0 { - return Config{}, fmt.Errorf("load gateway config: %s must not be negative", sessionCacheRedisDBEnvVar) + if strings.TrimSpace(cfg.SessionCacheRedis.KeyPrefix) == "" { + return Config{}, fmt.Errorf("load gateway config: %s must not be empty", sessionCacheRedisKeyPrefixEnvVar) } if cfg.SessionCacheRedis.LookupTimeout <= 0 { return Config{}, fmt.Errorf("load gateway config: %s must be positive", sessionCacheRedisLookupTimeoutEnvVar) diff --git a/gateway/internal/config/config_test.go b/gateway/internal/config/config_test.go index 949cabc..76bac12 100644 --- a/gateway/internal/config/config_test.go +++ b/gateway/internal/config/config_test.go @@ -11,12 +11,36 @@ import ( "testing" "time" + "galaxy/redisconn" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var configEnvMu sync.Mutex +const ( + gatewayRedisMasterAddrEnvVar = "GATEWAY_REDIS_MASTER_ADDR" + gatewayRedisPasswordEnvVar = "GATEWAY_REDIS_PASSWORD" + gatewayRedisReplicaAddrsEnvVar = "GATEWAY_REDIS_REPLICA_ADDRS" + gatewayRedisDBEnvVar = "GATEWAY_REDIS_DB" + gatewayRedisOpTimeoutEnvVar = "GATEWAY_REDIS_OPERATION_TIMEOUT" + gatewayRedisTLSEnabledEnvVar = "GATEWAY_REDIS_TLS_ENABLED" + gatewayRedisUsernameEnvVar = "GATEWAY_REDIS_USERNAME" +) + +var ( + defaultTestRedisMasterAddrValue = "127.0.0.1:6379" + defaultTestRedisPasswordValue = "secret" +) + +func defaultRedisConnConfigForTest() redisconn.Config { + cfg := redisconn.DefaultConfig() + cfg.MasterAddr = 
defaultTestRedisMasterAddrValue + cfg.Password = defaultTestRedisPasswordValue + return cfg +} + func TestLoadFromEnv(t *testing.T) { customResponseSignerPrivateKeyPEMPath := new(string) *customResponseSignerPrivateKeyPEMPath = writeTestResponseSignerPEMFile(t) @@ -90,6 +114,7 @@ func TestLoadFromEnv(t *testing.T) { authenticatedGRPCAddr *string authenticatedGRPCFreshnessWindow *string sessionCacheRedisAddr *string + skipRedis bool responseSignerPrivateKeyPEMPath *string want Config wantErr string @@ -104,9 +129,8 @@ func TestLoadFromEnv(t *testing.T) { PublicHTTP: DefaultPublicHTTPConfig(), AdminHTTP: DefaultAdminHTTPConfig(), AuthenticatedGRPC: DefaultAuthenticatedGRPCConfig(), + Redis: defaultRedisConnConfigForTest(), SessionCacheRedis: SessionCacheRedisConfig{ - Addr: "127.0.0.1:6379", - DB: defaultSessionCacheRedisDB, KeyPrefix: defaultSessionCacheRedisKeyPrefix, LookupTimeout: defaultSessionCacheRedisLookupTimeout, }, @@ -135,9 +159,8 @@ func TestLoadFromEnv(t *testing.T) { PublicHTTP: DefaultPublicHTTPConfig(), AdminHTTP: DefaultAdminHTTPConfig(), AuthenticatedGRPC: DefaultAuthenticatedGRPCConfig(), + Redis: defaultRedisConnConfigForTest(), SessionCacheRedis: SessionCacheRedisConfig{ - Addr: "127.0.0.1:6379", - DB: defaultSessionCacheRedisDB, KeyPrefix: defaultSessionCacheRedisKeyPrefix, LookupTimeout: defaultSessionCacheRedisLookupTimeout, }, @@ -170,9 +193,8 @@ func TestLoadFromEnv(t *testing.T) { }(), AdminHTTP: DefaultAdminHTTPConfig(), AuthenticatedGRPC: DefaultAuthenticatedGRPCConfig(), + Redis: defaultRedisConnConfigForTest(), SessionCacheRedis: SessionCacheRedisConfig{ - Addr: "127.0.0.1:6379", - DB: defaultSessionCacheRedisDB, KeyPrefix: defaultSessionCacheRedisKeyPrefix, LookupTimeout: defaultSessionCacheRedisLookupTimeout, }, @@ -204,9 +226,8 @@ func TestLoadFromEnv(t *testing.T) { }, AdminHTTP: DefaultAdminHTTPConfig(), AuthenticatedGRPC: DefaultAuthenticatedGRPCConfig(), + Redis: defaultRedisConnConfigForTest(), SessionCacheRedis: 
SessionCacheRedisConfig{ - Addr: "127.0.0.1:6379", - DB: defaultSessionCacheRedisDB, KeyPrefix: defaultSessionCacheRedisKeyPrefix, LookupTimeout: defaultSessionCacheRedisLookupTimeout, }, @@ -238,9 +259,8 @@ func TestLoadFromEnv(t *testing.T) { }, AdminHTTP: DefaultAdminHTTPConfig(), AuthenticatedGRPC: DefaultAuthenticatedGRPCConfig(), + Redis: defaultRedisConnConfigForTest(), SessionCacheRedis: SessionCacheRedisConfig{ - Addr: "127.0.0.1:6379", - DB: defaultSessionCacheRedisDB, KeyPrefix: defaultSessionCacheRedisKeyPrefix, LookupTimeout: defaultSessionCacheRedisLookupTimeout, }, @@ -273,9 +293,8 @@ func TestLoadFromEnv(t *testing.T) { cfg.Addr = "127.0.0.1:9191" return cfg }(), + Redis: defaultRedisConnConfigForTest(), SessionCacheRedis: SessionCacheRedisConfig{ - Addr: "127.0.0.1:6379", - DB: defaultSessionCacheRedisDB, KeyPrefix: defaultSessionCacheRedisKeyPrefix, LookupTimeout: defaultSessionCacheRedisLookupTimeout, }, @@ -308,9 +327,8 @@ func TestLoadFromEnv(t *testing.T) { cfg.FreshnessWindow = 90 * time.Second return cfg }(), + Redis: defaultRedisConnConfigForTest(), SessionCacheRedis: SessionCacheRedisConfig{ - Addr: "127.0.0.1:6379", - DB: defaultSessionCacheRedisDB, KeyPrefix: defaultSessionCacheRedisKeyPrefix, LookupTimeout: defaultSessionCacheRedisLookupTimeout, }, @@ -378,21 +396,10 @@ func TestLoadFromEnv(t *testing.T) { wantErr: "parse " + authenticatedGRPCFreshnessWindowEnvVar, }, { - name: "missing session cache redis address", + name: "missing redis master addr", responseSignerPrivateKeyPEMPath: customResponseSignerPrivateKeyPEMPath, - wantErr: "GATEWAY_SESSION_CACHE_REDIS_ADDR must not be empty", - }, - { - name: "empty session cache redis address", - sessionCacheRedisAddr: emptySessionCacheRedisAddr, - responseSignerPrivateKeyPEMPath: customResponseSignerPrivateKeyPEMPath, - wantErr: "GATEWAY_SESSION_CACHE_REDIS_ADDR must not be empty", - }, - { - name: "whitespace session cache redis address", - sessionCacheRedisAddr: 
whitespaceSessionCacheRedisAddr, - responseSignerPrivateKeyPEMPath: customResponseSignerPrivateKeyPEMPath, - wantErr: "GATEWAY_SESSION_CACHE_REDIS_ADDR must not be empty", + skipRedis: true, + wantErr: "GATEWAY_REDIS_MASTER_ADDR must be set", }, { name: "missing response signer private key path", @@ -412,7 +419,8 @@ func TestLoadFromEnv(t *testing.T) { userServiceBaseURLEnvVar, authenticatedGRPCAddrEnvVar, authenticatedGRPCFreshnessWindowEnvVar, - sessionCacheRedisAddrEnvVar, + gatewayRedisMasterAddrEnvVar, + gatewayRedisPasswordEnvVar, sessionEventsRedisStreamEnvVar, clientEventsRedisStreamEnvVar, responseSignerPrivateKeyPEMPathEnvVar, @@ -424,7 +432,14 @@ func TestLoadFromEnv(t *testing.T) { setEnvValue(t, userServiceBaseURLEnvVar, tt.userServiceBaseURL) setEnvValue(t, authenticatedGRPCAddrEnvVar, tt.authenticatedGRPCAddr) setEnvValue(t, authenticatedGRPCFreshnessWindowEnvVar, tt.authenticatedGRPCFreshnessWindow) - setEnvValue(t, sessionCacheRedisAddrEnvVar, tt.sessionCacheRedisAddr) + redisAddr := tt.sessionCacheRedisAddr + if !tt.skipRedis && redisAddr == nil { + redisAddr = customSessionCacheRedisAddr + } + setEnvValue(t, gatewayRedisMasterAddrEnvVar, redisAddr) + if !tt.skipRedis { + setEnvValue(t, gatewayRedisPasswordEnvVar, &defaultTestRedisPasswordValue) + } setEnvValue(t, sessionEventsRedisStreamEnvVar, customSessionEventsRedisStream) setEnvValue(t, clientEventsRedisStreamEnvVar, customClientEventsRedisStream) setEnvValue(t, responseSignerPrivateKeyPEMPathEnvVar, tt.responseSignerPrivateKeyPEMPath) @@ -490,7 +505,7 @@ func TestLoadFromEnvOperationalSettings(t *testing.T) { { name: "custom operational settings", envs: map[string]*string{ - sessionCacheRedisAddrEnvVar: customSessionCacheRedisAddr, + gatewayRedisMasterAddrEnvVar: customSessionCacheRedisAddr, sessionEventsRedisStreamEnvVar: customSessionEventsRedisStream, clientEventsRedisStreamEnvVar: customClientEventsRedisStream, responseSignerPrivateKeyPEMPathEnvVar: customResponseSignerPrivateKeyPEMPath, 
@@ -516,7 +531,7 @@ func TestLoadFromEnvOperationalSettings(t *testing.T) { { name: "invalid log level", envs: map[string]*string{ - sessionCacheRedisAddrEnvVar: customSessionCacheRedisAddr, + gatewayRedisMasterAddrEnvVar: customSessionCacheRedisAddr, sessionEventsRedisStreamEnvVar: customSessionEventsRedisStream, clientEventsRedisStreamEnvVar: customClientEventsRedisStream, responseSignerPrivateKeyPEMPathEnvVar: customResponseSignerPrivateKeyPEMPath, @@ -608,13 +623,14 @@ func TestLoadFromEnvAuthService(t *testing.T) { authServiceBaseURLEnvVar, userServiceBaseURLEnvVar, logLevelEnvVar, - sessionCacheRedisAddrEnvVar, + gatewayRedisMasterAddrEnvVar, sessionEventsRedisStreamEnvVar, clientEventsRedisStreamEnvVar, responseSignerPrivateKeyPEMPathEnvVar, ) setEnvValue(t, authServiceBaseURLEnvVar, tt.value) - setEnvValue(t, sessionCacheRedisAddrEnvVar, customSessionCacheRedisAddr) + setEnvValue(t, gatewayRedisMasterAddrEnvVar, customSessionCacheRedisAddr) + setEnvValue(t, gatewayRedisPasswordEnvVar, &defaultTestRedisPasswordValue) setEnvValue(t, sessionEventsRedisStreamEnvVar, customSessionEventsRedisStream) setEnvValue(t, clientEventsRedisStreamEnvVar, customClientEventsRedisStream) setEnvValue(t, responseSignerPrivateKeyPEMPathEnvVar, customResponseSignerPrivateKeyPEMPath) @@ -674,13 +690,14 @@ func TestLoadFromEnvUserService(t *testing.T) { authServiceBaseURLEnvVar, userServiceBaseURLEnvVar, logLevelEnvVar, - sessionCacheRedisAddrEnvVar, + gatewayRedisMasterAddrEnvVar, sessionEventsRedisStreamEnvVar, clientEventsRedisStreamEnvVar, responseSignerPrivateKeyPEMPathEnvVar, ) setEnvValue(t, userServiceBaseURLEnvVar, tt.value) - setEnvValue(t, sessionCacheRedisAddrEnvVar, customSessionCacheRedisAddr) + setEnvValue(t, gatewayRedisMasterAddrEnvVar, customSessionCacheRedisAddr) + setEnvValue(t, gatewayRedisPasswordEnvVar, &defaultTestRedisPasswordValue) setEnvValue(t, sessionEventsRedisStreamEnvVar, customSessionEventsRedisStream) setEnvValue(t, clientEventsRedisStreamEnvVar, 
customClientEventsRedisStream) setEnvValue(t, responseSignerPrivateKeyPEMPathEnvVar, customResponseSignerPrivateKeyPEMPath) @@ -811,7 +828,7 @@ func TestLoadFromEnvAuthenticatedGRPCAntiAbuse(t *testing.T) { t.Run(tt.name, func(t *testing.T) { restoreEnvs( t, - sessionCacheRedisAddrEnvVar, + gatewayRedisMasterAddrEnvVar, authenticatedGRPCIPRateLimitRequestsEnvVar, authenticatedGRPCIPRateLimitWindowEnvVar, authenticatedGRPCIPRateLimitBurstEnvVar, @@ -829,7 +846,8 @@ func TestLoadFromEnvAuthenticatedGRPCAntiAbuse(t *testing.T) { responseSignerPrivateKeyPEMPathEnvVar, ) - setEnvValue(t, sessionCacheRedisAddrEnvVar, customSessionCacheRedisAddr) + setEnvValue(t, gatewayRedisMasterAddrEnvVar, customSessionCacheRedisAddr) + setEnvValue(t, gatewayRedisPasswordEnvVar, &defaultTestRedisPasswordValue) setEnvValue(t, sessionEventsRedisStreamEnvVar, customSessionEventsRedisStream) setEnvValue(t, clientEventsRedisStreamEnvVar, customClientEventsRedisStream) setEnvValue(t, responseSignerPrivateKeyPEMPathEnvVar, customResponseSignerPrivateKeyPEMPath) @@ -859,7 +877,7 @@ func TestLoadFromEnvAuthenticatedGRPCAntiAbuse(t *testing.T) { } } -func TestLoadFromEnvSessionCacheRedis(t *testing.T) { +func TestLoadFromEnvRedis(t *testing.T) { customResponseSignerPrivateKeyPEMPath := new(string) *customResponseSignerPrivateKeyPEMPath = writeTestResponseSignerPEMFile(t) @@ -872,8 +890,8 @@ func TestLoadFromEnvSessionCacheRedis(t *testing.T) { customRedisAddr := new(string) *customRedisAddr = "127.0.0.1:6380" - customRedisUsername := new(string) - *customRedisUsername = "gateway" + customRedisReplicas := new(string) + *customRedisReplicas = "127.0.0.1:6481,127.0.0.1:6482" customRedisPassword := new(string) *customRedisPassword = "secret" @@ -881,14 +899,14 @@ func TestLoadFromEnvSessionCacheRedis(t *testing.T) { customRedisDB := new(string) *customRedisDB = "7" + customRedisOpTimeout := new(string) + *customRedisOpTimeout = "750ms" + customRedisKeyPrefix := new(string) *customRedisKeyPrefix = 
"edge:session:" customRedisLookupTimeout := new(string) - *customRedisLookupTimeout = "750ms" - - customRedisTLSEnabled := new(string) - *customRedisTLSEnabled = "true" + *customRedisLookupTimeout = "950ms" negativeRedisDB := new(string) *negativeRedisDB = "-1" @@ -896,67 +914,100 @@ func TestLoadFromEnvSessionCacheRedis(t *testing.T) { invalidRedisLookupTimeout := new(string) *invalidRedisLookupTimeout = "later" - invalidRedisTLSEnabled := new(string) - *invalidRedisTLSEnabled = "maybe" + deprecatedTLSEnabled := new(string) + *deprecatedTLSEnabled = "true" + + deprecatedUsername := new(string) + *deprecatedUsername = "gateway" + + type want struct { + conn redisconn.Config + sessionRedis SessionCacheRedisConfig + } tests := []struct { name string envs map[string]*string - want SessionCacheRedisConfig + want *want wantErr string }{ { name: "custom redis config", envs: map[string]*string{ - sessionCacheRedisAddrEnvVar: customRedisAddr, - sessionCacheRedisUsernameEnvVar: customRedisUsername, - sessionCacheRedisPasswordEnvVar: customRedisPassword, - sessionCacheRedisDBEnvVar: customRedisDB, + gatewayRedisMasterAddrEnvVar: customRedisAddr, + gatewayRedisReplicaAddrsEnvVar: customRedisReplicas, + gatewayRedisPasswordEnvVar: customRedisPassword, + gatewayRedisDBEnvVar: customRedisDB, + gatewayRedisOpTimeoutEnvVar: customRedisOpTimeout, sessionCacheRedisKeyPrefixEnvVar: customRedisKeyPrefix, sessionCacheRedisLookupTimeoutEnvVar: customRedisLookupTimeout, - sessionCacheRedisTLSEnabledEnvVar: customRedisTLSEnabled, }, - want: SessionCacheRedisConfig{ - Addr: "127.0.0.1:6380", - Username: "gateway", - Password: "secret", - DB: 7, - KeyPrefix: "edge:session:", - LookupTimeout: 750 * time.Millisecond, - TLSEnabled: true, + want: &want{ + conn: redisconn.Config{ + MasterAddr: "127.0.0.1:6380", + ReplicaAddrs: []string{"127.0.0.1:6481", "127.0.0.1:6482"}, + Password: "secret", + DB: 7, + OperationTimeout: 750 * time.Millisecond, + }, + sessionRedis: SessionCacheRedisConfig{ + 
KeyPrefix: "edge:session:", + LookupTimeout: 950 * time.Millisecond, + }, }, }, { - name: "negative redis db", + name: "negative redis db rejected by pkg/redisconn", envs: map[string]*string{ - sessionCacheRedisAddrEnvVar: customRedisAddr, - sessionCacheRedisDBEnvVar: negativeRedisDB, + gatewayRedisMasterAddrEnvVar: customRedisAddr, + gatewayRedisPasswordEnvVar: customRedisPassword, + gatewayRedisDBEnvVar: negativeRedisDB, }, - wantErr: sessionCacheRedisDBEnvVar + " must not be negative", + wantErr: "redis db must not be negative", }, { - name: "invalid redis lookup timeout", + name: "invalid session cache lookup timeout", envs: map[string]*string{ - sessionCacheRedisAddrEnvVar: customRedisAddr, + gatewayRedisMasterAddrEnvVar: customRedisAddr, + gatewayRedisPasswordEnvVar: customRedisPassword, sessionCacheRedisLookupTimeoutEnvVar: invalidRedisLookupTimeout, }, wantErr: "parse " + sessionCacheRedisLookupTimeoutEnvVar, }, { - name: "invalid redis tls flag", + name: "missing redis password rejected", envs: map[string]*string{ - sessionCacheRedisAddrEnvVar: customRedisAddr, - sessionCacheRedisTLSEnabledEnvVar: invalidRedisTLSEnabled, + gatewayRedisMasterAddrEnvVar: customRedisAddr, }, - wantErr: "parse " + sessionCacheRedisTLSEnabledEnvVar, + wantErr: gatewayRedisPasswordEnvVar + " must be set", + }, + { + name: "deprecated tls enabled var rejected", + envs: map[string]*string{ + gatewayRedisMasterAddrEnvVar: customRedisAddr, + gatewayRedisPasswordEnvVar: customRedisPassword, + gatewayRedisTLSEnabledEnvVar: deprecatedTLSEnabled, + }, + wantErr: gatewayRedisTLSEnabledEnvVar, + }, + { + name: "deprecated username var rejected", + envs: map[string]*string{ + gatewayRedisMasterAddrEnvVar: customRedisAddr, + gatewayRedisPasswordEnvVar: customRedisPassword, + gatewayRedisUsernameEnvVar: deprecatedUsername, + }, + wantErr: gatewayRedisUsernameEnvVar, }, } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - restoreEnvs(t, 
append(append(append(sessionCacheRedisEnvVars(), sessionEventsRedisEnvVars()...), clientEventsRedisEnvVars()...), responseSignerPrivateKeyPEMPathEnvVar)...) + redisEnvVars := sessionCacheRedisEnvVars() + restoreEnvs(t, append(append(append(redisEnvVars, sessionEventsRedisEnvVars()...), clientEventsRedisEnvVars()...), responseSignerPrivateKeyPEMPathEnvVar)...) + for _, envVar := range redisEnvVars { + setEnvValue(t, envVar, nil) + } setEnvValue(t, responseSignerPrivateKeyPEMPathEnvVar, customResponseSignerPrivateKeyPEMPath) setEnvValue(t, sessionEventsRedisStreamEnvVar, customSessionEventsRedisStream) setEnvValue(t, clientEventsRedisStreamEnvVar, customClientEventsRedisStream) @@ -973,7 +1024,9 @@ func TestLoadFromEnvSessionCacheRedis(t *testing.T) { } require.NoError(t, err) - assert.Equal(t, tt.want, cfg.SessionCacheRedis) + require.NotNil(t, tt.want) + assert.Equal(t, tt.want.conn, cfg.Redis) + assert.Equal(t, tt.want.sessionRedis, cfg.SessionCacheRedis) }) } } @@ -1012,7 +1065,7 @@ func TestLoadFromEnvReplayRedis(t *testing.T) { { name: "custom replay redis config", envs: map[string]*string{ - sessionCacheRedisAddrEnvVar: customSessionCacheRedisAddr, + gatewayRedisMasterAddrEnvVar: customSessionCacheRedisAddr, replayRedisKeyPrefixEnvVar: customReplayRedisKeyPrefix, replayRedisReserveTimeoutEnvVar: customReplayRedisReserveTimeout, }, @@ -1024,7 +1077,7 @@ func TestLoadFromEnvReplayRedis(t *testing.T) { { name: "empty replay redis key prefix", envs: map[string]*string{ - sessionCacheRedisAddrEnvVar: customSessionCacheRedisAddr, + gatewayRedisMasterAddrEnvVar: customSessionCacheRedisAddr, replayRedisKeyPrefixEnvVar: emptyReplayRedisKeyPrefix, }, wantErr: replayRedisKeyPrefixEnvVar + " must not be empty", @@ -1032,7 +1085,7 @@ func TestLoadFromEnvReplayRedis(t *testing.T) { { name: "invalid replay redis reserve timeout", envs: map[string]*string{ - sessionCacheRedisAddrEnvVar: customSessionCacheRedisAddr, + gatewayRedisMasterAddrEnvVar: customSessionCacheRedisAddr, 
replayRedisReserveTimeoutEnvVar: invalidReplayRedisReserveTimeout, }, wantErr: "parse " + replayRedisReserveTimeoutEnvVar, @@ -1096,7 +1149,7 @@ func TestLoadFromEnvSessionEventsRedis(t *testing.T) { { name: "custom session events redis config", envs: map[string]*string{ - sessionCacheRedisAddrEnvVar: customSessionCacheRedisAddr, + gatewayRedisMasterAddrEnvVar: customSessionCacheRedisAddr, sessionEventsRedisStreamEnvVar: customStream, sessionEventsRedisReadBlockTimeoutEnvVar: customReadBlockTimeout, }, @@ -1108,14 +1161,14 @@ func TestLoadFromEnvSessionEventsRedis(t *testing.T) { { name: "missing session events redis stream", envs: map[string]*string{ - sessionCacheRedisAddrEnvVar: customSessionCacheRedisAddr, + gatewayRedisMasterAddrEnvVar: customSessionCacheRedisAddr, }, wantErr: sessionEventsRedisStreamEnvVar + " must not be empty", }, { name: "empty session events redis stream", envs: map[string]*string{ - sessionCacheRedisAddrEnvVar: customSessionCacheRedisAddr, + gatewayRedisMasterAddrEnvVar: customSessionCacheRedisAddr, sessionEventsRedisStreamEnvVar: emptyStream, }, wantErr: sessionEventsRedisStreamEnvVar + " must not be empty", @@ -1123,7 +1176,7 @@ func TestLoadFromEnvSessionEventsRedis(t *testing.T) { { name: "invalid session events read block timeout", envs: map[string]*string{ - sessionCacheRedisAddrEnvVar: customSessionCacheRedisAddr, + gatewayRedisMasterAddrEnvVar: customSessionCacheRedisAddr, sessionEventsRedisStreamEnvVar: customStream, sessionEventsRedisReadBlockTimeoutEnvVar: invalidReadBlockTimeout, }, @@ -1187,7 +1240,7 @@ func TestLoadFromEnvClientEventsRedis(t *testing.T) { { name: "custom client events redis config", envs: map[string]*string{ - sessionCacheRedisAddrEnvVar: customSessionCacheRedisAddr, + gatewayRedisMasterAddrEnvVar: customSessionCacheRedisAddr, clientEventsRedisStreamEnvVar: customStream, clientEventsRedisReadBlockTimeoutEnvVar: customReadBlockTimeout, }, @@ -1199,14 +1252,14 @@ func TestLoadFromEnvClientEventsRedis(t 
*testing.T) { { name: "missing client events redis stream", envs: map[string]*string{ - sessionCacheRedisAddrEnvVar: customSessionCacheRedisAddr, + gatewayRedisMasterAddrEnvVar: customSessionCacheRedisAddr, }, wantErr: clientEventsRedisStreamEnvVar + " must not be empty", }, { name: "empty client events redis stream", envs: map[string]*string{ - sessionCacheRedisAddrEnvVar: customSessionCacheRedisAddr, + gatewayRedisMasterAddrEnvVar: customSessionCacheRedisAddr, clientEventsRedisStreamEnvVar: emptyStream, }, wantErr: clientEventsRedisStreamEnvVar + " must not be empty", @@ -1214,7 +1267,7 @@ func TestLoadFromEnvClientEventsRedis(t *testing.T) { { name: "invalid client events read block timeout", envs: map[string]*string{ - sessionCacheRedisAddrEnvVar: customSessionCacheRedisAddr, + gatewayRedisMasterAddrEnvVar: customSessionCacheRedisAddr, clientEventsRedisStreamEnvVar: customStream, clientEventsRedisReadBlockTimeoutEnvVar: invalidReadBlockTimeout, }, @@ -1331,8 +1384,9 @@ func TestLoadFromEnvPublicHTTPAntiAbuse(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { - restoreEnvs(t, append(append(append(append(publicAntiAbuseEnvVars(), sessionCacheRedisAddrEnvVar), sessionEventsRedisEnvVars()...), clientEventsRedisEnvVars()...), responseSignerPrivateKeyPEMPathEnvVar)...) - setEnvValue(t, sessionCacheRedisAddrEnvVar, requiredSessionCacheRedisAddr) + restoreEnvs(t, append(append(append(append(publicAntiAbuseEnvVars(), gatewayRedisMasterAddrEnvVar), sessionEventsRedisEnvVars()...), clientEventsRedisEnvVars()...), responseSignerPrivateKeyPEMPathEnvVar)...) 
+ setEnvValue(t, gatewayRedisMasterAddrEnvVar, requiredSessionCacheRedisAddr) + setEnvValue(t, gatewayRedisPasswordEnvVar, &defaultTestRedisPasswordValue) setEnvValue(t, sessionEventsRedisStreamEnvVar, requiredSessionEventsRedisStream) setEnvValue(t, clientEventsRedisStreamEnvVar, requiredClientEventsRedisStream) setEnvValue(t, responseSignerPrivateKeyPEMPathEnvVar, requiredResponseSignerPrivateKeyPEMPath) @@ -1444,13 +1498,15 @@ func operationalEnvVars() []string { func sessionCacheRedisEnvVars() []string { return []string{ - sessionCacheRedisAddrEnvVar, - sessionCacheRedisUsernameEnvVar, - sessionCacheRedisPasswordEnvVar, - sessionCacheRedisDBEnvVar, + gatewayRedisMasterAddrEnvVar, + gatewayRedisReplicaAddrsEnvVar, + gatewayRedisPasswordEnvVar, + gatewayRedisDBEnvVar, + gatewayRedisOpTimeoutEnvVar, + gatewayRedisTLSEnabledEnvVar, + gatewayRedisUsernameEnvVar, sessionCacheRedisKeyPrefixEnvVar, sessionCacheRedisLookupTimeoutEnvVar, - sessionCacheRedisTLSEnabledEnvVar, } } diff --git a/gateway/internal/events/client_subscriber.go b/gateway/internal/events/client_subscriber.go index 4ef4356..6b4a9be 100644 --- a/gateway/internal/events/client_subscriber.go +++ b/gateway/internal/events/client_subscriber.go @@ -3,7 +3,6 @@ package events import ( "bytes" "context" - "crypto/tls" "errors" "fmt" "strings" @@ -39,26 +38,23 @@ type RedisClientEventSubscriber struct { logger *zap.Logger metrics *telemetry.Runtime - closeOnce sync.Once startedOnce sync.Once started chan struct{} } -// NewRedisClientEventSubscriber constructs a Redis Stream subscriber that -// reuses the SessionCache Redis connection settings and forwards decoded -// client-facing events to publisher. 
-func NewRedisClientEventSubscriber(sessionCfg config.SessionCacheRedisConfig, eventsCfg config.ClientEventsRedisConfig, publisher ClientEventPublisher) (*RedisClientEventSubscriber, error) { - return NewRedisClientEventSubscriberWithObservability(sessionCfg, eventsCfg, publisher, nil, nil) +// NewRedisClientEventSubscriber constructs a Redis Stream subscriber that uses +// the provided client and forwards decoded client-facing events to publisher. +func NewRedisClientEventSubscriber(client *redis.Client, sessionCfg config.SessionCacheRedisConfig, eventsCfg config.ClientEventsRedisConfig, publisher ClientEventPublisher) (*RedisClientEventSubscriber, error) { + return NewRedisClientEventSubscriberWithObservability(client, sessionCfg, eventsCfg, publisher, nil, nil) } // NewRedisClientEventSubscriberWithObservability constructs a Redis Stream -// subscriber that also records malformed or dropped internal events. -func NewRedisClientEventSubscriberWithObservability(sessionCfg config.SessionCacheRedisConfig, eventsCfg config.ClientEventsRedisConfig, publisher ClientEventPublisher, logger *zap.Logger, metrics *telemetry.Runtime) (*RedisClientEventSubscriber, error) { - if strings.TrimSpace(sessionCfg.Addr) == "" { - return nil, errors.New("new redis client event subscriber: redis addr must not be empty") - } - if sessionCfg.DB < 0 { - return nil, errors.New("new redis client event subscriber: redis db must not be negative") +// subscriber that also records malformed or dropped internal events. The +// subscriber does not own the client; the runtime supplies a shared +// *redis.Client.
+func NewRedisClientEventSubscriberWithObservability(client *redis.Client, sessionCfg config.SessionCacheRedisConfig, eventsCfg config.ClientEventsRedisConfig, publisher ClientEventPublisher, logger *zap.Logger, metrics *telemetry.Runtime) (*RedisClientEventSubscriber, error) { + if client == nil { + return nil, errors.New("new redis client event subscriber: nil redis client") } if sessionCfg.LookupTimeout <= 0 { return nil, errors.New("new redis client event subscriber: lookup timeout must be positive") @@ -73,23 +69,12 @@ func NewRedisClientEventSubscriberWithObservability(sessionCfg config.SessionCac return nil, errors.New("new redis client event subscriber: nil publisher") } - options := &redis.Options{ - Addr: sessionCfg.Addr, - Username: sessionCfg.Username, - Password: sessionCfg.Password, - DB: sessionCfg.DB, - Protocol: 2, - DisableIdentity: true, - } - if sessionCfg.TLSEnabled { - options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } if logger == nil { logger = zap.NewNop() } return &RedisClientEventSubscriber{ - client: redis.NewClient(options), + client: client, stream: eventsCfg.Stream, pingTimeout: sessionCfg.LookupTimeout, readBlockTimeout: eventsCfg.ReadBlockTimeout, @@ -100,26 +85,6 @@ func NewRedisClientEventSubscriberWithObservability(sessionCfg config.SessionCac }, nil } -// Ping verifies that the Redis backend used for client-facing event fan-out is -// reachable within the configured timeout budget. 
-func (s *RedisClientEventSubscriber) Ping(ctx context.Context) error { - if s == nil || s.client == nil { - return errors.New("ping redis client event subscriber: nil subscriber") - } - if ctx == nil { - return errors.New("ping redis client event subscriber: nil context") - } - - pingCtx, cancel := context.WithTimeout(ctx, s.pingTimeout) - defer cancel() - - if err := s.client.Ping(pingCtx).Err(); err != nil { - return fmt.Errorf("ping redis client event subscriber: %w", err) - } - - return nil -} - // Run consumes client-facing events until ctx is canceled or Redis returns an // unexpected error. func (s *RedisClientEventSubscriber) Run(ctx context.Context) error { @@ -184,28 +149,21 @@ func (s *RedisClientEventSubscriber) resolveStartID(ctx context.Context) (string return messages[0].ID, nil } -// Shutdown closes the Redis client so a blocking stream read can terminate -// promptly during gateway shutdown. +// Shutdown is a no-op kept for App framework compatibility. The blocking +// XRead loop terminates when its context is canceled by the parent runtime, +// which also owns and closes the shared Redis client. func (s *RedisClientEventSubscriber) Shutdown(ctx context.Context) error { if ctx == nil { return errors.New("shutdown redis client event subscriber: nil context") } - return s.Close() + return nil } -// Close releases the underlying Redis client resources. +// Close is a no-op kept for backwards-compatible cleanup wiring; the +// subscriber does not own the shared Redis client.
func (s *RedisClientEventSubscriber) Close() error { - if s == nil || s.client == nil { - return nil - } - - var err error - s.closeOnce.Do(func() { - err = s.client.Close() - }) - - return err + return nil } func (s *RedisClientEventSubscriber) signalStarted() { diff --git a/gateway/internal/events/client_subscriber_test.go b/gateway/internal/events/client_subscriber_test.go index 2d2c347..009e13c 100644 --- a/gateway/internal/events/client_subscriber_test.go +++ b/gateway/internal/events/client_subscriber_test.go @@ -153,8 +153,9 @@ func TestRedisClientEventSubscriberLogsAndCountsMalformedEvents(t *testing.T) { telemetryRuntime := testutil.NewTelemetryRuntime(t, logger) subscriber, err := NewRedisClientEventSubscriberWithObservability( + newTestRedisClient(t, server), config.SessionCacheRedisConfig{ - Addr: server.Addr(), + KeyPrefix: "gateway:session:", LookupTimeout: 250 * time.Millisecond, }, config.ClientEventsRedisConfig{ @@ -166,9 +167,6 @@ func TestRedisClientEventSubscriberLogsAndCountsMalformedEvents(t *testing.T) { telemetryRuntime, ) require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, subscriber.Close()) - }) running := runTestClientEventSubscriber(t, subscriber) defer running.stop(t) @@ -195,8 +193,9 @@ func newTestRedisClientEventSubscriber(t *testing.T, server *miniredis.Miniredis t.Helper() subscriber, err := NewRedisClientEventSubscriber( + newTestRedisClient(t, server), config.SessionCacheRedisConfig{ - Addr: server.Addr(), + KeyPrefix: "gateway:session:", LookupTimeout: 250 * time.Millisecond, }, config.ClientEventsRedisConfig{ @@ -207,10 +206,6 @@ func newTestRedisClientEventSubscriber(t *testing.T, server *miniredis.Miniredis ) require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, subscriber.Close()) - }) - return subscriber } diff --git a/gateway/internal/events/subscriber.go b/gateway/internal/events/subscriber.go index 605f80c..0dc924e 100644 --- a/gateway/internal/events/subscriber.go +++ 
b/gateway/internal/events/subscriber.go @@ -5,7 +5,6 @@ package events import ( "context" - "crypto/tls" "errors" "fmt" "strconv" @@ -43,33 +42,30 @@ type RedisSessionSubscriber struct { logger *zap.Logger metrics *telemetry.Runtime - closeOnce sync.Once startedOnce sync.Once started chan struct{} } -// NewRedisSessionSubscriber constructs a Redis Stream subscriber that reuses -// the SessionCache Redis connection settings and applies updates to store. -func NewRedisSessionSubscriber(sessionCfg config.SessionCacheRedisConfig, eventsCfg config.SessionEventsRedisConfig, store session.SnapshotStore) (*RedisSessionSubscriber, error) { - return NewRedisSessionSubscriberWithObservability(sessionCfg, eventsCfg, store, nil, nil, nil) +// NewRedisSessionSubscriber constructs a Redis Stream subscriber that uses +// client and applies updates to store. +func NewRedisSessionSubscriber(client *redis.Client, sessionCfg config.SessionCacheRedisConfig, eventsCfg config.SessionEventsRedisConfig, store session.SnapshotStore) (*RedisSessionSubscriber, error) { + return NewRedisSessionSubscriberWithObservability(client, sessionCfg, eventsCfg, store, nil, nil, nil) } // NewRedisSessionSubscriberWithRevocationHandler constructs a Redis Stream -// subscriber that reuses the SessionCache Redis connection settings, applies -// updates to store, and optionally tears down active resources for revoked -// sessions. -func NewRedisSessionSubscriberWithRevocationHandler(sessionCfg config.SessionCacheRedisConfig, eventsCfg config.SessionEventsRedisConfig, store session.SnapshotStore, revocationHandler SessionRevocationHandler) (*RedisSessionSubscriber, error) { - return NewRedisSessionSubscriberWithObservability(sessionCfg, eventsCfg, store, revocationHandler, nil, nil) +// subscriber that uses client, applies updates to store, and optionally tears +// down active resources for revoked sessions. 
+func NewRedisSessionSubscriberWithRevocationHandler(client *redis.Client, sessionCfg config.SessionCacheRedisConfig, eventsCfg config.SessionEventsRedisConfig, store session.SnapshotStore, revocationHandler SessionRevocationHandler) (*RedisSessionSubscriber, error) { + return NewRedisSessionSubscriberWithObservability(client, sessionCfg, eventsCfg, store, revocationHandler, nil, nil) } // NewRedisSessionSubscriberWithObservability constructs a Redis Stream -// subscriber that also logs and counts malformed internal session events. -func NewRedisSessionSubscriberWithObservability(sessionCfg config.SessionCacheRedisConfig, eventsCfg config.SessionEventsRedisConfig, store session.SnapshotStore, revocationHandler SessionRevocationHandler, logger *zap.Logger, metrics *telemetry.Runtime) (*RedisSessionSubscriber, error) { - if strings.TrimSpace(sessionCfg.Addr) == "" { - return nil, errors.New("new redis session subscriber: redis addr must not be empty") - } - if sessionCfg.DB < 0 { - return nil, errors.New("new redis session subscriber: redis db must not be negative") +// subscriber that also logs and counts malformed internal session events. The +// subscriber does not own the client; the runtime supplies a shared +// *redis.Client. 
+func NewRedisSessionSubscriberWithObservability(client *redis.Client, sessionCfg config.SessionCacheRedisConfig, eventsCfg config.SessionEventsRedisConfig, store session.SnapshotStore, revocationHandler SessionRevocationHandler, logger *zap.Logger, metrics *telemetry.Runtime) (*RedisSessionSubscriber, error) { + if client == nil { + return nil, errors.New("new redis session subscriber: nil redis client") } if sessionCfg.LookupTimeout <= 0 { return nil, errors.New("new redis session subscriber: lookup timeout must be positive") @@ -84,23 +80,12 @@ func NewRedisSessionSubscriberWithObservability(sessionCfg config.SessionCacheRe return nil, errors.New("new redis session subscriber: nil session snapshot store") } - options := &redis.Options{ - Addr: sessionCfg.Addr, - Username: sessionCfg.Username, - Password: sessionCfg.Password, - DB: sessionCfg.DB, - Protocol: 2, - DisableIdentity: true, - } - if sessionCfg.TLSEnabled { - options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } if logger == nil { logger = zap.NewNop() } return &RedisSessionSubscriber{ - client: redis.NewClient(options), + client: client, stream: eventsCfg.Stream, pingTimeout: sessionCfg.LookupTimeout, readBlockTimeout: eventsCfg.ReadBlockTimeout, @@ -112,26 +97,6 @@ func NewRedisSessionSubscriberWithObservability(sessionCfg config.SessionCacheRe }, nil } -// Ping verifies that the Redis backend used for session lifecycle events is -// reachable within the configured timeout budget. 
-func (s *RedisSessionSubscriber) Ping(ctx context.Context) error { - if s == nil || s.client == nil { - return errors.New("ping redis session subscriber: nil subscriber") - } - if ctx == nil { - return errors.New("ping redis session subscriber: nil context") - } - - pingCtx, cancel := context.WithTimeout(ctx, s.pingTimeout) - defer cancel() - - if err := s.client.Ping(pingCtx).Err(); err != nil { - return fmt.Errorf("ping redis session subscriber: %w", err) - } - - return nil -} - // Run consumes session lifecycle events until ctx is canceled or Redis returns // an unexpected error. func (s *RedisSessionSubscriber) Run(ctx context.Context) error { @@ -196,28 +161,21 @@ func (s *RedisSessionSubscriber) resolveStartID(ctx context.Context) (string, er return messages[0].ID, nil } -// Shutdown closes the Redis client so a blocking stream read can terminate -// promptly during gateway shutdown. +// Shutdown is a no-op kept for App framework compatibility. The blocking +// XRead loop terminates when its context is cancelled by the parent runtime, +// which also owns and closes the shared Redis client. func (s *RedisSessionSubscriber) Shutdown(ctx context.Context) error { if ctx == nil { return errors.New("shutdown redis session subscriber: nil context") } - return s.Close() + return nil } -// Close releases the underlying Redis client resources. +// Close is a no-op kept for backwards-compatible cleanup wiring; the +// subscriber does not own the shared Redis client. 
func (s *RedisSessionSubscriber) Close() error { - if s == nil || s.client == nil { - return nil - } - - var err error - s.closeOnce.Do(func() { - err = s.client.Close() - }) - - return err + return nil } func (s *RedisSessionSubscriber) signalStarted() { diff --git a/gateway/internal/events/subscriber_test.go b/gateway/internal/events/subscriber_test.go index 6193ae1..60c88b2 100644 --- a/gateway/internal/events/subscriber_test.go +++ b/gateway/internal/events/subscriber_test.go @@ -10,6 +10,7 @@ import ( "galaxy/gateway/internal/session" "github.com/alicebob/miniredis/v2" + "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -262,9 +263,12 @@ func newTestRedisSessionSubscriber(t *testing.T, server *miniredis.Miniredis, st func newTestRedisSessionSubscriberWithRevocationHandler(t *testing.T, server *miniredis.Miniredis, store session.SnapshotStore, revocationHandler SessionRevocationHandler) *RedisSessionSubscriber { t.Helper() + client := newTestRedisClient(t, server) + subscriber, err := NewRedisSessionSubscriberWithRevocationHandler( + client, config.SessionCacheRedisConfig{ - Addr: server.Addr(), + KeyPrefix: "gateway:session:", LookupTimeout: 250 * time.Millisecond, }, config.SessionEventsRedisConfig{ @@ -276,11 +280,22 @@ func newTestRedisSessionSubscriberWithRevocationHandler(t *testing.T, server *mi ) require.NoError(t, err) + return subscriber +} + +func newTestRedisClient(t *testing.T, server *miniredis.Miniredis) *redis.Client { + t.Helper() + + client := redis.NewClient(&redis.Options{ + Addr: server.Addr(), + Protocol: 2, + DisableIdentity: true, + }) t.Cleanup(func() { - assert.NoError(t, subscriber.Close()) + assert.NoError(t, client.Close()) }) - return subscriber + return client } type recordingSessionRevocationHandler struct { diff --git a/gateway/internal/redisclient/client.go b/gateway/internal/redisclient/client.go new file mode 100644 index 0000000..58b2411 --- /dev/null +++ 
b/gateway/internal/redisclient/client.go @@ -0,0 +1,55 @@ +// Package redisclient provides the Redis client helpers used by Gateway +// runtime wiring. The helpers wrap `pkg/redisconn` so the runtime keeps the +// same construction surface as the other Galaxy services. +package redisclient + +import ( + "context" + "fmt" + + "galaxy/gateway/internal/telemetry" + "galaxy/redisconn" + + "github.com/redis/go-redis/v9" +) + +// NewClient constructs one Redis client from cfg using the shared +// `pkg/redisconn` helper, which enforces the master/replica/password env-var +// shape. +func NewClient(cfg redisconn.Config) *redis.Client { + return redisconn.NewMasterClient(cfg) +} + +// InstrumentClient attaches Redis tracing and metrics exporters to client +// when telemetryRuntime is available. +func InstrumentClient(client *redis.Client, telemetryRuntime *telemetry.Runtime) error { + if client == nil { + return fmt.Errorf("instrument redis client: nil client") + } + if telemetryRuntime == nil { + return nil + } + + return redisconn.Instrument( + client, + redisconn.WithTracerProvider(telemetryRuntime.TracerProvider()), + redisconn.WithMeterProvider(telemetryRuntime.MeterProvider()), + ) +} + +// Ping performs the startup Redis connectivity check bounded by +// cfg.OperationTimeout. 
+func Ping(ctx context.Context, cfg redisconn.Config, client *redis.Client) error { + if client == nil { + return fmt.Errorf("ping redis: nil client") + } + + pingCtx, cancel := context.WithTimeout(ctx, cfg.OperationTimeout) + defer cancel() + + if err := client.Ping(pingCtx).Err(); err != nil { + return fmt.Errorf("ping redis: %w", err) + } + + return nil +} diff --git a/gateway/internal/replay/redis.go b/gateway/internal/replay/redis.go index 0823f1c..700bff6 100644 --- a/gateway/internal/replay/redis.go +++ b/gateway/internal/replay/redis.go @@ -2,7 +2,6 @@ package replay import ( "context" - "crypto/tls" "encoding/base64" "errors" "fmt" @@ -22,15 +21,13 @@ type RedisStore struct { reserveTimeout time.Duration } -// NewRedisStore constructs a Redis-backed replay store that reuses the -// SessionCache Redis deployment settings and applies the replay-specific key -// namespace and timeout controls from replayCfg. -func NewRedisStore(sessionCfg config.SessionCacheRedisConfig, replayCfg config.ReplayRedisConfig) (*RedisStore, error) { - if strings.TrimSpace(sessionCfg.Addr) == "" { - return nil, errors.New("new redis replay store: redis addr must not be empty") - } - if sessionCfg.DB < 0 { - return nil, errors.New("new redis replay store: redis db must not be negative") +// NewRedisStore constructs a Redis-backed replay store that uses client and +// applies the replay-specific namespace and timeout controls from replayCfg. +// The store does not own the client; the runtime supplies a shared +// *redis.Client. 
+func NewRedisStore(client *redis.Client, replayCfg config.ReplayRedisConfig) (*RedisStore, error) { + if client == nil { + return nil, errors.New("new redis replay store: nil redis client") } if strings.TrimSpace(replayCfg.KeyPrefix) == "" { return nil, errors.New("new redis replay store: replay key prefix must not be empty") @@ -39,54 +36,13 @@ func NewRedisStore(sessionCfg config.SessionCacheRedisConfig, replayCfg config.R return nil, errors.New("new redis replay store: reserve timeout must be positive") } - options := &redis.Options{ - Addr: sessionCfg.Addr, - Username: sessionCfg.Username, - Password: sessionCfg.Password, - DB: sessionCfg.DB, - Protocol: 2, - DisableIdentity: true, - } - if sessionCfg.TLSEnabled { - options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } - return &RedisStore{ - client: redis.NewClient(options), + client: client, keyPrefix: replayCfg.KeyPrefix, reserveTimeout: replayCfg.ReserveTimeout, }, nil } -// Close releases the underlying Redis client resources. -func (s *RedisStore) Close() error { - if s == nil || s.client == nil { - return nil - } - - return s.client.Close() -} - -// Ping verifies that the configured Redis backend is reachable within the -// replay reserve timeout budget. -func (s *RedisStore) Ping(ctx context.Context) error { - if s == nil || s.client == nil { - return errors.New("ping redis replay store: nil store") - } - if ctx == nil { - return errors.New("ping redis replay store: nil context") - } - - pingCtx, cancel := context.WithTimeout(ctx, s.reserveTimeout) - defer cancel() - - if err := s.client.Ping(pingCtx).Err(); err != nil { - return fmt.Errorf("ping redis replay store: %w", err) - } - - return nil -} - // Reserve records the authenticated deviceSessionID and requestID pair for // ttl. It rejects duplicates while the reservation remains active. 
func (s *RedisStore) Reserve(ctx context.Context, deviceSessionID string, requestID string, ttl time.Duration) error { diff --git a/gateway/internal/replay/redis_test.go b/gateway/internal/replay/redis_test.go index 857449d..fcb725c 100644 --- a/gateway/internal/replay/redis_test.go +++ b/gateway/internal/replay/redis_test.go @@ -10,81 +10,64 @@ import ( "galaxy/gateway/internal/config" "github.com/alicebob/miniredis/v2" + "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func newRedisClient(t *testing.T, addr string) *redis.Client { + t.Helper() + + client := redis.NewClient(&redis.Options{ + Addr: addr, + Protocol: 2, + DisableIdentity: true, + }) + t.Cleanup(func() { + assert.NoError(t, client.Close()) + }) + + return client +} + func TestNewRedisStore(t *testing.T) { t.Parallel() server := miniredis.RunT(t) + client := newRedisClient(t, server.Addr()) + + validCfg := config.ReplayRedisConfig{ + KeyPrefix: "gateway:replay:", + ReserveTimeout: 250 * time.Millisecond, + } tests := []struct { - name string - sessionCfg config.SessionCacheRedisConfig - replayCfg config.ReplayRedisConfig - wantErr string + name string + client *redis.Client + cfg config.ReplayRedisConfig + wantErr string }{ + {name: "valid config", client: client, cfg: validCfg}, + {name: "nil client", client: nil, cfg: validCfg, wantErr: "nil redis client"}, { - name: "valid config", - sessionCfg: config.SessionCacheRedisConfig{ - Addr: server.Addr(), - DB: 2, - }, - replayCfg: config.ReplayRedisConfig{ - KeyPrefix: "gateway:replay:", - ReserveTimeout: 250 * time.Millisecond, - }, - }, - { - name: "empty redis addr", - replayCfg: config.ReplayRedisConfig{ - KeyPrefix: "gateway:replay:", - ReserveTimeout: 250 * time.Millisecond, - }, - wantErr: "redis addr must not be empty", - }, - { - name: "negative redis db", - sessionCfg: config.SessionCacheRedisConfig{ - Addr: server.Addr(), - DB: -1, - }, - replayCfg: config.ReplayRedisConfig{ - 
KeyPrefix: "gateway:replay:", - ReserveTimeout: 250 * time.Millisecond, - }, - wantErr: "redis db must not be negative", - }, - { - name: "empty replay key prefix", - sessionCfg: config.SessionCacheRedisConfig{ - Addr: server.Addr(), - }, - replayCfg: config.ReplayRedisConfig{ - ReserveTimeout: 250 * time.Millisecond, - }, + name: "empty replay key prefix", + client: client, + cfg: config.ReplayRedisConfig{ReserveTimeout: 250 * time.Millisecond}, wantErr: "replay key prefix must not be empty", }, { - name: "non-positive reserve timeout", - sessionCfg: config.SessionCacheRedisConfig{ - Addr: server.Addr(), - }, - replayCfg: config.ReplayRedisConfig{ - KeyPrefix: "gateway:replay:", - }, + name: "non-positive reserve timeout", + client: client, + cfg: config.ReplayRedisConfig{KeyPrefix: "gateway:replay:"}, wantErr: "reserve timeout must be positive", }, } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() - store, err := NewRedisStore(tt.sessionCfg, tt.replayCfg) + store, err := NewRedisStore(tt.client, tt.cfg) if tt.wantErr != "" { require.Error(t, err) require.ErrorContains(t, err, tt.wantErr) @@ -92,28 +75,16 @@ func TestNewRedisStore(t *testing.T) { } require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, store.Close()) - }) + require.NotNil(t, store) }) } } -func TestRedisStorePing(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - store := newTestRedisStore(t, server, config.SessionCacheRedisConfig{}, config.ReplayRedisConfig{}) - - require.NoError(t, store.Ping(context.Background())) -} - func TestRedisStoreReserve(t *testing.T) { t.Parallel() tests := []struct { name string - sessionCfg config.SessionCacheRedisConfig replayCfg config.ReplayRedisConfig deviceSessionID string requestID string @@ -170,13 +141,11 @@ func TestRedisStoreReserve(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() server := miniredis.RunT(t) - store := 
newTestRedisStore(t, server, tt.sessionCfg, tt.replayCfg) + store := newTestRedisStore(t, server, tt.replayCfg) err := store.Reserve(context.Background(), tt.deviceSessionID, tt.requestID, tt.ttl) if tt.wantErrIs != nil || tt.wantErrText != "" { @@ -201,17 +170,12 @@ func TestRedisStoreReserve(t *testing.T) { func TestRedisStoreReserveReturnsBackendError(t *testing.T) { t.Parallel() - store, err := NewRedisStore( - config.SessionCacheRedisConfig{Addr: unusedTCPAddr(t)}, - config.ReplayRedisConfig{ - KeyPrefix: "gateway:replay:", - ReserveTimeout: 100 * time.Millisecond, - }, - ) - require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, store.Close()) + client := newRedisClient(t, unusedTCPAddr(t)) + store, err := NewRedisStore(client, config.ReplayRedisConfig{ + KeyPrefix: "gateway:replay:", + ReserveTimeout: 100 * time.Millisecond, }) + require.NoError(t, err) err = store.Reserve(context.Background(), "device-session-123", "request-123", 5*time.Second) require.Error(t, err) @@ -219,12 +183,9 @@ func TestRedisStoreReserveReturnsBackendError(t *testing.T) { assert.ErrorContains(t, err, "reserve replay request in redis") } -func newTestRedisStore(t *testing.T, server *miniredis.Miniredis, sessionCfg config.SessionCacheRedisConfig, replayCfg config.ReplayRedisConfig) *RedisStore { +func newTestRedisStore(t *testing.T, server *miniredis.Miniredis, replayCfg config.ReplayRedisConfig) *RedisStore { t.Helper() - if sessionCfg.Addr == "" { - sessionCfg.Addr = server.Addr() - } if replayCfg.KeyPrefix == "" { replayCfg.KeyPrefix = "gateway:replay:" } @@ -232,11 +193,8 @@ func newTestRedisStore(t *testing.T, server *miniredis.Miniredis, sessionCfg con replayCfg.ReserveTimeout = 250 * time.Millisecond } - store, err := NewRedisStore(sessionCfg, replayCfg) + store, err := NewRedisStore(newRedisClient(t, server.Addr()), replayCfg) require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, store.Close()) - }) return store } diff --git 
a/gateway/internal/session/redis.go b/gateway/internal/session/redis.go index 73df8dd..771ef9f 100644 --- a/gateway/internal/session/redis.go +++ b/gateway/internal/session/redis.go @@ -3,7 +3,6 @@ package session import ( "bytes" "context" - "crypto/tls" "encoding/json" "errors" "fmt" @@ -32,68 +31,27 @@ type redisRecord struct { RevokedAtMS *int64 `json:"revoked_at_ms,omitempty"` } -// NewRedisCache constructs a Redis-backed SessionCache from cfg. The returned -// cache is read-only from the gateway perspective and does not write or mutate -// Redis state. -func NewRedisCache(cfg config.SessionCacheRedisConfig) (*RedisCache, error) { - if strings.TrimSpace(cfg.Addr) == "" { - return nil, errors.New("new redis session cache: redis addr must not be empty") +// NewRedisCache constructs a Redis-backed SessionCache that uses client and +// applies the namespace and timeout settings from cfg. The cache does not own +// the client; the runtime supplies a shared *redis.Client. +func NewRedisCache(client *redis.Client, cfg config.SessionCacheRedisConfig) (*RedisCache, error) { + if client == nil { + return nil, errors.New("new redis session cache: nil redis client") } - if cfg.DB < 0 { - return nil, errors.New("new redis session cache: redis db must not be negative") + if strings.TrimSpace(cfg.KeyPrefix) == "" { + return nil, errors.New("new redis session cache: redis key prefix must not be empty") } if cfg.LookupTimeout <= 0 { return nil, errors.New("new redis session cache: lookup timeout must be positive") } - options := &redis.Options{ - Addr: cfg.Addr, - Username: cfg.Username, - Password: cfg.Password, - DB: cfg.DB, - Protocol: 2, - DisableIdentity: true, - } - if cfg.TLSEnabled { - options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } - return &RedisCache{ - client: redis.NewClient(options), + client: client, keyPrefix: cfg.KeyPrefix, lookupTimeout: cfg.LookupTimeout, }, nil } -// Close releases the underlying Redis client resources. 
-func (c *RedisCache) Close() error { - if c == nil || c.client == nil { - return nil - } - - return c.client.Close() -} - -// Ping verifies that the configured Redis backend is reachable within the -// cache lookup timeout budget. -func (c *RedisCache) Ping(ctx context.Context) error { - if c == nil || c.client == nil { - return errors.New("ping redis session cache: nil cache") - } - if ctx == nil { - return errors.New("ping redis session cache: nil context") - } - - pingCtx, cancel := context.WithTimeout(ctx, c.lookupTimeout) - defer cancel() - - if err := c.client.Ping(pingCtx).Err(); err != nil { - return fmt.Errorf("ping redis session cache: %w", err) - } - - return nil -} - // Lookup resolves deviceSessionID from Redis, validates the cached JSON // payload strictly, and returns the decoded session record. func (c *RedisCache) Lookup(ctx context.Context, deviceSessionID string) (Record, error) { diff --git a/gateway/internal/session/redis_test.go b/gateway/internal/session/redis_test.go index a0ca24c..cbba993 100644 --- a/gateway/internal/session/redis_test.go +++ b/gateway/internal/session/redis_test.go @@ -10,61 +10,64 @@ import ( "galaxy/gateway/internal/config" "github.com/alicebob/miniredis/v2" + "github.com/redis/go-redis/v9" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func newRedisClient(t *testing.T, server *miniredis.Miniredis) *redis.Client { + t.Helper() + + client := redis.NewClient(&redis.Options{ + Addr: server.Addr(), + Protocol: 2, + DisableIdentity: true, + }) + t.Cleanup(func() { + assert.NoError(t, client.Close()) + }) + + return client +} + func TestNewRedisCache(t *testing.T) { t.Parallel() server := miniredis.RunT(t) + client := newRedisClient(t, server) + + validCfg := config.SessionCacheRedisConfig{ + KeyPrefix: "gateway:session:", + LookupTimeout: 250 * time.Millisecond, + } tests := []struct { name string + client *redis.Client cfg config.SessionCacheRedisConfig wantErr string }{ + {name: "valid 
config", client: client, cfg: validCfg}, + {name: "nil client", client: nil, cfg: validCfg, wantErr: "nil redis client"}, { - name: "valid config", - cfg: config.SessionCacheRedisConfig{ - Addr: server.Addr(), - DB: 2, - KeyPrefix: "gateway:session:", - LookupTimeout: 250 * time.Millisecond, - }, + name: "empty key prefix", + client: client, + cfg: config.SessionCacheRedisConfig{LookupTimeout: 250 * time.Millisecond}, + wantErr: "redis key prefix must not be empty", }, { - name: "empty addr", - cfg: config.SessionCacheRedisConfig{ - LookupTimeout: 250 * time.Millisecond, - }, - wantErr: "redis addr must not be empty", - }, - { - name: "negative db", - cfg: config.SessionCacheRedisConfig{ - Addr: server.Addr(), - DB: -1, - LookupTimeout: 250 * time.Millisecond, - }, - wantErr: "redis db must not be negative", - }, - { - name: "non-positive lookup timeout", - cfg: config.SessionCacheRedisConfig{ - Addr: server.Addr(), - }, + name: "non-positive lookup timeout", + client: client, + cfg: config.SessionCacheRedisConfig{KeyPrefix: "gateway:session:"}, wantErr: "lookup timeout must be positive", }, } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() - cache, err := NewRedisCache(tt.cfg) + cache, err := NewRedisCache(tt.client, tt.cfg) if tt.wantErr != "" { require.Error(t, err) require.ErrorContains(t, err, tt.wantErr) @@ -72,22 +75,11 @@ func TestNewRedisCache(t *testing.T) { } require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, cache.Close()) - }) + require.NotNil(t, cache) }) } } -func TestRedisCachePing(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - cache := newTestRedisCache(t, server, config.SessionCacheRedisConfig{}) - - require.NoError(t, cache.Ping(context.Background())) -} - func TestRedisCacheLookup(t *testing.T) { t.Parallel() @@ -259,8 +251,6 @@ func TestRedisCacheLookup(t *testing.T) { server := miniredis.RunT(t) cfg := tt.cfg - cfg.Addr = server.Addr() - cfg.DB = 0 cfg.LookupTimeout = 250 
* time.Millisecond if tt.seed != nil { @@ -292,20 +282,16 @@ func TestRedisCacheLookup(t *testing.T) { func newTestRedisCache(t *testing.T, server *miniredis.Miniredis, cfg config.SessionCacheRedisConfig) *RedisCache { t.Helper() - if cfg.Addr == "" { - cfg.Addr = server.Addr() + if cfg.KeyPrefix == "" { + cfg.KeyPrefix = "gateway:session:" } if cfg.LookupTimeout == 0 { cfg.LookupTimeout = 250 * time.Millisecond } - cache, err := NewRedisCache(cfg) + cache, err := NewRedisCache(newRedisClient(t, server), cfg) require.NoError(t, err) - t.Cleanup(func() { - assert.NoError(t, cache.Close()) - }) - return cache } diff --git a/gateway/internal/telemetry/runtime.go b/gateway/internal/telemetry/runtime.go index 549fbb6..5616c32 100644 --- a/gateway/internal/telemetry/runtime.go +++ b/gateway/internal/telemetry/runtime.go @@ -20,6 +20,7 @@ import ( sdkmetric "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" + oteltrace "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) @@ -149,6 +150,26 @@ func (r *Runtime) Handler() http.Handler { return r.promHandler } +// TracerProvider returns the runtime tracer provider, falling back to the +// global one when r is not initialised. +func (r *Runtime) TracerProvider() oteltrace.TracerProvider { + if r == nil || r.tracerProvider == nil { + return otel.GetTracerProvider() + } + + return r.tracerProvider +} + +// MeterProvider returns the runtime meter provider, falling back to the +// global one when r is not initialised. +func (r *Runtime) MeterProvider() metric.MeterProvider { + if r == nil || r.meterProvider == nil { + return otel.GetMeterProvider() + } + + return r.meterProvider +} + // Shutdown flushes the configured telemetry providers. 
func (r *Runtime) Shutdown(ctx context.Context) error { if r == nil { diff --git a/geoprofile/PLAN.md b/geoprofile/PLAN.md index dc412af..56284c2 100644 --- a/geoprofile/PLAN.md +++ b/geoprofile/PLAN.md @@ -13,6 +13,149 @@ Execution priorities: - Defer threshold tuning until after the basic data model is working. - Avoid unnecessary infrastructure on the first iteration. +## Stage 00 — Persistence Stack and Backend Assignment + +Goal: + +- Pin the platform-wide persistence stack and the per-service backend + ownership before any feature stage begins, so that subsequent stages + design schemas, queries, and worker loops consistently with the + project-wide rules in + [`../ARCHITECTURE.md §Persistence Backends`](../ARCHITECTURE.md#persistence-backends) + and the staged migration plan in + [`../PG_PLAN.md`](../PG_PLAN.md). + +This stage is documentation-only: no code exists in this service yet, and +this stage adds none. It is a prerequisite to every later stage and ships +as part of `PG_PLAN.md` Stage 8. + +Tasks: + +- Adopt the shared Postgres helper [`pkg/postgres`](../pkg/postgres) for + every durable storage path: + + - driver `github.com/jackc/pgx/v5`, exposed as `*sql.DB` via + `github.com/jackc/pgx/v5/stdlib`; + - query layer `github.com/go-jet/jet/v2` (PostgreSQL dialect) with + generated code under `internal/adapters/postgres/jet/`, regenerated + by a per-service `make jet` target and committed to the repo; + - migrations via `github.com/pressly/goose/v3` library API embedded + with `//go:embed`, applied at service startup before any HTTP + listener becomes ready, with non-zero exit on failure; + - `github.com/testcontainers/testcontainers-go` (`modules/postgres`) + for unit tests and for hosting the transient instance used by + `make jet`. 
+- Adopt the shared Redis helper [`pkg/redisconn`](../pkg/redisconn) for + every Redis client: + + - master/replica/password connection shape; + - mandatory password; + - no `TLS_ENABLED`, no `USERNAME` (rejected at startup with a clear + error from `pkg/redisconn.LoadFromEnv`). +- Own the `geoprofile` schema in the shared `galaxy` PostgreSQL database. + Connect with a dedicated `geoprofile` PG role whose grants are + restricted to its own schema (defense-in-depth, expressed in the + initial migration). +- Lay out the postgres-backed adapter directory consistently with the + PG-migrated services: + + ```text + geoprofile/ + internal/ + adapters/ + postgres/ + migrations/ # *.sql files + migrations.go (//go:embed) + jet/ # generated code, commit-checked + / # adapter implementations matching + # internal/ports + config/ + config.go # Postgres + Redis schemas + Makefile # `jet` target: testcontainers + goose + jet + ``` +- Backend assignment for the entities listed in + [`README.md §Data Entities`](README.md#data-entities): + + - PostgreSQL (`geoprofile` schema, source of truth): + + - `country_observation` — durable observed-country fact rows. + - `device_session_country_score` — per-`device_session_id` weighted + country aggregates. + - `device_session_geo_state` — current `usual_connection_country` + per `device_session_id`. + - `user_review_state` — `country_review_recommended` flag and last + evaluation timestamp. + - `declared_country_version` — immutable history of approved + `declared_country` changes (with version status `recorded` / + `applied` / `sync_failed`). + - `session_block_action` — local audit of block-request outcomes. + - Ingest-queue lifecycle from §Stage 05 (`accepted` / `processing` / + `processed` / `failed`) is materialised as `status` / + `next_attempt_at` columns on the durable observation row, not as a + Redis ZSET. Workers select pending work via + `SELECT ... 
FOR UPDATE SKIP LOCKED`, mirroring the pattern already + in use by Mail and Notification. + - Redis (`pkg/redisconn`): + + - only ephemeral runtime-coordination signals if any appear during + implementation — for example, transition-deduplication windows for + review-flag notifications, short worker leases on processing + claims. No durable business state. + - the `notification:intents` Redis Stream is used by this service + only as a producer to publish `geo.review_recommended` intents + (see §Stage 11 and `README.md §Integration with Notification + Service`); that connection is built via `pkg/redisconn`. +- **Idempotency**, if added for ingest deduplication, is a `UNIQUE` + constraint on the durable observation row, never a separate Redis kv. + **Retry scheduling**, if added for worker reprocessing or + `User Service` sync retries, is a column on the durable record, worked + off via `FOR UPDATE SKIP LOCKED`. Both rules align this service with + the platform-wide pattern. +- Time-valued columns are `timestamptz`. Adapters normalise every + `time.Time` value crossing the SQL boundary to `time.UTC` on bind and + scan, per + `../ARCHITECTURE.md §Persistence Backends — Timestamp handling`. +- Configuration (target): + + - PostgreSQL knobs (loaded via + `pkg/postgres.LoadFromEnv("GEOPROFILE")`): + + - `GEOPROFILE_POSTGRES_PRIMARY_DSN` (required; + `postgres://geoprofile:@:5432/galaxy?search_path=geoprofile&sslmode=disable`); + - `GEOPROFILE_POSTGRES_REPLICA_DSNS` (optional, comma-separated; + reserved for future read-routing, not consumed yet); + - `GEOPROFILE_POSTGRES_OPERATION_TIMEOUT`, + `GEOPROFILE_POSTGRES_MAX_OPEN_CONNS`, + `GEOPROFILE_POSTGRES_MAX_IDLE_CONNS`, + `GEOPROFILE_POSTGRES_CONN_MAX_LIFETIME`. 
+ - Redis knobs (loaded via + `pkg/redisconn.LoadFromEnv("GEOPROFILE")`): + + - `GEOPROFILE_REDIS_MASTER_ADDR` (required), + `GEOPROFILE_REDIS_REPLICA_ADDRS` (optional, comma-separated); + - `GEOPROFILE_REDIS_PASSWORD` (required); + - `GEOPROFILE_REDIS_DB`, + `GEOPROFILE_REDIS_OPERATION_TIMEOUT`. +- Per-service decision record `geoprofile/docs/postgres-migration.md` + is created by the stage that actually implements the service. It must + capture: schema and role grants, queue materialisation choice, retry + pattern, and any non-trivial deviation from the platform-wide rules + (analogous to + [`../user/docs/postgres-migration.md`](../user/docs/postgres-migration.md), + [`../mail/docs/postgres-migration.md`](../mail/docs/postgres-migration.md), + [`../notification/docs/postgres-migration.md`](../notification/docs/postgres-migration.md), + and [`../lobby/docs/postgres-migration.md`](../lobby/docs/postgres-migration.md)). + +Exit criteria: + +- The persistence stack and schema ownership are fixed and visible to + implementers. +- Every later stage (Stage 01+) designs schemas and queries on top of + the `geoprofile` Postgres schema, or — for any ephemeral signal — on + top of `pkg/redisconn`. +- `../ARCHITECTURE.md §Persistence Backends` and `../PG_PLAN.md` remain + the canonical references; this PLAN points at them rather than + duplicating their content. + ## Stage 01 — Freeze Service Vocabulary and Contracts Goal: @@ -643,6 +786,7 @@ Exit criteria: Recommended delivery order: +- Persistence stack and backend assignment - Domain vocabulary and ownership - Domain model - FlatBuffers schema diff --git a/geoprofile/README.md b/geoprofile/README.md index b324212..d5aeb1e 100644 --- a/geoprofile/README.md +++ b/geoprofile/README.md @@ -137,6 +137,78 @@ To avoid divergence: - Geo Profile Service must then synchronously update the current value in `User Service`. - A version should become effective only after the `User Service` update succeeds. 
+## Persistence Backends + +The service follows the platform-wide split described in +[`../ARCHITECTURE.md §Persistence Backends`](../ARCHITECTURE.md#persistence-backends); +the staged migration plan that established this split is +[`../PG_PLAN.md`](../PG_PLAN.md). Per-service decisions and any deviation +from the platform-wide rules will be captured in +`docs/postgres-migration.md` once implementation begins, in the same +shape as +[`../user/docs/postgres-migration.md`](../user/docs/postgres-migration.md), +[`../mail/docs/postgres-migration.md`](../mail/docs/postgres-migration.md), +[`../notification/docs/postgres-migration.md`](../notification/docs/postgres-migration.md), +and [`../lobby/docs/postgres-migration.md`](../lobby/docs/postgres-migration.md). + +Geo Profile Service owns the `geoprofile` schema in the shared `galaxy` +PostgreSQL database. A dedicated `geoprofile` PG role connects with grants +restricted to its own schema (defense-in-depth, expressed in the initial +migration). + +PostgreSQL is the source of truth for all durable +[§Data Entities](#data-entities) of the service: + +- `country_observation` — durable observed-country fact rows. +- `device_session_country_score` — per-`device_session_id` weighted + ranking. +- `device_session_geo_state` — current `usual_connection_country` per + `device_session_id`. +- `user_review_state` — `country_review_recommended` plus last evaluation + timestamp. +- `declared_country_version` — immutable history of approved + `declared_country` changes (status `recorded` / `applied` / + `sync_failed`). +- `session_block_action` — local audit of block-request outcomes. +- Ingest-queue lifecycle (`accepted` / `processing` / `processed` / + `failed`, see [§Internal Queue and Worker Pipeline](#internal-queue-and-worker-pipeline)) + is materialised as `status` / `next_attempt_at` columns on the durable + observation row and worked off via + `SELECT ... 
FOR UPDATE SKIP LOCKED` — the same pattern Mail and + Notification already use for their durable retry schedules. + +Redis carries only ephemeral runtime-coordination signals if and when +they appear during implementation (short worker leases on processing +claims, transition-deduplication windows for review-flag notifications). +No durable business state lives on Redis. The `notification:intents` +Redis Stream is used solely as a producer channel through which this +service publishes `geo.review_recommended` intents (see +[§Integration with Notification Service](#integration-with-notification-service)); +that connection is built via `pkg/redisconn`. + +Stack: + +- driver `github.com/jackc/pgx/v5`, exposed as `*sql.DB` via + `github.com/jackc/pgx/v5/stdlib`; +- query layer `github.com/go-jet/jet/v2` (PostgreSQL dialect) with + generated code committed under `internal/adapters/postgres/jet/` and + regenerated by `make jet`; +- migrations via `github.com/pressly/goose/v3` library API embedded with + `//go:embed`, applied at service startup before any listener becomes + ready (non-zero exit on failure); +- testcontainers-backed unit tests using + `github.com/testcontainers/testcontainers-go/modules/postgres`; +- all Postgres connections are opened through + [`pkg/postgres`](../pkg/postgres); all Redis connections through + [`pkg/redisconn`](../pkg/redisconn). + +Every `time.Time` value crossing the SQL boundary is normalised to UTC +on bind and scan, per the platform-wide rule on `timestamptz` handling. + +The full target environment-variable matrix +(`GEOPROFILE_POSTGRES_*`, `GEOPROFILE_REDIS_*`) is fixed in +[`PLAN.md` Stage 00](PLAN.md#stage-00--persistence-stack-and-backend-assignment). 
+ ## High-Level Architecture ```mermaid diff --git a/go.work b/go.work index a2825ab..6fb2587 100644 --- a/go.work +++ b/go.work @@ -15,6 +15,8 @@ use ( ./pkg/geoip ./pkg/model ./pkg/notificationintent + ./pkg/postgres + ./pkg/redisconn ./pkg/schema ./pkg/storage ./pkg/transcoder @@ -29,6 +31,8 @@ replace ( galaxy/geoip v0.0.0 => ./pkg/geoip galaxy/model v0.0.0 => ./pkg/model galaxy/notificationintent v0.0.0 => ./pkg/notificationintent + galaxy/postgres v0.0.0 => ./pkg/postgres + galaxy/redisconn v0.0.0 => ./pkg/redisconn galaxy/schema v0.0.0 => ./pkg/schema galaxy/storage v0.0.0 => ./pkg/storage galaxy/transcoder v0.0.0 => ./pkg/transcoder diff --git a/go.work.sum b/go.work.sum index 4127c42..f6da553 100644 --- a/go.work.sum +++ b/go.work.sum @@ -1,32 +1,58 @@ buf.build/go/hyperpb v0.1.3/go.mod h1:IHXAM5qnS0/Fsnd7/HGDghFNvUET646WoHmq1FDZXIE= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +filippo.io/edwards25519 v1.2.0/go.mod h1:xzAOLCNug/yB62zG1bQ8uziwrIqIuxhctzJT18Q77mc= +github.com/ClickHouse/ch-go v0.71.0/go.mod h1:NwbNc+7jaqfY58dmdDUbG4Jl22vThgx1cYjBw0vtgXw= +github.com/ClickHouse/clickhouse-go/v2 v2.45.0/go.mod h1:giJfUVlMkcfUEPVfRpt51zZaGEx9i17gCos8gBl392c= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= github.com/akavel/rsrc v0.10.2/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= +github.com/andybalholm/brotli v1.2.1/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/cespare/xxhash/v2 v2.2.0/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= +github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg= github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/elastic/go-sysinfo v1.15.4/go.mod h1:ZBVXmqS368dOn/jvijV/zHLfakWTYHBZPk3G244lHrU= +github.com/elastic/go-windows v1.0.2/go.mod h1:bGcDpBzXgYSqM0Gx3DM4+UxFj300SZLixie9u9ixLM8= github.com/envoyproxy/go-control-plane v0.14.0/go.mod h1:NcS5X47pLl/hfqxU70yPwL9ZMkUlwlKxtAohpi2wBEU= github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/friendsofgo/errors v0.9.2/go.mod h1:yCvFW5AkDIL9qn7suHVLiI/gH228n7PC4Pn44IGoTOI= +github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= +github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-sql-driver/mysql v1.9.3/go.mod 
h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgtype v1.14.4/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA= +github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= +github.com/jackc/puddle v1.3.0 h1:eHK/5clGOatcjX3oWGBO/MpxpbHzSwud5EWTSCI+MX0= github.com/jackmordaunt/icns/v2 v2.2.6/go.mod h1:DqlVnR5iafSphrId7aSD06r3jg0KRC9V6lEBBp504ZQ= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/jonboulle/clockwork v0.5.0/go.mod 
h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60= github.com/jordanlewis/gcassert v0.0.0-20250430164644-389ef753e22e/go.mod h1:ZybsQk6DWyN5t7An1MuPm1gtSZ1xDaTXS9ZjIOxvQrk= github.com/josephspurrier/goversioninfo v1.4.0/go.mod h1:JWzv5rKQr+MmW+LvM412ToT/IkYDZjaclF2pKDss8IY= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -36,13 +62,18 @@ github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/lucor/goinfo v0.9.0/go.mod h1:L6m6tN5Rlova5Z83h1ZaKsMP1iiaoZ9vGTNzu5QKOD4= +github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mcuadros/go-version v0.0.0-20190830083331-035f6764e8d2/go.mod h1:76rfSfYPWj01Z85hUf/ituArm797mNKcvINh1OlsZKo= +github.com/mfridman/xflag v0.1.0/go.mod h1:/483ywM5ZO5SuMVjrIGquYNE5CzLrj5Ux/LxWWnjRaE= +github.com/microsoft/go-mssqldb v1.9.8/go.mod h1:eGSRSGAW4hKMy5YcAenhCDjIRm2rhqIdmmwgciMzLus= github.com/moby/sys/mount v0.3.4/go.mod h1:KcQJMbQdJHPlq5lcYT+/CjatWM4PuxKe+XLSVS4J6Os= github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= github.com/moby/sys/reexec v0.1.0/go.mod h1:EqjBg8F3X7iZe5pU6nRZnYCMUTXoxsjiIfHup5wYIN8= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/natefinch/atomic v1.0.1/go.mod h1:N/D/ELrljoqDyT3rZrsUmtsuzvHkeB/wWjHV22AZRbM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/paulmach/orb v0.13.0/go.mod h1:6scRWINywA2Jf05dcjOfLfxrUIMECvTSG2MVbRLxu/k= +github.com/pierrec/lz4/v4 v4.1.26/go.mod h1:EoQMVJgeeEOMsCqCzqFm2O0cJvljX2nGZjcRIPL34O4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= 
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= @@ -56,23 +87,41 @@ github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99 github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= +github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/timandy/routine v1.1.6/go.mod h1:kXslgIosdY8LW0byTyPnenDgn4/azt2euufAq9rK51w= +github.com/tursodatabase/libsql-client-go v0.0.0-20251219100830-236aa1ff8acc/go.mod h1:08inkKyguB6CGGssc/JzhmQWwBgFQBgjlYFjxjRh7nU= github.com/urfave/cli/v2 v2.4.0/go.mod h1:NX9W0zmTvedE5oDoOMs2RTC8RvdK98NTYZE5LbaEYPg= +github.com/vertica/vertica-sql-go v1.3.6/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4= +github.com/volatiletech/inflect v0.0.1/go.mod h1:IBti31tG6phkHitLlr5j7shC5SOo//x0AjDzaJU1PLA= +github.com/volatiletech/null/v8 v8.1.2/go.mod h1:98DbwNoKEpRrYtGjWFctievIfm4n4MxG0A6EBUcoS5g= +github.com/volatiletech/randomize v0.0.1/go.mod h1:GN3U0QYqfZ9FOJ67bzax1cqZ5q2xuj2mXrXBjWaRTlY= +github.com/volatiletech/strmangle v0.0.1/go.mod h1:F6RA6IkB5vq0yTG4GQ0UsbbRcl3ni9P76i+JrTBKFFg= github.com/xdg-go/pbkdf2 v1.0.0/go.mod 
h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.2.0/go.mod h1:3dlrS0iBaWKYVt2ZfA4cj48umJZ+cAEbR6/SjLA88I8= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +github.com/ydb-platform/ydb-go-genproto v0.0.0-20260311095541-ebbf792c1180/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I= +github.com/ydb-platform/ydb-go-sdk/v3 v3.135.0/go.mod h1:VYUUkRJkKuQPkIpgtZJj6+58Fa2g8ccAqdmaaK6HP5k= github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/otel/trace v1.38.0/go.mod 
h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= @@ -80,8 +129,10 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f/go.mod h1:J1xhfL/vlindoeF/aINzNzt2Bket5bjo9sdOYzOsU80= golang.org/x/mobile v0.0.0-20231127183840-76ac6878050a/go.mod h1:Ede7gF0KGoHlj822RtphAHK1jLdrcuRBZg0sF1Q+SPc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= @@ -99,6 +150,7 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/net v0.43.0/go.mod 
h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= @@ -114,7 +166,6 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -122,12 +173,16 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/sys v0.34.0/go.mod 
h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= @@ -136,12 +191,14 @@ golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2/go.mod h1:b7fPSJ0pKZ3ccUh8gnTONJxhn3c/PS6tyzQvyqw4iA8= golang.org/x/telemetry v0.0.0-20260209163413-e7419c687ee4/go.mod h1:g5NllXBEermZrmR51cJDQxmJUHUOfRAaNyWBM+R+548= +golang.org/x/telemetry v0.0.0-20260409153401-be6f6cb8b1fa/go.mod h1:kHjTxDEnAu6/Nl9lDkzjWpR+bmKfxeiRuSDlsMb70gE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -155,6 +212,7 @@ golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= +golang.org/x/time 
v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= @@ -167,6 +225,7 @@ golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0= +golang.org/x/tools v0.44.0/go.mod h1:KA0AfVErSdxRZIsOVipbv3rQhVXTnlU6UhKxHd1seDI= golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/tools/go/vcs v0.1.0-deprecated/go.mod h1:zUrvATBAvEI9535oC0yWYsLsHIV4Z7g63sNPVMtuBy8= @@ -181,4 +240,6 @@ google.golang.org/grpc v1.79.2/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhH google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/guregu/null.v4 v4.0.0/go.mod h1:YoQhUrADuG3i9WqesrCmpNRwm1ypAgSHYqoOcTu/JrI= +howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/integration/authsessionmail/harness_test.go b/integration/authsessionmail/harness_test.go index 5ec0621..9e8184c 100644 --- a/integration/authsessionmail/harness_test.go +++ b/integration/authsessionmail/harness_test.go @@ -97,17 +97,15 @@ func 
newAuthsessionMailHarness(t *testing.T, opts authsessionMailHarnessOptions) opts.mailSMTPMode = "stub" } - mailEnv := map[string]string{ - "MAIL_LOG_LEVEL": "info", - "MAIL_INTERNAL_HTTP_ADDR": mailInternalAddr, - "MAIL_REDIS_ADDR": redisRuntime.Addr, - "MAIL_TEMPLATE_DIR": moduleTemplateDir(t), - "MAIL_STREAM_BLOCK_TIMEOUT": "100ms", - "MAIL_OPERATOR_REQUEST_TIMEOUT": time.Second.String(), - "MAIL_SHUTDOWN_TIMEOUT": "2s", - "OTEL_TRACES_EXPORTER": "none", - "OTEL_METRICS_EXPORTER": "none", - } + mailEnv := harness.StartMailServicePersistence(t, redisRuntime.Addr).Env + mailEnv["MAIL_LOG_LEVEL"] = "info" + mailEnv["MAIL_INTERNAL_HTTP_ADDR"] = mailInternalAddr + mailEnv["MAIL_TEMPLATE_DIR"] = moduleTemplateDir(t) + mailEnv["MAIL_STREAM_BLOCK_TIMEOUT"] = "100ms" + mailEnv["MAIL_OPERATOR_REQUEST_TIMEOUT"] = time.Second.String() + mailEnv["MAIL_SHUTDOWN_TIMEOUT"] = "2s" + mailEnv["OTEL_TRACES_EXPORTER"] = "none" + mailEnv["OTEL_METRICS_EXPORTER"] = "none" var smtpCapture *harness.SMTPCapture switch opts.mailSMTPMode { @@ -135,7 +133,9 @@ func newAuthsessionMailHarness(t *testing.T, opts authsessionMailHarnessOptions) "AUTHSESSION_LOG_LEVEL": "info", "AUTHSESSION_PUBLIC_HTTP_ADDR": authsessionPublicAddr, "AUTHSESSION_INTERNAL_HTTP_ADDR": authsessionInternalAddr, - "AUTHSESSION_REDIS_ADDR": redisRuntime.Addr, + "AUTHSESSION_REDIS_MASTER_ADDR": redisRuntime.Addr, + + "AUTHSESSION_REDIS_PASSWORD": "integration", "AUTHSESSION_USER_SERVICE_MODE": "rest", "AUTHSESSION_USER_SERVICE_BASE_URL": userStub.BaseURL(), "AUTHSESSION_MAIL_SERVICE_MODE": "rest", diff --git a/integration/authsessionuser/harness_test.go b/integration/authsessionuser/harness_test.go index 3936f89..2743fea 100644 --- a/integration/authsessionuser/harness_test.go +++ b/integration/authsessionuser/harness_test.go @@ -43,13 +43,11 @@ func newAuthsessionUserHarness(t *testing.T) *authsessionUserHarness { userServiceBinary := harness.BuildBinary(t, "userservice", "./user/cmd/userservice") authsessionBinary := 
harness.BuildBinary(t, "authsession", "./authsession/cmd/authsession") - userServiceEnv := map[string]string{ - "USERSERVICE_LOG_LEVEL": "info", - "USERSERVICE_INTERNAL_HTTP_ADDR": userServiceAddr, - "USERSERVICE_REDIS_ADDR": redisServer.Addr(), - "OTEL_TRACES_EXPORTER": "none", - "OTEL_METRICS_EXPORTER": "none", - } + userServiceEnv := harness.StartUserServicePersistence(t, redisServer.Addr()).Env + userServiceEnv["USERSERVICE_LOG_LEVEL"] = "info" + userServiceEnv["USERSERVICE_INTERNAL_HTTP_ADDR"] = userServiceAddr + userServiceEnv["OTEL_TRACES_EXPORTER"] = "none" + userServiceEnv["OTEL_METRICS_EXPORTER"] = "none" userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, userServiceEnv) waitForUserServiceReady(t, userServiceProcess, "http://"+userServiceAddr) @@ -57,7 +55,9 @@ func newAuthsessionUserHarness(t *testing.T) *authsessionUserHarness { "AUTHSESSION_LOG_LEVEL": "info", "AUTHSESSION_PUBLIC_HTTP_ADDR": authsessionPublicAddr, "AUTHSESSION_INTERNAL_HTTP_ADDR": authsessionInternalAddr, - "AUTHSESSION_REDIS_ADDR": redisServer.Addr(), + "AUTHSESSION_REDIS_MASTER_ADDR": redisServer.Addr(), + + "AUTHSESSION_REDIS_PASSWORD": "integration", "AUTHSESSION_USER_SERVICE_MODE": "rest", "AUTHSESSION_USER_SERVICE_BASE_URL": "http://" + userServiceAddr, "AUTHSESSION_MAIL_SERVICE_MODE": "rest", diff --git a/integration/gatewayauthsession/harness_test.go b/integration/gatewayauthsession/harness_test.go index 234cb58..fb69fdc 100644 --- a/integration/gatewayauthsession/harness_test.go +++ b/integration/gatewayauthsession/harness_test.go @@ -98,7 +98,9 @@ func newGatewayAuthSessionHarness(t *testing.T, opts gatewayAuthSessionOptions) "AUTHSESSION_PUBLIC_HTTP_REQUEST_TIMEOUT": opts.authsessionPublicHTTPTimeout.String(), "AUTHSESSION_INTERNAL_HTTP_ADDR": authsessionInternalAddr, "AUTHSESSION_INTERNAL_HTTP_REQUEST_TIMEOUT": defaultAuthsessionInternalHTTPTimeout.String(), - "AUTHSESSION_REDIS_ADDR": redisServer.Addr(), + "AUTHSESSION_REDIS_MASTER_ADDR": 
redisServer.Addr(), + + "AUTHSESSION_REDIS_PASSWORD": "integration", "AUTHSESSION_USER_SERVICE_MODE": "rest", "AUTHSESSION_USER_SERVICE_BASE_URL": userStub.BaseURL(), "AUTHSESSION_USER_SERVICE_REQUEST_TIMEOUT": defaultAuthsessionDependencyTimeout.String(), @@ -118,7 +120,9 @@ func newGatewayAuthSessionHarness(t *testing.T, opts gatewayAuthSessionOptions) "GATEWAY_LOG_LEVEL": "info", "GATEWAY_PUBLIC_HTTP_ADDR": gatewayPublicAddr, "GATEWAY_AUTHENTICATED_GRPC_ADDR": gatewayGRPCAddr, - "GATEWAY_SESSION_CACHE_REDIS_ADDR": redisServer.Addr(), + "GATEWAY_REDIS_MASTER_ADDR": redisServer.Addr(), + + "GATEWAY_REDIS_PASSWORD": "integration", "GATEWAY_SESSION_CACHE_REDIS_KEY_PREFIX": "gateway:session:", "GATEWAY_SESSION_EVENTS_REDIS_STREAM": "gateway:session_events", "GATEWAY_CLIENT_EVENTS_REDIS_STREAM": "gateway:client_events", diff --git a/integration/gatewayauthsessionmail/harness_test.go b/integration/gatewayauthsessionmail/harness_test.go index 2fa6744..dc26da2 100644 --- a/integration/gatewayauthsessionmail/harness_test.go +++ b/integration/gatewayauthsessionmail/harness_test.go @@ -126,18 +126,17 @@ func newGatewayAuthsessionMailHarness(t *testing.T) *gatewayAuthsessionMailHarne authsessionBinary := harness.BuildBinary(t, "authsession", "./authsession/cmd/authsession") gatewayBinary := harness.BuildBinary(t, "gateway", "./gateway/cmd/gateway") - mailProcess := harness.StartProcess(t, "mail", mailBinary, map[string]string{ - "MAIL_LOG_LEVEL": "info", - "MAIL_INTERNAL_HTTP_ADDR": mailInternalAddr, - "MAIL_REDIS_ADDR": redisRuntime.Addr, - "MAIL_TEMPLATE_DIR": moduleTemplateDir(t), - "MAIL_SMTP_MODE": "stub", - "MAIL_STREAM_BLOCK_TIMEOUT": "100ms", - "MAIL_OPERATOR_REQUEST_TIMEOUT": time.Second.String(), - "MAIL_SHUTDOWN_TIMEOUT": "2s", - "OTEL_TRACES_EXPORTER": "none", - "OTEL_METRICS_EXPORTER": "none", - }) + mailEnv := harness.StartMailServicePersistence(t, redisRuntime.Addr).Env + mailEnv["MAIL_LOG_LEVEL"] = "info" + mailEnv["MAIL_INTERNAL_HTTP_ADDR"] = 
mailInternalAddr + mailEnv["MAIL_TEMPLATE_DIR"] = moduleTemplateDir(t) + mailEnv["MAIL_SMTP_MODE"] = "stub" + mailEnv["MAIL_STREAM_BLOCK_TIMEOUT"] = "100ms" + mailEnv["MAIL_OPERATOR_REQUEST_TIMEOUT"] = time.Second.String() + mailEnv["MAIL_SHUTDOWN_TIMEOUT"] = "2s" + mailEnv["OTEL_TRACES_EXPORTER"] = "none" + mailEnv["OTEL_METRICS_EXPORTER"] = "none" + mailProcess := harness.StartProcess(t, "mail", mailBinary, mailEnv) waitForMailReady(t, mailProcess, "http://"+mailInternalAddr) authsessionProcess := harness.StartProcess(t, "authsession", authsessionBinary, map[string]string{ @@ -146,7 +145,9 @@ func newGatewayAuthsessionMailHarness(t *testing.T) *gatewayAuthsessionMailHarne "AUTHSESSION_PUBLIC_HTTP_REQUEST_TIMEOUT": time.Second.String(), "AUTHSESSION_INTERNAL_HTTP_ADDR": authsessionInternalAddr, "AUTHSESSION_INTERNAL_HTTP_REQUEST_TIMEOUT": time.Second.String(), - "AUTHSESSION_REDIS_ADDR": redisRuntime.Addr, + "AUTHSESSION_REDIS_MASTER_ADDR": redisRuntime.Addr, + + "AUTHSESSION_REDIS_PASSWORD": "integration", "AUTHSESSION_USER_SERVICE_MODE": "rest", "AUTHSESSION_USER_SERVICE_BASE_URL": userStub.BaseURL(), "AUTHSESSION_USER_SERVICE_REQUEST_TIMEOUT": time.Second.String(), @@ -164,7 +165,9 @@ func newGatewayAuthsessionMailHarness(t *testing.T) *gatewayAuthsessionMailHarne "GATEWAY_LOG_LEVEL": "info", "GATEWAY_PUBLIC_HTTP_ADDR": gatewayPublicAddr, "GATEWAY_AUTHENTICATED_GRPC_ADDR": gatewayGRPCAddr, - "GATEWAY_SESSION_CACHE_REDIS_ADDR": redisRuntime.Addr, + "GATEWAY_REDIS_MASTER_ADDR": redisRuntime.Addr, + + "GATEWAY_REDIS_PASSWORD": "integration", "GATEWAY_SESSION_CACHE_REDIS_KEY_PREFIX": "gateway:session:", "GATEWAY_SESSION_EVENTS_REDIS_STREAM": "gateway:session_events", "GATEWAY_CLIENT_EVENTS_REDIS_STREAM": "gateway:client_events", diff --git a/integration/gatewayauthsessionuser/harness_test.go b/integration/gatewayauthsessionuser/harness_test.go index 74ac183..a755cdb 100644 --- a/integration/gatewayauthsessionuser/harness_test.go +++ 
b/integration/gatewayauthsessionuser/harness_test.go @@ -71,13 +71,11 @@ func newGatewayAuthsessionUserHarness(t *testing.T) *gatewayAuthsessionUserHarne authsessionBinary := harness.BuildBinary(t, "authsession", "./authsession/cmd/authsession") gatewayBinary := harness.BuildBinary(t, "gateway", "./gateway/cmd/gateway") - userServiceEnv := map[string]string{ - "USERSERVICE_LOG_LEVEL": "info", - "USERSERVICE_INTERNAL_HTTP_ADDR": userServiceAddr, - "USERSERVICE_REDIS_ADDR": redisServer.Addr(), - "OTEL_TRACES_EXPORTER": "none", - "OTEL_METRICS_EXPORTER": "none", - } + userServiceEnv := harness.StartUserServicePersistence(t, redisServer.Addr()).Env + userServiceEnv["USERSERVICE_LOG_LEVEL"] = "info" + userServiceEnv["USERSERVICE_INTERNAL_HTTP_ADDR"] = userServiceAddr + userServiceEnv["OTEL_TRACES_EXPORTER"] = "none" + userServiceEnv["OTEL_METRICS_EXPORTER"] = "none" userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, userServiceEnv) harness.WaitForHTTPStatus(t, userServiceProcess, "http://"+userServiceAddr+"/api/v1/internal/users/user-missing/exists", http.StatusOK) @@ -87,7 +85,9 @@ func newGatewayAuthsessionUserHarness(t *testing.T) *gatewayAuthsessionUserHarne "AUTHSESSION_PUBLIC_HTTP_REQUEST_TIMEOUT": time.Second.String(), "AUTHSESSION_INTERNAL_HTTP_ADDR": authsessionInternalAddr, "AUTHSESSION_INTERNAL_HTTP_REQUEST_TIMEOUT": time.Second.String(), - "AUTHSESSION_REDIS_ADDR": redisServer.Addr(), + "AUTHSESSION_REDIS_MASTER_ADDR": redisServer.Addr(), + + "AUTHSESSION_REDIS_PASSWORD": "integration", "AUTHSESSION_USER_SERVICE_MODE": "rest", "AUTHSESSION_USER_SERVICE_BASE_URL": "http://" + userServiceAddr, "AUTHSESSION_USER_SERVICE_REQUEST_TIMEOUT": time.Second.String(), @@ -109,7 +109,9 @@ func newGatewayAuthsessionUserHarness(t *testing.T) *gatewayAuthsessionUserHarne "GATEWAY_AUTH_SERVICE_BASE_URL": "http://" + authsessionPublicAddr, "GATEWAY_USER_SERVICE_BASE_URL": "http://" + userServiceAddr, "GATEWAY_PUBLIC_AUTH_UPSTREAM_TIMEOUT": (500 * 
time.Millisecond).String(), - "GATEWAY_SESSION_CACHE_REDIS_ADDR": redisServer.Addr(), + "GATEWAY_REDIS_MASTER_ADDR": redisServer.Addr(), + + "GATEWAY_REDIS_PASSWORD": "integration", "GATEWAY_SESSION_CACHE_REDIS_KEY_PREFIX": "gateway:session:", "GATEWAY_SESSION_EVENTS_REDIS_STREAM": "gateway:session_events", "GATEWAY_CLIENT_EVENTS_REDIS_STREAM": "gateway:client_events", diff --git a/integration/gatewayauthsessionusermail/gateway_authsession_user_mail_test.go b/integration/gatewayauthsessionusermail/gateway_authsession_user_mail_test.go index 64a9b27..1917b2f 100644 --- a/integration/gatewayauthsessionusermail/gateway_authsession_user_mail_test.go +++ b/integration/gatewayauthsessionusermail/gateway_authsession_user_mail_test.go @@ -186,27 +186,25 @@ func newGatewayAuthsessionUserMailHarness(t *testing.T) *gatewayAuthsessionUserM authsessionBinary := harness.BuildBinary(t, "authsession", "./authsession/cmd/authsession") gatewayBinary := harness.BuildBinary(t, "gateway", "./gateway/cmd/gateway") - userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, map[string]string{ - "USERSERVICE_LOG_LEVEL": "info", - "USERSERVICE_INTERNAL_HTTP_ADDR": userServiceAddr, - "USERSERVICE_REDIS_ADDR": redisRuntime.Addr, - "OTEL_TRACES_EXPORTER": "none", - "OTEL_METRICS_EXPORTER": "none", - }) + userServiceEnv := harness.StartUserServicePersistence(t, redisRuntime.Addr).Env + userServiceEnv["USERSERVICE_LOG_LEVEL"] = "info" + userServiceEnv["USERSERVICE_INTERNAL_HTTP_ADDR"] = userServiceAddr + userServiceEnv["OTEL_TRACES_EXPORTER"] = "none" + userServiceEnv["OTEL_METRICS_EXPORTER"] = "none" + userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, userServiceEnv) waitForUserServiceReady(t, userServiceProcess, "http://"+userServiceAddr) - mailProcess := harness.StartProcess(t, "mail", mailBinary, map[string]string{ - "MAIL_LOG_LEVEL": "info", - "MAIL_INTERNAL_HTTP_ADDR": mailInternalAddr, - "MAIL_REDIS_ADDR": redisRuntime.Addr, - 
"MAIL_TEMPLATE_DIR": moduleTemplateDir(t), - "MAIL_SMTP_MODE": "stub", - "MAIL_STREAM_BLOCK_TIMEOUT": "100ms", - "MAIL_OPERATOR_REQUEST_TIMEOUT": time.Second.String(), - "MAIL_SHUTDOWN_TIMEOUT": "2s", - "OTEL_TRACES_EXPORTER": "none", - "OTEL_METRICS_EXPORTER": "none", - }) + mailEnv := harness.StartMailServicePersistence(t, redisRuntime.Addr).Env + mailEnv["MAIL_LOG_LEVEL"] = "info" + mailEnv["MAIL_INTERNAL_HTTP_ADDR"] = mailInternalAddr + mailEnv["MAIL_TEMPLATE_DIR"] = moduleTemplateDir(t) + mailEnv["MAIL_SMTP_MODE"] = "stub" + mailEnv["MAIL_STREAM_BLOCK_TIMEOUT"] = "100ms" + mailEnv["MAIL_OPERATOR_REQUEST_TIMEOUT"] = time.Second.String() + mailEnv["MAIL_SHUTDOWN_TIMEOUT"] = "2s" + mailEnv["OTEL_TRACES_EXPORTER"] = "none" + mailEnv["OTEL_METRICS_EXPORTER"] = "none" + mailProcess := harness.StartProcess(t, "mail", mailBinary, mailEnv) waitForMailReady(t, mailProcess, "http://"+mailInternalAddr) authsessionProcess := harness.StartProcess(t, "authsession", authsessionBinary, map[string]string{ @@ -215,7 +213,9 @@ func newGatewayAuthsessionUserMailHarness(t *testing.T) *gatewayAuthsessionUserM "AUTHSESSION_PUBLIC_HTTP_REQUEST_TIMEOUT": time.Second.String(), "AUTHSESSION_INTERNAL_HTTP_ADDR": authsessionInternalAddr, "AUTHSESSION_INTERNAL_HTTP_REQUEST_TIMEOUT": time.Second.String(), - "AUTHSESSION_REDIS_ADDR": redisRuntime.Addr, + "AUTHSESSION_REDIS_MASTER_ADDR": redisRuntime.Addr, + + "AUTHSESSION_REDIS_PASSWORD": "integration", "AUTHSESSION_USER_SERVICE_MODE": "rest", "AUTHSESSION_USER_SERVICE_BASE_URL": "http://" + userServiceAddr, "AUTHSESSION_USER_SERVICE_REQUEST_TIMEOUT": time.Second.String(), @@ -233,7 +233,9 @@ func newGatewayAuthsessionUserMailHarness(t *testing.T) *gatewayAuthsessionUserM "GATEWAY_LOG_LEVEL": "info", "GATEWAY_PUBLIC_HTTP_ADDR": gatewayPublicAddr, "GATEWAY_AUTHENTICATED_GRPC_ADDR": gatewayGRPCAddr, - "GATEWAY_SESSION_CACHE_REDIS_ADDR": redisRuntime.Addr, + "GATEWAY_REDIS_MASTER_ADDR": redisRuntime.Addr, + + "GATEWAY_REDIS_PASSWORD": 
"integration", "GATEWAY_SESSION_CACHE_REDIS_KEY_PREFIX": "gateway:session:", "GATEWAY_SESSION_EVENTS_REDIS_STREAM": "gateway:session_events", "GATEWAY_CLIENT_EVENTS_REDIS_STREAM": "gateway:client_events", diff --git a/integration/gatewayuser/harness_test.go b/integration/gatewayuser/harness_test.go index 732b950..b75e65d 100644 --- a/integration/gatewayuser/harness_test.go +++ b/integration/gatewayuser/harness_test.go @@ -63,13 +63,11 @@ func newGatewayUserHarness(t *testing.T) *gatewayUserHarness { userServiceBinary := harness.BuildBinary(t, "userservice", "./user/cmd/userservice") gatewayBinary := harness.BuildBinary(t, "gateway", "./gateway/cmd/gateway") - userServiceEnv := map[string]string{ - "USERSERVICE_LOG_LEVEL": "info", - "USERSERVICE_INTERNAL_HTTP_ADDR": userServiceAddr, - "USERSERVICE_REDIS_ADDR": redisServer.Addr(), - "OTEL_TRACES_EXPORTER": "none", - "OTEL_METRICS_EXPORTER": "none", - } + userServiceEnv := harness.StartUserServicePersistence(t, redisServer.Addr()).Env + userServiceEnv["USERSERVICE_LOG_LEVEL"] = "info" + userServiceEnv["USERSERVICE_INTERNAL_HTTP_ADDR"] = userServiceAddr + userServiceEnv["OTEL_TRACES_EXPORTER"] = "none" + userServiceEnv["OTEL_METRICS_EXPORTER"] = "none" userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, userServiceEnv) harness.WaitForHTTPStatus(t, userServiceProcess, "http://"+userServiceAddr+"/api/v1/internal/users/user-missing/exists", http.StatusOK) @@ -78,7 +76,9 @@ func newGatewayUserHarness(t *testing.T) *gatewayUserHarness { "GATEWAY_PUBLIC_HTTP_ADDR": gatewayPublicAddr, "GATEWAY_AUTHENTICATED_GRPC_ADDR": gatewayGRPCAddr, "GATEWAY_USER_SERVICE_BASE_URL": "http://" + userServiceAddr, - "GATEWAY_SESSION_CACHE_REDIS_ADDR": redisServer.Addr(), + "GATEWAY_REDIS_MASTER_ADDR": redisServer.Addr(), + + "GATEWAY_REDIS_PASSWORD": "integration", "GATEWAY_SESSION_CACHE_REDIS_KEY_PREFIX": "gateway:session:", "GATEWAY_SESSION_EVENTS_REDIS_STREAM": "gateway:session_events", 
"GATEWAY_CLIENT_EVENTS_REDIS_STREAM": "gateway:client_events", diff --git a/integration/go.mod b/integration/go.mod index 24ace24..2c27f1d 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -1,12 +1,15 @@ module galaxy/integration -go 1.26.0 +go 1.26.1 require ( + galaxy/postgres v0.0.0 github.com/alicebob/miniredis/v2 v2.37.0 + github.com/jackc/pgx/v5 v5.9.2 github.com/redis/go-redis/v9 v9.18.0 github.com/stretchr/testify v1.11.1 github.com/testcontainers/testcontainers-go v0.42.0 + github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0 github.com/testcontainers/testcontainers-go/modules/redis v0.42.0 google.golang.org/grpc v1.80.0 ) @@ -15,6 +18,7 @@ require ( dario.cat/mergo v1.0.2 // indirect github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/XSAM/otelsql v0.42.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/errdefs v1.0.0 // indirect @@ -25,7 +29,7 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/go-connections v0.6.0 // indirect + github.com/docker/go-connections v0.7.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/ebitengine/purego v0.10.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -33,15 +37,19 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/klauspost/compress v1.18.5 // indirect github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/lufia/plan9stats 
v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/magiconair/properties v1.8.10 // indirect github.com/mdelapenya/tlscert v0.2.0 // indirect + github.com/mfridman/interpolate v0.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/go-archive v0.2.0 // indirect - github.com/moby/moby/api v1.54.1 // indirect - github.com/moby/moby/client v0.4.0 // indirect + github.com/moby/moby/api v1.54.2 // indirect + github.com/moby/moby/client v0.4.1 // indirect github.com/moby/patternmatcher v0.6.1 // indirect github.com/moby/sys/sequential v0.6.0 // indirect github.com/moby/sys/user v0.4.0 // indirect @@ -51,6 +59,8 @@ require ( github.com/opencontainers/image-spec v1.1.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/pressly/goose/v3 v3.27.1 // indirect + github.com/sethvargo/go-retry v0.3.0 // indirect github.com/shirou/gopsutil/v4 v4.26.3 // indirect github.com/sirupsen/logrus v1.9.4 // indirect github.com/tklauser/go-sysconf v0.3.16 // indirect @@ -63,11 +73,15 @@ require ( go.opentelemetry.io/otel/metric v1.43.0 // indirect go.opentelemetry.io/otel/trace v1.43.0 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/crypto v0.49.0 // indirect - golang.org/x/net v0.52.0 // indirect - golang.org/x/sys v0.42.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.50.0 // indirect + golang.org/x/net v0.53.0 // indirect + golang.org/x/sync v0.20.0 // indirect + golang.org/x/sys v0.43.0 // indirect golang.org/x/text v0.36.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 // indirect google.golang.org/protobuf v1.36.11 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) + +replace galaxy/postgres => ../pkg/postgres diff --git 
a/integration/go.sum b/integration/go.sum index 8e09b24..6c982ac 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -6,6 +6,8 @@ github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEK github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/XSAM/otelsql v0.42.0 h1:Li0xF4eJUxG2e0x3D4rvRlys1f27yJKvjTh7ljkUP5o= +github.com/XSAM/otelsql v0.42.0/go.mod h1:4mOrEv+cS1KmKzrvTktvJnstr5GtKSAK+QHvFR9OcpI= github.com/alicebob/miniredis/v2 v2.37.0 h1:RheObYW32G1aiJIj81XVt78ZHJpHonHLHW7OLIshq68= github.com/alicebob/miniredis/v2 v2.37.0/go.mod h1:TcL7YfarKPGDAthEtl5NBeHZfeUQj6OXMm/+iu5cLMM= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= @@ -28,16 +30,19 @@ github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GK github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference 
v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= -github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-connections v0.7.0 h1:6SsRfJddP22WMrCkj19x9WKjEDTB+ahsdiGYf0mN39c= +github.com/docker/go-connections v0.7.0/go.mod h1:no1qkHdjq7kLMGUXYAduOhYPSJxxvgWBh7ogVvptn3Q= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU= github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -56,6 +61,14 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.9.2 h1:3ZhOzMWnR4yJ+RW1XImIPsD1aNSz4T4fyP7zlQb56hw= +github.com/jackc/pgx/v5 v5.9.2/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4= +github.com/jackc/puddle/v2 
v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE= github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ= github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= @@ -64,20 +77,26 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-isatty v0.0.21 h1:xYae+lCNBP7QuW4PUnNG61ffM4hVIfm+zUzDuSzYLGs= +github.com/mattn/go-isatty v0.0.21/go.mod h1:ZXfXG4SQHsB/w3ZeOYbR0PrPwLy+n6xiMrJlRFqopa4= github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI= github.com/mdelapenya/tlscert v0.2.0/go.mod h1:O4njj3ELLnJjGdkN7M/vIVCpZ+Cf0L6muqOG4tLSl8o= +github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY= +github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod 
h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/go-archive v0.2.0 h1:zg5QDUM2mi0JIM9fdQZWC7U8+2ZfixfTYoHL7rWUcP8= github.com/moby/go-archive v0.2.0/go.mod h1:mNeivT14o8xU+5q1YnNrkQVpK+dnNe/K6fHqnTg4qPU= -github.com/moby/moby/api v1.54.1 h1:TqVzuJkOLsgLDDwNLmYqACUuTehOHRGKiPhvH8V3Nn4= -github.com/moby/moby/api v1.54.1/go.mod h1:+RQ6wluLwtYaTd1WnPLykIDPekkuyD/ROWQClE83pzs= -github.com/moby/moby/client v0.4.0 h1:S+2XegzHQrrvTCvF6s5HFzcrywWQmuVnhOXe2kiWjIw= -github.com/moby/moby/client v0.4.0/go.mod h1:QWPbvWchQbxBNdaLSpoKpCdf5E+WxFAgNHogCWDoa7g= +github.com/moby/moby/api v1.54.2 h1:wiat9QAhnDQjA7wk1kh/TqHz2I1uUA7M7t9SAl/JNXg= +github.com/moby/moby/api v1.54.2/go.mod h1:+RQ6wluLwtYaTd1WnPLykIDPekkuyD/ROWQClE83pzs= +github.com/moby/moby/client v0.4.1 h1:DMQgisVoMkmMs7fp3ROSdiBnoAu8+vo3GggFl06M/wY= +github.com/moby/moby/client v0.4.1/go.mod h1:z52C9O2POPOsnxZAy//WtKcQ32P+jT/NGeXu/7nfjGQ= github.com/moby/patternmatcher v0.6.1 h1:qlhtafmr6kgMIJjKJMDmMWq7WLkKIo23hsrpR3x084U= github.com/moby/patternmatcher v0.6.1/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= @@ -88,28 +107,42 @@ github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= +github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= 
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pressly/goose/v3 v3.27.1 h1:6uEvcprBybDmW4hcz3gYujhARhye+GoWKhEWyzD5sh4= +github.com/pressly/goose/v3 v3.27.1/go.mod h1:maruOxsPnIG2yHHyo8UqKWXYKFcH7Q76csUV7+7KYoM= github.com/redis/go-redis/v9 v9.18.0 h1:pMkxYPkEbMPwRdenAzUNyFNrDgHx9U+DrBabWNfSRQs= github.com/redis/go-redis/v9 v9.18.0/go.mod h1:k3ufPphLU5YXwNTUcCRXGxUoF1fqxnhFQmscfkCoDA0= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE= +github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas= github.com/shirou/gopsutil/v4 v4.26.3 h1:2ESdQt90yU3oXF/CdOlRCJxrP+Am1aBYubTMTfxJ1qc= github.com/shirou/gopsutil/v4 v4.26.3/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ= github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= 
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.3 h1:jmXUvGomnU1o3W/V5h2VEradbpJDwGrzugQQvL0POH4= github.com/stretchr/objx v0.5.3/go.mod h1:rDQraq+vQZU7Fde9LOZLr8Tax6zZvy4kuNKF+QYS+U0= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/testcontainers/testcontainers-go v0.42.0 h1:He3IhTzTZOygSXLJPMX7n44XtK+qhjat1nI9cneBbUY= github.com/testcontainers/testcontainers-go v0.42.0/go.mod h1:vZjdY1YmUA1qEForxOIOazfsrdyORJAbhi0bp8plN30= +github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0 h1:GCbb1ndrF7OTDiIvxXyItaDab4qkzTFJ48LKFdM7EIo= +github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0/go.mod h1:IRPBaI8jXdrNfD0e4Zm7Fbcgaz5shKxOQv4axiL09xs= github.com/testcontainers/testcontainers-go/modules/redis v0.42.0 h1:id/6LH8ZeDrtAUVSuNvZUAJ1kVpb82y1pr9yweAWsRg= github.com/testcontainers/testcontainers-go/modules/redis v0.42.0/go.mod h1:uF0jI8FITagQpBNOgweGBmPf6rP4K0SeL1XFPbsZSSY= github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA= @@ -125,6 +158,7 @@ github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaD go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 h1:CqXxU8VOmDefoh0+ztfGaymYbhdB/tT3zs79QaZTNGY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0/go.mod h1:BuhAPThV8PBHBvg8ZzZ/Ok3idOdhWIodywz2xEcRbJo= go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= 
go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM= @@ -137,24 +171,28 @@ go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09 go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= -golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= -golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= -golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= +golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA= +golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= -golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= -golang.org/x/term v0.41.0 
h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU= -golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY= +golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY= golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 h1:m8qni9SQFH0tJc1X0vmnpw/0t+AImlSvp30sEupozUg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 h1:XF8+t6QQiS0o9ArVan/HW8Q7cycNPGsJf6GA2nXxYAg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM= google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= @@ -162,9 +200,18 @@ google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +modernc.org/libc v1.72.1 h1:db1xwJ6u1kE3KHTFTTbe2GCrczHPKzlURP0aDC4NGD0= +modernc.org/libc v1.72.1/go.mod h1:HRMiC/PhPGLIPM7GzAFCbI+oSgE3dhZ8FWftmRrHVlY= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= +modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/sqlite v1.49.1 h1:dYGHTKcX1sJ+EQDnUzvz4TJ5GbuvhNJa8Fg6ElGx73U= +modernc.org/sqlite v1.49.1/go.mod h1:m0w8xhwYUVY3H6pSDwc3gkJ/irZT/0YEXwBlhaxQEew= pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= diff --git a/integration/internal/harness/authsessionservice.go b/integration/internal/harness/authsessionservice.go new file mode 100644 index 0000000..52d77bb --- /dev/null +++ b/integration/internal/harness/authsessionservice.go @@ -0,0 +1,13 @@ +package harness + +// AuthsessionRedisEnv returns the env-var map that wires the authsession +// binary to a Redis master at masterAddr using the master/replica/password +// shape required by `pkg/redisconn`. The integration suites pass a fixed +// placeholder password because the test Redis container runs without +// `requirepass`. 
// AuthsessionRedisEnv returns the env-var map that wires the authsession
// binary to a Redis master at masterAddr using the master/replica/password
// shape required by `pkg/redisconn`. A fixed placeholder password is used
// because the test Redis container runs without `requirepass`.
func AuthsessionRedisEnv(masterAddr string) map[string]string {
	env := make(map[string]string, 2)
	env["AUTHSESSION_REDIS_MASTER_ADDR"] = masterAddr
	env["AUTHSESSION_REDIS_PASSWORD"] = "integration"
	return env
}

// GatewayRedisEnv returns the env-var map that wires the gateway binary to a
// Redis master at masterAddr using the master/replica/password shape required
// by `pkg/redisconn`. A fixed placeholder password is used because the test
// Redis container runs without `requirepass`.
func GatewayRedisEnv(masterAddr string) map[string]string {
	env := make(map[string]string, 2)
	env["GATEWAY_REDIS_MASTER_ADDR"] = masterAddr
	env["GATEWAY_REDIS_PASSWORD"] = "integration"
	return env
}
+ Env map[string]string +} + +// StartLobbyServicePersistence brings up one isolated PostgreSQL container, +// provisions the `lobby` schema with the `lobbyservice` role, and returns +// the environment entries that wire the lobby-service binary at that +// container plus the supplied Redis master address. +// +// The returned password (`integration`) matches the architectural rule that +// Redis traffic is password-protected; miniredis accepts arbitrary password +// values when its own RequireAuth is not engaged, so the same value works +// against both miniredis and the real `tcredis` runtime. +// +// Cleanup of the container is handled by StartPostgresContainer through +// `t.Cleanup`; callers do not need to defer anything. +func StartLobbyServicePersistence(t testing.TB, redisMasterAddr string) LobbyServicePersistence { + t.Helper() + + rt := StartPostgresContainer(t) + if err := rt.EnsureRoleAndSchema(context.Background(), "lobby", "lobbyservice", "lobbyservice"); err != nil { + t.Fatalf("ensure lobby schema/role: %v", err) + } + + env := WithPostgres(rt, "LOBBY", "lobby", "lobbyservice") + env["LOBBY_REDIS_MASTER_ADDR"] = redisMasterAddr + env["LOBBY_REDIS_PASSWORD"] = "integration" + return LobbyServicePersistence{ + Postgres: rt, + Env: env, + } +} diff --git a/integration/internal/harness/mailservice.go b/integration/internal/harness/mailservice.go new file mode 100644 index 0000000..497e37f --- /dev/null +++ b/integration/internal/harness/mailservice.go @@ -0,0 +1,51 @@ +package harness + +import ( + "context" + "testing" +) + +// MailServicePersistence captures the per-test persistence dependencies of +// the Mail Service binary: a PostgreSQL container hosting the `mail` schema +// owned by the `mailservice` role, and the Redis credentials that point the +// service at the caller-supplied master address. 
type MailServicePersistence struct {
	// Postgres exposes the started container so tests that need direct SQL
	// access to the mail schema (verifying side effects, seeding fixtures)
	// can read or write through it.
	Postgres *PostgresRuntime

	// Env carries the environment entries that must be passed to the
	// mail-service process. It is safe to merge into the caller's existing env
	// map, or to use as-is and append further MAIL_* knobs in place.
	// Mutating it is safe: each StartMailServicePersistence call builds a
	// fresh map, so no state is shared between tests.
	Env map[string]string
}
+func StartMailServicePersistence(t testing.TB, redisMasterAddr string) MailServicePersistence { + t.Helper() + + rt := StartPostgresContainer(t) + if err := rt.EnsureRoleAndSchema(context.Background(), "mail", "mailservice", "mailservice"); err != nil { + t.Fatalf("ensure mail schema/role: %v", err) + } + + env := WithPostgres(rt, "MAIL", "mail", "mailservice") + env["MAIL_REDIS_MASTER_ADDR"] = redisMasterAddr + env["MAIL_REDIS_PASSWORD"] = "integration" + return MailServicePersistence{ + Postgres: rt, + Env: env, + } +} diff --git a/integration/internal/harness/notificationservice.go b/integration/internal/harness/notificationservice.go new file mode 100644 index 0000000..ec1d5af --- /dev/null +++ b/integration/internal/harness/notificationservice.go @@ -0,0 +1,55 @@ +package harness + +import ( + "context" + "testing" +) + +// NotificationServicePersistence captures the per-test persistence +// dependencies of the Notification Service binary: a PostgreSQL container +// hosting the `notification` schema owned by the `notificationservice` role, +// and the Redis credentials that point the service at the caller-supplied +// master address. +type NotificationServicePersistence struct { + // Postgres exposes the started container so tests that need direct SQL + // access to the notification schema (verifying side effects, seeding + // fixtures) can read or write through it. + Postgres *PostgresRuntime + + // Env carries the environment entries that must be passed to the + // notification-service process. It is safe to merge into the caller's + // existing env map, or to use as-is and append further NOTIFICATION_* + // knobs in place. + Env map[string]string +} + +// StartNotificationServicePersistence brings up one isolated PostgreSQL +// container, provisions the `notification` schema with the +// `notificationservice` role, and returns the environment entries that wire +// the notification-service binary at that container plus the supplied Redis +// master address. 
+// +// The returned password (`integration`) matches the architectural rule that +// Redis traffic is password-protected; miniredis accepts arbitrary password +// values when its own RequireAuth is not engaged, so the same value works +// against both miniredis and the real `tcredis` runtime. +// +// Cleanup of the container is handled by the underlying +// StartPostgresContainer through `t.Cleanup`; callers do not need to defer +// anything. +func StartNotificationServicePersistence(t testing.TB, redisMasterAddr string) NotificationServicePersistence { + t.Helper() + + rt := StartPostgresContainer(t) + if err := rt.EnsureRoleAndSchema(context.Background(), "notification", "notificationservice", "notificationservice"); err != nil { + t.Fatalf("ensure notification schema/role: %v", err) + } + + env := WithPostgres(rt, "NOTIFICATION", "notification", "notificationservice") + env["NOTIFICATION_REDIS_MASTER_ADDR"] = redisMasterAddr + env["NOTIFICATION_REDIS_PASSWORD"] = "integration" + return NotificationServicePersistence{ + Postgres: rt, + Env: env, + } +} diff --git a/integration/internal/harness/postgres_container.go b/integration/internal/harness/postgres_container.go new file mode 100644 index 0000000..6a03f76 --- /dev/null +++ b/integration/internal/harness/postgres_container.go @@ -0,0 +1,241 @@ +package harness + +import ( + "context" + "fmt" + "net" + "net/url" + "strings" + "sync" + "testing" + "time" + + "galaxy/postgres" + + testcontainers "github.com/testcontainers/testcontainers-go" + tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres" + "github.com/testcontainers/testcontainers-go/wait" +) + +const ( + defaultPostgresContainerImage = "postgres:16-alpine" + defaultPostgresDatabase = "galaxy_integration" + defaultPostgresSuperuser = "galaxy_integration" + defaultPostgresSuperPassword = "galaxy_integration" + + postgresAdminConnectTimeout = 5 * time.Second + postgresStartupTimeout = 60 * time.Second +) + +// PostgresRuntime stores 
type PostgresRuntime struct {
	// Container is the underlying testcontainers handle, exposed so tests can
	// inspect or operate on the running instance directly.
	Container *tcpostgres.PostgresContainer

	// Connection coordinates parsed once from the superuser DSN at startup.
	baseDSN  string
	host     string
	port     string
	database string

	// mu guards creds. EnsureRoleAndSchema writes the map and DSNForSchema
	// reads it, possibly from concurrent subtests sharing one runtime.
	mu    sync.Mutex
	creds map[string]string
}

// StartPostgresContainer starts one isolated PostgreSQL container and registers
// automatic cleanup for the suite. The container exposes a superuser created
// from the package-level constants; per-service roles are issued lazily by
// EnsureRoleAndSchema.
func StartPostgresContainer(t testing.TB) *PostgresRuntime {
	t.Helper()

	ctx := context.Background()

	container, err := tcpostgres.Run(ctx,
		defaultPostgresContainerImage,
		tcpostgres.WithDatabase(defaultPostgresDatabase),
		tcpostgres.WithUsername(defaultPostgresSuperuser),
		tcpostgres.WithPassword(defaultPostgresSuperPassword),
		// The default Postgres image emits the "ready to accept connections"
		// log line twice during startup: once during temporary bootstrap, once
		// after the real listener opens on the mapped port. Waiting for the
		// second occurrence avoids racing the temporary instance.
		testcontainers.WithWaitStrategy(
			wait.ForLog("database system is ready to accept connections").
				WithOccurrence(2).
				WithStartupTimeout(postgresStartupTimeout),
		),
	)
	if err != nil {
		t.Fatalf("start postgres container: %v", err)
	}

	// Cleanup is tied to the test lifetime; callers never terminate manually.
	t.Cleanup(func() {
		if err := testcontainers.TerminateContainer(container); err != nil {
			t.Errorf("terminate postgres container: %v", err)
		}
	})

	baseDSN, err := container.ConnectionString(ctx, "sslmode=disable")
	if err != nil {
		t.Fatalf("resolve postgres connection string: %v", err)
	}

	// Host/port are extracted once here so DSNForSchema can rebuild per-role
	// DSNs without re-parsing the superuser connection string.
	host, port, err := splitHostPort(baseDSN)
	if err != nil {
		t.Fatalf("parse postgres connection string: %v", err)
	}

	return &PostgresRuntime{
		Container: container,
		baseDSN:   baseDSN,
		host:      host,
		port:      port,
		database:  defaultPostgresDatabase,
		creds:     map[string]string{},
	}
}

// BaseDSN returns the superuser DSN exposed by the container, suitable for
// administrative tasks such as creating roles or schemas. Callers should
// prefer DSNForSchema for service-scoped access.
func (rt *PostgresRuntime) BaseDSN() string {
	return rt.baseDSN
}
+func (rt *PostgresRuntime) DSNForSchema(schema, role string) string { + rt.mu.Lock() + password, ok := rt.creds[role] + rt.mu.Unlock() + if !ok { + panic(fmt.Sprintf( + "harness: DSNForSchema called for role %q with no credentials; call EnsureRoleAndSchema first", + role, + )) + } + + values := url.Values{} + values.Set("search_path", schema) + values.Set("sslmode", "disable") + + dsn := url.URL{ + Scheme: "postgres", + User: url.UserPassword(role, password), + Host: net.JoinHostPort(rt.host, rt.port), + Path: "/" + rt.database, + RawQuery: values.Encode(), + } + return dsn.String() +} + +// EnsureRoleAndSchema creates role with the given password (idempotent) and a +// schema owned by that role (idempotent), then grants USAGE so the role can +// resolve table references inside it. The credentials are cached for later +// DSNForSchema lookups. +// +// The operation runs through a temporary administrative connection opened +// from BaseDSN; the connection is closed before the call returns. 
func (rt *PostgresRuntime) EnsureRoleAndSchema(ctx context.Context, schema, role, password string) error {
	// Reject blank identifiers up front; quoting would otherwise happily
	// produce `""`, which is an invalid (zero-length) Postgres identifier.
	if strings.TrimSpace(schema) == "" {
		return fmt.Errorf("ensure role and schema: schema must not be empty")
	}
	if strings.TrimSpace(role) == "" {
		return fmt.Errorf("ensure role and schema: role must not be empty")
	}

	// Short-lived administrative connection built from the superuser DSN.
	cfg := postgres.DefaultConfig()
	cfg.PrimaryDSN = rt.baseDSN
	cfg.OperationTimeout = postgresAdminConnectTimeout

	db, err := postgres.OpenPrimary(ctx, cfg)
	if err != nil {
		return fmt.Errorf("ensure role and schema: open admin connection: %w", err)
	}
	defer func() {
		// Best-effort close; the admin connection is throwaway.
		_ = db.Close()
	}()

	// CREATE ROLE has no IF NOT EXISTS form, so idempotency is implemented
	// with a DO block guarded by a pg_roles lookup. Identifiers and literals
	// are quoted manually because DDL positions cannot take bind parameters.
	//
	// NOTE(review): if the role already exists with a *different* password,
	// this block neither fails nor rotates it, yet the supplied password is
	// still cached below — DSNForSchema would then hand out a wrong password.
	// Benign as long as each test uses a fresh container or stable passwords.
	createRole := fmt.Sprintf(`DO $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = %s) THEN
        CREATE ROLE %s LOGIN PASSWORD %s;
    END IF;
END $$;`,
		quoteSQLLiteral(role),
		quoteSQLIdentifier(role),
		quoteSQLLiteral(password),
	)
	if _, err := db.ExecContext(ctx, createRole); err != nil {
		return fmt.Errorf("ensure role and schema: create role %q: %w", role, err)
	}

	// Schema ownership gives the role full privileges inside its namespace.
	createSchema := fmt.Sprintf(`CREATE SCHEMA IF NOT EXISTS %s AUTHORIZATION %s;`,
		quoteSQLIdentifier(schema),
		quoteSQLIdentifier(role),
	)
	if _, err := db.ExecContext(ctx, createSchema); err != nil {
		return fmt.Errorf("ensure role and schema: create schema %q: %w", schema, err)
	}

	// Explicit USAGE grant; presumably redundant when the role already owns
	// the schema, but harmless and self-documenting.
	grantUsage := fmt.Sprintf(`GRANT USAGE ON SCHEMA %s TO %s;`,
		quoteSQLIdentifier(schema),
		quoteSQLIdentifier(role),
	)
	if _, err := db.ExecContext(ctx, grantUsage); err != nil {
		return fmt.Errorf("ensure role and schema: grant usage on %q to %q: %w", schema, role, err)
	}

	// Record credentials only after all DDL succeeded, so DSNForSchema never
	// sees a role whose provisioning failed halfway.
	rt.mu.Lock()
	rt.creds[role] = password
	rt.mu.Unlock()

	return nil
}
+// +// The returned map carries only `_POSTGRES_PRIMARY_DSN`; the other +// per-service Postgres knobs (operation timeout, pool sizes) keep the +// defaults provided by `pkg/postgres.DefaultConfig`. +func WithPostgres(rt *PostgresRuntime, envPrefix, schema, role string) map[string]string { + return map[string]string{ + envPrefix + "_POSTGRES_PRIMARY_DSN": rt.DSNForSchema(schema, role), + } +} + +// quoteSQLIdentifier wraps name in double quotes and escapes any embedded +// double quote, producing a SQL identifier that survives reserved words such +// as `user`. +func quoteSQLIdentifier(name string) string { + return `"` + strings.ReplaceAll(name, `"`, `""`) + `"` +} + +// quoteSQLLiteral wraps value in single quotes and escapes any embedded single +// quote, producing a SQL literal usable in DDL statements where parameter +// binding is not available. +func quoteSQLLiteral(value string) string { + return "'" + strings.ReplaceAll(value, "'", "''") + "'" +} + +// splitHostPort extracts host and port from a postgres:// DSN. 
// splitHostPort extracts the host and port components from a postgres:// DSN,
// failing when either component is absent.
func splitHostPort(dsn string) (string, string, error) {
	u, err := url.Parse(dsn)
	if err != nil {
		return "", "", fmt.Errorf("parse dsn: %w", err)
	}

	host, port := u.Hostname(), u.Port()
	switch {
	case host == "", port == "":
		// A DSN without an explicit port is useless to the harness, which
		// rebuilds per-role DSNs from these coordinates.
		return "", "", fmt.Errorf("dsn %q missing host or port", dsn)
	}
	return host, port, nil
}
// TestEnsureRoleAndSchemaIsIdempotent verifies that provisioning the same
// schema/role/password pair twice succeeds, matching the CREATE-if-absent
// semantics documented on EnsureRoleAndSchema.
func TestEnsureRoleAndSchemaIsIdempotent(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	t.Cleanup(cancel)

	rt := StartPostgresContainer(t)

	require.NoError(t, rt.EnsureRoleAndSchema(ctx, "schema_x", "role_x", "pass_x"))
	require.NoError(t, rt.EnsureRoleAndSchema(ctx, "schema_x", "role_x", "pass_x"))
}

// TestEnsureRoleAndSchemaSupportsReservedWordIdentifiers verifies that
// identifier quoting keeps provisioning working when the schema name is a
// SQL reserved word.
func TestEnsureRoleAndSchemaSupportsReservedWordIdentifiers(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	t.Cleanup(cancel)

	rt := StartPostgresContainer(t)

	// `user` is a SQL reserved word; identifier quoting must keep this working.
	require.NoError(t, rt.EnsureRoleAndSchema(ctx, "user", "userservice", "secret"))

	cfg := postgres.DefaultConfig()
	cfg.PrimaryDSN = rt.DSNForSchema("user", "userservice")
	cfg.OperationTimeout = 5 * time.Second

	db, err := postgres.OpenPrimary(ctx, cfg)
	require.NoError(t, err)
	t.Cleanup(func() {
		require.NoError(t, db.Close())
	})

	require.NoError(t, postgres.Ping(ctx, db, cfg.OperationTimeout))
}

// TestWithPostgresBuildsPrimaryDSNEnv checks the pure env-builder path:
// exactly one `_POSTGRES_PRIMARY_DSN` entry whose URL carries role, password,
// host:port, database, search_path and sslmode. No container is started.
func TestWithPostgresBuildsPrimaryDSNEnv(t *testing.T) {
	t.Parallel()

	rt := newRuntimeForTest("127.0.0.1", "55432", "galaxy_integration", "userservice", "s3cr3t!")

	env := WithPostgres(rt, "USERSERVICE", "user", "userservice")

	require.Len(t, env, 1)

	dsn, ok := env["USERSERVICE_POSTGRES_PRIMARY_DSN"]
	require.True(t, ok, "missing USERSERVICE_POSTGRES_PRIMARY_DSN entry")

	parsed, err := url.Parse(dsn)
	require.NoError(t, err)
	require.Equal(t, "postgres", parsed.Scheme)
	require.Equal(t, "127.0.0.1:55432", parsed.Host)
	require.Equal(t, "/galaxy_integration", parsed.Path)
	require.Equal(t, "userservice", parsed.User.Username())

	password, hasPassword := parsed.User.Password()
	require.True(t, hasPassword)
	require.Equal(t, "s3cr3t!", password)

	query := parsed.Query()
	require.Equal(t, "user", query.Get("search_path"))
	require.Equal(t, "disable", query.Get("sslmode"))
}

// TestDSNForSchemaPanicsWithoutCredentials pins the exact panic message used
// to flag a missing EnsureRoleAndSchema call as a test setup bug.
func TestDSNForSchemaPanicsWithoutCredentials(t *testing.T) {
	t.Parallel()

	rt := newRuntimeForTest("127.0.0.1", "55432", "galaxy_integration", "userservice", "secret")

	require.PanicsWithValue(t,
		`harness: DSNForSchema called for role "unknown" with no credentials; call EnsureRoleAndSchema first`,
		func() {
			_ = rt.DSNForSchema("user", "unknown")
		},
	)
}

// newRuntimeForTest builds a PostgresRuntime without spinning a container.
// It exists only to exercise the pure DSN/env-builder paths; baseDSN and
// Container are deliberately left zero because those paths never touch them.
func newRuntimeForTest(host, port, database, role, password string) *PostgresRuntime {
	return &PostgresRuntime{
		host:     host,
		port:     port,
		database: database,
		creds:    map[string]string{role: password},
	}
}
It is safe to merge into the caller's existing env + // map, or to use as-is and append further USERSERVICE_* knobs in place. + Env map[string]string +} + +// StartUserServicePersistence brings up one isolated PostgreSQL container, +// provisions the `user` schema with the `userservice` role, and returns the +// environment entries that wire the userservice binary at that container plus +// the supplied Redis master address. +// +// The returned password (`integration`) matches the architectural rule that +// Redis traffic is password-protected; miniredis accepts arbitrary password +// values when its own RequireAuth is not engaged, so the same value works +// against both miniredis and the real `tcredis` runtime. +// +// Cleanup of the container is handled by the underlying StartPostgresContainer +// through `t.Cleanup`; callers do not need to defer anything. +func StartUserServicePersistence(t testing.TB, redisMasterAddr string) UserServicePersistence { + t.Helper() + + rt := StartPostgresContainer(t) + if err := rt.EnsureRoleAndSchema(context.Background(), "user", "userservice", "userservice"); err != nil { + t.Fatalf("ensure user schema/role: %v", err) + } + + env := WithPostgres(rt, "USERSERVICE", "user", "userservice") + env["USERSERVICE_REDIS_MASTER_ADDR"] = redisMasterAddr + env["USERSERVICE_REDIS_PASSWORD"] = "integration" + return UserServicePersistence{ + Postgres: rt, + Env: env, + } +} diff --git a/integration/lobbynotification/lobby_notification_test.go b/integration/lobbynotification/lobby_notification_test.go index 2b72895..55fb574 100644 --- a/integration/lobbynotification/lobby_notification_test.go +++ b/integration/lobbynotification/lobby_notification_test.go @@ -218,13 +218,12 @@ func newLobbyNotificationHarness(t *testing.T, gmHandler http.HandlerFunc) *lobb userServiceBinary := harness.BuildBinary(t, "userservice", "./user/cmd/userservice") lobbyBinary := harness.BuildBinary(t, "lobby", "./lobby/cmd/lobby") - userServiceProcess := 
harness.StartProcess(t, "userservice", userServiceBinary, map[string]string{ - "USERSERVICE_LOG_LEVEL": "info", - "USERSERVICE_INTERNAL_HTTP_ADDR": userServiceAddr, - "USERSERVICE_REDIS_ADDR": redisRuntime.Addr, - "OTEL_TRACES_EXPORTER": "none", - "OTEL_METRICS_EXPORTER": "none", - }) + userServiceEnv := harness.StartUserServicePersistence(t, redisRuntime.Addr).Env + userServiceEnv["USERSERVICE_LOG_LEVEL"] = "info" + userServiceEnv["USERSERVICE_INTERNAL_HTTP_ADDR"] = userServiceAddr + userServiceEnv["OTEL_TRACES_EXPORTER"] = "none" + userServiceEnv["OTEL_METRICS_EXPORTER"] = "none" + userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, userServiceEnv) waitForUserServiceReady(t, userServiceProcess, "http://"+userServiceAddr) // Use unique stream prefixes per test so concurrent runs do not bleed. @@ -234,23 +233,22 @@ func newLobbyNotificationHarness(t *testing.T, gmHandler http.HandlerFunc) *lobb jobResultsStream := runtimeJobResultsStream + ":" + suffix gmEventsStream := gmLobbyEventsStream + ":" + suffix - lobbyProcess := harness.StartProcess(t, "lobby", lobbyBinary, map[string]string{ - "LOBBY_LOG_LEVEL": "info", - "LOBBY_PUBLIC_HTTP_ADDR": lobbyPublicAddr, - "LOBBY_INTERNAL_HTTP_ADDR": lobbyInternalAddr, - "LOBBY_REDIS_ADDR": redisRuntime.Addr, - "LOBBY_USER_SERVICE_BASE_URL": "http://" + userServiceAddr, - "LOBBY_GM_BASE_URL": gmStub.URL, - "LOBBY_NOTIFICATION_INTENTS_STREAM": intentsStream, - "LOBBY_USER_LIFECYCLE_STREAM": lifecycleStream, - "LOBBY_RUNTIME_JOB_RESULTS_STREAM": jobResultsStream, - "LOBBY_GM_EVENTS_STREAM": gmEventsStream, - "LOBBY_RUNTIME_JOB_RESULTS_READ_BLOCK_TIMEOUT": "200ms", - "LOBBY_USER_LIFECYCLE_READ_BLOCK_TIMEOUT": "200ms", - "LOBBY_GM_EVENTS_READ_BLOCK_TIMEOUT": "200ms", - "OTEL_TRACES_EXPORTER": "none", - "OTEL_METRICS_EXPORTER": "none", - }) + lobbyEnv := harness.StartLobbyServicePersistence(t, redisRuntime.Addr).Env + lobbyEnv["LOBBY_LOG_LEVEL"] = "info" + lobbyEnv["LOBBY_PUBLIC_HTTP_ADDR"] = 
lobbyPublicAddr + lobbyEnv["LOBBY_INTERNAL_HTTP_ADDR"] = lobbyInternalAddr + lobbyEnv["LOBBY_USER_SERVICE_BASE_URL"] = "http://" + userServiceAddr + lobbyEnv["LOBBY_GM_BASE_URL"] = gmStub.URL + lobbyEnv["LOBBY_NOTIFICATION_INTENTS_STREAM"] = intentsStream + lobbyEnv["LOBBY_USER_LIFECYCLE_STREAM"] = lifecycleStream + lobbyEnv["LOBBY_RUNTIME_JOB_RESULTS_STREAM"] = jobResultsStream + lobbyEnv["LOBBY_GM_EVENTS_STREAM"] = gmEventsStream + lobbyEnv["LOBBY_RUNTIME_JOB_RESULTS_READ_BLOCK_TIMEOUT"] = "200ms" + lobbyEnv["LOBBY_USER_LIFECYCLE_READ_BLOCK_TIMEOUT"] = "200ms" + lobbyEnv["LOBBY_GM_EVENTS_READ_BLOCK_TIMEOUT"] = "200ms" + lobbyEnv["OTEL_TRACES_EXPORTER"] = "none" + lobbyEnv["OTEL_METRICS_EXPORTER"] = "none" + lobbyProcess := harness.StartProcess(t, "lobby", lobbyBinary, lobbyEnv) harness.WaitForHTTPStatus(t, lobbyProcess, "http://"+lobbyInternalAddr+"/readyz", http.StatusOK) return &lobbyNotificationHarness{ diff --git a/integration/lobbyuser/lobby_user_test.go b/integration/lobbyuser/lobby_user_test.go index 3c0ac56..3084a85 100644 --- a/integration/lobbyuser/lobby_user_test.go +++ b/integration/lobbyuser/lobby_user_test.go @@ -106,25 +106,23 @@ func newLobbyUserHarness(t *testing.T) *lobbyUserHarness { userServiceBinary := harness.BuildBinary(t, "userservice", "./user/cmd/userservice") lobbyBinary := harness.BuildBinary(t, "lobby", "./lobby/cmd/lobby") - userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, map[string]string{ - "USERSERVICE_LOG_LEVEL": "info", - "USERSERVICE_INTERNAL_HTTP_ADDR": userServiceAddr, - "USERSERVICE_REDIS_ADDR": redisRuntime.Addr, - "OTEL_TRACES_EXPORTER": "none", - "OTEL_METRICS_EXPORTER": "none", - }) + userServiceEnv := harness.StartUserServicePersistence(t, redisRuntime.Addr).Env + userServiceEnv["USERSERVICE_LOG_LEVEL"] = "info" + userServiceEnv["USERSERVICE_INTERNAL_HTTP_ADDR"] = userServiceAddr + userServiceEnv["OTEL_TRACES_EXPORTER"] = "none" + userServiceEnv["OTEL_METRICS_EXPORTER"] = "none" + 
userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, userServiceEnv) waitForUserServiceReady(t, userServiceProcess, "http://"+userServiceAddr) - lobbyProcess := harness.StartProcess(t, "lobby", lobbyBinary, map[string]string{ - "LOBBY_LOG_LEVEL": "info", - "LOBBY_PUBLIC_HTTP_ADDR": lobbyPublicAddr, - "LOBBY_INTERNAL_HTTP_ADDR": lobbyInternalAddr, - "LOBBY_REDIS_ADDR": redisRuntime.Addr, - "LOBBY_USER_SERVICE_BASE_URL": "http://" + userServiceAddr, - "LOBBY_GM_BASE_URL": gmStub.URL, - "OTEL_TRACES_EXPORTER": "none", - "OTEL_METRICS_EXPORTER": "none", - }) + lobbyEnv := harness.StartLobbyServicePersistence(t, redisRuntime.Addr).Env + lobbyEnv["LOBBY_LOG_LEVEL"] = "info" + lobbyEnv["LOBBY_PUBLIC_HTTP_ADDR"] = lobbyPublicAddr + lobbyEnv["LOBBY_INTERNAL_HTTP_ADDR"] = lobbyInternalAddr + lobbyEnv["LOBBY_USER_SERVICE_BASE_URL"] = "http://" + userServiceAddr + lobbyEnv["LOBBY_GM_BASE_URL"] = gmStub.URL + lobbyEnv["OTEL_TRACES_EXPORTER"] = "none" + lobbyEnv["OTEL_METRICS_EXPORTER"] = "none" + lobbyProcess := harness.StartProcess(t, "lobby", lobbyBinary, lobbyEnv) harness.WaitForHTTPStatus(t, lobbyProcess, "http://"+lobbyInternalAddr+"/readyz", http.StatusOK) return &lobbyUserHarness{ diff --git a/integration/notificationgateway/notification_gateway_test.go b/integration/notificationgateway/notification_gateway_test.go index f78d6fa..7943a0a 100644 --- a/integration/notificationgateway/notification_gateway_test.go +++ b/integration/notificationgateway/notification_gateway_test.go @@ -167,35 +167,35 @@ func newNotificationGatewayHarness(t *testing.T) *notificationGatewayHarness { notificationBinary := harness.BuildBinary(t, "notification", "./notification/cmd/notification") gatewayBinary := harness.BuildBinary(t, "gateway", "./gateway/cmd/gateway") - userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, map[string]string{ - "USERSERVICE_LOG_LEVEL": "info", - "USERSERVICE_INTERNAL_HTTP_ADDR": userServiceAddr, - 
"USERSERVICE_REDIS_ADDR": redisRuntime.Addr, - "OTEL_TRACES_EXPORTER": "none", - "OTEL_METRICS_EXPORTER": "none", - }) + userServiceEnv := harness.StartUserServicePersistence(t, redisRuntime.Addr).Env + userServiceEnv["USERSERVICE_LOG_LEVEL"] = "info" + userServiceEnv["USERSERVICE_INTERNAL_HTTP_ADDR"] = userServiceAddr + userServiceEnv["OTEL_TRACES_EXPORTER"] = "none" + userServiceEnv["OTEL_METRICS_EXPORTER"] = "none" + userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, userServiceEnv) waitForUserServiceReady(t, userServiceProcess, "http://"+userServiceAddr) - notificationProcess := harness.StartProcess(t, "notification", notificationBinary, map[string]string{ - "NOTIFICATION_LOG_LEVEL": "info", - "NOTIFICATION_INTERNAL_HTTP_ADDR": notificationInternalAddr, - "NOTIFICATION_REDIS_ADDR": redisRuntime.Addr, - "NOTIFICATION_USER_SERVICE_BASE_URL": "http://" + userServiceAddr, - "NOTIFICATION_USER_SERVICE_TIMEOUT": time.Second.String(), - "NOTIFICATION_INTENTS_READ_BLOCK_TIMEOUT": "100ms", - "NOTIFICATION_ROUTE_BACKOFF_MIN": "100ms", - "NOTIFICATION_ROUTE_BACKOFF_MAX": "100ms", - "NOTIFICATION_GATEWAY_CLIENT_EVENTS_STREAM": notificationGatewayClientEventsStream, - "OTEL_TRACES_EXPORTER": "none", - "OTEL_METRICS_EXPORTER": "none", - }) + notificationEnv := harness.StartNotificationServicePersistence(t, redisRuntime.Addr).Env + notificationEnv["NOTIFICATION_LOG_LEVEL"] = "info" + notificationEnv["NOTIFICATION_INTERNAL_HTTP_ADDR"] = notificationInternalAddr + notificationEnv["NOTIFICATION_USER_SERVICE_BASE_URL"] = "http://" + userServiceAddr + notificationEnv["NOTIFICATION_USER_SERVICE_TIMEOUT"] = time.Second.String() + notificationEnv["NOTIFICATION_INTENTS_READ_BLOCK_TIMEOUT"] = "100ms" + notificationEnv["NOTIFICATION_ROUTE_BACKOFF_MIN"] = "100ms" + notificationEnv["NOTIFICATION_ROUTE_BACKOFF_MAX"] = "100ms" + notificationEnv["NOTIFICATION_GATEWAY_CLIENT_EVENTS_STREAM"] = notificationGatewayClientEventsStream + 
notificationEnv["OTEL_TRACES_EXPORTER"] = "none" + notificationEnv["OTEL_METRICS_EXPORTER"] = "none" + notificationProcess := harness.StartProcess(t, "notification", notificationBinary, notificationEnv) harness.WaitForHTTPStatus(t, notificationProcess, "http://"+notificationInternalAddr+"/readyz", http.StatusOK) gatewayProcess := harness.StartProcess(t, "gateway", gatewayBinary, map[string]string{ "GATEWAY_LOG_LEVEL": "info", "GATEWAY_PUBLIC_HTTP_ADDR": gatewayPublicAddr, "GATEWAY_AUTHENTICATED_GRPC_ADDR": gatewayGRPCAddr, - "GATEWAY_SESSION_CACHE_REDIS_ADDR": redisRuntime.Addr, + "GATEWAY_REDIS_MASTER_ADDR": redisRuntime.Addr, + + "GATEWAY_REDIS_PASSWORD": "integration", "GATEWAY_SESSION_CACHE_REDIS_KEY_PREFIX": "gateway:session:", "GATEWAY_SESSION_EVENTS_REDIS_STREAM": "gateway:session_events", "GATEWAY_CLIENT_EVENTS_REDIS_STREAM": notificationGatewayClientEventsStream, diff --git a/integration/notificationmail/notification_mail_test.go b/integration/notificationmail/notification_mail_test.go index cfa007d..3cb464f 100644 --- a/integration/notificationmail/notification_mail_test.go +++ b/integration/notificationmail/notification_mail_test.go @@ -332,45 +332,42 @@ func newNotificationMailHarness(t *testing.T) *notificationMailHarness { mailBinary := harness.BuildBinary(t, "mail", "./mail/cmd/mail") notificationBinary := harness.BuildBinary(t, "notification", "./notification/cmd/notification") - userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, map[string]string{ - "USERSERVICE_LOG_LEVEL": "info", - "USERSERVICE_INTERNAL_HTTP_ADDR": userServiceAddr, - "USERSERVICE_REDIS_ADDR": redisRuntime.Addr, - "OTEL_TRACES_EXPORTER": "none", - "OTEL_METRICS_EXPORTER": "none", - }) + userServiceEnv := harness.StartUserServicePersistence(t, redisRuntime.Addr).Env + userServiceEnv["USERSERVICE_LOG_LEVEL"] = "info" + userServiceEnv["USERSERVICE_INTERNAL_HTTP_ADDR"] = userServiceAddr + userServiceEnv["OTEL_TRACES_EXPORTER"] = "none" + 
userServiceEnv["OTEL_METRICS_EXPORTER"] = "none" + userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, userServiceEnv) waitForUserServiceReady(t, userServiceProcess, "http://"+userServiceAddr) - mailProcess := harness.StartProcess(t, "mail", mailBinary, map[string]string{ - "MAIL_LOG_LEVEL": "info", - "MAIL_INTERNAL_HTTP_ADDR": mailInternalAddr, - "MAIL_REDIS_ADDR": redisRuntime.Addr, - "MAIL_TEMPLATE_DIR": mailTemplateDir(t), - "MAIL_SMTP_MODE": "stub", - "MAIL_STREAM_BLOCK_TIMEOUT": "100ms", - "MAIL_OPERATOR_REQUEST_TIMEOUT": time.Second.String(), - "MAIL_SHUTDOWN_TIMEOUT": "2s", - "OTEL_TRACES_EXPORTER": "none", - "OTEL_METRICS_EXPORTER": "none", - }) + mailEnv := harness.StartMailServicePersistence(t, redisRuntime.Addr).Env + mailEnv["MAIL_LOG_LEVEL"] = "info" + mailEnv["MAIL_INTERNAL_HTTP_ADDR"] = mailInternalAddr + mailEnv["MAIL_TEMPLATE_DIR"] = mailTemplateDir(t) + mailEnv["MAIL_SMTP_MODE"] = "stub" + mailEnv["MAIL_STREAM_BLOCK_TIMEOUT"] = "100ms" + mailEnv["MAIL_OPERATOR_REQUEST_TIMEOUT"] = time.Second.String() + mailEnv["MAIL_SHUTDOWN_TIMEOUT"] = "2s" + mailEnv["OTEL_TRACES_EXPORTER"] = "none" + mailEnv["OTEL_METRICS_EXPORTER"] = "none" + mailProcess := harness.StartProcess(t, "mail", mailBinary, mailEnv) waitForMailReady(t, mailProcess, "http://"+mailInternalAddr) - notificationProcess := harness.StartProcess(t, "notification", notificationBinary, map[string]string{ - "NOTIFICATION_LOG_LEVEL": "info", - "NOTIFICATION_INTERNAL_HTTP_ADDR": notificationInternalAddr, - "NOTIFICATION_REDIS_ADDR": redisRuntime.Addr, - "NOTIFICATION_USER_SERVICE_BASE_URL": "http://" + userServiceAddr, - "NOTIFICATION_USER_SERVICE_TIMEOUT": time.Second.String(), - "NOTIFICATION_INTENTS_READ_BLOCK_TIMEOUT": "100ms", - "NOTIFICATION_ROUTE_BACKOFF_MIN": "100ms", - "NOTIFICATION_ROUTE_BACKOFF_MAX": "100ms", - "NOTIFICATION_ADMIN_EMAILS_GEO_REVIEW_RECOMMENDED": "geo-admin@example.com", - "NOTIFICATION_ADMIN_EMAILS_GAME_GENERATION_FAILED": 
"game-admin@example.com", - "NOTIFICATION_ADMIN_EMAILS_LOBBY_RUNTIME_PAUSED_AFTER_START": "lobby-ops@example.com", - "NOTIFICATION_ADMIN_EMAILS_LOBBY_APPLICATION_SUBMITTED": "lobby-admin@example.com", - "OTEL_TRACES_EXPORTER": "none", - "OTEL_METRICS_EXPORTER": "none", - }) + notificationEnv := harness.StartNotificationServicePersistence(t, redisRuntime.Addr).Env + notificationEnv["NOTIFICATION_LOG_LEVEL"] = "info" + notificationEnv["NOTIFICATION_INTERNAL_HTTP_ADDR"] = notificationInternalAddr + notificationEnv["NOTIFICATION_USER_SERVICE_BASE_URL"] = "http://" + userServiceAddr + notificationEnv["NOTIFICATION_USER_SERVICE_TIMEOUT"] = time.Second.String() + notificationEnv["NOTIFICATION_INTENTS_READ_BLOCK_TIMEOUT"] = "100ms" + notificationEnv["NOTIFICATION_ROUTE_BACKOFF_MIN"] = "100ms" + notificationEnv["NOTIFICATION_ROUTE_BACKOFF_MAX"] = "100ms" + notificationEnv["NOTIFICATION_ADMIN_EMAILS_GEO_REVIEW_RECOMMENDED"] = "geo-admin@example.com" + notificationEnv["NOTIFICATION_ADMIN_EMAILS_GAME_GENERATION_FAILED"] = "game-admin@example.com" + notificationEnv["NOTIFICATION_ADMIN_EMAILS_LOBBY_RUNTIME_PAUSED_AFTER_START"] = "lobby-ops@example.com" + notificationEnv["NOTIFICATION_ADMIN_EMAILS_LOBBY_APPLICATION_SUBMITTED"] = "lobby-admin@example.com" + notificationEnv["OTEL_TRACES_EXPORTER"] = "none" + notificationEnv["OTEL_METRICS_EXPORTER"] = "none" + notificationProcess := harness.StartProcess(t, "notification", notificationBinary, notificationEnv) harness.WaitForHTTPStatus(t, notificationProcess, "http://"+notificationInternalAddr+"/readyz", http.StatusOK) return &notificationMailHarness{ diff --git a/integration/notificationuser/notification_user_test.go b/integration/notificationuser/notification_user_test.go index 1a49a21..51ea036 100644 --- a/integration/notificationuser/notification_user_test.go +++ b/integration/notificationuser/notification_user_test.go @@ -3,6 +3,7 @@ package notificationuser_test import ( "bytes" "context" + "database/sql" "encoding/base64" 
"encoding/json" "errors" @@ -13,6 +14,7 @@ import ( "galaxy/integration/internal/harness" + _ "github.com/jackc/pgx/v5/stdlib" "github.com/redis/go-redis/v9" "github.com/stretchr/testify/require" ) @@ -66,17 +68,13 @@ func TestNotificationUserTemporaryUnavailabilityDoesNotAdvanceOffset(t *testing. return ok && offset.LastProcessedEntryID == messageID }, time.Second, 50*time.Millisecond) - exists, err := h.redis.Exists(context.Background(), notificationMalformedIntentKey(messageID)).Result() - require.NoError(t, err) - require.Zero(t, exists) - - exists, err = h.redis.Exists(context.Background(), notificationRouteKey(messageID, "email:user:"+recipient.UserID)).Result() - require.NoError(t, err) - require.Zero(t, exists) + require.False(t, h.malformedIntentExists(t, messageID)) + require.False(t, h.routeExists(t, messageID, "email:user:"+recipient.UserID)) } type notificationUserHarness struct { redis *redis.Client + pg *sql.DB userServiceURL string @@ -141,31 +139,34 @@ func newNotificationUserHarness(t *testing.T) *notificationUserHarness { userServiceBinary := harness.BuildBinary(t, "userservice", "./user/cmd/userservice") notificationBinary := harness.BuildBinary(t, "notification", "./notification/cmd/notification") - userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, map[string]string{ - "USERSERVICE_LOG_LEVEL": "info", - "USERSERVICE_INTERNAL_HTTP_ADDR": userServiceAddr, - "USERSERVICE_REDIS_ADDR": redisRuntime.Addr, - "OTEL_TRACES_EXPORTER": "none", - "OTEL_METRICS_EXPORTER": "none", - }) + userServiceEnv := harness.StartUserServicePersistence(t, redisRuntime.Addr).Env + userServiceEnv["USERSERVICE_LOG_LEVEL"] = "info" + userServiceEnv["USERSERVICE_INTERNAL_HTTP_ADDR"] = userServiceAddr + userServiceEnv["OTEL_TRACES_EXPORTER"] = "none" + userServiceEnv["OTEL_METRICS_EXPORTER"] = "none" + userServiceProcess := harness.StartProcess(t, "userservice", userServiceBinary, userServiceEnv) waitForUserServiceReady(t, userServiceProcess, 
"http://"+userServiceAddr) - notificationProcess := harness.StartProcess(t, "notification", notificationBinary, map[string]string{ - "NOTIFICATION_LOG_LEVEL": "info", - "NOTIFICATION_INTERNAL_HTTP_ADDR": notificationInternalAddr, - "NOTIFICATION_REDIS_ADDR": redisRuntime.Addr, - "NOTIFICATION_USER_SERVICE_BASE_URL": "http://" + userServiceAddr, - "NOTIFICATION_USER_SERVICE_TIMEOUT": "250ms", - "NOTIFICATION_INTENTS_READ_BLOCK_TIMEOUT": "100ms", - "NOTIFICATION_ROUTE_BACKOFF_MIN": "100ms", - "NOTIFICATION_ROUTE_BACKOFF_MAX": "100ms", - "OTEL_TRACES_EXPORTER": "none", - "OTEL_METRICS_EXPORTER": "none", - }) + notificationPersistence := harness.StartNotificationServicePersistence(t, redisRuntime.Addr) + notificationEnv := notificationPersistence.Env + notificationPG, err := sql.Open("pgx", notificationPersistence.Postgres.DSNForSchema("notification", "notificationservice")) + require.NoError(t, err) + t.Cleanup(func() { _ = notificationPG.Close() }) + notificationEnv["NOTIFICATION_LOG_LEVEL"] = "info" + notificationEnv["NOTIFICATION_INTERNAL_HTTP_ADDR"] = notificationInternalAddr + notificationEnv["NOTIFICATION_USER_SERVICE_BASE_URL"] = "http://" + userServiceAddr + notificationEnv["NOTIFICATION_USER_SERVICE_TIMEOUT"] = "250ms" + notificationEnv["NOTIFICATION_INTENTS_READ_BLOCK_TIMEOUT"] = "100ms" + notificationEnv["NOTIFICATION_ROUTE_BACKOFF_MIN"] = "100ms" + notificationEnv["NOTIFICATION_ROUTE_BACKOFF_MAX"] = "100ms" + notificationEnv["OTEL_TRACES_EXPORTER"] = "none" + notificationEnv["OTEL_METRICS_EXPORTER"] = "none" + notificationProcess := harness.StartProcess(t, "notification", notificationBinary, notificationEnv) harness.WaitForHTTPStatus(t, notificationProcess, "http://"+notificationInternalAddr+"/readyz", http.StatusOK) return &notificationUserHarness{ redis: redisClient, + pg: notificationPG, userServiceURL: "http://" + userServiceAddr, notificationProcess: notificationProcess, userServiceProcess: userServiceProcess, @@ -213,14 +214,27 @@ func (h 
*notificationUserHarness) publishUserIntent(t *testing.T, recipientUserI func (h *notificationUserHarness) waitForRoute(t *testing.T, notificationID string, routeID string) notificationRouteRecord { t.Helper() - key := notificationRouteKey(notificationID, routeID) var route notificationRouteRecord require.Eventually(t, func() bool { - payload, err := h.redis.Get(context.Background(), key).Bytes() - if err != nil { - return false + row := h.pg.QueryRowContext(context.Background(), + `SELECT notification_id, route_id, channel, recipient_ref, status, resolved_email, resolved_locale + FROM routes WHERE notification_id = $1 AND route_id = $2`, + notificationID, routeID, + ) + if err := row.Scan( + &route.NotificationID, + &route.RouteID, + &route.Channel, + &route.RecipientRef, + &route.Status, + &route.ResolvedEmail, + &route.ResolvedLocale, + ); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return false + } + require.NoError(t, err) } - require.NoError(t, decodeJSONPayload(payload, &route)) return true }, 10*time.Second, 50*time.Millisecond) @@ -230,14 +244,30 @@ func (h *notificationUserHarness) waitForRoute(t *testing.T, notificationID stri func (h *notificationUserHarness) waitForMalformedIntent(t *testing.T, streamEntryID string) malformedIntentRecord { t.Helper() - key := notificationMalformedIntentKey(streamEntryID) var record malformedIntentRecord require.Eventually(t, func() bool { - payload, err := h.redis.Get(context.Background(), key).Bytes() - if err != nil { - return false + row := h.pg.QueryRowContext(context.Background(), + `SELECT stream_entry_id, notification_type, producer, idempotency_key, + failure_code, failure_message, recorded_at + FROM malformed_intents WHERE stream_entry_id = $1`, + streamEntryID, + ) + var recordedAt time.Time + if err := row.Scan( + &record.StreamEntryID, + &record.NotificationType, + &record.Producer, + &record.IdempotencyKey, + &record.FailureCode, + &record.FailureMessage, + &recordedAt, + ); err != nil { + if 
errors.Is(err, sql.ErrNoRows) { + return false + } + require.NoError(t, err) } - require.NoError(t, decodeStrictJSONPayload(payload, &record)) + record.RecordedAtMS = recordedAt.UTC().UnixMilli() return true }, 10*time.Second, 50*time.Millisecond) @@ -374,12 +404,26 @@ func decodeJSONPayload(payload []byte, target any) error { return nil } -func notificationRouteKey(notificationID string, routeID string) string { - return "notification:routes:" + encodeKeyComponent(notificationID) + ":" + encodeKeyComponent(routeID) +func (h *notificationUserHarness) routeExists(t *testing.T, notificationID string, routeID string) bool { + t.Helper() + var exists bool + err := h.pg.QueryRowContext(context.Background(), + `SELECT EXISTS(SELECT 1 FROM routes WHERE notification_id = $1 AND route_id = $2)`, + notificationID, routeID, + ).Scan(&exists) + require.NoError(t, err) + return exists } -func notificationMalformedIntentKey(streamEntryID string) string { - return "notification:malformed_intents:" + encodeKeyComponent(streamEntryID) +func (h *notificationUserHarness) malformedIntentExists(t *testing.T, streamEntryID string) bool { + t.Helper() + var exists bool + err := h.pg.QueryRowContext(context.Background(), + `SELECT EXISTS(SELECT 1 FROM malformed_intents WHERE stream_entry_id = $1)`, + streamEntryID, + ).Scan(&exists) + require.NoError(t, err) + return exists } func notificationStreamOffsetKey() string { diff --git a/lobby/Makefile b/lobby/Makefile new file mode 100644 index 0000000..c2160b9 --- /dev/null +++ b/lobby/Makefile @@ -0,0 +1,10 @@ +# Makefile for galaxy/lobby. +# +# The `jet` target regenerates the go-jet/v2 query-builder code under +# internal/adapters/postgres/jet/ against a transient PostgreSQL container +# brought up by cmd/jetgen. Generated code is committed. 
+ +.PHONY: jet + +jet: + go run ./cmd/jetgen diff --git a/lobby/README.md b/lobby/README.md index 8970539..c980beb 100644 --- a/lobby/README.md +++ b/lobby/README.md @@ -137,7 +137,16 @@ The service starts two HTTP listeners and one Redis Stream consumer pipeline. ### Startup dependencies -- one reachable Redis deployment at `LOBBY_REDIS_ADDR` +- one reachable Redis deployment at `LOBBY_REDIS_MASTER_ADDR` (mandatory + password via `LOBBY_REDIS_PASSWORD`; replicas optional via + `LOBBY_REDIS_REPLICA_ADDRS`). Used for streams, race-name directory, + per-game runtime aggregates, and stream offsets. +- one reachable PostgreSQL primary at `LOBBY_POSTGRES_PRIMARY_DSN` (DSN + must include `search_path=lobby&sslmode=disable`). Embedded goose + migrations apply at startup before any listener opens; on migration or + ping failure the service exits non-zero. The four core enrollment + entities (game / application / invite / membership) live here after + PG_PLAN.md §6A; `docs/postgres-migration.md` is the decision record. - `User Service` reachable at `LOBBY_USER_SERVICE_BASE_URL` (startup check only; runtime failures are surfaced as request errors, not boot failures) - `Game Master` at `LOBBY_GM_BASE_URL` (same policy — startup check omitted; @@ -147,7 +156,7 @@ The service starts two HTTP listeners and one Redis Stream consumer pipeline. - `GET /healthz` on both ports returns `{"status":"ok"}` - `GET /readyz` on both ports returns `{"status":"ready"}` after successful - startup; no live Redis ping per request + startup; no live Redis or PostgreSQL ping per request ## Game Record Model @@ -576,10 +585,14 @@ Sentinel errors: `ErrNameTaken`, `ErrInvalidName`, `ErrPendingMissing`, ### v1 backends -- **Redis** (`lobby/internal/adapters/redisstate/racenamedir.go`) — the - production adapter using the key layout in §Redis Logical Model. 
+- **PostgreSQL** (`lobby/internal/adapters/postgres/racenamedir/directory.go`) + — the production adapter; one row per binding under + `lobby.race_names`, transactional writes guarded by + `pg_advisory_xact_lock(hashtextextended(canonical_key, 0))`. See + `docs/postgres-migration.md` §6B for the full schema and decision + record. - **Stub** (`lobby/internal/adapters/racenamestub/directory.go`) — in-process - implementation for unit tests that do not need Redis. Chosen by + implementation for unit tests that do not need PostgreSQL. Chosen by `LOBBY_RACE_NAME_DIRECTORY_BACKEND=stub`. A future dedicated `Race Name Service` replaces the adapter without changing @@ -1060,7 +1073,9 @@ Stable error codes: ### Required -- `LOBBY_REDIS_ADDR` +- `LOBBY_REDIS_MASTER_ADDR` +- `LOBBY_REDIS_PASSWORD` +- `LOBBY_POSTGRES_PRIMARY_DSN` - `LOBBY_USER_SERVICE_BASE_URL` - `LOBBY_GM_BASE_URL` @@ -1087,11 +1102,28 @@ Internal HTTP: Redis connectivity: -- `LOBBY_REDIS_USERNAME` -- `LOBBY_REDIS_PASSWORD` -- `LOBBY_REDIS_DB` -- `LOBBY_REDIS_TLS_ENABLED` -- `LOBBY_REDIS_OPERATION_TIMEOUT` with default `2s` +- `LOBBY_REDIS_MASTER_ADDR` (required) +- `LOBBY_REDIS_REPLICA_ADDRS` (optional, comma-separated; not consumed yet) +- `LOBBY_REDIS_PASSWORD` (required) +- `LOBBY_REDIS_DB` (default 0) +- `LOBBY_REDIS_OPERATION_TIMEOUT` (default 250ms) + +The legacy `LOBBY_REDIS_ADDR`, `LOBBY_REDIS_USERNAME`, and +`LOBBY_REDIS_TLS_ENABLED` env vars were retired in PG_PLAN.md §6A; setting +either of the latter two now fails fast at startup. See +`ARCHITECTURE.md §Persistence Backends` for the architectural rules. + +PostgreSQL connectivity (PG_PLAN.md §6A and §6B; durable game / +application / invite / membership records and the Race Name Directory +live here): + +- `LOBBY_POSTGRES_PRIMARY_DSN` (required; + e.g. 
`postgres://lobbyservice:secret@postgres:5432/galaxy?search_path=lobby&sslmode=disable`) +- `LOBBY_POSTGRES_REPLICA_DSNS` (optional, comma-separated; not consumed yet) +- `LOBBY_POSTGRES_OPERATION_TIMEOUT` (default 1s) +- `LOBBY_POSTGRES_MAX_OPEN_CONNS` (default 25) +- `LOBBY_POSTGRES_MAX_IDLE_CONNS` (default 5) +- `LOBBY_POSTGRES_CONN_MAX_LIFETIME` (default 30m) Stream names: @@ -1114,8 +1146,9 @@ Enrollment automation: Race Name Directory: -- `LOBBY_RACE_NAME_DIRECTORY_BACKEND` with default `redis` - (alternate: `stub` for in-process tests) +- `LOBBY_RACE_NAME_DIRECTORY_BACKEND` with default `postgres` + (alternate: `stub` for in-process tests; PG_PLAN.md §6B retired the + `redis` backend) - `LOBBY_RACE_NAME_EXPIRATION_INTERVAL` with default `1h` — pending registration expiration worker tick @@ -1135,39 +1168,35 @@ OpenTelemetry: - `LOBBY_OTEL_STDOUT_TRACES_ENABLED` - `LOBBY_OTEL_STDOUT_METRICS_ENABLED` -## Redis Logical Model +## Persistence Layout -Storage rules: +Game / application / invite / membership records live in PostgreSQL after +PG_PLAN.md §6A; the Race Name Directory followed in §6B. See +`docs/postgres-migration.md` for the schema and decision records. The +`lobby` schema owns five tables — `games`, `applications`, `invites`, +`memberships`, `race_names` — plus the partial UNIQUE index on +`applications(applicant_user_id, game_id) WHERE status <> 'rejected'` that +enforces the single-active-application invariant and the partial UNIQUE +index on `race_names(canonical_key) WHERE binding_kind = 'registered'` +that enforces single-registered-per-canonical. + +The Redis-backed keys below survive both stages. Redis owns the +runtime-coordination state — per-game runtime aggregates, gap activation, +capability-evaluation guards, and stream consumer offsets — plus the +event-bus streams themselves. 
+ +### Redis key table + +Storage rules for Redis: -- durable records are stored as strict JSON blobs - timestamps are stored in Unix milliseconds unless noted otherwise - dynamic key segments are base64url-encoded -### Key table - | Logical artifact | Redis key | | --- | --- | -| game record | `lobby:games:` | -| game index by status | `lobby:games_by_status:` (sorted set; score = created_at) | -| games by owner | `lobby:games_by_owner:` (set of game_ids; populated for private games on Save) | -| application record | `lobby:applications:` | -| applications by game | `lobby:game_applications:` (set of application_ids) | -| applications by user | `lobby:user_applications:` (set of application_ids) | -| active application per (user, game) | `lobby:user_game_application::` → `application_id` | -| invite record | `lobby:invites:` | -| invites by game | `lobby:game_invites:` (set of invite_ids) | -| invites by user (invitee) | `lobby:user_invites:` (set of invite_ids) | -| invites by inviter | `lobby:user_inviter_invites:` (set of invite_ids) | -| membership record | `lobby:memberships:` | -| memberships by game | `lobby:game_memberships:` (set of membership_ids) | -| memberships by user | `lobby:user_memberships:` (set of membership_ids) | -| registered race name | `lobby:race_names:registered:` → JSON `{user_id, race_name, source_game_id, registered_at}` | -| user → registered canonical keys | `lobby:race_names:user_registered:` (set of `canonical_key`) | -| per-game race name reservation | `lobby:race_names:reservations::` → JSON `{user_id, race_name, reserved_at, status ∈ reserved/pending_registration, eligible_until_ms?}` | -| user → reservations index | `lobby:race_names:user_reservations:` (set of `game_id:canonical_key`) | -| pending-registration expiry index | `lobby:race_names:pending_index` (sorted set; score = `eligible_until_ms`) | -| canonical-key lookup cache | `lobby:race_names:canonical_lookup:` → JSON `{kind, holder_user_id, game_id?}` | | per-game 
per-user stats aggregate | `lobby:game_turn_stats::` → JSON aggregate | +| per-game stats user index | `lobby:game_turn_stats_by_game:` (set of `user_id`) | +| capability-evaluation guard | `lobby:capability_evaluation:done:` (sentinel string) | | GM event stream offset | `lobby:stream_offsets:gm_events` | | runtime job result offset | `lobby:stream_offsets:runtime_results` | | user lifecycle stream offset | `lobby:stream_offsets:user_lifecycle` | @@ -1175,12 +1204,18 @@ Storage rules: ### Frozen record fields +The five durable records are stored in PostgreSQL columns; the field set +per record is unchanged from the previous Redis JSON shape and is +documented inline with the migration scripts under +`internal/adapters/postgres/migrations/`. + | Record | Frozen fields | | --- | --- | | game record | all game fields listed in Game Record Model section | | application record | `application_id`, `game_id`, `applicant_user_id`, `race_name`, `status`, `created_at`, `decided_at` | | invite record | `invite_id`, `game_id`, `inviter_user_id`, `invitee_user_id`, `race_name` (set at redeem), `status`, `created_at`, `expires_at`, `decided_at` | | membership record | all membership fields listed in Membership Model section | +| race_names row | `canonical_key`, `game_id`, `holder_user_id`, `race_name`, `binding_kind`, `source_game_id`, `reserved_at_ms`, `eligible_until_ms` (pending only), `registered_at_ms` (registered only) | ## Observability diff --git a/lobby/cmd/jetgen/main.go b/lobby/cmd/jetgen/main.go new file mode 100644 index 0000000..585a07c --- /dev/null +++ b/lobby/cmd/jetgen/main.go @@ -0,0 +1,236 @@ +// Command jetgen regenerates the go-jet/v2 query-builder code under +// galaxy/lobby/internal/adapters/postgres/jet/ against a transient +// PostgreSQL instance. +// +// The program is intended to be invoked as `go run ./cmd/jetgen` (or via the +// `make jet` Makefile target) from within `galaxy/lobby`. It is not part of +// the runtime binary. 
+// +// Steps: +// +// 1. start a postgres:16-alpine container via testcontainers-go +// 2. open it through pkg/postgres as the superuser +// 3. CREATE ROLE lobbyservice and CREATE SCHEMA "lobby" +// AUTHORIZATION lobbyservice +// 4. open a second pool as lobbyservice with search_path=lobby and apply +// the embedded goose migrations +// 5. run jet's PostgreSQL generator against schema=lobby, writing into +// ../internal/adapters/postgres/jet +package main + +import ( + "context" + "errors" + "fmt" + "log" + "net/url" + "os" + "path/filepath" + "runtime" + "time" + + "galaxy/lobby/internal/adapters/postgres/migrations" + "galaxy/postgres" + + jetpostgres "github.com/go-jet/jet/v2/generator/postgres" + testcontainers "github.com/testcontainers/testcontainers-go" + tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres" + "github.com/testcontainers/testcontainers-go/wait" +) + +const ( + postgresImage = "postgres:16-alpine" + superuserName = "galaxy" + superuserPassword = "galaxy" + superuserDatabase = "galaxy_lobby" + serviceRole = "lobbyservice" + servicePassword = "lobbyservice" + serviceSchema = "lobby" + containerStartup = 90 * time.Second + defaultOpTimeout = 10 * time.Second + jetOutputDirSuffix = "internal/adapters/postgres/jet" +) + +func main() { + if err := run(context.Background()); err != nil { + log.Fatalf("jetgen: %v", err) + } +} + +func run(ctx context.Context) error { + outputDir, err := jetOutputDir() + if err != nil { + return err + } + + container, err := tcpostgres.Run(ctx, postgresImage, + tcpostgres.WithDatabase(superuserDatabase), + tcpostgres.WithUsername(superuserName), + tcpostgres.WithPassword(superuserPassword), + testcontainers.WithWaitStrategy( + wait.ForLog("database system is ready to accept connections"). + WithOccurrence(2). 
+ WithStartupTimeout(containerStartup), + ), + ) + if err != nil { + return fmt.Errorf("start postgres container: %w", err) + } + defer func() { + if termErr := testcontainers.TerminateContainer(container); termErr != nil { + log.Printf("jetgen: terminate container: %v", termErr) + } + }() + + baseDSN, err := container.ConnectionString(ctx, "sslmode=disable") + if err != nil { + return fmt.Errorf("resolve container dsn: %w", err) + } + + if err := provisionRoleAndSchema(ctx, baseDSN); err != nil { + return err + } + + scopedDSN, err := dsnForServiceRole(baseDSN) + if err != nil { + return err + } + if err := applyMigrations(ctx, scopedDSN); err != nil { + return err + } + + if err := os.RemoveAll(outputDir); err != nil { + return fmt.Errorf("remove existing jet output %q: %w", outputDir, err) + } + if err := os.MkdirAll(filepath.Dir(outputDir), 0o755); err != nil { + return fmt.Errorf("ensure jet output parent: %w", err) + } + + jetCfg := postgres.DefaultConfig() + jetCfg.PrimaryDSN = scopedDSN + jetCfg.OperationTimeout = defaultOpTimeout + jetDB, err := postgres.OpenPrimary(ctx, jetCfg) + if err != nil { + return fmt.Errorf("open scoped pool for jet generation: %w", err) + } + defer func() { _ = jetDB.Close() }() + + if err := jetpostgres.GenerateDB(jetDB, serviceSchema, outputDir); err != nil { + return fmt.Errorf("jet generate: %w", err) + } + + log.Printf("jetgen: generated jet code into %s (schema=%s)", outputDir, serviceSchema) + return nil +} + +func provisionRoleAndSchema(ctx context.Context, baseDSN string) error { + cfg := postgres.DefaultConfig() + cfg.PrimaryDSN = baseDSN + cfg.OperationTimeout = defaultOpTimeout + db, err := postgres.OpenPrimary(ctx, cfg) + if err != nil { + return fmt.Errorf("open admin pool: %w", err) + } + defer func() { _ = db.Close() }() + + statements := []string{ + fmt.Sprintf(`DO $$ BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = %s) THEN + CREATE ROLE %s LOGIN PASSWORD %s; + END IF; + END $$;`, 
sqlLiteral(serviceRole), sqlIdentifier(serviceRole), sqlLiteral(servicePassword)), + fmt.Sprintf(`CREATE SCHEMA IF NOT EXISTS %s AUTHORIZATION %s;`, + sqlIdentifier(serviceSchema), sqlIdentifier(serviceRole)), + fmt.Sprintf(`GRANT USAGE ON SCHEMA %s TO %s;`, + sqlIdentifier(serviceSchema), sqlIdentifier(serviceRole)), + } + for _, statement := range statements { + if _, err := db.ExecContext(ctx, statement); err != nil { + return fmt.Errorf("provision %q/%q: %w", serviceSchema, serviceRole, err) + } + } + return nil +} + +func dsnForServiceRole(baseDSN string) (string, error) { + parsed, err := url.Parse(baseDSN) + if err != nil { + return "", fmt.Errorf("parse base dsn: %w", err) + } + values := url.Values{} + values.Set("search_path", serviceSchema) + values.Set("sslmode", "disable") + scoped := url.URL{ + Scheme: parsed.Scheme, + User: url.UserPassword(serviceRole, servicePassword), + Host: parsed.Host, + Path: parsed.Path, + RawQuery: values.Encode(), + } + return scoped.String(), nil +} + +func applyMigrations(ctx context.Context, dsn string) error { + cfg := postgres.DefaultConfig() + cfg.PrimaryDSN = dsn + cfg.OperationTimeout = defaultOpTimeout + db, err := postgres.OpenPrimary(ctx, cfg) + if err != nil { + return fmt.Errorf("open scoped pool: %w", err) + } + defer func() { _ = db.Close() }() + + if err := postgres.Ping(ctx, db, defaultOpTimeout); err != nil { + return err + } + if err := postgres.RunMigrations(ctx, db, migrations.FS(), "."); err != nil { + return fmt.Errorf("run migrations: %w", err) + } + return nil +} + +// jetOutputDir returns the absolute path that jet should write into. We rely +// on the runtime caller info to anchor it to galaxy/lobby regardless of the +// invoking working directory. 
+func jetOutputDir() (string, error) { + _, file, _, ok := runtime.Caller(0) + if !ok { + return "", errors.New("resolve runtime caller for jet output path") + } + dir := filepath.Dir(file) + // dir = .../galaxy/lobby/cmd/jetgen + moduleRoot := filepath.Clean(filepath.Join(dir, "..", "..")) + return filepath.Join(moduleRoot, jetOutputDirSuffix), nil +} + +func sqlIdentifier(name string) string { + return `"` + escapeDoubleQuotes(name) + `"` +} + +func sqlLiteral(value string) string { + return "'" + escapeSingleQuotes(value) + "'" +} + +func escapeDoubleQuotes(value string) string { + out := make([]byte, 0, len(value)) + for index := 0; index < len(value); index++ { + if value[index] == '"' { + out = append(out, '"', '"') + continue + } + out = append(out, value[index]) + } + return string(out) +} + +func escapeSingleQuotes(value string) string { + out := make([]byte, 0, len(value)) + for index := 0; index < len(value); index++ { + if value[index] == '\'' { + out = append(out, '\'', '\'') + continue + } + out = append(out, value[index]) + } + return string(out) +} diff --git a/lobby/docs/examples.md b/lobby/docs/examples.md index 532d9f4..f75d03d 100644 --- a/lobby/docs/examples.md +++ b/lobby/docs/examples.md @@ -6,10 +6,14 @@ and timestamps with values that match the deployment under inspection. ## Example `.env` A minimum-viable `LOBBY_*` set for a local run against a single Redis -container. The full list with defaults lives in `../README.md` §Configuration. +container plus a PostgreSQL container with the `lobby` schema and the +`lobbyservice` role provisioned. The full list with defaults lives in +`../README.md` §Configuration. 
```bash -LOBBY_REDIS_ADDR=127.0.0.1:6379 +LOBBY_REDIS_MASTER_ADDR=127.0.0.1:6379 +LOBBY_REDIS_PASSWORD=local +LOBBY_POSTGRES_PRIMARY_DSN=postgres://lobbyservice:lobbyservice@127.0.0.1:5432/galaxy?search_path=lobby&sslmode=disable LOBBY_USER_SERVICE_BASE_URL=http://127.0.0.1:8083 LOBBY_GM_BASE_URL=http://127.0.0.1:8096 @@ -19,7 +23,7 @@ LOBBY_INTERNAL_HTTP_ADDR=:8095 LOBBY_LOG_LEVEL=info LOBBY_SHUTDOWN_TIMEOUT=30s -LOBBY_RACE_NAME_DIRECTORY_BACKEND=redis +LOBBY_RACE_NAME_DIRECTORY_BACKEND=postgres LOBBY_ENROLLMENT_AUTOMATION_INTERVAL=30s LOBBY_RACE_NAME_EXPIRATION_INTERVAL=1h @@ -115,16 +119,36 @@ curl -s http://localhost:8095/api/v1/internal/games/game-01HZ... curl -s http://localhost:8095/api/v1/internal/games/game-01HZ.../memberships ``` -## Redis Examples +## Storage Inspection Examples -### Inspect a game record +### Inspect a game record (PostgreSQL) ```bash -redis-cli GET lobby:games:game-01HZ... +psql "$LOBBY_POSTGRES_PRIMARY_DSN" -c \ + "SELECT * FROM lobby.games WHERE game_id = 'game-01HZ...'" ``` -The value is a strict JSON blob with the fields documented in -`../README.md` §Game Record Model. +The columns mirror the fields documented in `../README.md` §Game Record Model. 
+ +### Inspect open enrollment games (sorted by created_at) + +```bash +psql "$LOBBY_POSTGRES_PRIMARY_DSN" -c \ + "SELECT game_id, game_name, created_at FROM lobby.games + WHERE status = 'enrollment_open' + ORDER BY created_at DESC" +``` + +### Inspect a Race Name Directory binding + +```bash +psql "$LOBBY_POSTGRES_PRIMARY_DSN" -c \ + "SELECT canonical_key, game_id, holder_user_id, race_name, binding_kind, + source_game_id, eligible_until_ms, registered_at_ms + FROM lobby.race_names WHERE race_name = 'Aurora'" +``` + +## Redis Examples ### Publish a runtime job result (Runtime Manager simulation) @@ -162,12 +186,6 @@ redis-cli XADD gm:lobby_events '*' \ finished_at_ms 1714123456789 ``` -### Inspect open enrollment games (sorted by created_at) - -```bash -redis-cli ZRANGE lobby:games_by_status:enrollment_open 0 -1 WITHSCORES -``` - ## Notification Intent Format Lobby produces every notification through `pkg/notificationintent` and diff --git a/lobby/docs/postgres-migration.md b/lobby/docs/postgres-migration.md new file mode 100644 index 0000000..de7f70c --- /dev/null +++ b/lobby/docs/postgres-migration.md @@ -0,0 +1,386 @@ +# PostgreSQL Migration + +PG_PLAN.md §6A migrated the four core enrollment entities of Game Lobby +Service — `Game`, `Application`, `Invite`, `Membership` — from Redis-only +durable storage to the steady-state Redis + PostgreSQL split codified in +`ARCHITECTURE.md §Persistence Backends`. PG_PLAN.md §6B then moved the +Race Name Directory onto PostgreSQL, retiring the Redis Lua scripts and +canonical-lookup cache that backed it. PG_PLAN.md §6C confirmed which +runtime-coordination state intentionally stays on Redis (per-game +`game_turn_stats`, `gap_activated_at`, `capability_evaluation:done:*`, +`stream_offsets:*`, plus the event-bus streams themselves) and pruned the +remaining redisstate keyspace. + +This document records the schema decisions and the non-obvious agreements +behind them. 
Use it together with the migration scripts under
+`internal/adapters/postgres/migrations/` and the runtime wiring
+(`internal/app/runtime.go`).
+
+## Outcomes
+
+- Schema `lobby` (provisioned externally) holds four tables: `games`,
+  `applications`, `invites`, `memberships`. A partial UNIQUE index on
+  `applications(applicant_user_id, game_id) WHERE status <> 'rejected'`
+  enforces the single-active-application constraint at the database
+  level.
+- The runtime opens one PostgreSQL pool via `pkg/postgres.OpenPrimary`,
+  applies embedded goose migrations strictly before any HTTP listener
+  becomes ready, and exits non-zero when migration or ping fails.
+- The runtime opens one shared `*redis.Client` via
+  `pkg/redisconn.NewMasterClient` and passes it to the Race Name
+  Directory adapter, the per-game stats / gap-activation /
+  evaluation-guard / stream-offset stores, the consumer pipelines, and
+  the notification-intent publisher.
+- The Redis adapter package (`internal/adapters/redisstate/`) keeps the
+  surviving stores (`racenamedir`, `gameturnstatsstore`,
+  `gapactivationstore`, `evaluationguardstore`, `streamoffsetstore`,
+  `streamlagprobe`) and the keyspace methods that back them; the
+  game/application/invite/membership stores, codecs, tests, and
+  per-record TTL constants are gone.
+- Configuration drops `LOBBY_REDIS_ADDR`, `LOBBY_REDIS_USERNAME`,
+  `LOBBY_REDIS_TLS_ENABLED` and introduces `LOBBY_REDIS_MASTER_ADDR`,
+  `LOBBY_REDIS_REPLICA_ADDRS`, `LOBBY_REDIS_PASSWORD`,
+  `LOBBY_POSTGRES_PRIMARY_DSN`, `LOBBY_POSTGRES_REPLICA_DSNS`, plus
+  the standard `LOBBY_POSTGRES_*` pool tuning knobs. Setting the
+  retired `LOBBY_REDIS_USERNAME` or `LOBBY_REDIS_TLS_ENABLED` env var
+  now fails fast at startup via the shared
+  `pkg/redisconn.LoadFromEnv` rejection path.
+
+## Decisions
+
+### 1.
One schema, externally-provisioned role + +**Decision.** The `lobby` schema and the matching `lobbyservice` role +are created outside the migration sequence (in tests, by +`integration/internal/harness/postgres_container.go::EnsureRoleAndSchema`; +in production, by an ops init script not in scope for this stage). The +embedded migration `00001_init.sql` only contains DDL for tables and +indexes and assumes it runs as the schema owner with +`search_path=lobby`. + +**Why.** Mirrors the precedent set by Notification Stage 5 and Mail +Stage 4 and matches the schema-per-service architectural rule +(`ARCHITECTURE.md §Persistence Backends`). Mixing role + schema + table +DDL into one script would force every consumer of the migration to run +as a superuser; splitting them lines up with the operational split +(ops provisions roles and schemas, the service applies schema-scoped +migrations). + +### 2. Single-active application = partial UNIQUE on `applications` + +**Decision.** `applications` carries a partial UNIQUE index on +`(applicant_user_id, game_id) WHERE status <> 'rejected'`. INSERT +attempts that violate the constraint are surfaced to the service layer +as `application.ErrConflict` via the shared +`sqlx.IsUniqueViolation` helper. + +**Why.** Replaces the Redis lookup key `lobby:user_game_application:*:*` +with a deterministic database-level invariant. Multiple `rejected` +rows are intentionally allowed (one applicant may submit, get rejected, +and resubmit), and the UNIQUE only fires on the second simultaneous +submitted/approved row for the same `(user, game)`. The constraint is +race-safe: under concurrent submission attempts one INSERT wins, the +others fail with conflict. + +### 3. Public games carry an empty `owner_user_id`; partial index excludes them + +**Decision.** `games.owner_user_id` is `text NOT NULL DEFAULT ''`, and +the secondary `games_owner_idx` is partial: `WHERE game_type = 'private'`. 
+Public games (admin-owned) carry an empty owner string and are excluded +from the index entirely. + +**Why.** Mirrors the previous Redis behaviour where `games_by_owner:*` +sets were created only for private games. The partial index keeps the +owner lookup tight (only private-game rows participate) while letting +the column stay non-nullable and consistent with the domain model. + +### 4. JSONB columns for runtime snapshot and runtime binding + +**Decision.** `games.runtime_snapshot` is `jsonb NOT NULL DEFAULT +'{}'::jsonb`; `games.runtime_binding` is `jsonb NULL`. The JSON shapes +used inside both columns are stable and live in +`internal/adapters/postgres/gamestore/codecs.go`. `runtime_binding` +binds NULL when the domain pointer is nil, otherwise an object with +`container_id`, `engine_endpoint`, `runtime_job_id`, `bound_at_ms` +fields. + +**Why.** Both fields are opaque to queries — Lobby never element-filters +on their internals. JSONB matches the "everything outside primary +fields is JSON" pattern Notification Stage 5 already established and +allows a future GIN index without a schema rewrite. The `bound_at_ms` +field inside the binding stays in Unix milliseconds so the encoded +payload is naked-comparable across Redis and PostgreSQL audits during +the transition window. + +### 5. Optimistic concurrency via current-status compare-and-swap + +**Decision.** `UpdateStatus` on every store is implemented as `UPDATE … +WHERE id = $X AND status = $expected`. A zero-rows result is +disambiguated with a follow-up `SELECT status` probe — missing rows map +to the per-domain `ErrNotFound`, mismatches map to `ErrConflict`. +Snapshot/binding overrides on `games` use the same pattern but only +guard on the primary key (no expected-status gate). + +**Why.** Mirrors the previous Redis WATCH/TxPipelined behaviour without +holding a `SELECT … FOR UPDATE` lock across application logic. 
The
+compare-and-swap is local to one statement, never spans more than one
+network round trip, and produces the same observable error semantics
+the service layer already depends on.
+
+### 6. Memberships store `race_name` and `canonical_key` side by side
+
+**Decision.** `memberships` carries both `race_name` (original casing)
+and `canonical_key` (policy-derived form) as separate `text NOT NULL`
+columns. There is no UNIQUE constraint on `canonical_key`.
+
+**Why.** Downstream consumers — capability evaluation and the
+user-lifecycle cascade — read the canonical form directly without
+re-deriving it from `race_name`, which is the same arrangement the
+Redis JSON record had. Race-name uniqueness across the platform
+remains the responsibility of the Race Name Directory; enforcing a
+UNIQUE on memberships' canonical_key now would duplicate the RND
+invariant and create deadlock potential between the two stores.
+
+### 7. ON DELETE CASCADE from games to children
+
+**Decision.** Each child table (`applications`, `invites`,
+`memberships`) declares its `game_id` as `REFERENCES games(game_id) ON
+DELETE CASCADE`.
+
+**Why.** Lobby code never deletes games today — every status terminal
+is a soft state — so the cascade has no live trigger. It exists for
+two future paths: scheduled cleanup of `cancelled` games far past
+retention, and explicit operator/test resets. CASCADE keeps those paths
+trivial and free of dangling references.
+
+### 8. Listing order: most-recent-first for games, oldest-first for child tables
+
+**Decision.** `GetByStatus` and `GetByOwner` on `games` order by
+`created_at DESC, game_id DESC`. The per-game/per-user listings on
+`applications`, `invites`, `memberships` order by `created_at ASC,
+<entity>_id ASC` (memberships order by `joined_at ASC`).
+
+**Why.** Game listings serve user-facing feeds where most-recent-first
+is the natural expectation, matching the previous Redis sorted-set
+score and the `accounts.created_at DESC` convention from User Stage 3.
+Child-table listings serve administrative and cascade flows where the
+chronological order helps operators reason about the sequence of
+events. The ports doc explicitly says "order is adapter-defined", so
+either convention is contract-compatible.
+
+### 9. Heavy `runtime_test.go` / `runtime_smoke_test.go` deleted; integration coverage
+
+**Decision.** The service-local `internal/app/runtime_test.go` and
+`runtime_smoke_test.go` were removed. Black-box runtime coverage moves
+to the `integration/lobbyuser` and `integration/lobbynotification`
+suites, which now spin up both a PostgreSQL container (via
+`harness.StartLobbyServicePersistence`) and the existing Redis
+container.
+
+**Why.** Mirrors the Mail Stage 4 / Notification Stage 5 precedent.
+Booting a full Lobby runtime now requires both PostgreSQL and Redis,
+which is the integration-suite shape; duplicating that bootstrap
+inside `internal/app/` would be heavy and fragile. The remaining
+service-local tests cover units that do not require the full runtime.
+
+### 10. Query layer is `go-jet/jet/v2`
+
+**Decision.** All four PG-store packages build SQL through the jet
+builder API (`pgtable.<Table>.INSERT/SELECT/UPDATE/DELETE` plus the
+`pg.AND/OR/SET/COALESCE/...` DSL). Generated table models live under
+`internal/adapters/postgres/jet/lobby/{model,table}/` and are
+regenerated by `make jet` (which spins up a transient PostgreSQL via
+testcontainers, applies the embedded goose migrations, and runs jet's
+generator). Generated code is committed.
+
+**Why.** Aligns with `PG_PLAN.md` §Library stack ("Query layer:
+`github.com/go-jet/jet/v2` (PostgreSQL dialect). Generated code lives
+under each service `internal/adapters/postgres/jet/`, regenerated via
+a `make jet` target and committed to the repo").
PostgreSQL constructs +that the jet builder does not cover natively (`FOR UPDATE`, +`COALESCE`, `LOWER` on subselects, JSONB params) are expressed through +the per-DSL helpers (`.FOR(pg.UPDATE())`, `pg.COALESCE`, `pg.LOWER`, +direct `[]byte`/string params for JSONB columns). Manual `rowScanner` +helpers (`scanGame`, `scanApplication`, `scanInvite`, +`scanMembership`) preserve the codecs.go boundary translations and +domain-type mapping; jet only owns SQL construction. + +## Out of scope for §6A + +- Read routing through `LOBBY_POSTGRES_REPLICA_DSNS` — config exposes + the field, runtime ignores it. +- Production provisioning of the `lobby` schema and `lobbyservice` + role — operational concern handled outside the service binary. + +## §6B — Race Name Directory on PostgreSQL + +§6B replaces the Redis-backed Race Name Directory (one Lua script + a +canonical-lookup cache + a pending-index ZSET + per-binding string keys) +with a single PostgreSQL table `race_names` whose rows back all three +binding kinds (`registered`, `reservation`, `pending_registration`). +The `race_names` DDL lives in `00001_init.sql` next to the four core +enrollment tables (it was originally introduced as a separate +`00002_race_names.sql`; PG_PLAN.md §9 collapsed the two files into one +init migration during the pre-launch development window). The adapter +`internal/adapters/postgres/racenamedir/directory.go` is the canonical +reference; the architecture rule is unchanged from §6A. + +### 11. One table, composite primary key `(canonical_key, game_id)` + +**Decision.** `race_names` carries one row per binding under the +composite primary key `(canonical_key, game_id)`. Reservations and +pending_registrations write the actual game id; registered rows write +`game_id = ''` and keep the source game in `source_game_id`. A partial +UNIQUE index on `(canonical_key)` filtered to `binding_kind = +'registered'` enforces the single-registered-per-canonical rule. 
+ +**Why.** PG_PLAN.md §6B sketched the table as `(canonical_key PK, …)`, +but the existing port semantics (`testReserveCrossGame`, +`testReleaseReservationKeepsCrossGame` in +`internal/ports/racenamedirtest/suite.go`) require the same user to hold +several per-game reservations on one canonical key concurrently. A flat +single-PK table cannot model that without losing the per-game +identity. The composite PK matches both invariants — at most one row per +(canonical, game) and at most one registered row per canonical — without +splitting the data into two tables (which would force every write +operation to touch two unrelated indexes and reproduce the old +canonical-lookup cache invariant manually). + +### 12. Concurrency: PostgreSQL transactional advisory locks + +**Decision.** Every write operation (`Reserve`, `MarkPendingRegistration`, +`Register`, `ReleaseReservation`, the per-row branch of +`ExpirePendingRegistrations`) opens a `BEGIN; …; COMMIT` and acquires +`pg_advisory_xact_lock(hashtextextended($canonical_key, 0))` as the very +first statement. The lock auto-releases on commit or rollback. +`ReleaseAllByUser` is a single `DELETE WHERE holder_user_id = $1` and +takes no advisory lock — it runs on permanent_blocked / deleted +lifecycle events, so the user being deleted cannot be a concurrent +writer on those bindings. + +**Why.** PG_PLAN.md §6B explicitly authorised either `SELECT … FOR +UPDATE` or advisory locks. `SELECT … FOR UPDATE` cannot serialize +against not-yet-existing rows (e.g. concurrent first-time `Reserve`s for +the same canonical), so advisory locks are required for race-free +INSERTs. Hashing through `hashtextextended` produces a 64-bit lock key +covering arbitrary canonical strings, sidestepping `bigint` truncation +that older `hashtext` exposes. Holding the lock for one transaction +keeps the contention surface tight and matches the Notification §5 +"narrow CAS, no application-logic-bound row locks" precedent. + +### 13. 
`binding_kind` values match `ports.Kind*` verbatim + +**Decision.** `race_names.binding_kind` stores `"registered"`, +`"reservation"`, or `"pending_registration"` — the same string literals +exported by `ports.KindRegistered`, `ports.KindReservation`, +`ports.KindPendingRegistration`. The adapter returns the raw value +directly through `Availability.Kind` without translation. A `CHECK` +constraint on the column rejects anything else. + +**Why.** Avoids one boundary translation and one synonym ("reserved" vs +"reservation") that the Redis adapter carried internally as +`reservationStatusReserved = "reserved"`. With the port-equivalent +literals on disk, future operator-side queries (`SELECT … WHERE +binding_kind = 'reservation'`) match the Go-level constants 1:1, and +the adapter saves a `switch` per `Check` call. + +### 14. `Check` returns the strongest binding via in-process priority + +**Decision.** `Check` issues `SELECT holder_user_id, binding_kind FROM +race_names WHERE canonical_key = $1` and picks the strongest binding in +Go using a priority rank `registered > pending_registration > +reservation`. There is no SQL `CASE` expression in the ORDER BY. + +**Why.** The dataset per canonical is bounded (at most one registered + +one row per active game) and is read frequently by every `Check`. The +Go-side rank avoids a SQL DSL detour that go-jet/v2 would express via +raw SQL anyway, and it keeps the query plan a single index scan on +`canonical_key`. + +### 15. `ExpirePendingRegistrations` scans then locks per row + +**Decision.** The expirer first runs an indexed scan +`WHERE binding_kind = 'pending_registration' AND eligible_until_ms <= +$cutoff` (served by `race_names_pending_eligible_idx`), then re-reads +each candidate inside its own advisory-locked transaction, asserts the +binding is still pending and still expired, and DELETEs it. Concurrent +`Register` or `ReleaseReservation` simply causes the per-row branch to +skip without error. 
+ +**Why.** Mirrors the Redis adapter's two-phase `ZRANGEBYSCORE` + per- +member release loop. A bulk `DELETE … WHERE eligible_until_ms <= …` +would not produce the per-entry `ports.ExpiredPending` slice the worker +needs for telemetry, and would race with `Register` (which targets the +same row). + +### 16. Shared port test suite stays on PostgreSQL via a serial harness + +**Decision.** The shared `racenamedirtest` suite no longer calls +`t.Parallel()` from its subtests. Every subtest goes through the +factory, the factory truncates the lobby tables and constructs a fresh +adapter against the package-shared testcontainers PostgreSQL. + +**Why.** The PostgreSQL adapter relies on `pgtest.TruncateAll` between +factory invocations; running subtests in parallel against one shared +container would race truncate against other subtests' INSERTs. Spinning +up a per-subtest schema would multiply container provisioning cost +significantly (PG generation step alone takes minutes per fresh +container), and the suite is fast enough serially. The Redis-only +backend retired in §6B no longer needs the parallelism either; only the +in-process stub remains in scope and has trivial setup cost. + +## §6C — Workers, ephemeral stores, cleanup + +§6C closes the Lobby migration: it confirms what intentionally stays on +Redis, prunes the dead Redis adapter code, and finalises the +service-layer documentation. + +### 17. Workers stayed on ports — no functional change + +**Decision.** The four Lobby workers (`pendingregistration`, +`gmevents`, `runtimejobresult`, `userlifecycle`) and the +`enrollmentautomation` worker shipped in §6A already consume their +storage through ports. After §6B the `RaceNameDirectory` port resolves +to the PostgreSQL adapter; no worker required code changes. + +**Why.** §6A established the port-on-storage seam for `GameStore`, +`ApplicationStore`, `InviteStore`, `MembershipStore`. §6B kept the same +contract for `RaceNameDirectory`. 
Worker logic depends on the contract, +not the backend, so the migration completes via a wiring switch in +`internal/app/wiring.go::buildRaceNameDirectory` without re-touching +worker code. + +### 18. `redisstate` retains only runtime-coordination adapters + +**Decision.** After §6C the `internal/adapters/redisstate/` package +implements only `GameTurnStatsStore`, `GapActivationStore`, +`EvaluationGuardStore`, `StreamOffsetStore`, and the `StreamLagProbe`. +The legacy `racenamedir.go`, `racenamedir_lua.go`, +`racenamedir_test.go`, `codecs_racename.go`, and the dead game +codecs (`codecs.go`'s `MarshalGame`/`UnmarshalGame`) are removed. The +`Keyspace` type only builds keys for the surviving adapters +(`GapActivatedAt`, `StreamOffset`, `GameTurnStat`, +`GameTurnStatsByGame`, `CapabilityEvaluationGuard`). + +**Why.** Architectural rule (`ARCHITECTURE.md §Persistence Backends`): +Redis owns runtime-coordination state, PostgreSQL owns durable business +state. The retained Redis stores back ephemeral per-game aggregates +(`game_turn_stats`), short-lived sentinels (`gap_activated_at`, +`capability_evaluation:done:*`), and the consumer-offset coordination +state (`stream_offsets:*`) — all rebuildable or losable without +durability impact. Streams stay on Redis because they *are* the event +bus. + +### 19. Default Race Name Directory backend is `postgres` + +**Decision.** `LOBBY_RACE_NAME_DIRECTORY_BACKEND` defaults to +`"postgres"`. The accepted values are `postgres` (production) and +`stub` (in-process for unit tests that do not need a real PostgreSQL). +The `redis` value, the corresponding `RaceNameDirectoryBackendRedis` +constant, and the wiring branch are removed. + +**Why.** The Redis adapter is gone; keeping the value in the validator +would produce a misleading "configuration accepted, but startup fails +when wiring resolves the directory" path. 
Leaving `stub` as a valid +backend lets per-service unit tests run against a small, fast +in-process directory; integration suites use `postgres` via the +testcontainers harness. diff --git a/lobby/docs/runbook.md b/lobby/docs/runbook.md index d9764f9..9161005 100644 --- a/lobby/docs/runbook.md +++ b/lobby/docs/runbook.md @@ -7,8 +7,23 @@ readiness, shutdown, and the handful of recovery paths specific to Lobby. Before starting the process, confirm: -- `LOBBY_REDIS_ADDR` points to the Redis deployment used for state and the - five Lobby-related streams. +- `LOBBY_REDIS_MASTER_ADDR` and `LOBBY_REDIS_PASSWORD` point to the Redis + deployment used for the runtime-coordination state that intentionally + stays on Redis: stream consumers/publishers, stream offsets, per-game + turn-stats aggregates, gap-activation timestamps, and the + capability-evaluation guard. The deprecated `LOBBY_REDIS_ADDR`, + `LOBBY_REDIS_USERNAME`, and `LOBBY_REDIS_TLS_ENABLED` env vars were + retired in PG_PLAN.md §6A; setting either of the latter two now fails + fast at startup. +- `LOBBY_POSTGRES_PRIMARY_DSN` points to the PostgreSQL primary that + hosts the `lobby` schema. The DSN must include `search_path=lobby` and + `sslmode=disable`. Embedded goose migrations apply at startup before + any HTTP listener opens; a migration or ping failure terminates the + process with a non-zero exit. After PG_PLAN.md §6A the schema holds + `games`, `applications`, `invites`, `memberships`; after §6B it also + holds `race_names`. The schema and the `lobbyservice` role are + provisioned externally (operator init script in production, the + testcontainers harness in tests). - `LOBBY_USER_SERVICE_BASE_URL` and `LOBBY_GM_BASE_URL` are reachable from the network the Lobby pods run in. Lobby does not ping these at boot, but transport failures against them will surface as request errors. 
@@ -19,11 +34,13 @@ Before starting the process, confirm: - `LOBBY_RUNTIME_JOB_RESULTS_STREAM` (default `runtime:job_results`) - `LOBBY_USER_LIFECYCLE_STREAM` (default `user:lifecycle_events`) - `LOBBY_NOTIFICATION_INTENTS_STREAM` (default `notification:intents`) -- `LOBBY_RACE_NAME_DIRECTORY_BACKEND` is `redis` for production; the - `stub` value is only for unit tests. +- `LOBBY_RACE_NAME_DIRECTORY_BACKEND` is `postgres` for production + (the default after PG_PLAN.md §6B); the `stub` value is only for + unit tests that do not need a real PostgreSQL. -At startup the process performs a bounded `PING` against Redis. Startup -fails fast if the ping fails. There are no liveness checks against User +At startup the process opens the PostgreSQL pool, applies migrations, +pings PostgreSQL, then opens the Redis client and pings Redis. Startup +fails fast if any step fails. There are no liveness checks against User Service or Game Master at boot; those are surfaced at request time. Expected listener state after a healthy start: @@ -160,11 +177,15 @@ is reachable again. To inspect the backlog: ```bash -redis-cli ZRANGE lobby:race_names:pending_index 0 -1 WITHSCORES +psql -c "SELECT canonical_key, game_id, holder_user_id, eligible_until_ms + FROM lobby.race_names + WHERE binding_kind = 'pending_registration' + ORDER BY eligible_until_ms ASC" ``` -Entries with `score < now()` (Unix milliseconds) are expirable on the next -tick. +Rows whose `eligible_until_ms` is at or below `extract(epoch from now()) * 1000` +are expirable on the next tick. The partial index +`race_names_pending_eligible_idx` keeps this scan cheap. ## Cascade Release Operator Notes @@ -195,26 +216,34 @@ out-of-band. ## Diagnostic Queries -A handful of Redis CLI snippets help during incidents: +Durable enrollment state and Race Name Directory bindings live in +PostgreSQL; runtime coordination state stays in Redis. 
A handful of CLI
+snippets help during incidents:
 
 ```bash
-# Live game count by status
-redis-cli ZCARD lobby:games_by_status:enrollment_open
-redis-cli ZCARD lobby:games_by_status:running
+# Live game count by status (PostgreSQL)
+psql -c "SELECT status, COUNT(*) FROM lobby.games GROUP BY status"
 
 # Inspect a specific game record
-redis-cli GET lobby:games:<game-id>
+psql -c "SELECT * FROM lobby.games WHERE game_id = '<game-id>'"
 
 # Member roster for a game
-redis-cli SMEMBERS lobby:game_memberships:<game-id>
+psql -c "SELECT user_id, race_name, status, joined_at
+   FROM lobby.memberships
+   WHERE game_id = '<game-id>'
+   ORDER BY joined_at"
 
 # Race name pending entries (oldest first)
-redis-cli ZRANGE lobby:race_names:pending_index 0 -1 WITHSCORES
+psql -c "SELECT canonical_key, game_id, holder_user_id, eligible_until_ms
+   FROM lobby.race_names
+   WHERE binding_kind = 'pending_registration'
+   ORDER BY eligible_until_ms ASC"
 
-# Stream lag inspection
+# Stream lag inspection (Redis)
 redis-cli XINFO STREAM gm:lobby_events
 redis-cli GET lobby:stream_offsets:gm_events
 ```
 
 The gauges and counters surfaced through OpenTelemetry are the primary
-observability surface; raw Redis access is for last-resort triage.
+observability surface; raw PostgreSQL and Redis access is for last-resort
+triage.
diff --git a/lobby/docs/runtime.md b/lobby/docs/runtime.md
index 69687fb..4f41539 100644
--- a/lobby/docs/runtime.md
+++ b/lobby/docs/runtime.md
@@ -56,9 +56,10 @@ flowchart LR
 Notes:
 
-- `cmd/lobby` refuses startup when Redis connectivity is misconfigured. User
-  Service and Game Master reachability are not verified at boot; transport
-  failures surface as request errors.
+- `cmd/lobby` refuses startup when Redis connectivity is misconfigured, when
+  PostgreSQL is unreachable, or when the embedded goose migrations fail to
+  apply. User Service and Game Master reachability are not verified at boot;
+  transport failures surface as request errors.
- Both HTTP listeners expose `/healthz` and `/readyz` independently so health checks can target either port. - `register-runtime` is an outgoing call from Lobby to Game Master after the @@ -85,7 +86,7 @@ Probe routes: - `GET /healthz` returns `{"status":"ok"}` - `GET /readyz` returns `{"status":"ready"}` once startup wiring completes. -- Neither probe performs a live Redis ping per request. +- Neither probe performs a live Redis or PostgreSQL ping per request. - There is no `/metrics` route. Metrics flow through OpenTelemetry exporters. ## Background Workers @@ -130,13 +131,20 @@ lags or stalls, the gauge climbs and stays high. The full env-var list with defaults lives in `../README.md` §Configuration. The groups below summarize the structure: -- **Required** — `LOBBY_REDIS_ADDR`, `LOBBY_USER_SERVICE_BASE_URL`, +- **Required** — `LOBBY_REDIS_MASTER_ADDR`, `LOBBY_REDIS_PASSWORD`, + `LOBBY_POSTGRES_PRIMARY_DSN`, `LOBBY_USER_SERVICE_BASE_URL`, `LOBBY_GM_BASE_URL`. - **Process and logging** — `LOBBY_SHUTDOWN_TIMEOUT`, `LOBBY_LOG_LEVEL`. - **HTTP listeners** — `LOBBY_PUBLIC_HTTP_*`, `LOBBY_INTERNAL_HTTP_*`. -- **Redis connectivity** — `LOBBY_REDIS_USERNAME`, `LOBBY_REDIS_PASSWORD`, - `LOBBY_REDIS_DB`, `LOBBY_REDIS_TLS_ENABLED`, - `LOBBY_REDIS_OPERATION_TIMEOUT`. +- **Redis connectivity** — `LOBBY_REDIS_MASTER_ADDR`, + `LOBBY_REDIS_REPLICA_ADDRS`, `LOBBY_REDIS_PASSWORD`, `LOBBY_REDIS_DB`, + `LOBBY_REDIS_OPERATION_TIMEOUT` (legacy `LOBBY_REDIS_ADDR`, + `LOBBY_REDIS_TLS_ENABLED`, `LOBBY_REDIS_USERNAME` removed in PG_PLAN.md + §6A). +- **PostgreSQL connectivity** — `LOBBY_POSTGRES_PRIMARY_DSN`, + `LOBBY_POSTGRES_REPLICA_DSNS`, `LOBBY_POSTGRES_OPERATION_TIMEOUT`, + `LOBBY_POSTGRES_MAX_OPEN_CONNS`, `LOBBY_POSTGRES_MAX_IDLE_CONNS`, + `LOBBY_POSTGRES_CONN_MAX_LIFETIME`. 
- **Streams** — `LOBBY_GM_EVENTS_STREAM`, `LOBBY_RUNTIME_START_JOBS_STREAM`, `LOBBY_RUNTIME_STOP_JOBS_STREAM`, `LOBBY_RUNTIME_JOB_RESULTS_STREAM`, `LOBBY_NOTIFICATION_INTENTS_STREAM`, `LOBBY_USER_LIFECYCLE_STREAM`. @@ -152,9 +160,9 @@ The groups below summarize the structure: - `Game Lobby` owns platform game state. Game Master may cache snapshots but is not the source of truth. -- The Race Name Directory ships a Redis adapter and an in-process stub; the - stub is intended for unit tests and is selected via - `LOBBY_RACE_NAME_DIRECTORY_BACKEND=stub`. +- The Race Name Directory ships a PostgreSQL adapter (default after + PG_PLAN.md §6B) and an in-process stub. The stub is intended for unit + tests and is selected via `LOBBY_RACE_NAME_DIRECTORY_BACKEND=stub`. - A `permanent_block` or `deleted` event from User Service fans out asynchronously through the `user:lifecycle_events` consumer; in-flight games owned by the affected user receive a stop-job and transition to diff --git a/lobby/go.mod b/lobby/go.mod index c31556e..a72523e 100644 --- a/lobby/go.mod +++ b/lobby/go.mod @@ -3,15 +3,17 @@ module galaxy/lobby go 1.26.1 require ( + galaxy/postgres v0.0.0-00010101000000-000000000000 github.com/alicebob/miniredis/v2 v2.37.0 github.com/disciplinedware/go-confusables v0.1.1 github.com/getkin/kin-openapi v0.135.0 - github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 + github.com/go-jet/jet/v2 v2.14.1 + github.com/jackc/pgx/v5 v5.9.2 github.com/redis/go-redis/v9 v9.18.0 github.com/robfig/cron/v3 v3.0.1 github.com/stretchr/testify v1.11.1 github.com/testcontainers/testcontainers-go v0.42.0 - github.com/testcontainers/testcontainers-go/modules/redis v0.42.0 + github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 go.opentelemetry.io/otel v1.43.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.43.0 @@ -28,6 +30,24 @@ require ( golang.org/x/text v0.36.0 ) +require ( + 
github.com/XSAM/otelsql v0.42.0 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.14.3 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.3 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/pgtype v1.14.4 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/mfridman/interpolate v0.0.2 // indirect + github.com/pressly/goose/v3 v3.27.1 // indirect + github.com/sethvargo/go-retry v0.3.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/sync v0.20.0 // indirect +) + require ( dario.cat/mergo v1.0.2 // indirect galaxy/notificationintent v0.0.0 @@ -44,7 +64,7 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/go-connections v0.6.0 // indirect + github.com/docker/go-connections v0.7.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/ebitengine/purego v0.10.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -60,11 +80,10 @@ require ( github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/magiconair/properties v1.8.10 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mdelapenya/tlscert v0.2.0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/go-archive v0.2.0 // indirect - github.com/moby/moby/api v1.54.1 // indirect - github.com/moby/moby/client v0.4.0 // indirect + github.com/moby/moby/api v1.54.2 // indirect + github.com/moby/moby/client v0.4.1 // indirect github.com/moby/patternmatcher v0.6.1 // indirect github.com/moby/sys/sequential v0.6.0 // indirect github.com/moby/sys/user v0.4.0 // indirect @@ 
-78,7 +97,6 @@ require ( github.com/perimeterx/marshmallow v1.1.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect - github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 // indirect github.com/shirou/gopsutil/v4 v4.26.3 // indirect github.com/sirupsen/logrus v1.9.4 // indirect github.com/tklauser/go-sysconf v0.3.16 // indirect @@ -91,14 +109,18 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect go.opentelemetry.io/proto/otlp v1.10.0 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/crypto v0.49.0 // indirect - golang.org/x/net v0.52.0 // indirect - golang.org/x/sys v0.42.0 // indirect + golang.org/x/crypto v0.50.0 // indirect + golang.org/x/net v0.53.0 // indirect + golang.org/x/sys v0.43.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 // indirect google.golang.org/grpc v1.80.0 // indirect google.golang.org/protobuf v1.36.11 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace galaxy/notificationintent => ../pkg/notificationintent + +replace galaxy/postgres => ../pkg/postgres + +replace galaxy/redisconn => ../pkg/redisconn diff --git a/lobby/go.sum b/lobby/go.sum index 4e1912e..e1a314e 100644 --- a/lobby/go.sum +++ b/lobby/go.sum @@ -4,8 +4,12 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8af github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod 
h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/XSAM/otelsql v0.42.0 h1:Li0xF4eJUxG2e0x3D4rvRlys1f27yJKvjTh7ljkUP5o= +github.com/XSAM/otelsql v0.42.0/go.mod h1:4mOrEv+cS1KmKzrvTktvJnstr5GtKSAK+QHvFR9OcpI= github.com/alicebob/miniredis/v2 v2.37.0 h1:RheObYW32G1aiJIj81XVt78ZHJpHonHLHW7OLIshq68= github.com/alicebob/miniredis/v2 v2.37.0/go.mod h1:TcL7YfarKPGDAthEtl5NBeHZfeUQj6OXMm/+iu5cLMM= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= @@ -18,6 +22,7 @@ github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1x github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= @@ -26,10 +31,15 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/coreos/go-systemd 
v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= @@ -38,16 +48,22 @@ github.com/disciplinedware/go-confusables v0.1.1 h1:l/JVOsdrEDHo7nvL+tQfRO1F14Uy github.com/disciplinedware/go-confusables v0.1.1/go.mod h1:2hAXIAtpSqx+tMKdCzgRNv4J/kmz/oGfSHTBGJjVgfc= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= -github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-connections v0.7.0 h1:6SsRfJddP22WMrCkj19x9WKjEDTB+ahsdiGYf0mN39c= +github.com/docker/go-connections v0.7.0/go.mod h1:no1qkHdjq7kLMGUXYAduOhYPSJxxvgWBh7ogVvptn3Q= github.com/docker/go-units v0.5.0 
h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU= github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/getkin/kin-openapi v0.135.0 h1:751SjYfbiwqukYuVjwYEIKNfrSwS5YpA7DZnKSwQgtg= github.com/getkin/kin-openapi v0.135.0/go.mod h1:6dd5FJl6RdX4usBtFBaQhk9q62Yb2J0Mk5IhUO/QqFI= +github.com/go-jet/jet/v2 v2.14.1 h1:wsfD9e7CGP9h46+IFNlftfncBcmVnKddikbTtapQM3M= +github.com/go-jet/jet/v2 v2.14.1/go.mod h1:dqTAECV2Mo3S2NFjbm4vJ1aDruZjhaJ1RAAR8rGUkkc= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -59,43 +75,123 @@ github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1 github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= 
github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs= github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn 
v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= 
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.14.4 h1:fKuNiCumbKTAIxQwXfB/nsrnkEI6bPJrrSiMKgbJ2j8= +github.com/jackc/pgtype v1.14.4/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= +github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA= +github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= +github.com/jackc/pgx/v5 v5.9.2 
h1:3ZhOzMWnR4yJ+RW1XImIPsD1aNSz4T4fyP7zlQb56hw= +github.com/jackc/pgx/v5 v5.9.2/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE= github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ= github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.21 h1:xYae+lCNBP7QuW4PUnNG61ffM4hVIfm+zUzDuSzYLGs= +github.com/mattn/go-isatty v0.0.21/go.mod h1:ZXfXG4SQHsB/w3ZeOYbR0PrPwLy+n6xiMrJlRFqopa4= github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI= 
github.com/mdelapenya/tlscert v0.2.0/go.mod h1:O4njj3ELLnJjGdkN7M/vIVCpZ+Cf0L6muqOG4tLSl8o= +github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY= +github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/go-archive v0.2.0 h1:zg5QDUM2mi0JIM9fdQZWC7U8+2ZfixfTYoHL7rWUcP8= github.com/moby/go-archive v0.2.0/go.mod h1:mNeivT14o8xU+5q1YnNrkQVpK+dnNe/K6fHqnTg4qPU= -github.com/moby/moby/api v1.54.1 h1:TqVzuJkOLsgLDDwNLmYqACUuTehOHRGKiPhvH8V3Nn4= -github.com/moby/moby/api v1.54.1/go.mod h1:+RQ6wluLwtYaTd1WnPLykIDPekkuyD/ROWQClE83pzs= -github.com/moby/moby/client v0.4.0 h1:S+2XegzHQrrvTCvF6s5HFzcrywWQmuVnhOXe2kiWjIw= -github.com/moby/moby/client v0.4.0/go.mod h1:QWPbvWchQbxBNdaLSpoKpCdf5E+WxFAgNHogCWDoa7g= +github.com/moby/moby/api v1.54.2 h1:wiat9QAhnDQjA7wk1kh/TqHz2I1uUA7M7t9SAl/JNXg= +github.com/moby/moby/api v1.54.2/go.mod h1:+RQ6wluLwtYaTd1WnPLykIDPekkuyD/ROWQClE83pzs= +github.com/moby/moby/client v0.4.1 h1:DMQgisVoMkmMs7fp3ROSdiBnoAu8+vo3GggFl06M/wY= +github.com/moby/moby/client v0.4.1/go.mod h1:z52C9O2POPOsnxZAy//WtKcQ32P+jT/NGeXu/7nfjGQ= github.com/moby/patternmatcher v0.6.1 h1:qlhtafmr6kgMIJjKJMDmMWq7WLkKIo23hsrpR3x084U= github.com/moby/patternmatcher v0.6.1/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= @@ -108,6 +204,8 @@ github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= 
+github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= +github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/oasdiff/yaml v0.0.9 h1:zQOvd2UKoozsSsAknnWoDJlSK4lC0mpmjfDsfqNwX48= github.com/oasdiff/yaml v0.0.9/go.mod h1:8lvhgJG4xiKPj3HN5lDow4jZHPlx1i7dIwzkdAo6oAM= github.com/oasdiff/yaml3 v0.0.9 h1:rWPrKccrdUm8J0F3sGuU+fuh9+1K/RdJlWF7O/9yw2g= @@ -118,32 +216,58 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 h1:QY4nmPHLFAJjtT5O4OMUEOxP8WVaRNOFpcbmxT2NLZU= -github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0/go.mod h1:WH8cY/0fT41Bsf341qzo8v4nx0GCE8FykAA23IVbVmo= -github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 h1:2dKdoEYBJ0CZCLPiCdvvc7luz3DPwY6hKdzjL6m1eHE= -github.com/redis/go-redis/extra/redisotel/v9 v9.18.0/go.mod h1:WzkrVG9ro9BwCQD0eJOWn6AGL4Z1CleGflM45w1hu10= +github.com/pressly/goose/v3 v3.27.1 h1:6uEvcprBybDmW4hcz3gYujhARhye+GoWKhEWyzD5sh4= 
+github.com/pressly/goose/v3 v3.27.1/go.mod h1:maruOxsPnIG2yHHyo8UqKWXYKFcH7Q76csUV7+7KYoM= github.com/redis/go-redis/v9 v9.18.0 h1:pMkxYPkEbMPwRdenAzUNyFNrDgHx9U+DrBabWNfSRQs= github.com/redis/go-redis/v9 v9.18.0/go.mod h1:k3ufPphLU5YXwNTUcCRXGxUoF1fqxnhFQmscfkCoDA0= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE= +github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas= github.com/shirou/gopsutil/v4 v4.26.3 h1:2ESdQt90yU3oXF/CdOlRCJxrP+Am1aBYubTMTfxJ1qc= github.com/shirou/gopsutil/v4 v4.26.3/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus 
v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.3 h1:jmXUvGomnU1o3W/V5h2VEradbpJDwGrzugQQvL0POH4= github.com/stretchr/objx v0.5.3/go.mod h1:rDQraq+vQZU7Fde9LOZLr8Tax6zZvy4kuNKF+QYS+U0= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/testcontainers/testcontainers-go v0.42.0 h1:He3IhTzTZOygSXLJPMX7n44XtK+qhjat1nI9cneBbUY= github.com/testcontainers/testcontainers-go v0.42.0/go.mod h1:vZjdY1YmUA1qEForxOIOazfsrdyORJAbhi0bp8plN30= 
-github.com/testcontainers/testcontainers-go/modules/redis v0.42.0 h1:id/6LH8ZeDrtAUVSuNvZUAJ1kVpb82y1pr9yweAWsRg= -github.com/testcontainers/testcontainers-go/modules/redis v0.42.0/go.mod h1:uF0jI8FITagQpBNOgweGBmPf6rP4K0SeL1XFPbsZSSY= +github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0 h1:GCbb1ndrF7OTDiIvxXyItaDab4qkzTFJ48LKFdM7EIo= +github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0/go.mod h1:IRPBaI8jXdrNfD0e4Zm7Fbcgaz5shKxOQv4axiL09xs= github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA= github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI= github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw= @@ -152,12 +276,14 @@ github.com/ugorji/go/codec v1.3.1 h1:waO7eEiFDwidsBN6agj1vJQ4AG7lh2yqXyOXqhgQuyY github.com/ugorji/go/codec v1.3.1/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4= github.com/woodsbury/decimal128 v1.3.0 h1:8pffMNWIlC0O5vbyHWFZAt5yWvWcrHA+3ovIIjVWss0= github.com/woodsbury/decimal128 v1.3.0/go.mod h1:C5UTmyTjW3JftjUFzOVhC20BEQa2a4ZKOB5I6Zjb+ds= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 h1:CqXxU8VOmDefoh0+ztfGaymYbhdB/tT3zs79QaZTNGY= @@ -188,42 +314,148 @@ go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09 go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g= go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= -golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap 
v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= +golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.35.0 h1:Ww1D637e6Pg+Zb2KrWfHQUnH2dQRLBQyAtpr/haaJeM= golang.org/x/mod v0.35.0/go.mod h1:+GwiRhIInF8wPm+4AoT6L0FA1QWAad3OMdTRx4tFYlU= -golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= -golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA= +golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= -golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= -golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU= -golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY= +golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= golang.org/x/text 
v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= gonum.org/v1/gonum v0.17.0/go.mod 
h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA= google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 h1:m8qni9SQFH0tJc1X0vmnpw/0t+AImlSvp30sEupozUg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 h1:XF8+t6QQiS0o9ArVan/HW8Q7cycNPGsJf6GA2nXxYAg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM= google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 
v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +modernc.org/libc v1.72.1 h1:db1xwJ6u1kE3KHTFTTbe2GCrczHPKzlURP0aDC4NGD0= +modernc.org/libc v1.72.1/go.mod h1:HRMiC/PhPGLIPM7GzAFCbI+oSgE3dhZ8FWftmRrHVlY= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= +modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/sqlite v1.49.1 h1:dYGHTKcX1sJ+EQDnUzvz4TJ5GbuvhNJa8Fg6ElGx73U= +modernc.org/sqlite v1.49.1/go.mod h1:m0w8xhwYUVY3H6pSDwc3gkJ/irZT/0YEXwBlhaxQEew= pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= diff --git a/lobby/internal/adapters/postgres/applicationstore/store.go b/lobby/internal/adapters/postgres/applicationstore/store.go new file mode 100644 index 0000000..bc6bc12 --- /dev/null +++ b/lobby/internal/adapters/postgres/applicationstore/store.go @@ -0,0 +1,310 @@ +// Package applicationstore implements the PostgreSQL-backed adapter for +// `ports.ApplicationStore`. +// +// PG_PLAN.md §6A migrates Game Lobby Service away from Redis-backed durable +// application records; see `galaxy/lobby/docs/postgres-migration.md` for +// the full decision record. 
package applicationstore

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"strings"
	"time"

	"galaxy/lobby/internal/adapters/postgres/internal/sqlx"
	pgtable "galaxy/lobby/internal/adapters/postgres/jet/lobby/table"
	"galaxy/lobby/internal/domain/application"
	"galaxy/lobby/internal/domain/common"
	"galaxy/lobby/internal/ports"

	pg "github.com/go-jet/jet/v2/postgres"
)

// Config configures one PostgreSQL-backed application store instance.
type Config struct {
	// DB stores the connection pool the store uses for every query. The
	// caller owns the pool's lifecycle (open, migrate, close); the store
	// never closes it.
	DB *sql.DB

	// OperationTimeout bounds one round trip. Each store method derives a
	// per-operation context via sqlx.WithTimeout so an unbounded caller
	// context cannot starve the pool.
	OperationTimeout time.Duration
}

// Store persists Game Lobby application records in PostgreSQL.
//
// Both fields are assigned once in New and never mutated afterwards, and
// *sql.DB is safe for concurrent use, so a single Store may be shared by
// many goroutines.
type Store struct {
	db               *sql.DB
	operationTimeout time.Duration
}

// New constructs one PostgreSQL-backed application store from cfg. It
// validates cfg eagerly so a misconfigured runtime fails at startup rather
// than on the first query.
func New(cfg Config) (*Store, error) {
	if cfg.DB == nil {
		return nil, errors.New("new postgres application store: db must not be nil")
	}
	if cfg.OperationTimeout <= 0 {
		return nil, errors.New("new postgres application store: operation timeout must be positive")
	}
	return &Store{
		db:               cfg.DB,
		operationTimeout: cfg.OperationTimeout,
	}, nil
}

// applicationSelectColumns is the canonical SELECT list for the applications
// table, matching scanApplication's column order. Keep the two in lockstep:
// a reorder here without the matching scan change silently shifts values
// into the wrong fields.
var applicationSelectColumns = pg.ColumnList{
	pgtable.Applications.ApplicationID,
	pgtable.Applications.GameID,
	pgtable.Applications.ApplicantUserID,
	pgtable.Applications.RaceName,
	pgtable.Applications.Status,
	pgtable.Applications.CreatedAt,
	pgtable.Applications.DecidedAt,
}

// Save persists a new submitted application record. The single-active
// constraint is enforced by the partial unique index
// `applications_active_per_user_game_uidx`.
func (store *Store) Save(ctx context.Context, record application.Application) error {
	if store == nil || store.db == nil {
		return errors.New("save application: nil store")
	}
	if err := record.Validate(); err != nil {
		return fmt.Errorf("save application: %w", err)
	}
	// Save only creates fresh submissions; every later lifecycle change goes
	// through UpdateStatus, so anything but StatusSubmitted is a caller bug.
	if record.Status != application.StatusSubmitted {
		return fmt.Errorf(
			"save application: status must be %q, got %q",
			application.StatusSubmitted, record.Status,
		)
	}

	operationCtx, cancel, err := sqlx.WithTimeout(ctx, "save application", store.operationTimeout)
	if err != nil {
		return err
	}
	defer cancel()

	// Plain INSERT (no upsert): a duplicate primary key or a second active
	// application per (user, game) both surface as a unique violation below.
	stmt := pgtable.Applications.INSERT(
		pgtable.Applications.ApplicationID,
		pgtable.Applications.GameID,
		pgtable.Applications.ApplicantUserID,
		pgtable.Applications.RaceName,
		pgtable.Applications.Status,
		pgtable.Applications.CreatedAt,
		pgtable.Applications.DecidedAt,
	).VALUES(
		record.ApplicationID.String(),
		record.GameID.String(),
		record.ApplicantUserID,
		record.RaceName,
		string(record.Status),
		record.CreatedAt.UTC(),
		sqlx.NullableTimePtr(record.DecidedAt),
	)

	query, args := stmt.Sql()
	if _, err := store.db.ExecContext(operationCtx, query, args...); err != nil {
		// Any unique violation (PK or the partial active index) is reported
		// to callers as the domain-level conflict sentinel.
		if sqlx.IsUniqueViolation(err) {
			return fmt.Errorf("save application: %w", application.ErrConflict)
		}
		return fmt.Errorf("save application: %w", err)
	}
	return nil
}

// Get returns the record identified by applicationID. It returns
// application.ErrNotFound when no record exists.
func (store *Store) Get(ctx context.Context, applicationID common.ApplicationID) (application.Application, error) {
	if store == nil || store.db == nil {
		return application.Application{}, errors.New("get application: nil store")
	}
	if err := applicationID.Validate(); err != nil {
		return application.Application{}, fmt.Errorf("get application: %w", err)
	}

	operationCtx, cancel, err := sqlx.WithTimeout(ctx, "get application", store.operationTimeout)
	if err != nil {
		return application.Application{}, err
	}
	defer cancel()

	stmt := pg.SELECT(applicationSelectColumns).
		FROM(pgtable.Applications).
		WHERE(pgtable.Applications.ApplicationID.EQ(pg.String(applicationID.String())))

	query, args := stmt.Sql()
	row := store.db.QueryRowContext(operationCtx, query, args...)
	record, err := scanApplication(row)
	// NOTE(review): ErrNotFound is returned unwrapped (no "get application:"
	// prefix), unlike every other error path here — confirm no caller relies
	// on err == application.ErrNotFound before wrapping it.
	if sqlx.IsNoRows(err) {
		return application.Application{}, application.ErrNotFound
	}
	if err != nil {
		return application.Application{}, fmt.Errorf("get application: %w", err)
	}
	return record, nil
}

// GetByGame returns every application attached to gameID. Sorted by
// created_at ASC then application_id ASC.
func (store *Store) GetByGame(ctx context.Context, gameID common.GameID) ([]application.Application, error) {
	if store == nil || store.db == nil {
		return nil, errors.New("get applications by game: nil store")
	}
	if err := gameID.Validate(); err != nil {
		return nil, fmt.Errorf("get applications by game: %w", err)
	}

	// No timeout wrapping here: queryList derives the per-operation context.
	stmt := pg.SELECT(applicationSelectColumns).
		FROM(pgtable.Applications).
		WHERE(pgtable.Applications.GameID.EQ(pg.String(gameID.String()))).
		ORDER_BY(pgtable.Applications.CreatedAt.ASC(), pgtable.Applications.ApplicationID.ASC())

	return store.queryList(ctx, "get applications by game", stmt)
}

// GetByUser returns every application submitted by applicantUserID.
+func (store *Store) GetByUser(ctx context.Context, applicantUserID string) ([]application.Application, error) { + if store == nil || store.db == nil { + return nil, errors.New("get applications by user: nil store") + } + trimmed := strings.TrimSpace(applicantUserID) + if trimmed == "" { + return nil, fmt.Errorf("get applications by user: applicant user id must not be empty") + } + + stmt := pg.SELECT(applicationSelectColumns). + FROM(pgtable.Applications). + WHERE(pgtable.Applications.ApplicantUserID.EQ(pg.String(trimmed))). + ORDER_BY(pgtable.Applications.CreatedAt.ASC(), pgtable.Applications.ApplicationID.ASC()) + + return store.queryList(ctx, "get applications by user", stmt) +} + +func (store *Store) queryList(ctx context.Context, operation string, stmt pg.SelectStatement) ([]application.Application, error) { + operationCtx, cancel, err := sqlx.WithTimeout(ctx, operation, store.operationTimeout) + if err != nil { + return nil, err + } + defer cancel() + + query, args := stmt.Sql() + rows, err := store.db.QueryContext(operationCtx, query, args...) + if err != nil { + return nil, fmt.Errorf("%s: %w", operation, err) + } + defer rows.Close() + + records := make([]application.Application, 0) + for rows.Next() { + record, err := scanApplication(rows) + if err != nil { + return nil, fmt.Errorf("%s: scan: %w", operation, err) + } + records = append(records, record) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("%s: %w", operation, err) + } + if len(records) == 0 { + return nil, nil + } + return records, nil +} + +// UpdateStatus applies one status transition with compare-and-swap on the +// current status column. 
+func (store *Store) UpdateStatus(ctx context.Context, input ports.UpdateApplicationStatusInput) error { + if store == nil || store.db == nil { + return errors.New("update application status: nil store") + } + if err := input.Validate(); err != nil { + return fmt.Errorf("update application status: %w", err) + } + if err := application.Transition(input.ExpectedFrom, input.To); err != nil { + return err + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "update application status", store.operationTimeout) + if err != nil { + return err + } + defer cancel() + + at := input.At.UTC() + stmt := pgtable.Applications.UPDATE(pgtable.Applications.Status, pgtable.Applications.DecidedAt). + SET(string(input.To), at). + WHERE(pg.AND( + pgtable.Applications.ApplicationID.EQ(pg.String(input.ApplicationID.String())), + pgtable.Applications.Status.EQ(pg.String(string(input.ExpectedFrom))), + )) + + query, args := stmt.Sql() + result, err := store.db.ExecContext(operationCtx, query, args...) + if err != nil { + return fmt.Errorf("update application status: %w", err) + } + affected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("update application status: rows affected: %w", err) + } + if affected == 0 { + probe := pg.SELECT(pgtable.Applications.Status). + FROM(pgtable.Applications). + WHERE(pgtable.Applications.ApplicationID.EQ(pg.String(input.ApplicationID.String()))) + probeQuery, probeArgs := probe.Sql() + + var current string + row := store.db.QueryRowContext(operationCtx, probeQuery, probeArgs...) 
+ if err := row.Scan(¤t); err != nil { + if sqlx.IsNoRows(err) { + return application.ErrNotFound + } + return fmt.Errorf("update application status: probe: %w", err) + } + return fmt.Errorf("update application status: %w", application.ErrConflict) + } + return nil +} + +type rowScanner interface { + Scan(dest ...any) error +} + +func scanApplication(rs rowScanner) (application.Application, error) { + var ( + applicationID string + gameID string + applicantUserID string + raceName string + status string + createdAt time.Time + decidedAt sql.NullTime + ) + if err := rs.Scan( + &applicationID, + &gameID, + &applicantUserID, + &raceName, + &status, + &createdAt, + &decidedAt, + ); err != nil { + return application.Application{}, err + } + return application.Application{ + ApplicationID: common.ApplicationID(applicationID), + GameID: common.GameID(gameID), + ApplicantUserID: applicantUserID, + RaceName: raceName, + Status: application.Status(status), + CreatedAt: createdAt.UTC(), + DecidedAt: sqlx.TimePtrFromNullable(decidedAt), + }, nil +} + +// Ensure Store satisfies the ports.ApplicationStore interface at compile +// time. 
+var _ ports.ApplicationStore = (*Store)(nil) diff --git a/lobby/internal/adapters/postgres/applicationstore/store_test.go b/lobby/internal/adapters/postgres/applicationstore/store_test.go new file mode 100644 index 0000000..822bfea --- /dev/null +++ b/lobby/internal/adapters/postgres/applicationstore/store_test.go @@ -0,0 +1,194 @@ +package applicationstore_test + +import ( + "context" + "testing" + "time" + + "galaxy/lobby/internal/adapters/postgres/applicationstore" + "galaxy/lobby/internal/adapters/postgres/gamestore" + "galaxy/lobby/internal/adapters/postgres/internal/pgtest" + "galaxy/lobby/internal/domain/application" + "galaxy/lobby/internal/domain/common" + "galaxy/lobby/internal/domain/game" + "galaxy/lobby/internal/ports" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMain(m *testing.M) { pgtest.RunMain(m) } + +func newStores(t *testing.T) (*gamestore.Store, *applicationstore.Store) { + t.Helper() + pgtest.TruncateAll(t) + gs, err := gamestore.New(gamestore.Config{ + DB: pgtest.Ensure(t).Pool(), OperationTimeout: pgtest.OperationTimeout, + }) + require.NoError(t, err) + as, err := applicationstore.New(applicationstore.Config{ + DB: pgtest.Ensure(t).Pool(), OperationTimeout: pgtest.OperationTimeout, + }) + require.NoError(t, err) + return gs, as +} + +func seedGame(t *testing.T, gs *gamestore.Store, id string) game.Game { + t.Helper() + now := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC) + g, err := game.New(game.NewGameInput{ + GameID: common.GameID(id), + GameName: "Game " + id, + GameType: game.GameTypePublic, + MinPlayers: 2, + MaxPlayers: 8, + StartGapHours: 12, + StartGapPlayers: 2, + EnrollmentEndsAt: now.Add(7 * 24 * time.Hour), + TurnSchedule: "0 18 * * *", + TargetEngineVersion: "v1.0.0", + Now: now, + }) + require.NoError(t, err) + require.NoError(t, gs.Save(context.Background(), g)) + return g +} + +func newApplication(t *testing.T, id, gameID, userID string) application.Application { + 
t.Helper() + a, err := application.New(application.NewApplicationInput{ + ApplicationID: common.ApplicationID(id), + GameID: common.GameID(gameID), + ApplicantUserID: userID, + RaceName: "Pilot " + id, + Now: time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC), + }) + require.NoError(t, err) + return a +} + +func TestSaveAndGet(t *testing.T) { + ctx := context.Background() + gs, as := newStores(t) + seedGame(t, gs, "game-001") + + rec := newApplication(t, "application-001", "game-001", "user-a") + require.NoError(t, as.Save(ctx, rec)) + + got, err := as.Get(ctx, rec.ApplicationID) + require.NoError(t, err) + assert.Equal(t, rec.ApplicationID, got.ApplicationID) + assert.Equal(t, application.StatusSubmitted, got.Status) + assert.Equal(t, "user-a", got.ApplicantUserID) + assert.Nil(t, got.DecidedAt) +} + +func TestSaveRejectsNonSubmittedRecord(t *testing.T) { + ctx := context.Background() + gs, as := newStores(t) + seedGame(t, gs, "game-001") + + rec := newApplication(t, "application-001", "game-001", "user-a") + rec.Status = application.StatusApproved + require.Error(t, as.Save(ctx, rec)) +} + +func TestSavePartialUniqueRejectsSecondActiveForSameUserGame(t *testing.T) { + ctx := context.Background() + gs, as := newStores(t) + seedGame(t, gs, "game-001") + + a1 := newApplication(t, "application-001", "game-001", "user-a") + require.NoError(t, as.Save(ctx, a1)) + + // second submission by the same user against the same game must fail. 
+ a2 := newApplication(t, "application-002", "game-001", "user-a") + err := as.Save(ctx, a2) + require.ErrorIs(t, err, application.ErrConflict) +} + +func TestSavePartialUniqueAllowsResubmitAfterRejection(t *testing.T) { + ctx := context.Background() + gs, as := newStores(t) + seedGame(t, gs, "game-001") + + a1 := newApplication(t, "application-001", "game-001", "user-a") + require.NoError(t, as.Save(ctx, a1)) + + require.NoError(t, as.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{ + ApplicationID: a1.ApplicationID, + ExpectedFrom: application.StatusSubmitted, + To: application.StatusRejected, + At: a1.CreatedAt.Add(time.Minute), + })) + + // after rejection a new submission for the same (user, game) is allowed. + a2 := newApplication(t, "application-002", "game-001", "user-a") + require.NoError(t, as.Save(ctx, a2)) +} + +func TestUpdateStatusReturnsConflictOnExpectedFromMismatch(t *testing.T) { + ctx := context.Background() + gs, as := newStores(t) + seedGame(t, gs, "game-001") + + rec := newApplication(t, "application-001", "game-001", "user-a") + require.NoError(t, as.Save(ctx, rec)) + + // First, transition the row to approved. + require.NoError(t, as.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{ + ApplicationID: rec.ApplicationID, + ExpectedFrom: application.StatusSubmitted, + To: application.StatusApproved, + At: rec.CreatedAt.Add(time.Minute), + })) + + // Second attempt claims status is still submitted: (submitted, rejected) + // is a valid domain transition, but the row is already approved, so the + // adapter must surface ErrConflict on the row-level mismatch. 
+ err := as.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{ + ApplicationID: rec.ApplicationID, + ExpectedFrom: application.StatusSubmitted, + To: application.StatusRejected, + At: rec.CreatedAt.Add(2 * time.Minute), + }) + require.ErrorIs(t, err, application.ErrConflict) +} + +func TestUpdateStatusReturnsNotFoundForMissing(t *testing.T) { + ctx := context.Background() + _, as := newStores(t) + err := as.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{ + ApplicationID: common.ApplicationID("application-missing"), + ExpectedFrom: application.StatusSubmitted, + To: application.StatusApproved, + At: time.Now().UTC(), + }) + require.ErrorIs(t, err, application.ErrNotFound) +} + +func TestGetByGameAndGetByUser(t *testing.T) { + ctx := context.Background() + gs, as := newStores(t) + seedGame(t, gs, "game-001") + seedGame(t, gs, "game-002") + + require.NoError(t, as.Save(ctx, newApplication(t, "application-001", "game-001", "user-a"))) + require.NoError(t, as.Save(ctx, newApplication(t, "application-002", "game-001", "user-b"))) + require.NoError(t, as.Save(ctx, newApplication(t, "application-003", "game-002", "user-a"))) + + g1, err := as.GetByGame(ctx, common.GameID("game-001")) + require.NoError(t, err) + assert.Len(t, g1, 2) + + userA, err := as.GetByUser(ctx, "user-a") + require.NoError(t, err) + assert.Len(t, userA, 2) +} + +func TestGetMissingReturnsNotFound(t *testing.T) { + ctx := context.Background() + _, as := newStores(t) + _, err := as.Get(ctx, common.ApplicationID("application-missing")) + require.ErrorIs(t, err, application.ErrNotFound) +} diff --git a/lobby/internal/adapters/postgres/gamestore/codecs.go b/lobby/internal/adapters/postgres/gamestore/codecs.go new file mode 100644 index 0000000..49e5472 --- /dev/null +++ b/lobby/internal/adapters/postgres/gamestore/codecs.go @@ -0,0 +1,94 @@ +package gamestore + +import ( + "encoding/json" + "fmt" + "time" + + "galaxy/lobby/internal/domain/game" +) + +// runtimeSnapshotJSON is the on-disk JSONB 
shape used for the denormalised +// runtime snapshot column on `games`. Keys mirror the field names in +// `game.RuntimeSnapshot` so a round-trip remains naked-comparable. +type runtimeSnapshotJSON struct { + CurrentTurn int `json:"current_turn"` + RuntimeStatus string `json:"runtime_status,omitempty"` + EngineHealthSummary string `json:"engine_health_summary,omitempty"` +} + +func marshalRuntimeSnapshot(snapshot game.RuntimeSnapshot) ([]byte, error) { + payload := runtimeSnapshotJSON{ + CurrentTurn: snapshot.CurrentTurn, + RuntimeStatus: snapshot.RuntimeStatus, + EngineHealthSummary: snapshot.EngineHealthSummary, + } + encoded, err := json.Marshal(payload) + if err != nil { + return nil, fmt.Errorf("marshal runtime snapshot: %w", err) + } + return encoded, nil +} + +func unmarshalRuntimeSnapshot(payload []byte) (game.RuntimeSnapshot, error) { + if len(payload) == 0 { + return game.RuntimeSnapshot{}, nil + } + var stored runtimeSnapshotJSON + if err := json.Unmarshal(payload, &stored); err != nil { + return game.RuntimeSnapshot{}, fmt.Errorf("unmarshal runtime snapshot: %w", err) + } + return game.RuntimeSnapshot{ + CurrentTurn: stored.CurrentTurn, + RuntimeStatus: stored.RuntimeStatus, + EngineHealthSummary: stored.EngineHealthSummary, + }, nil +} + +// runtimeBindingJSON is the on-disk JSONB shape used for the optional +// runtime binding column on `games`. The `bound_at_ms` field stores Unix +// milliseconds so the JSON serialisation matches the previous Redis JSON +// shape and the timezone is irrelevant inside the JSON payload itself; the +// adapter still re-wraps the resulting time.Time with .UTC() before exposing +// it to callers. 
+type runtimeBindingJSON struct { + ContainerID string `json:"container_id"` + EngineEndpoint string `json:"engine_endpoint"` + RuntimeJobID string `json:"runtime_job_id"` + BoundAtMS int64 `json:"bound_at_ms"` +} + +// marshalRuntimeBinding returns nil bytes (SQL NULL) when binding is nil, +// otherwise the JSON encoding of the binding. +func marshalRuntimeBinding(binding *game.RuntimeBinding) ([]byte, error) { + if binding == nil { + return nil, nil + } + payload := runtimeBindingJSON{ + ContainerID: binding.ContainerID, + EngineEndpoint: binding.EngineEndpoint, + RuntimeJobID: binding.RuntimeJobID, + BoundAtMS: binding.BoundAt.UTC().UnixMilli(), + } + encoded, err := json.Marshal(payload) + if err != nil { + return nil, fmt.Errorf("marshal runtime binding: %w", err) + } + return encoded, nil +} + +func unmarshalRuntimeBinding(payload []byte) (*game.RuntimeBinding, error) { + if len(payload) == 0 { + return nil, nil + } + var stored runtimeBindingJSON + if err := json.Unmarshal(payload, &stored); err != nil { + return nil, fmt.Errorf("unmarshal runtime binding: %w", err) + } + return &game.RuntimeBinding{ + ContainerID: stored.ContainerID, + EngineEndpoint: stored.EngineEndpoint, + RuntimeJobID: stored.RuntimeJobID, + BoundAt: time.UnixMilli(stored.BoundAtMS).UTC(), + }, nil +} diff --git a/lobby/internal/adapters/postgres/gamestore/store.go b/lobby/internal/adapters/postgres/gamestore/store.go new file mode 100644 index 0000000..0c13a0e --- /dev/null +++ b/lobby/internal/adapters/postgres/gamestore/store.go @@ -0,0 +1,610 @@ +// Package gamestore implements the PostgreSQL-backed adapter for +// `ports.GameStore`. +// +// The package owns the on-disk shape of the `games` table (defined in +// `galaxy/lobby/internal/adapters/postgres/migrations`) and translates the +// schema-agnostic GameStore interface declared in `internal/ports` into +// concrete go-jet/v2 statements driven by the pgx driver. 
Per-row +// lifecycle transitions (Save/UpdateStatus/UpdateRuntimeSnapshot/ +// UpdateRuntimeBinding) use optimistic concurrency on the `updated_at` +// column rather than retaining a `SELECT ... FOR UPDATE` lock across the +// caller's logic, mirroring the Notification Stage 5 pattern. +// +// PG_PLAN.md §6A migrates Game Lobby Service away from Redis-backed durable +// game records; see `galaxy/lobby/docs/postgres-migration.md` for the full +// decision record. +package gamestore + +import ( + "context" + "database/sql" + "errors" + "fmt" + "strings" + "time" + + "galaxy/lobby/internal/adapters/postgres/internal/sqlx" + pgtable "galaxy/lobby/internal/adapters/postgres/jet/lobby/table" + "galaxy/lobby/internal/domain/common" + "galaxy/lobby/internal/domain/game" + "galaxy/lobby/internal/ports" + + pg "github.com/go-jet/jet/v2/postgres" +) + +// Config configures one PostgreSQL-backed game store instance. The store +// does not own the underlying *sql.DB lifecycle: the caller (typically the +// service runtime) opens, instruments, migrates, and closes the pool. +type Config struct { + // DB stores the connection pool the store uses for every query. + DB *sql.DB + + // OperationTimeout bounds one round trip. The store creates a derived + // context for each operation so callers cannot starve the pool with an + // unbounded ctx. + OperationTimeout time.Duration +} + +// Store persists Game Lobby game records in PostgreSQL. +type Store struct { + db *sql.DB + operationTimeout time.Duration +} + +// New constructs one PostgreSQL-backed game store from cfg. 
func New(cfg Config) (*Store, error) {
	// Validate eagerly so a misconfigured runtime fails at startup rather
	// than on the first query.
	if cfg.DB == nil {
		return nil, errors.New("new postgres game store: db must not be nil")
	}
	if cfg.OperationTimeout <= 0 {
		return nil, errors.New("new postgres game store: operation timeout must be positive")
	}
	return &Store{
		db:               cfg.DB,
		operationTimeout: cfg.OperationTimeout,
	}, nil
}

// gameSelectColumns is the canonical SELECT list for the games table,
// matching scanGame's column order. Keep the two in lockstep: a reorder
// here without the matching scan change silently shifts values into the
// wrong fields.
var gameSelectColumns = pg.ColumnList{
	pgtable.Games.GameID,
	pgtable.Games.GameName,
	pgtable.Games.Description,
	pgtable.Games.GameType,
	pgtable.Games.OwnerUserID,
	pgtable.Games.Status,
	pgtable.Games.MinPlayers,
	pgtable.Games.MaxPlayers,
	pgtable.Games.StartGapHours,
	pgtable.Games.StartGapPlayers,
	pgtable.Games.EnrollmentEndsAt,
	pgtable.Games.TurnSchedule,
	pgtable.Games.TargetEngineVersion,
	pgtable.Games.CreatedAt,
	pgtable.Games.UpdatedAt,
	pgtable.Games.StartedAt,
	pgtable.Games.FinishedAt,
	pgtable.Games.RuntimeSnapshot,
	pgtable.Games.RuntimeBinding,
}

// Save upserts record. The status secondary index is intrinsic to
// `games_status_created_idx` so callers see the same effect as the previous
// Redis adapter without the explicit index rewrite.
//
// The implementation is INSERT ... ON CONFLICT (game_id) DO UPDATE: the
// adapter cannot use plain INSERT because callers (notably the create-game
// service and admin updates) expect Save to be upsert.
+func (store *Store) Save(ctx context.Context, record game.Game) error { + if store == nil || store.db == nil { + return errors.New("save game: nil store") + } + if err := record.Validate(); err != nil { + return fmt.Errorf("save game: %w", err) + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "save game", store.operationTimeout) + if err != nil { + return err + } + defer cancel() + + snapshot, err := marshalRuntimeSnapshot(record.RuntimeSnapshot) + if err != nil { + return fmt.Errorf("save game: %w", err) + } + binding, err := marshalRuntimeBinding(record.RuntimeBinding) + if err != nil { + return fmt.Errorf("save game: %w", err) + } + + stmt := pgtable.Games.INSERT( + pgtable.Games.GameID, + pgtable.Games.GameName, + pgtable.Games.Description, + pgtable.Games.GameType, + pgtable.Games.OwnerUserID, + pgtable.Games.Status, + pgtable.Games.MinPlayers, + pgtable.Games.MaxPlayers, + pgtable.Games.StartGapHours, + pgtable.Games.StartGapPlayers, + pgtable.Games.EnrollmentEndsAt, + pgtable.Games.TurnSchedule, + pgtable.Games.TargetEngineVersion, + pgtable.Games.CreatedAt, + pgtable.Games.UpdatedAt, + pgtable.Games.StartedAt, + pgtable.Games.FinishedAt, + pgtable.Games.RuntimeSnapshot, + pgtable.Games.RuntimeBinding, + ).VALUES( + record.GameID.String(), + record.GameName, + record.Description, + string(record.GameType), + record.OwnerUserID, + string(record.Status), + record.MinPlayers, + record.MaxPlayers, + record.StartGapHours, + record.StartGapPlayers, + record.EnrollmentEndsAt.UTC(), + record.TurnSchedule, + record.TargetEngineVersion, + record.CreatedAt.UTC(), + record.UpdatedAt.UTC(), + sqlx.NullableTimePtr(record.StartedAt), + sqlx.NullableTimePtr(record.FinishedAt), + snapshot, + binding, + ).ON_CONFLICT(pgtable.Games.GameID).DO_UPDATE( + pg.SET( + pgtable.Games.GameName.SET(pgtable.Games.EXCLUDED.GameName), + pgtable.Games.Description.SET(pgtable.Games.EXCLUDED.Description), + pgtable.Games.GameType.SET(pgtable.Games.EXCLUDED.GameType), + 
pgtable.Games.OwnerUserID.SET(pgtable.Games.EXCLUDED.OwnerUserID), + pgtable.Games.Status.SET(pgtable.Games.EXCLUDED.Status), + pgtable.Games.MinPlayers.SET(pgtable.Games.EXCLUDED.MinPlayers), + pgtable.Games.MaxPlayers.SET(pgtable.Games.EXCLUDED.MaxPlayers), + pgtable.Games.StartGapHours.SET(pgtable.Games.EXCLUDED.StartGapHours), + pgtable.Games.StartGapPlayers.SET(pgtable.Games.EXCLUDED.StartGapPlayers), + pgtable.Games.EnrollmentEndsAt.SET(pgtable.Games.EXCLUDED.EnrollmentEndsAt), + pgtable.Games.TurnSchedule.SET(pgtable.Games.EXCLUDED.TurnSchedule), + pgtable.Games.TargetEngineVersion.SET(pgtable.Games.EXCLUDED.TargetEngineVersion), + pgtable.Games.UpdatedAt.SET(pgtable.Games.EXCLUDED.UpdatedAt), + pgtable.Games.StartedAt.SET(pgtable.Games.EXCLUDED.StartedAt), + pgtable.Games.FinishedAt.SET(pgtable.Games.EXCLUDED.FinishedAt), + pgtable.Games.RuntimeSnapshot.SET(pgtable.Games.EXCLUDED.RuntimeSnapshot), + pgtable.Games.RuntimeBinding.SET(pgtable.Games.EXCLUDED.RuntimeBinding), + ), + ) + + query, args := stmt.Sql() + if _, err := store.db.ExecContext(operationCtx, query, args...); err != nil { + return fmt.Errorf("save game: %w", err) + } + return nil +} + +// Get returns the record identified by gameID. It returns +// game.ErrNotFound when no record exists. +func (store *Store) Get(ctx context.Context, gameID common.GameID) (game.Game, error) { + if store == nil || store.db == nil { + return game.Game{}, errors.New("get game: nil store") + } + if err := gameID.Validate(); err != nil { + return game.Game{}, fmt.Errorf("get game: %w", err) + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "get game", store.operationTimeout) + if err != nil { + return game.Game{}, err + } + defer cancel() + + stmt := pg.SELECT(gameSelectColumns). + FROM(pgtable.Games). + WHERE(pgtable.Games.GameID.EQ(pg.String(gameID.String()))) + + query, args := stmt.Sql() + row := store.db.QueryRowContext(operationCtx, query, args...) 
+ record, err := scanGame(row) + if sqlx.IsNoRows(err) { + return game.Game{}, game.ErrNotFound + } + if err != nil { + return game.Game{}, fmt.Errorf("get game: %w", err) + } + return record, nil +} + +// GetByStatus returns every record whose status equals status. Records are +// sorted by created_at DESC then game_id DESC, matching the most-recent-first +// ordering Lobby's listing services expect. +func (store *Store) GetByStatus(ctx context.Context, status game.Status) ([]game.Game, error) { + if store == nil || store.db == nil { + return nil, errors.New("get games by status: nil store") + } + if !status.IsKnown() { + return nil, fmt.Errorf("get games by status: status %q is unsupported", status) + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "get games by status", store.operationTimeout) + if err != nil { + return nil, err + } + defer cancel() + + stmt := pg.SELECT(gameSelectColumns). + FROM(pgtable.Games). + WHERE(pgtable.Games.Status.EQ(pg.String(string(status)))). + ORDER_BY(pgtable.Games.CreatedAt.DESC(), pgtable.Games.GameID.DESC()) + + query, args := stmt.Sql() + rows, err := store.db.QueryContext(operationCtx, query, args...) + if err != nil { + return nil, fmt.Errorf("get games by status: %w", err) + } + defer rows.Close() + + records, err := scanAllGames(rows) + if err != nil { + return nil, fmt.Errorf("get games by status: %w", err) + } + return records, nil +} + +// CountByStatus returns the number of records under each known status. +func (store *Store) CountByStatus(ctx context.Context) (map[game.Status]int, error) { + if store == nil || store.db == nil { + return nil, errors.New("count games by status: nil store") + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "count games by status", store.operationTimeout) + if err != nil { + return nil, err + } + defer cancel() + + countAlias := pg.COUNT(pg.STAR).AS("count") + stmt := pg.SELECT(pgtable.Games.Status, countAlias). + FROM(pgtable.Games). 
+ GROUP_BY(pgtable.Games.Status) + + query, args := stmt.Sql() + rows, err := store.db.QueryContext(operationCtx, query, args...) + if err != nil { + return nil, fmt.Errorf("count games by status: %w", err) + } + defer rows.Close() + + counts := make(map[game.Status]int, len(game.AllStatuses())) + for _, status := range game.AllStatuses() { + counts[status] = 0 + } + for rows.Next() { + var status string + var count int + if err := rows.Scan(&status, &count); err != nil { + return nil, fmt.Errorf("count games by status: scan: %w", err) + } + counts[game.Status(status)] = count + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("count games by status: %w", err) + } + return counts, nil +} + +// GetByOwner returns every record whose owner_user_id equals userID. The +// underlying `games_owner_idx` is partial (game_type = 'private'); public +// games carry an empty owner_user_id and are excluded from the index, matching +// the Redis-backed behaviour. +func (store *Store) GetByOwner(ctx context.Context, userID string) ([]game.Game, error) { + if store == nil || store.db == nil { + return nil, errors.New("get games by owner: nil store") + } + trimmed := strings.TrimSpace(userID) + if trimmed == "" { + return nil, fmt.Errorf("get games by owner: user id must not be empty") + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "get games by owner", store.operationTimeout) + if err != nil { + return nil, err + } + defer cancel() + + stmt := pg.SELECT(gameSelectColumns). + FROM(pgtable.Games). + WHERE(pgtable.Games.OwnerUserID.EQ(pg.String(trimmed))). + ORDER_BY(pgtable.Games.CreatedAt.DESC(), pgtable.Games.GameID.DESC()) + + query, args := stmt.Sql() + rows, err := store.db.QueryContext(operationCtx, query, args...) 
+ if err != nil { + return nil, fmt.Errorf("get games by owner: %w", err) + } + defer rows.Close() + + records, err := scanAllGames(rows) + if err != nil { + return nil, fmt.Errorf("get games by owner: %w", err) + } + return records, nil +} + +// UpdateStatus applies one status transition with compare-and-swap on the +// current status column. The domain transition gate runs before any SQL +// touch. +func (store *Store) UpdateStatus(ctx context.Context, input ports.UpdateStatusInput) error { + if store == nil || store.db == nil { + return errors.New("update game status: nil store") + } + if err := input.Validate(); err != nil { + return fmt.Errorf("update game status: %w", err) + } + if err := game.Transition(input.ExpectedFrom, input.To, input.Trigger); err != nil { + return err + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "update game status", store.operationTimeout) + if err != nil { + return err + } + defer cancel() + + at := input.At.UTC() + var startedAt, finishedAt any + if input.To == game.StatusRunning { + startedAt = at + } + if input.To == game.StatusFinished { + finishedAt = at + } + + // COALESCE keeps the existing started_at/finished_at when the new value + // is NULL (the bind parameter is nil unless we are entering the + // running/finished state for the first time). 
+ startedExpr := pg.COALESCE(pgtable.Games.StartedAt, pg.TimestampzT(at)) + if startedAt == nil { + startedExpr = pgtable.Games.StartedAt + } + finishedExpr := pg.COALESCE(pgtable.Games.FinishedAt, pg.TimestampzT(at)) + if finishedAt == nil { + finishedExpr = pgtable.Games.FinishedAt + } + + stmt := pgtable.Games.UPDATE( + pgtable.Games.Status, + pgtable.Games.UpdatedAt, + pgtable.Games.StartedAt, + pgtable.Games.FinishedAt, + ).SET( + pg.String(string(input.To)), + pg.TimestampzT(at), + startedExpr, + finishedExpr, + ).WHERE(pg.AND( + pgtable.Games.GameID.EQ(pg.String(input.GameID.String())), + pgtable.Games.Status.EQ(pg.String(string(input.ExpectedFrom))), + )) + + query, args := stmt.Sql() + result, err := store.db.ExecContext(operationCtx, query, args...) + if err != nil { + return fmt.Errorf("update game status: %w", err) + } + affected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("update game status: rows affected: %w", err) + } + if affected == 0 { + // distinguish "not found" from "status mismatch" with a follow-up read + probe := pg.SELECT(pgtable.Games.Status). + FROM(pgtable.Games). + WHERE(pgtable.Games.GameID.EQ(pg.String(input.GameID.String()))) + probeQuery, probeArgs := probe.Sql() + + var current string + row := store.db.QueryRowContext(operationCtx, probeQuery, probeArgs...) + if err := row.Scan(¤t); err != nil { + if sqlx.IsNoRows(err) { + return game.ErrNotFound + } + return fmt.Errorf("update game status: probe: %w", err) + } + return fmt.Errorf("update game status: %w", game.ErrConflict) + } + return nil +} + +// UpdateRuntimeSnapshot overwrites the denormalised runtime snapshot fields. 
+func (store *Store) UpdateRuntimeSnapshot(ctx context.Context, input ports.UpdateRuntimeSnapshotInput) error { + if store == nil || store.db == nil { + return errors.New("update runtime snapshot: nil store") + } + if err := input.Validate(); err != nil { + return fmt.Errorf("update runtime snapshot: %w", err) + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "update runtime snapshot", store.operationTimeout) + if err != nil { + return err + } + defer cancel() + + snapshot, err := marshalRuntimeSnapshot(input.Snapshot) + if err != nil { + return fmt.Errorf("update runtime snapshot: %w", err) + } + at := input.At.UTC() + + stmt := pgtable.Games.UPDATE(pgtable.Games.RuntimeSnapshot, pgtable.Games.UpdatedAt). + SET(snapshot, at). + WHERE(pgtable.Games.GameID.EQ(pg.String(input.GameID.String()))) + + query, args := stmt.Sql() + result, err := store.db.ExecContext(operationCtx, query, args...) + if err != nil { + return fmt.Errorf("update runtime snapshot: %w", err) + } + affected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("update runtime snapshot: rows affected: %w", err) + } + if affected == 0 { + return game.ErrNotFound + } + return nil +} + +// UpdateRuntimeBinding overwrites the runtime binding metadata. +func (store *Store) UpdateRuntimeBinding(ctx context.Context, input ports.UpdateRuntimeBindingInput) error { + if store == nil || store.db == nil { + return errors.New("update runtime binding: nil store") + } + if err := input.Validate(); err != nil { + return fmt.Errorf("update runtime binding: %w", err) + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "update runtime binding", store.operationTimeout) + if err != nil { + return err + } + defer cancel() + + binding := input.Binding + encoded, err := marshalRuntimeBinding(&binding) + if err != nil { + return fmt.Errorf("update runtime binding: %w", err) + } + at := input.At.UTC() + + stmt := pgtable.Games.UPDATE(pgtable.Games.RuntimeBinding, pgtable.Games.UpdatedAt). 
+ SET(encoded, at). + WHERE(pgtable.Games.GameID.EQ(pg.String(input.GameID.String()))) + + query, args := stmt.Sql() + result, err := store.db.ExecContext(operationCtx, query, args...) + if err != nil { + return fmt.Errorf("update runtime binding: %w", err) + } + affected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("update runtime binding: rows affected: %w", err) + } + if affected == 0 { + return game.ErrNotFound + } + return nil +} + +// rowScanner abstracts *sql.Row and *sql.Rows so scanGame can be shared +// across both single-row reads and iterated reads. +type rowScanner interface { + Scan(dest ...any) error +} + +// scanGame scans one games row from rs. Returns sql.ErrNoRows verbatim so +// callers can distinguish "no row" from a hard error. +func scanGame(rs rowScanner) (game.Game, error) { + var ( + gameID string + gameName string + description string + gameType string + ownerUserID string + status string + minPlayers int + maxPlayers int + startGapHours int + startGapPlayers int + enrollmentEndsAt time.Time + turnSchedule string + targetEngineVersion string + createdAt time.Time + updatedAt time.Time + startedAt sql.NullTime + finishedAt sql.NullTime + runtimeSnapshot []byte + runtimeBinding []byte + ) + if err := rs.Scan( + &gameID, + &gameName, + &description, + &gameType, + &ownerUserID, + &status, + &minPlayers, + &maxPlayers, + &startGapHours, + &startGapPlayers, + &enrollmentEndsAt, + &turnSchedule, + &targetEngineVersion, + &createdAt, + &updatedAt, + &startedAt, + &finishedAt, + &runtimeSnapshot, + &runtimeBinding, + ); err != nil { + return game.Game{}, err + } + + snapshot, err := unmarshalRuntimeSnapshot(runtimeSnapshot) + if err != nil { + return game.Game{}, err + } + binding, err := unmarshalRuntimeBinding(runtimeBinding) + if err != nil { + return game.Game{}, err + } + + return game.Game{ + GameID: common.GameID(gameID), + GameName: gameName, + Description: description, + GameType: game.GameType(gameType), + 
OwnerUserID: ownerUserID, + Status: game.Status(status), + MinPlayers: minPlayers, + MaxPlayers: maxPlayers, + StartGapHours: startGapHours, + StartGapPlayers: startGapPlayers, + EnrollmentEndsAt: enrollmentEndsAt.UTC(), + TurnSchedule: turnSchedule, + TargetEngineVersion: targetEngineVersion, + CreatedAt: createdAt.UTC(), + UpdatedAt: updatedAt.UTC(), + StartedAt: sqlx.TimePtrFromNullable(startedAt), + FinishedAt: sqlx.TimePtrFromNullable(finishedAt), + RuntimeSnapshot: snapshot, + RuntimeBinding: binding, + }, nil +} + +func scanAllGames(rows *sql.Rows) ([]game.Game, error) { + records := make([]game.Game, 0) + for rows.Next() { + record, err := scanGame(rows) + if err != nil { + return nil, fmt.Errorf("scan: %w", err) + } + records = append(records, record) + } + if err := rows.Err(); err != nil { + return nil, err + } + if len(records) == 0 { + return nil, nil + } + return records, nil +} + +// Ensure Store satisfies the ports.GameStore interface at compile time. +var _ ports.GameStore = (*Store)(nil) diff --git a/lobby/internal/adapters/postgres/gamestore/store_test.go b/lobby/internal/adapters/postgres/gamestore/store_test.go new file mode 100644 index 0000000..0c1d291 --- /dev/null +++ b/lobby/internal/adapters/postgres/gamestore/store_test.go @@ -0,0 +1,338 @@ +package gamestore_test + +import ( + "context" + "testing" + "time" + + "galaxy/lobby/internal/adapters/postgres/gamestore" + "galaxy/lobby/internal/adapters/postgres/internal/pgtest" + "galaxy/lobby/internal/domain/common" + "galaxy/lobby/internal/domain/game" + "galaxy/lobby/internal/ports" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMain(m *testing.M) { pgtest.RunMain(m) } + +func newStore(t *testing.T) *gamestore.Store { + t.Helper() + pgtest.TruncateAll(t) + store, err := gamestore.New(gamestore.Config{ + DB: pgtest.Ensure(t).Pool(), + OperationTimeout: pgtest.OperationTimeout, + }) + require.NoError(t, err) + return store +} + +func 
fixturePublicGame(t *testing.T, id string) game.Game { + t.Helper() + now := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC) + record, err := game.New(game.NewGameInput{ + GameID: common.GameID(id), + GameName: "Spring Classic " + id, + Description: "first public game", + GameType: game.GameTypePublic, + MinPlayers: 4, + MaxPlayers: 8, + StartGapHours: 24, + StartGapPlayers: 2, + EnrollmentEndsAt: now.Add(7 * 24 * time.Hour), + TurnSchedule: "0 18 * * *", + TargetEngineVersion: "v1.2.3", + Now: now, + }) + require.NoError(t, err) + return record +} + +func fixturePrivateGame(t *testing.T, id, ownerID string) game.Game { + t.Helper() + now := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC) + record, err := game.New(game.NewGameInput{ + GameID: common.GameID(id), + GameName: "Private " + id, + GameType: game.GameTypePrivate, + OwnerUserID: ownerID, + MinPlayers: 2, + MaxPlayers: 6, + StartGapHours: 12, + StartGapPlayers: 2, + EnrollmentEndsAt: now.Add(7 * 24 * time.Hour), + TurnSchedule: "0 18 * * *", + TargetEngineVersion: "v1.0.0", + Now: now, + }) + require.NoError(t, err) + return record +} + +func TestSaveAndGet(t *testing.T) { + ctx := context.Background() + store := newStore(t) + + record := fixturePublicGame(t, "game-001") + require.NoError(t, store.Save(ctx, record)) + + got, err := store.Get(ctx, record.GameID) + require.NoError(t, err) + assert.Equal(t, record.GameID, got.GameID) + assert.Equal(t, record.GameName, got.GameName) + assert.Equal(t, record.Status, got.Status) + assert.Equal(t, record.MinPlayers, got.MinPlayers) + assert.Equal(t, record.MaxPlayers, got.MaxPlayers) + assert.True(t, record.EnrollmentEndsAt.Equal(got.EnrollmentEndsAt)) + assert.Equal(t, time.UTC, got.CreatedAt.Location()) + assert.Equal(t, time.UTC, got.UpdatedAt.Location()) +} + +func TestGetReturnsNotFound(t *testing.T) { + ctx := context.Background() + store := newStore(t) + _, err := store.Get(ctx, common.GameID("game-missing-x")) + require.ErrorIs(t, err, game.ErrNotFound) +} + 
+func TestSaveIsUpsert(t *testing.T) { + ctx := context.Background() + store := newStore(t) + + record := fixturePublicGame(t, "game-001") + require.NoError(t, store.Save(ctx, record)) + + // edit a few fields, save again + record.GameName = "Renamed" + record.UpdatedAt = record.UpdatedAt.Add(time.Minute) + require.NoError(t, store.Save(ctx, record)) + + got, err := store.Get(ctx, record.GameID) + require.NoError(t, err) + assert.Equal(t, "Renamed", got.GameName) +} + +func TestUpdateStatusHappyPath(t *testing.T) { + ctx := context.Background() + store := newStore(t) + + record := fixturePublicGame(t, "game-001") + require.NoError(t, store.Save(ctx, record)) + + require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{ + GameID: record.GameID, + ExpectedFrom: game.StatusDraft, + To: game.StatusEnrollmentOpen, + Trigger: game.TriggerCommand, + At: record.UpdatedAt.Add(time.Minute), + })) + + got, err := store.Get(ctx, record.GameID) + require.NoError(t, err) + assert.Equal(t, game.StatusEnrollmentOpen, got.Status) +} + +func TestUpdateStatusReturnsConflictOnExpectedFromMismatch(t *testing.T) { + ctx := context.Background() + store := newStore(t) + + record := fixturePublicGame(t, "game-001") + require.NoError(t, store.Save(ctx, record)) + + err := store.UpdateStatus(ctx, ports.UpdateStatusInput{ + GameID: record.GameID, + ExpectedFrom: game.StatusEnrollmentOpen, // wrong + To: game.StatusReadyToStart, + Trigger: game.TriggerManual, + At: record.UpdatedAt.Add(time.Minute), + }) + require.ErrorIs(t, err, game.ErrConflict) +} + +func TestUpdateStatusReturnsNotFoundForMissing(t *testing.T) { + ctx := context.Background() + store := newStore(t) + + err := store.UpdateStatus(ctx, ports.UpdateStatusInput{ + GameID: common.GameID("game-missing-x"), + ExpectedFrom: game.StatusDraft, + To: game.StatusEnrollmentOpen, + Trigger: game.TriggerCommand, + At: time.Now().UTC(), + }) + require.ErrorIs(t, err, game.ErrNotFound) +} + +func 
TestUpdateStatusSetsStartedAtOnRunning(t *testing.T) { + ctx := context.Background() + store := newStore(t) + + record := fixturePublicGame(t, "game-001") + require.NoError(t, store.Save(ctx, record)) + advance := func(from, to game.Status, trigger game.Trigger, at time.Time) { + require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{ + GameID: record.GameID, ExpectedFrom: from, To: to, Trigger: trigger, At: at, + })) + } + now := record.UpdatedAt.Add(time.Minute) + advance(game.StatusDraft, game.StatusEnrollmentOpen, game.TriggerCommand, now) + advance(game.StatusEnrollmentOpen, game.StatusReadyToStart, game.TriggerManual, now.Add(time.Minute)) + advance(game.StatusReadyToStart, game.StatusStarting, game.TriggerCommand, now.Add(2*time.Minute)) + startedAt := now.Add(3 * time.Minute) + advance(game.StatusStarting, game.StatusRunning, game.TriggerRuntimeEvent, startedAt) + + got, err := store.Get(ctx, record.GameID) + require.NoError(t, err) + assert.Equal(t, game.StatusRunning, got.Status) + require.NotNil(t, got.StartedAt) + assert.True(t, got.StartedAt.Equal(startedAt)) +} + +func TestGetByStatusReturnsExpectedRecords(t *testing.T) { + ctx := context.Background() + store := newStore(t) + + a := fixturePublicGame(t, "game-aaa") + b := fixturePublicGame(t, "game-bbb") + c := fixturePublicGame(t, "game-ccc") + for _, r := range []game.Game{a, b, c} { + require.NoError(t, store.Save(ctx, r)) + } + require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{ + GameID: b.GameID, + ExpectedFrom: game.StatusDraft, + To: game.StatusEnrollmentOpen, + Trigger: game.TriggerCommand, + At: b.UpdatedAt.Add(time.Minute), + })) + + drafts, err := store.GetByStatus(ctx, game.StatusDraft) + require.NoError(t, err) + gotIDs := map[common.GameID]struct{}{} + for _, r := range drafts { + gotIDs[r.GameID] = struct{}{} + } + assert.Contains(t, gotIDs, a.GameID) + assert.Contains(t, gotIDs, c.GameID) + assert.NotContains(t, gotIDs, b.GameID) + + open, err := 
store.GetByStatus(ctx, game.StatusEnrollmentOpen) + require.NoError(t, err) + require.Len(t, open, 1) + assert.Equal(t, b.GameID, open[0].GameID) +} + +func TestGetByOwnerOnlyReturnsPrivateGames(t *testing.T) { + ctx := context.Background() + store := newStore(t) + + owner := "user-123" + pub := fixturePublicGame(t, "game-pub-001") + priv1 := fixturePrivateGame(t, "game-priv-001", owner) + priv2 := fixturePrivateGame(t, "game-priv-002", owner) + priv3 := fixturePrivateGame(t, "game-priv-003", "user-other") + for _, r := range []game.Game{pub, priv1, priv2, priv3} { + require.NoError(t, store.Save(ctx, r)) + } + + got, err := store.GetByOwner(ctx, owner) + require.NoError(t, err) + ids := map[common.GameID]struct{}{} + for _, r := range got { + ids[r.GameID] = struct{}{} + } + assert.Contains(t, ids, priv1.GameID) + assert.Contains(t, ids, priv2.GameID) + assert.NotContains(t, ids, priv3.GameID) + assert.NotContains(t, ids, pub.GameID) +} + +func TestCountByStatusIncludesAllBuckets(t *testing.T) { + ctx := context.Background() + store := newStore(t) + + require.NoError(t, store.Save(ctx, fixturePublicGame(t, "game-aaa"))) + require.NoError(t, store.Save(ctx, fixturePublicGame(t, "game-bbb"))) + + counts, err := store.CountByStatus(ctx) + require.NoError(t, err) + for _, status := range game.AllStatuses() { + _, ok := counts[status] + assert.Truef(t, ok, "missing bucket for %q", status) + } + assert.Equal(t, 2, counts[game.StatusDraft]) +} + +func TestUpdateRuntimeSnapshotRoundTripsValues(t *testing.T) { + ctx := context.Background() + store := newStore(t) + + record := fixturePublicGame(t, "game-001") + require.NoError(t, store.Save(ctx, record)) + + snapshot := game.RuntimeSnapshot{ + CurrentTurn: 42, + RuntimeStatus: "running_accepting_commands", + EngineHealthSummary: "ok", + } + require.NoError(t, store.UpdateRuntimeSnapshot(ctx, ports.UpdateRuntimeSnapshotInput{ + GameID: record.GameID, + Snapshot: snapshot, + At: record.UpdatedAt.Add(time.Minute), + })) + + 
got, err := store.Get(ctx, record.GameID) + require.NoError(t, err) + assert.Equal(t, snapshot, got.RuntimeSnapshot) +} + +func TestUpdateRuntimeBindingRoundTripsValues(t *testing.T) { + ctx := context.Background() + store := newStore(t) + + record := fixturePublicGame(t, "game-001") + require.NoError(t, store.Save(ctx, record)) + + at := record.UpdatedAt.Add(time.Minute) + require.NoError(t, store.UpdateRuntimeBinding(ctx, ports.UpdateRuntimeBindingInput{ + GameID: record.GameID, + Binding: game.RuntimeBinding{ + ContainerID: "container-7", + EngineEndpoint: "10.0.0.5:9000", + RuntimeJobID: "1700000000-0", + BoundAt: at, + }, + At: at, + })) + + got, err := store.Get(ctx, record.GameID) + require.NoError(t, err) + require.NotNil(t, got.RuntimeBinding) + assert.Equal(t, "container-7", got.RuntimeBinding.ContainerID) + assert.Equal(t, "10.0.0.5:9000", got.RuntimeBinding.EngineEndpoint) + assert.Equal(t, "1700000000-0", got.RuntimeBinding.RuntimeJobID) + assert.True(t, got.RuntimeBinding.BoundAt.Equal(at)) + assert.Equal(t, time.UTC, got.RuntimeBinding.BoundAt.Location()) +} + +func TestUpdateRuntimeSnapshotReturnsNotFoundForMissing(t *testing.T) { + ctx := context.Background() + store := newStore(t) + err := store.UpdateRuntimeSnapshot(ctx, ports.UpdateRuntimeSnapshotInput{ + GameID: common.GameID("game-missing-x"), + Snapshot: game.RuntimeSnapshot{CurrentTurn: 1}, + At: time.Now().UTC(), + }) + require.ErrorIs(t, err, game.ErrNotFound) +} + +func TestNewRejectsNilDB(t *testing.T) { + _, err := gamestore.New(gamestore.Config{OperationTimeout: time.Second}) + require.Error(t, err) +} + +func TestNewRejectsNonPositiveTimeout(t *testing.T) { + _, err := gamestore.New(gamestore.Config{DB: pgtest.Ensure(t).Pool()}) + require.Error(t, err) +} diff --git a/lobby/internal/adapters/postgres/internal/pgtest/pgtest.go b/lobby/internal/adapters/postgres/internal/pgtest/pgtest.go new file mode 100644 index 0000000..e1cdead --- /dev/null +++ 
b/lobby/internal/adapters/postgres/internal/pgtest/pgtest.go @@ -0,0 +1,208 @@ +// Package pgtest exposes the testcontainers-backed PostgreSQL bootstrap +// shared by every Game Lobby PG adapter test. The package is regular Go +// code — not a `_test.go` file — so it can be imported by the `_test.go` +// files in the four sibling store packages (`gamestore`, `applicationstore`, +// `invitestore`, `membershipstore`). +// +// No production code in `cmd/lobby` or in the runtime imports this package. +// The testcontainers-go dependency therefore stays out of the production +// binary's import graph. +package pgtest + +import ( + "context" + "database/sql" + "net/url" + "os" + "sync" + "testing" + "time" + + "galaxy/lobby/internal/adapters/postgres/migrations" + "galaxy/postgres" + + testcontainers "github.com/testcontainers/testcontainers-go" + tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres" + "github.com/testcontainers/testcontainers-go/wait" +) + +const ( + postgresImage = "postgres:16-alpine" + superUser = "galaxy" + superPassword = "galaxy" + superDatabase = "galaxy_lobby" + serviceRole = "lobbyservice" + servicePassword = "lobbyservice" + serviceSchema = "lobby" + containerStartup = 90 * time.Second + + // OperationTimeout is the per-statement timeout used by every store + // constructed via NewStoreConfig. Tests may pass a smaller value if they + // need to assert deadline behaviour explicitly. + OperationTimeout = 10 * time.Second +) + +// Env holds the per-process container plus the *sql.DB pool already +// provisioned with the lobby schema, role, and migrations applied. +type Env struct { + container *tcpostgres.PostgresContainer + pool *sql.DB +} + +// Pool returns the shared pool. Tests truncate per-table state before each +// run via TruncateAll. 
+func (env *Env) Pool() *sql.DB { return env.pool } + +var ( + once sync.Once + cur *Env + curEr error +) + +// Ensure starts the PostgreSQL container on first invocation and applies +// the embedded goose migrations. Subsequent invocations reuse the same +// container/pool. When Docker is unavailable Ensure calls t.Skip with the +// underlying error so the test suite still passes on machines without +// Docker. +func Ensure(t testing.TB) *Env { + t.Helper() + once.Do(func() { + cur, curEr = start() + }) + if curEr != nil { + t.Skipf("postgres container start failed (Docker unavailable?): %v", curEr) + } + return cur +} + +// TruncateAll wipes every Game Lobby table inside the shared pool, leaving +// the schema and indexes intact. Use it from each test that needs a clean +// slate. +func TruncateAll(t testing.TB) { + t.Helper() + env := Ensure(t) + const stmt = `TRUNCATE TABLE memberships, invites, applications, games, race_names RESTART IDENTITY CASCADE` + if _, err := env.pool.ExecContext(context.Background(), stmt); err != nil { + t.Fatalf("truncate lobby tables: %v", err) + } +} + +// Shutdown terminates the shared container and closes the pool. It is +// invoked from each test package's TestMain after `m.Run` returns so the +// container is released even if individual tests panic. +func Shutdown() { + if cur == nil { + return + } + if cur.pool != nil { + _ = cur.pool.Close() + } + if cur.container != nil { + _ = testcontainers.TerminateContainer(cur.container) + } + cur = nil +} + +// RunMain is a convenience helper for each store package's TestMain: it +// runs the test main, captures the exit code, shuts the container down, and +// exits. Wiring it through one helper keeps every TestMain to two lines. 
+func RunMain(m *testing.M) { + code := m.Run() + Shutdown() + os.Exit(code) +} + +func start() (*Env, error) { + ctx := context.Background() + container, err := tcpostgres.Run(ctx, postgresImage, + tcpostgres.WithDatabase(superDatabase), + tcpostgres.WithUsername(superUser), + tcpostgres.WithPassword(superPassword), + testcontainers.WithWaitStrategy( + wait.ForLog("database system is ready to accept connections"). + WithOccurrence(2). + WithStartupTimeout(containerStartup), + ), + ) + if err != nil { + return nil, err + } + baseDSN, err := container.ConnectionString(ctx, "sslmode=disable") + if err != nil { + _ = testcontainers.TerminateContainer(container) + return nil, err + } + if err := provisionRoleAndSchema(ctx, baseDSN); err != nil { + _ = testcontainers.TerminateContainer(container) + return nil, err + } + scopedDSN, err := dsnForServiceRole(baseDSN) + if err != nil { + _ = testcontainers.TerminateContainer(container) + return nil, err + } + cfg := postgres.DefaultConfig() + cfg.PrimaryDSN = scopedDSN + cfg.OperationTimeout = OperationTimeout + pool, err := postgres.OpenPrimary(ctx, cfg) + if err != nil { + _ = testcontainers.TerminateContainer(container) + return nil, err + } + if err := postgres.Ping(ctx, pool, OperationTimeout); err != nil { + _ = pool.Close() + _ = testcontainers.TerminateContainer(container) + return nil, err + } + if err := postgres.RunMigrations(ctx, pool, migrations.FS(), "."); err != nil { + _ = pool.Close() + _ = testcontainers.TerminateContainer(container) + return nil, err + } + return &Env{container: container, pool: pool}, nil +} + +func provisionRoleAndSchema(ctx context.Context, baseDSN string) error { + cfg := postgres.DefaultConfig() + cfg.PrimaryDSN = baseDSN + cfg.OperationTimeout = OperationTimeout + db, err := postgres.OpenPrimary(ctx, cfg) + if err != nil { + return err + } + defer func() { _ = db.Close() }() + + statements := []string{ + `DO $$ BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 
'lobbyservice') THEN + CREATE ROLE lobbyservice LOGIN PASSWORD 'lobbyservice'; + END IF; + END $$;`, + `CREATE SCHEMA IF NOT EXISTS lobby AUTHORIZATION lobbyservice;`, + `GRANT USAGE ON SCHEMA lobby TO lobbyservice;`, + } + for _, statement := range statements { + if _, err := db.ExecContext(ctx, statement); err != nil { + return err + } + } + return nil +} + +func dsnForServiceRole(baseDSN string) (string, error) { + parsed, err := url.Parse(baseDSN) + if err != nil { + return "", err + } + values := url.Values{} + values.Set("search_path", serviceSchema) + values.Set("sslmode", "disable") + scoped := url.URL{ + Scheme: parsed.Scheme, + User: url.UserPassword(serviceRole, servicePassword), + Host: parsed.Host, + Path: parsed.Path, + RawQuery: values.Encode(), + } + return scoped.String(), nil +} diff --git a/lobby/internal/adapters/postgres/internal/sqlx/sqlx.go b/lobby/internal/adapters/postgres/internal/sqlx/sqlx.go new file mode 100644 index 0000000..f528017 --- /dev/null +++ b/lobby/internal/adapters/postgres/internal/sqlx/sqlx.go @@ -0,0 +1,96 @@ +// Package sqlx contains the small set of helpers shared by every Game Lobby +// PostgreSQL adapter (gamestore, applicationstore, invitestore, +// membershipstore). The helpers centralise the boundary translations from +// the per-service ARCHITECTURE.md timestamp-handling rules and from the pgx +// SQLSTATE codes the adapters interpret as domain conflicts. +package sqlx + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + "github.com/jackc/pgx/v5/pgconn" +) + +// PgUniqueViolationCode identifies the SQLSTATE returned by PostgreSQL when +// a UNIQUE constraint is violated by INSERT or UPDATE. +const PgUniqueViolationCode = "23505" + +// IsUniqueViolation reports whether err is a PostgreSQL unique-violation, +// regardless of constraint name. 
+func IsUniqueViolation(err error) bool { + var pgErr *pgconn.PgError + if !errors.As(err, &pgErr) { + return false + } + return pgErr.Code == PgUniqueViolationCode +} + +// IsNoRows reports whether err is sql.ErrNoRows. +func IsNoRows(err error) bool { + return errors.Is(err, sql.ErrNoRows) +} + +// NullableTime returns t.UTC() when non-zero, otherwise nil so the column +// is bound as SQL NULL. Several Lobby domain records use *time.Time to +// express absent timestamps; for those, callers translate the pointer with +// NullableTimePtr instead. +func NullableTime(t time.Time) any { + if t.IsZero() { + return nil + } + return t.UTC() +} + +// NullableTimePtr returns t.UTC() when t is non-nil and non-zero, otherwise +// nil. The helper is the *time.Time companion of NullableTime: every Lobby +// domain record has at least one optional `*time.Time` field +// (`StartedAt`, `FinishedAt`, `DecidedAt`, `RemovedAt`) that maps to a +// nullable timestamptz column. +func NullableTimePtr(t *time.Time) any { + if t == nil { + return nil + } + return NullableTime(*t) +} + +// TimeFromNullable copies an optional sql.NullTime read from PostgreSQL +// into a domain time.Time, applying the global UTC normalisation rule. +// Invalid (NULL) values become the zero time.Time. +func TimeFromNullable(value sql.NullTime) time.Time { + if !value.Valid { + return time.Time{} + } + return value.Time.UTC() +} + +// TimePtrFromNullable copies an optional sql.NullTime into a domain +// *time.Time. NULL becomes nil; non-NULL values are wrapped after UTC +// normalisation. +func TimePtrFromNullable(value sql.NullTime) *time.Time { + if !value.Valid { + return nil + } + t := value.Time.UTC() + return &t +} + +// WithTimeout derives a child context bounded by timeout and prefixes +// context errors with operation. Callers must always invoke the returned +// cancel. 
+func WithTimeout(ctx context.Context, operation string, timeout time.Duration) (context.Context, context.CancelFunc, error) { + if ctx == nil { + return nil, nil, fmt.Errorf("%s: nil context", operation) + } + if err := ctx.Err(); err != nil { + return nil, nil, fmt.Errorf("%s: %w", operation, err) + } + if timeout <= 0 { + return nil, nil, fmt.Errorf("%s: operation timeout must be positive", operation) + } + bounded, cancel := context.WithTimeout(ctx, timeout) + return bounded, cancel, nil +} diff --git a/lobby/internal/adapters/postgres/invitestore/store.go b/lobby/internal/adapters/postgres/invitestore/store.go new file mode 100644 index 0000000..62ca822 --- /dev/null +++ b/lobby/internal/adapters/postgres/invitestore/store.go @@ -0,0 +1,348 @@ +// Package invitestore implements the PostgreSQL-backed adapter for +// `ports.InviteStore`. +// +// PG_PLAN.md §6A migrates Game Lobby Service away from Redis-backed durable +// invite records. +package invitestore + +import ( + "context" + "database/sql" + "errors" + "fmt" + "strings" + "time" + + "galaxy/lobby/internal/adapters/postgres/internal/sqlx" + pgtable "galaxy/lobby/internal/adapters/postgres/jet/lobby/table" + "galaxy/lobby/internal/domain/common" + "galaxy/lobby/internal/domain/invite" + "galaxy/lobby/internal/ports" + + pg "github.com/go-jet/jet/v2/postgres" +) + +// Config configures one PostgreSQL-backed invite store instance. +type Config struct { + DB *sql.DB + OperationTimeout time.Duration +} + +// Store persists Game Lobby invite records in PostgreSQL. +type Store struct { + db *sql.DB + operationTimeout time.Duration +} + +// New constructs one PostgreSQL-backed invite store from cfg. 
+func New(cfg Config) (*Store, error) { + if cfg.DB == nil { + return nil, errors.New("new postgres invite store: db must not be nil") + } + if cfg.OperationTimeout <= 0 { + return nil, errors.New("new postgres invite store: operation timeout must be positive") + } + return &Store{ + db: cfg.DB, + operationTimeout: cfg.OperationTimeout, + }, nil +} + +// inviteSelectColumns is the canonical SELECT list for the invites table, +// matching scanInvite's column order. +var inviteSelectColumns = pg.ColumnList{ + pgtable.Invites.InviteID, + pgtable.Invites.GameID, + pgtable.Invites.InviterUserID, + pgtable.Invites.InviteeUserID, + pgtable.Invites.RaceName, + pgtable.Invites.Status, + pgtable.Invites.CreatedAt, + pgtable.Invites.ExpiresAt, + pgtable.Invites.DecidedAt, +} + +// Save persists a new created invite record. Save is create-only; a second +// save against the same invite id maps the unique-violation to +// invite.ErrConflict. +func (store *Store) Save(ctx context.Context, record invite.Invite) error { + if store == nil || store.db == nil { + return errors.New("save invite: nil store") + } + if err := record.Validate(); err != nil { + return fmt.Errorf("save invite: %w", err) + } + if record.Status != invite.StatusCreated { + return fmt.Errorf( + "save invite: status must be %q, got %q", + invite.StatusCreated, record.Status, + ) + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "save invite", store.operationTimeout) + if err != nil { + return err + } + defer cancel() + + stmt := pgtable.Invites.INSERT( + pgtable.Invites.InviteID, + pgtable.Invites.GameID, + pgtable.Invites.InviterUserID, + pgtable.Invites.InviteeUserID, + pgtable.Invites.RaceName, + pgtable.Invites.Status, + pgtable.Invites.CreatedAt, + pgtable.Invites.ExpiresAt, + pgtable.Invites.DecidedAt, + ).VALUES( + record.InviteID.String(), + record.GameID.String(), + record.InviterUserID, + record.InviteeUserID, + record.RaceName, + string(record.Status), + record.CreatedAt.UTC(), + 
record.ExpiresAt.UTC(), + sqlx.NullableTimePtr(record.DecidedAt), + ) + + query, args := stmt.Sql() + if _, err := store.db.ExecContext(operationCtx, query, args...); err != nil { + if sqlx.IsUniqueViolation(err) { + return fmt.Errorf("save invite: %w", invite.ErrConflict) + } + return fmt.Errorf("save invite: %w", err) + } + return nil +} + +// Get returns the record identified by inviteID. +func (store *Store) Get(ctx context.Context, inviteID common.InviteID) (invite.Invite, error) { + if store == nil || store.db == nil { + return invite.Invite{}, errors.New("get invite: nil store") + } + if err := inviteID.Validate(); err != nil { + return invite.Invite{}, fmt.Errorf("get invite: %w", err) + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "get invite", store.operationTimeout) + if err != nil { + return invite.Invite{}, err + } + defer cancel() + + stmt := pg.SELECT(inviteSelectColumns). + FROM(pgtable.Invites). + WHERE(pgtable.Invites.InviteID.EQ(pg.String(inviteID.String()))) + + query, args := stmt.Sql() + row := store.db.QueryRowContext(operationCtx, query, args...) + record, err := scanInvite(row) + if sqlx.IsNoRows(err) { + return invite.Invite{}, invite.ErrNotFound + } + if err != nil { + return invite.Invite{}, fmt.Errorf("get invite: %w", err) + } + return record, nil +} + +// GetByGame returns every invite attached to gameID. +func (store *Store) GetByGame(ctx context.Context, gameID common.GameID) ([]invite.Invite, error) { + if store == nil || store.db == nil { + return nil, errors.New("get invites by game: nil store") + } + if err := gameID.Validate(); err != nil { + return nil, fmt.Errorf("get invites by game: %w", err) + } + + stmt := pg.SELECT(inviteSelectColumns). + FROM(pgtable.Invites). + WHERE(pgtable.Invites.GameID.EQ(pg.String(gameID.String()))). 
+ ORDER_BY(pgtable.Invites.CreatedAt.ASC(), pgtable.Invites.InviteID.ASC()) + + return store.queryList(ctx, "get invites by game", stmt) +} + +// GetByUser returns every invite addressed to inviteeUserID. +func (store *Store) GetByUser(ctx context.Context, inviteeUserID string) ([]invite.Invite, error) { + if store == nil || store.db == nil { + return nil, errors.New("get invites by user: nil store") + } + trimmed := strings.TrimSpace(inviteeUserID) + if trimmed == "" { + return nil, fmt.Errorf("get invites by user: invitee user id must not be empty") + } + + stmt := pg.SELECT(inviteSelectColumns). + FROM(pgtable.Invites). + WHERE(pgtable.Invites.InviteeUserID.EQ(pg.String(trimmed))). + ORDER_BY(pgtable.Invites.CreatedAt.ASC(), pgtable.Invites.InviteID.ASC()) + + return store.queryList(ctx, "get invites by user", stmt) +} + +// GetByInviter returns every invite created by inviterUserID. +func (store *Store) GetByInviter(ctx context.Context, inviterUserID string) ([]invite.Invite, error) { + if store == nil || store.db == nil { + return nil, errors.New("get invites by inviter: nil store") + } + trimmed := strings.TrimSpace(inviterUserID) + if trimmed == "" { + return nil, fmt.Errorf("get invites by inviter: inviter user id must not be empty") + } + + stmt := pg.SELECT(inviteSelectColumns). + FROM(pgtable.Invites). + WHERE(pgtable.Invites.InviterUserID.EQ(pg.String(trimmed))). + ORDER_BY(pgtable.Invites.CreatedAt.ASC(), pgtable.Invites.InviteID.ASC()) + + return store.queryList(ctx, "get invites by inviter", stmt) +} + +func (store *Store) queryList(ctx context.Context, operation string, stmt pg.SelectStatement) ([]invite.Invite, error) { + operationCtx, cancel, err := sqlx.WithTimeout(ctx, operation, store.operationTimeout) + if err != nil { + return nil, err + } + defer cancel() + + query, args := stmt.Sql() + rows, err := store.db.QueryContext(operationCtx, query, args...) 
+ if err != nil { + return nil, fmt.Errorf("%s: %w", operation, err) + } + defer rows.Close() + + records := make([]invite.Invite, 0) + for rows.Next() { + record, err := scanInvite(rows) + if err != nil { + return nil, fmt.Errorf("%s: scan: %w", operation, err) + } + records = append(records, record) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("%s: %w", operation, err) + } + if len(records) == 0 { + return nil, nil + } + return records, nil +} + +// UpdateStatus applies one status transition with compare-and-swap on the +// current status column. When transitioning to redeemed the row's race_name +// column is replaced with the trimmed input value. +func (store *Store) UpdateStatus(ctx context.Context, input ports.UpdateInviteStatusInput) error { + if store == nil || store.db == nil { + return errors.New("update invite status: nil store") + } + if err := input.Validate(); err != nil { + return fmt.Errorf("update invite status: %w", err) + } + if err := invite.Transition(input.ExpectedFrom, input.To); err != nil { + return err + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "update invite status", store.operationTimeout) + if err != nil { + return err + } + defer cancel() + + at := input.At.UTC() + raceName := strings.TrimSpace(input.RaceName) + + // race_name is replaced only when the caller supplies a non-empty value; + // otherwise the existing value is preserved (CASE WHEN '' THEN race_name). + raceExpr := pg.CASE(). + WHEN(pg.String(raceName).EQ(pg.String(""))).THEN(pgtable.Invites.RaceName). 
+		ELSE(pg.String(raceName))
+
+	stmt := pgtable.Invites.UPDATE(
+		pgtable.Invites.Status,
+		pgtable.Invites.DecidedAt,
+		pgtable.Invites.RaceName,
+	).SET(
+		pg.String(string(input.To)),
+		pg.TimestampzT(at),
+		raceExpr,
+	).WHERE(pg.AND(
+		pgtable.Invites.InviteID.EQ(pg.String(input.InviteID.String())),
+		pgtable.Invites.Status.EQ(pg.String(string(input.ExpectedFrom))),
+	))
+
+	query, args := stmt.Sql()
+	result, err := store.db.ExecContext(operationCtx, query, args...)
+	if err != nil {
+		return fmt.Errorf("update invite status: %w", err)
+	}
+	affected, err := result.RowsAffected()
+	if err != nil {
+		return fmt.Errorf("update invite status: rows affected: %w", err)
+	}
+	if affected == 0 {
+		probe := pg.SELECT(pgtable.Invites.Status).
+			FROM(pgtable.Invites).
+			WHERE(pgtable.Invites.InviteID.EQ(pg.String(input.InviteID.String())))
+		probeQuery, probeArgs := probe.Sql()
+
+		var current string
+		row := store.db.QueryRowContext(operationCtx, probeQuery, probeArgs...)
+		if err := row.Scan(&current); err != nil {
+			if sqlx.IsNoRows(err) {
+				return invite.ErrNotFound
+			}
+			return fmt.Errorf("update invite status: probe: %w", err)
+		}
+		return fmt.Errorf("update invite status: %w", invite.ErrConflict)
+	}
+	return nil
+}
+
+type rowScanner interface {
+	Scan(dest ...any) error
+}
+
+func scanInvite(rs rowScanner) (invite.Invite, error) {
+	var (
+		inviteID      string
+		gameID        string
+		inviterUserID string
+		inviteeUserID string
+		raceName      string
+		status        string
+		createdAt     time.Time
+		expiresAt     time.Time
+		decidedAt     sql.NullTime
+	)
+	if err := rs.Scan(
+		&inviteID,
+		&gameID,
+		&inviterUserID,
+		&inviteeUserID,
+		&raceName,
+		&status,
+		&createdAt,
+		&expiresAt,
+		&decidedAt,
+	); err != nil {
+		return invite.Invite{}, err
+	}
+	return invite.Invite{
+		InviteID:      common.InviteID(inviteID),
+		GameID:        common.GameID(gameID),
+		InviterUserID: inviterUserID,
+		InviteeUserID: inviteeUserID,
+		RaceName:      raceName,
+		Status:        invite.Status(status),
+		CreatedAt:     createdAt.UTC(),
ExpiresAt: expiresAt.UTC(), + DecidedAt: sqlx.TimePtrFromNullable(decidedAt), + }, nil +} + +// Ensure Store satisfies the ports.InviteStore interface at compile time. +var _ ports.InviteStore = (*Store)(nil) diff --git a/lobby/internal/adapters/postgres/invitestore/store_test.go b/lobby/internal/adapters/postgres/invitestore/store_test.go new file mode 100644 index 0000000..adb8c3c --- /dev/null +++ b/lobby/internal/adapters/postgres/invitestore/store_test.go @@ -0,0 +1,199 @@ +package invitestore_test + +import ( + "context" + "testing" + "time" + + "galaxy/lobby/internal/adapters/postgres/gamestore" + "galaxy/lobby/internal/adapters/postgres/internal/pgtest" + "galaxy/lobby/internal/adapters/postgres/invitestore" + "galaxy/lobby/internal/domain/common" + "galaxy/lobby/internal/domain/game" + "galaxy/lobby/internal/domain/invite" + "galaxy/lobby/internal/ports" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMain(m *testing.M) { pgtest.RunMain(m) } + +func newStores(t *testing.T) (*gamestore.Store, *invitestore.Store) { + t.Helper() + pgtest.TruncateAll(t) + gs, err := gamestore.New(gamestore.Config{ + DB: pgtest.Ensure(t).Pool(), OperationTimeout: pgtest.OperationTimeout, + }) + require.NoError(t, err) + is, err := invitestore.New(invitestore.Config{ + DB: pgtest.Ensure(t).Pool(), OperationTimeout: pgtest.OperationTimeout, + }) + require.NoError(t, err) + return gs, is +} + +func seedPrivateGame(t *testing.T, gs *gamestore.Store, id, ownerID string) game.Game { + t.Helper() + now := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC) + g, err := game.New(game.NewGameInput{ + GameID: common.GameID(id), + GameName: "Private " + id, + GameType: game.GameTypePrivate, + OwnerUserID: ownerID, + MinPlayers: 2, + MaxPlayers: 6, + StartGapHours: 12, + StartGapPlayers: 2, + EnrollmentEndsAt: now.Add(7 * 24 * time.Hour), + TurnSchedule: "0 18 * * *", + TargetEngineVersion: "v1.0.0", + Now: now, + }) + require.NoError(t, err) + 
require.NoError(t, gs.Save(context.Background(), g)) + return g +} + +func newInvite(t *testing.T, id, gameID, inviter, invitee string) invite.Invite { + t.Helper() + now := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC) + rec, err := invite.New(invite.NewInviteInput{ + InviteID: common.InviteID(id), + GameID: common.GameID(gameID), + InviterUserID: inviter, + InviteeUserID: invitee, + Now: now, + ExpiresAt: now.Add(7 * 24 * time.Hour), + }) + require.NoError(t, err) + return rec +} + +func TestSaveAndGet(t *testing.T) { + ctx := context.Background() + gs, is := newStores(t) + seedPrivateGame(t, gs, "game-001", "owner-1") + + rec := newInvite(t, "invite-001", "game-001", "owner-1", "invitee-1") + require.NoError(t, is.Save(ctx, rec)) + + got, err := is.Get(ctx, rec.InviteID) + require.NoError(t, err) + assert.Equal(t, rec.InviteID, got.InviteID) + assert.Equal(t, invite.StatusCreated, got.Status) + assert.Equal(t, "invitee-1", got.InviteeUserID) + assert.True(t, got.ExpiresAt.Equal(rec.ExpiresAt)) +} + +func TestSaveRejectsNonCreated(t *testing.T) { + ctx := context.Background() + gs, is := newStores(t) + seedPrivateGame(t, gs, "game-001", "owner-1") + + rec := newInvite(t, "invite-001", "game-001", "owner-1", "invitee-1") + rec.Status = invite.StatusRedeemed + require.Error(t, is.Save(ctx, rec)) +} + +func TestSaveDuplicateReturnsConflict(t *testing.T) { + ctx := context.Background() + gs, is := newStores(t) + seedPrivateGame(t, gs, "game-001", "owner-1") + + rec := newInvite(t, "invite-001", "game-001", "owner-1", "invitee-1") + require.NoError(t, is.Save(ctx, rec)) + err := is.Save(ctx, rec) + require.ErrorIs(t, err, invite.ErrConflict) +} + +func TestUpdateStatusRedeemSetsRaceName(t *testing.T) { + ctx := context.Background() + gs, is := newStores(t) + seedPrivateGame(t, gs, "game-001", "owner-1") + + rec := newInvite(t, "invite-001", "game-001", "owner-1", "invitee-1") + require.NoError(t, is.Save(ctx, rec)) + + require.NoError(t, is.UpdateStatus(ctx, 
ports.UpdateInviteStatusInput{ + InviteID: rec.InviteID, + ExpectedFrom: invite.StatusCreated, + To: invite.StatusRedeemed, + At: rec.CreatedAt.Add(time.Minute), + RaceName: "PilotRedeemed", + })) + + got, err := is.Get(ctx, rec.InviteID) + require.NoError(t, err) + assert.Equal(t, invite.StatusRedeemed, got.Status) + assert.Equal(t, "PilotRedeemed", got.RaceName) + require.NotNil(t, got.DecidedAt) +} + +func TestUpdateStatusReturnsConflictOnExpectedFromMismatch(t *testing.T) { + ctx := context.Background() + gs, is := newStores(t) + seedPrivateGame(t, gs, "game-001", "owner-1") + + rec := newInvite(t, "invite-001", "game-001", "owner-1", "invitee-1") + require.NoError(t, is.Save(ctx, rec)) + + // Move row out of `created` so the next attempt's `WHERE status = ?` + // fails on persistence even though the (created → revoked) transition is + // itself valid in the domain table. + require.NoError(t, is.UpdateStatus(ctx, ports.UpdateInviteStatusInput{ + InviteID: rec.InviteID, + ExpectedFrom: invite.StatusCreated, + To: invite.StatusDeclined, + At: rec.CreatedAt.Add(time.Minute), + })) + err := is.UpdateStatus(ctx, ports.UpdateInviteStatusInput{ + InviteID: rec.InviteID, + ExpectedFrom: invite.StatusCreated, + To: invite.StatusRevoked, + At: rec.CreatedAt.Add(2 * time.Minute), + }) + require.ErrorIs(t, err, invite.ErrConflict) +} + +func TestUpdateStatusReturnsNotFoundForMissing(t *testing.T) { + ctx := context.Background() + _, is := newStores(t) + err := is.UpdateStatus(ctx, ports.UpdateInviteStatusInput{ + InviteID: common.InviteID("invite-missing"), + ExpectedFrom: invite.StatusCreated, + To: invite.StatusDeclined, + At: time.Now().UTC(), + }) + require.ErrorIs(t, err, invite.ErrNotFound) +} + +func TestGetByGameUserInviter(t *testing.T) { + ctx := context.Background() + gs, is := newStores(t) + seedPrivateGame(t, gs, "game-001", "owner-1") + seedPrivateGame(t, gs, "game-002", "owner-2") + + require.NoError(t, is.Save(ctx, newInvite(t, "invite-001", "game-001", 
"owner-1", "invitee-1"))) + require.NoError(t, is.Save(ctx, newInvite(t, "invite-002", "game-001", "owner-1", "invitee-2"))) + require.NoError(t, is.Save(ctx, newInvite(t, "invite-003", "game-002", "owner-2", "invitee-1"))) + + g1, err := is.GetByGame(ctx, common.GameID("game-001")) + require.NoError(t, err) + assert.Len(t, g1, 2) + + user1, err := is.GetByUser(ctx, "invitee-1") + require.NoError(t, err) + assert.Len(t, user1, 2) + + by1, err := is.GetByInviter(ctx, "owner-1") + require.NoError(t, err) + assert.Len(t, by1, 2) +} + +func TestGetMissingReturnsNotFound(t *testing.T) { + ctx := context.Background() + _, is := newStores(t) + _, err := is.Get(ctx, common.InviteID("invite-missing")) + require.ErrorIs(t, err, invite.ErrNotFound) +} diff --git a/lobby/internal/adapters/postgres/jet/lobby/model/applications.go b/lobby/internal/adapters/postgres/jet/lobby/model/applications.go new file mode 100644 index 0000000..9bda399 --- /dev/null +++ b/lobby/internal/adapters/postgres/jet/lobby/model/applications.go @@ -0,0 +1,22 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type Applications struct { + ApplicationID string `sql:"primary_key"` + GameID string + ApplicantUserID string + RaceName string + Status string + CreatedAt time.Time + DecidedAt *time.Time +} diff --git a/lobby/internal/adapters/postgres/jet/lobby/model/games.go b/lobby/internal/adapters/postgres/jet/lobby/model/games.go new file mode 100644 index 0000000..69482df --- /dev/null +++ b/lobby/internal/adapters/postgres/jet/lobby/model/games.go @@ -0,0 +1,34 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type Games struct { + GameID string `sql:"primary_key"` + GameName string + Description string + GameType string + OwnerUserID string + Status string + MinPlayers int32 + MaxPlayers int32 + StartGapHours int32 + StartGapPlayers int32 + EnrollmentEndsAt time.Time + TurnSchedule string + TargetEngineVersion string + CreatedAt time.Time + UpdatedAt time.Time + StartedAt *time.Time + FinishedAt *time.Time + RuntimeSnapshot string + RuntimeBinding *string +} diff --git a/lobby/internal/adapters/postgres/jet/lobby/model/goose_db_version.go b/lobby/internal/adapters/postgres/jet/lobby/model/goose_db_version.go new file mode 100644 index 0000000..c7f68e8 --- /dev/null +++ b/lobby/internal/adapters/postgres/jet/lobby/model/goose_db_version.go @@ -0,0 +1,19 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type GooseDbVersion struct { + ID int32 `sql:"primary_key"` + VersionID int64 + IsApplied bool + Tstamp time.Time +} diff --git a/lobby/internal/adapters/postgres/jet/lobby/model/invites.go b/lobby/internal/adapters/postgres/jet/lobby/model/invites.go new file mode 100644 index 0000000..982fee4 --- /dev/null +++ b/lobby/internal/adapters/postgres/jet/lobby/model/invites.go @@ -0,0 +1,24 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type Invites struct { + InviteID string `sql:"primary_key"` + GameID string + InviterUserID string + InviteeUserID string + RaceName string + Status string + CreatedAt time.Time + ExpiresAt time.Time + DecidedAt *time.Time +} diff --git a/lobby/internal/adapters/postgres/jet/lobby/model/memberships.go b/lobby/internal/adapters/postgres/jet/lobby/model/memberships.go new file mode 100644 index 0000000..4751bab --- /dev/null +++ b/lobby/internal/adapters/postgres/jet/lobby/model/memberships.go @@ -0,0 +1,23 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type Memberships struct { + MembershipID string `sql:"primary_key"` + GameID string + UserID string + RaceName string + CanonicalKey string + Status string + JoinedAt time.Time + RemovedAt *time.Time +} diff --git a/lobby/internal/adapters/postgres/jet/lobby/model/race_names.go b/lobby/internal/adapters/postgres/jet/lobby/model/race_names.go new file mode 100644 index 0000000..3cd5909 --- /dev/null +++ b/lobby/internal/adapters/postgres/jet/lobby/model/race_names.go @@ -0,0 +1,20 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +type RaceNames struct { + CanonicalKey string `sql:"primary_key"` + GameID string `sql:"primary_key"` + HolderUserID string + RaceName string + BindingKind string + SourceGameID string + ReservedAtMs int64 + EligibleUntilMs *int64 + RegisteredAtMs *int64 +} diff --git a/lobby/internal/adapters/postgres/jet/lobby/table/applications.go b/lobby/internal/adapters/postgres/jet/lobby/table/applications.go new file mode 100644 index 0000000..26e0e3f --- /dev/null +++ b/lobby/internal/adapters/postgres/jet/lobby/table/applications.go @@ -0,0 +1,96 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Applications = newApplicationsTable("lobby", "applications", "") + +type applicationsTable struct { + postgres.Table + + // Columns + ApplicationID postgres.ColumnString + GameID postgres.ColumnString + ApplicantUserID postgres.ColumnString + RaceName postgres.ColumnString + Status postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + DecidedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type ApplicationsTable struct { + applicationsTable + + EXCLUDED applicationsTable +} + +// AS creates new ApplicationsTable with assigned alias +func (a ApplicationsTable) AS(alias string) *ApplicationsTable { + return newApplicationsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new ApplicationsTable with assigned schema name +func (a ApplicationsTable) FromSchema(schemaName string) *ApplicationsTable { + return newApplicationsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new ApplicationsTable with assigned table 
prefix +func (a ApplicationsTable) WithPrefix(prefix string) *ApplicationsTable { + return newApplicationsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new ApplicationsTable with assigned table suffix +func (a ApplicationsTable) WithSuffix(suffix string) *ApplicationsTable { + return newApplicationsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newApplicationsTable(schemaName, tableName, alias string) *ApplicationsTable { + return &ApplicationsTable{ + applicationsTable: newApplicationsTableImpl(schemaName, tableName, alias), + EXCLUDED: newApplicationsTableImpl("", "excluded", ""), + } +} + +func newApplicationsTableImpl(schemaName, tableName, alias string) applicationsTable { + var ( + ApplicationIDColumn = postgres.StringColumn("application_id") + GameIDColumn = postgres.StringColumn("game_id") + ApplicantUserIDColumn = postgres.StringColumn("applicant_user_id") + RaceNameColumn = postgres.StringColumn("race_name") + StatusColumn = postgres.StringColumn("status") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + DecidedAtColumn = postgres.TimestampzColumn("decided_at") + allColumns = postgres.ColumnList{ApplicationIDColumn, GameIDColumn, ApplicantUserIDColumn, RaceNameColumn, StatusColumn, CreatedAtColumn, DecidedAtColumn} + mutableColumns = postgres.ColumnList{GameIDColumn, ApplicantUserIDColumn, RaceNameColumn, StatusColumn, CreatedAtColumn, DecidedAtColumn} + defaultColumns = postgres.ColumnList{} + ) + + return applicationsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ApplicationID: ApplicationIDColumn, + GameID: GameIDColumn, + ApplicantUserID: ApplicantUserIDColumn, + RaceName: RaceNameColumn, + Status: StatusColumn, + CreatedAt: CreatedAtColumn, + DecidedAt: DecidedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git 
a/lobby/internal/adapters/postgres/jet/lobby/table/games.go b/lobby/internal/adapters/postgres/jet/lobby/table/games.go new file mode 100644 index 0000000..7413a1d --- /dev/null +++ b/lobby/internal/adapters/postgres/jet/lobby/table/games.go @@ -0,0 +1,132 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Games = newGamesTable("lobby", "games", "") + +type gamesTable struct { + postgres.Table + + // Columns + GameID postgres.ColumnString + GameName postgres.ColumnString + Description postgres.ColumnString + GameType postgres.ColumnString + OwnerUserID postgres.ColumnString + Status postgres.ColumnString + MinPlayers postgres.ColumnInteger + MaxPlayers postgres.ColumnInteger + StartGapHours postgres.ColumnInteger + StartGapPlayers postgres.ColumnInteger + EnrollmentEndsAt postgres.ColumnTimestampz + TurnSchedule postgres.ColumnString + TargetEngineVersion postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + StartedAt postgres.ColumnTimestampz + FinishedAt postgres.ColumnTimestampz + RuntimeSnapshot postgres.ColumnString + RuntimeBinding postgres.ColumnString + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type GamesTable struct { + gamesTable + + EXCLUDED gamesTable +} + +// AS creates new GamesTable with assigned alias +func (a GamesTable) AS(alias string) *GamesTable { + return newGamesTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new GamesTable with assigned schema name +func (a GamesTable) FromSchema(schemaName string) *GamesTable { + return newGamesTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new GamesTable with assigned table prefix +func (a GamesTable) WithPrefix(prefix string) *GamesTable { + return 
newGamesTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new GamesTable with assigned table suffix +func (a GamesTable) WithSuffix(suffix string) *GamesTable { + return newGamesTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newGamesTable(schemaName, tableName, alias string) *GamesTable { + return &GamesTable{ + gamesTable: newGamesTableImpl(schemaName, tableName, alias), + EXCLUDED: newGamesTableImpl("", "excluded", ""), + } +} + +func newGamesTableImpl(schemaName, tableName, alias string) gamesTable { + var ( + GameIDColumn = postgres.StringColumn("game_id") + GameNameColumn = postgres.StringColumn("game_name") + DescriptionColumn = postgres.StringColumn("description") + GameTypeColumn = postgres.StringColumn("game_type") + OwnerUserIDColumn = postgres.StringColumn("owner_user_id") + StatusColumn = postgres.StringColumn("status") + MinPlayersColumn = postgres.IntegerColumn("min_players") + MaxPlayersColumn = postgres.IntegerColumn("max_players") + StartGapHoursColumn = postgres.IntegerColumn("start_gap_hours") + StartGapPlayersColumn = postgres.IntegerColumn("start_gap_players") + EnrollmentEndsAtColumn = postgres.TimestampzColumn("enrollment_ends_at") + TurnScheduleColumn = postgres.StringColumn("turn_schedule") + TargetEngineVersionColumn = postgres.StringColumn("target_engine_version") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + StartedAtColumn = postgres.TimestampzColumn("started_at") + FinishedAtColumn = postgres.TimestampzColumn("finished_at") + RuntimeSnapshotColumn = postgres.StringColumn("runtime_snapshot") + RuntimeBindingColumn = postgres.StringColumn("runtime_binding") + allColumns = postgres.ColumnList{GameIDColumn, GameNameColumn, DescriptionColumn, GameTypeColumn, OwnerUserIDColumn, StatusColumn, MinPlayersColumn, MaxPlayersColumn, StartGapHoursColumn, StartGapPlayersColumn, EnrollmentEndsAtColumn, 
TurnScheduleColumn, TargetEngineVersionColumn, CreatedAtColumn, UpdatedAtColumn, StartedAtColumn, FinishedAtColumn, RuntimeSnapshotColumn, RuntimeBindingColumn} + mutableColumns = postgres.ColumnList{GameNameColumn, DescriptionColumn, GameTypeColumn, OwnerUserIDColumn, StatusColumn, MinPlayersColumn, MaxPlayersColumn, StartGapHoursColumn, StartGapPlayersColumn, EnrollmentEndsAtColumn, TurnScheduleColumn, TargetEngineVersionColumn, CreatedAtColumn, UpdatedAtColumn, StartedAtColumn, FinishedAtColumn, RuntimeSnapshotColumn, RuntimeBindingColumn} + defaultColumns = postgres.ColumnList{DescriptionColumn, OwnerUserIDColumn, RuntimeSnapshotColumn} + ) + + return gamesTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + GameID: GameIDColumn, + GameName: GameNameColumn, + Description: DescriptionColumn, + GameType: GameTypeColumn, + OwnerUserID: OwnerUserIDColumn, + Status: StatusColumn, + MinPlayers: MinPlayersColumn, + MaxPlayers: MaxPlayersColumn, + StartGapHours: StartGapHoursColumn, + StartGapPlayers: StartGapPlayersColumn, + EnrollmentEndsAt: EnrollmentEndsAtColumn, + TurnSchedule: TurnScheduleColumn, + TargetEngineVersion: TargetEngineVersionColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + StartedAt: StartedAtColumn, + FinishedAt: FinishedAtColumn, + RuntimeSnapshot: RuntimeSnapshotColumn, + RuntimeBinding: RuntimeBindingColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/lobby/internal/adapters/postgres/jet/lobby/table/goose_db_version.go b/lobby/internal/adapters/postgres/jet/lobby/table/goose_db_version.go new file mode 100644 index 0000000..4e954ca --- /dev/null +++ b/lobby/internal/adapters/postgres/jet/lobby/table/goose_db_version.go @@ -0,0 +1,87 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var GooseDbVersion = newGooseDbVersionTable("lobby", "goose_db_version", "") + +type gooseDbVersionTable struct { + postgres.Table + + // Columns + ID postgres.ColumnInteger + VersionID postgres.ColumnInteger + IsApplied postgres.ColumnBool + Tstamp postgres.ColumnTimestamp + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type GooseDbVersionTable struct { + gooseDbVersionTable + + EXCLUDED gooseDbVersionTable +} + +// AS creates new GooseDbVersionTable with assigned alias +func (a GooseDbVersionTable) AS(alias string) *GooseDbVersionTable { + return newGooseDbVersionTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new GooseDbVersionTable with assigned schema name +func (a GooseDbVersionTable) FromSchema(schemaName string) *GooseDbVersionTable { + return newGooseDbVersionTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new GooseDbVersionTable with assigned table prefix +func (a GooseDbVersionTable) WithPrefix(prefix string) *GooseDbVersionTable { + return newGooseDbVersionTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new GooseDbVersionTable with assigned table suffix +func (a GooseDbVersionTable) WithSuffix(suffix string) *GooseDbVersionTable { + return newGooseDbVersionTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newGooseDbVersionTable(schemaName, tableName, alias string) *GooseDbVersionTable { + return &GooseDbVersionTable{ + gooseDbVersionTable: newGooseDbVersionTableImpl(schemaName, tableName, alias), + EXCLUDED: newGooseDbVersionTableImpl("", "excluded", ""), + } +} + +func newGooseDbVersionTableImpl(schemaName, tableName, alias string) gooseDbVersionTable { + var ( + IDColumn = 
postgres.IntegerColumn("id") + VersionIDColumn = postgres.IntegerColumn("version_id") + IsAppliedColumn = postgres.BoolColumn("is_applied") + TstampColumn = postgres.TimestampColumn("tstamp") + allColumns = postgres.ColumnList{IDColumn, VersionIDColumn, IsAppliedColumn, TstampColumn} + mutableColumns = postgres.ColumnList{VersionIDColumn, IsAppliedColumn, TstampColumn} + defaultColumns = postgres.ColumnList{TstampColumn} + ) + + return gooseDbVersionTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ID: IDColumn, + VersionID: VersionIDColumn, + IsApplied: IsAppliedColumn, + Tstamp: TstampColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/lobby/internal/adapters/postgres/jet/lobby/table/invites.go b/lobby/internal/adapters/postgres/jet/lobby/table/invites.go new file mode 100644 index 0000000..7f96532 --- /dev/null +++ b/lobby/internal/adapters/postgres/jet/lobby/table/invites.go @@ -0,0 +1,102 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Invites = newInvitesTable("lobby", "invites", "") + +type invitesTable struct { + postgres.Table + + // Columns + InviteID postgres.ColumnString + GameID postgres.ColumnString + InviterUserID postgres.ColumnString + InviteeUserID postgres.ColumnString + RaceName postgres.ColumnString + Status postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + ExpiresAt postgres.ColumnTimestampz + DecidedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type InvitesTable struct { + invitesTable + + EXCLUDED invitesTable +} + +// AS creates new InvitesTable with assigned alias +func (a InvitesTable) AS(alias string) *InvitesTable { + return newInvitesTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new InvitesTable with assigned schema name +func (a InvitesTable) FromSchema(schemaName string) *InvitesTable { + return newInvitesTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new InvitesTable with assigned table prefix +func (a InvitesTable) WithPrefix(prefix string) *InvitesTable { + return newInvitesTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new InvitesTable with assigned table suffix +func (a InvitesTable) WithSuffix(suffix string) *InvitesTable { + return newInvitesTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newInvitesTable(schemaName, tableName, alias string) *InvitesTable { + return &InvitesTable{ + invitesTable: newInvitesTableImpl(schemaName, tableName, alias), + EXCLUDED: newInvitesTableImpl("", "excluded", ""), + } +} + +func newInvitesTableImpl(schemaName, tableName, alias string) invitesTable { + var ( + InviteIDColumn = postgres.StringColumn("invite_id") + 
GameIDColumn = postgres.StringColumn("game_id") + InviterUserIDColumn = postgres.StringColumn("inviter_user_id") + InviteeUserIDColumn = postgres.StringColumn("invitee_user_id") + RaceNameColumn = postgres.StringColumn("race_name") + StatusColumn = postgres.StringColumn("status") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + ExpiresAtColumn = postgres.TimestampzColumn("expires_at") + DecidedAtColumn = postgres.TimestampzColumn("decided_at") + allColumns = postgres.ColumnList{InviteIDColumn, GameIDColumn, InviterUserIDColumn, InviteeUserIDColumn, RaceNameColumn, StatusColumn, CreatedAtColumn, ExpiresAtColumn, DecidedAtColumn} + mutableColumns = postgres.ColumnList{GameIDColumn, InviterUserIDColumn, InviteeUserIDColumn, RaceNameColumn, StatusColumn, CreatedAtColumn, ExpiresAtColumn, DecidedAtColumn} + defaultColumns = postgres.ColumnList{RaceNameColumn} + ) + + return invitesTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + InviteID: InviteIDColumn, + GameID: GameIDColumn, + InviterUserID: InviterUserIDColumn, + InviteeUserID: InviteeUserIDColumn, + RaceName: RaceNameColumn, + Status: StatusColumn, + CreatedAt: CreatedAtColumn, + ExpiresAt: ExpiresAtColumn, + DecidedAt: DecidedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/lobby/internal/adapters/postgres/jet/lobby/table/memberships.go b/lobby/internal/adapters/postgres/jet/lobby/table/memberships.go new file mode 100644 index 0000000..9e562a3 --- /dev/null +++ b/lobby/internal/adapters/postgres/jet/lobby/table/memberships.go @@ -0,0 +1,99 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Memberships = newMembershipsTable("lobby", "memberships", "") + +type membershipsTable struct { + postgres.Table + + // Columns + MembershipID postgres.ColumnString + GameID postgres.ColumnString + UserID postgres.ColumnString + RaceName postgres.ColumnString + CanonicalKey postgres.ColumnString + Status postgres.ColumnString + JoinedAt postgres.ColumnTimestampz + RemovedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type MembershipsTable struct { + membershipsTable + + EXCLUDED membershipsTable +} + +// AS creates new MembershipsTable with assigned alias +func (a MembershipsTable) AS(alias string) *MembershipsTable { + return newMembershipsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new MembershipsTable with assigned schema name +func (a MembershipsTable) FromSchema(schemaName string) *MembershipsTable { + return newMembershipsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new MembershipsTable with assigned table prefix +func (a MembershipsTable) WithPrefix(prefix string) *MembershipsTable { + return newMembershipsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new MembershipsTable with assigned table suffix +func (a MembershipsTable) WithSuffix(suffix string) *MembershipsTable { + return newMembershipsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newMembershipsTable(schemaName, tableName, alias string) *MembershipsTable { + return &MembershipsTable{ + membershipsTable: newMembershipsTableImpl(schemaName, tableName, alias), + EXCLUDED: newMembershipsTableImpl("", "excluded", ""), + } +} + +func newMembershipsTableImpl(schemaName, tableName, alias string) 
membershipsTable { + var ( + MembershipIDColumn = postgres.StringColumn("membership_id") + GameIDColumn = postgres.StringColumn("game_id") + UserIDColumn = postgres.StringColumn("user_id") + RaceNameColumn = postgres.StringColumn("race_name") + CanonicalKeyColumn = postgres.StringColumn("canonical_key") + StatusColumn = postgres.StringColumn("status") + JoinedAtColumn = postgres.TimestampzColumn("joined_at") + RemovedAtColumn = postgres.TimestampzColumn("removed_at") + allColumns = postgres.ColumnList{MembershipIDColumn, GameIDColumn, UserIDColumn, RaceNameColumn, CanonicalKeyColumn, StatusColumn, JoinedAtColumn, RemovedAtColumn} + mutableColumns = postgres.ColumnList{GameIDColumn, UserIDColumn, RaceNameColumn, CanonicalKeyColumn, StatusColumn, JoinedAtColumn, RemovedAtColumn} + defaultColumns = postgres.ColumnList{} + ) + + return membershipsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + MembershipID: MembershipIDColumn, + GameID: GameIDColumn, + UserID: UserIDColumn, + RaceName: RaceNameColumn, + CanonicalKey: CanonicalKeyColumn, + Status: StatusColumn, + JoinedAt: JoinedAtColumn, + RemovedAt: RemovedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/lobby/internal/adapters/postgres/jet/lobby/table/race_names.go b/lobby/internal/adapters/postgres/jet/lobby/table/race_names.go new file mode 100644 index 0000000..1164664 --- /dev/null +++ b/lobby/internal/adapters/postgres/jet/lobby/table/race_names.go @@ -0,0 +1,102 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var RaceNames = newRaceNamesTable("lobby", "race_names", "") + +type raceNamesTable struct { + postgres.Table + + // Columns + CanonicalKey postgres.ColumnString + GameID postgres.ColumnString + HolderUserID postgres.ColumnString + RaceName postgres.ColumnString + BindingKind postgres.ColumnString + SourceGameID postgres.ColumnString + ReservedAtMs postgres.ColumnInteger + EligibleUntilMs postgres.ColumnInteger + RegisteredAtMs postgres.ColumnInteger + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type RaceNamesTable struct { + raceNamesTable + + EXCLUDED raceNamesTable +} + +// AS creates new RaceNamesTable with assigned alias +func (a RaceNamesTable) AS(alias string) *RaceNamesTable { + return newRaceNamesTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new RaceNamesTable with assigned schema name +func (a RaceNamesTable) FromSchema(schemaName string) *RaceNamesTable { + return newRaceNamesTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new RaceNamesTable with assigned table prefix +func (a RaceNamesTable) WithPrefix(prefix string) *RaceNamesTable { + return newRaceNamesTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new RaceNamesTable with assigned table suffix +func (a RaceNamesTable) WithSuffix(suffix string) *RaceNamesTable { + return newRaceNamesTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newRaceNamesTable(schemaName, tableName, alias string) *RaceNamesTable { + return &RaceNamesTable{ + raceNamesTable: newRaceNamesTableImpl(schemaName, tableName, alias), + EXCLUDED: newRaceNamesTableImpl("", "excluded", ""), + } +} + +func newRaceNamesTableImpl(schemaName, tableName, alias string) raceNamesTable { 
+ var ( + CanonicalKeyColumn = postgres.StringColumn("canonical_key") + GameIDColumn = postgres.StringColumn("game_id") + HolderUserIDColumn = postgres.StringColumn("holder_user_id") + RaceNameColumn = postgres.StringColumn("race_name") + BindingKindColumn = postgres.StringColumn("binding_kind") + SourceGameIDColumn = postgres.StringColumn("source_game_id") + ReservedAtMsColumn = postgres.IntegerColumn("reserved_at_ms") + EligibleUntilMsColumn = postgres.IntegerColumn("eligible_until_ms") + RegisteredAtMsColumn = postgres.IntegerColumn("registered_at_ms") + allColumns = postgres.ColumnList{CanonicalKeyColumn, GameIDColumn, HolderUserIDColumn, RaceNameColumn, BindingKindColumn, SourceGameIDColumn, ReservedAtMsColumn, EligibleUntilMsColumn, RegisteredAtMsColumn} + mutableColumns = postgres.ColumnList{HolderUserIDColumn, RaceNameColumn, BindingKindColumn, SourceGameIDColumn, ReservedAtMsColumn, EligibleUntilMsColumn, RegisteredAtMsColumn} + defaultColumns = postgres.ColumnList{GameIDColumn, SourceGameIDColumn, ReservedAtMsColumn} + ) + + return raceNamesTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + CanonicalKey: CanonicalKeyColumn, + GameID: GameIDColumn, + HolderUserID: HolderUserIDColumn, + RaceName: RaceNameColumn, + BindingKind: BindingKindColumn, + SourceGameID: SourceGameIDColumn, + ReservedAtMs: ReservedAtMsColumn, + EligibleUntilMs: EligibleUntilMsColumn, + RegisteredAtMs: RegisteredAtMsColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/lobby/internal/adapters/postgres/jet/lobby/table/table_use_schema.go b/lobby/internal/adapters/postgres/jet/lobby/table/table_use_schema.go new file mode 100644 index 0000000..11e6784 --- /dev/null +++ b/lobby/internal/adapters/postgres/jet/lobby/table/table_use_schema.go @@ -0,0 +1,19 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +// UseSchema sets a new schema name for all generated table SQL builder types. It is recommended to invoke +// this method only once at the beginning of the program. +func UseSchema(schema string) { + Applications = Applications.FromSchema(schema) + Games = Games.FromSchema(schema) + GooseDbVersion = GooseDbVersion.FromSchema(schema) + Invites = Invites.FromSchema(schema) + Memberships = Memberships.FromSchema(schema) + RaceNames = RaceNames.FromSchema(schema) +} diff --git a/lobby/internal/adapters/postgres/membershipstore/store.go b/lobby/internal/adapters/postgres/membershipstore/store.go new file mode 100644 index 0000000..f57b62f --- /dev/null +++ b/lobby/internal/adapters/postgres/membershipstore/store.go @@ -0,0 +1,346 @@ +// Package membershipstore implements the PostgreSQL-backed adapter for +// `ports.MembershipStore`. +// +// PG_PLAN.md §6A migrates Game Lobby Service away from Redis-backed durable +// membership records. +package membershipstore + +import ( + "context" + "database/sql" + "errors" + "fmt" + "strings" + "time" + + "galaxy/lobby/internal/adapters/postgres/internal/sqlx" + pgtable "galaxy/lobby/internal/adapters/postgres/jet/lobby/table" + "galaxy/lobby/internal/domain/common" + "galaxy/lobby/internal/domain/membership" + "galaxy/lobby/internal/ports" + + pg "github.com/go-jet/jet/v2/postgres" +) + +// Config configures one PostgreSQL-backed membership store instance. +type Config struct { + DB *sql.DB + OperationTimeout time.Duration +} + +// Store persists Game Lobby membership records in PostgreSQL. +type Store struct { + db *sql.DB + operationTimeout time.Duration +} + +// New constructs one PostgreSQL-backed membership store from cfg. 
+func New(cfg Config) (*Store, error) { + if cfg.DB == nil { + return nil, errors.New("new postgres membership store: db must not be nil") + } + if cfg.OperationTimeout <= 0 { + return nil, errors.New("new postgres membership store: operation timeout must be positive") + } + return &Store{ + db: cfg.DB, + operationTimeout: cfg.OperationTimeout, + }, nil +} + +// membershipSelectColumns is the canonical SELECT list for the memberships +// table, matching scanMembership's column order. +var membershipSelectColumns = pg.ColumnList{ + pgtable.Memberships.MembershipID, + pgtable.Memberships.GameID, + pgtable.Memberships.UserID, + pgtable.Memberships.RaceName, + pgtable.Memberships.CanonicalKey, + pgtable.Memberships.Status, + pgtable.Memberships.JoinedAt, + pgtable.Memberships.RemovedAt, +} + +// Save persists a new active membership record. Save is create-only; a +// second save against the same membership id maps the unique-violation to +// membership.ErrConflict. +func (store *Store) Save(ctx context.Context, record membership.Membership) error { + if store == nil || store.db == nil { + return errors.New("save membership: nil store") + } + if err := record.Validate(); err != nil { + return fmt.Errorf("save membership: %w", err) + } + if record.Status != membership.StatusActive { + return fmt.Errorf( + "save membership: status must be %q, got %q", + membership.StatusActive, record.Status, + ) + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "save membership", store.operationTimeout) + if err != nil { + return err + } + defer cancel() + + stmt := pgtable.Memberships.INSERT( + pgtable.Memberships.MembershipID, + pgtable.Memberships.GameID, + pgtable.Memberships.UserID, + pgtable.Memberships.RaceName, + pgtable.Memberships.CanonicalKey, + pgtable.Memberships.Status, + pgtable.Memberships.JoinedAt, + pgtable.Memberships.RemovedAt, + ).VALUES( + record.MembershipID.String(), + record.GameID.String(), + record.UserID, + record.RaceName, + record.CanonicalKey, + 
string(record.Status), + record.JoinedAt.UTC(), + sqlx.NullableTimePtr(record.RemovedAt), + ) + + query, args := stmt.Sql() + if _, err := store.db.ExecContext(operationCtx, query, args...); err != nil { + if sqlx.IsUniqueViolation(err) { + return fmt.Errorf("save membership: %w", membership.ErrConflict) + } + return fmt.Errorf("save membership: %w", err) + } + return nil +} + +// Get returns the record identified by membershipID. +func (store *Store) Get(ctx context.Context, membershipID common.MembershipID) (membership.Membership, error) { + if store == nil || store.db == nil { + return membership.Membership{}, errors.New("get membership: nil store") + } + if err := membershipID.Validate(); err != nil { + return membership.Membership{}, fmt.Errorf("get membership: %w", err) + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "get membership", store.operationTimeout) + if err != nil { + return membership.Membership{}, err + } + defer cancel() + + stmt := pg.SELECT(membershipSelectColumns). + FROM(pgtable.Memberships). + WHERE(pgtable.Memberships.MembershipID.EQ(pg.String(membershipID.String()))) + + query, args := stmt.Sql() + row := store.db.QueryRowContext(operationCtx, query, args...) + record, err := scanMembership(row) + if sqlx.IsNoRows(err) { + return membership.Membership{}, membership.ErrNotFound + } + if err != nil { + return membership.Membership{}, fmt.Errorf("get membership: %w", err) + } + return record, nil +} + +// GetByGame returns every membership attached to gameID. +func (store *Store) GetByGame(ctx context.Context, gameID common.GameID) ([]membership.Membership, error) { + if store == nil || store.db == nil { + return nil, errors.New("get memberships by game: nil store") + } + if err := gameID.Validate(); err != nil { + return nil, fmt.Errorf("get memberships by game: %w", err) + } + + stmt := pg.SELECT(membershipSelectColumns). + FROM(pgtable.Memberships). + WHERE(pgtable.Memberships.GameID.EQ(pg.String(gameID.String()))). 
+ ORDER_BY(pgtable.Memberships.JoinedAt.ASC(), pgtable.Memberships.MembershipID.ASC()) + + return store.queryList(ctx, "get memberships by game", stmt) +} + +// GetByUser returns every membership held by userID. +func (store *Store) GetByUser(ctx context.Context, userID string) ([]membership.Membership, error) { + if store == nil || store.db == nil { + return nil, errors.New("get memberships by user: nil store") + } + trimmed := strings.TrimSpace(userID) + if trimmed == "" { + return nil, fmt.Errorf("get memberships by user: user id must not be empty") + } + + stmt := pg.SELECT(membershipSelectColumns). + FROM(pgtable.Memberships). + WHERE(pgtable.Memberships.UserID.EQ(pg.String(trimmed))). + ORDER_BY(pgtable.Memberships.JoinedAt.ASC(), pgtable.Memberships.MembershipID.ASC()) + + return store.queryList(ctx, "get memberships by user", stmt) +} + +func (store *Store) queryList(ctx context.Context, operation string, stmt pg.SelectStatement) ([]membership.Membership, error) { + operationCtx, cancel, err := sqlx.WithTimeout(ctx, operation, store.operationTimeout) + if err != nil { + return nil, err + } + defer cancel() + + query, args := stmt.Sql() + rows, err := store.db.QueryContext(operationCtx, query, args...) + if err != nil { + return nil, fmt.Errorf("%s: %w", operation, err) + } + defer rows.Close() + + records := make([]membership.Membership, 0) + for rows.Next() { + record, err := scanMembership(rows) + if err != nil { + return nil, fmt.Errorf("%s: scan: %w", operation, err) + } + records = append(records, record) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("%s: %w", operation, err) + } + if len(records) == 0 { + return nil, nil + } + return records, nil +} + +// UpdateStatus applies one status transition with compare-and-swap on the +// current status column. RemovedAt is set to input.At when transitioning out +// of active. 
+func (store *Store) UpdateStatus(ctx context.Context, input ports.UpdateMembershipStatusInput) error { + if store == nil || store.db == nil { + return errors.New("update membership status: nil store") + } + if err := input.Validate(); err != nil { + return fmt.Errorf("update membership status: %w", err) + } + if err := membership.Transition(input.ExpectedFrom, input.To); err != nil { + return err + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "update membership status", store.operationTimeout) + if err != nil { + return err + } + defer cancel() + + at := input.At.UTC() + stmt := pgtable.Memberships.UPDATE(pgtable.Memberships.Status, pgtable.Memberships.RemovedAt). + SET(string(input.To), at). + WHERE(pg.AND( + pgtable.Memberships.MembershipID.EQ(pg.String(input.MembershipID.String())), + pgtable.Memberships.Status.EQ(pg.String(string(input.ExpectedFrom))), + )) + + query, args := stmt.Sql() + result, err := store.db.ExecContext(operationCtx, query, args...) + if err != nil { + return fmt.Errorf("update membership status: %w", err) + } + affected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("update membership status: rows affected: %w", err) + } + if affected == 0 { + probe := pg.SELECT(pgtable.Memberships.Status). + FROM(pgtable.Memberships). + WHERE(pgtable.Memberships.MembershipID.EQ(pg.String(input.MembershipID.String()))) + probeQuery, probeArgs := probe.Sql() + + var current string + row := store.db.QueryRowContext(operationCtx, probeQuery, probeArgs...) + if err := row.Scan(¤t); err != nil { + if sqlx.IsNoRows(err) { + return membership.ErrNotFound + } + return fmt.Errorf("update membership status: probe: %w", err) + } + return fmt.Errorf("update membership status: %w", membership.ErrConflict) + } + return nil +} + +// Delete removes the membership record identified by membershipID. The +// pre-start removemember path uses Delete; the post-start path uses +// UpdateStatus(active → removed). 
+func (store *Store) Delete(ctx context.Context, membershipID common.MembershipID) error { + if store == nil || store.db == nil { + return errors.New("delete membership: nil store") + } + if err := membershipID.Validate(); err != nil { + return fmt.Errorf("delete membership: %w", err) + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "delete membership", store.operationTimeout) + if err != nil { + return err + } + defer cancel() + + stmt := pgtable.Memberships.DELETE(). + WHERE(pgtable.Memberships.MembershipID.EQ(pg.String(membershipID.String()))) + + query, args := stmt.Sql() + result, err := store.db.ExecContext(operationCtx, query, args...) + if err != nil { + return fmt.Errorf("delete membership: %w", err) + } + affected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("delete membership: rows affected: %w", err) + } + if affected == 0 { + return membership.ErrNotFound + } + return nil +} + +type rowScanner interface { + Scan(dest ...any) error +} + +func scanMembership(rs rowScanner) (membership.Membership, error) { + var ( + membershipID string + gameID string + userID string + raceName string + canonicalKey string + status string + joinedAt time.Time + removedAt sql.NullTime + ) + if err := rs.Scan( + &membershipID, + &gameID, + &userID, + &raceName, + &canonicalKey, + &status, + &joinedAt, + &removedAt, + ); err != nil { + return membership.Membership{}, err + } + return membership.Membership{ + MembershipID: common.MembershipID(membershipID), + GameID: common.GameID(gameID), + UserID: userID, + RaceName: raceName, + CanonicalKey: canonicalKey, + Status: membership.Status(status), + JoinedAt: joinedAt.UTC(), + RemovedAt: sqlx.TimePtrFromNullable(removedAt), + }, nil +} + +// Ensure Store satisfies the ports.MembershipStore interface at compile +// time. 
+var _ ports.MembershipStore = (*Store)(nil) diff --git a/lobby/internal/adapters/postgres/membershipstore/store_test.go b/lobby/internal/adapters/postgres/membershipstore/store_test.go new file mode 100644 index 0000000..3107bd3 --- /dev/null +++ b/lobby/internal/adapters/postgres/membershipstore/store_test.go @@ -0,0 +1,213 @@ +package membershipstore_test + +import ( + "context" + "testing" + "time" + + "galaxy/lobby/internal/adapters/postgres/gamestore" + "galaxy/lobby/internal/adapters/postgres/internal/pgtest" + "galaxy/lobby/internal/adapters/postgres/membershipstore" + "galaxy/lobby/internal/domain/common" + "galaxy/lobby/internal/domain/game" + "galaxy/lobby/internal/domain/membership" + "galaxy/lobby/internal/ports" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMain(m *testing.M) { pgtest.RunMain(m) } + +func newStores(t *testing.T) (*gamestore.Store, *membershipstore.Store) { + t.Helper() + pgtest.TruncateAll(t) + gs, err := gamestore.New(gamestore.Config{ + DB: pgtest.Ensure(t).Pool(), OperationTimeout: pgtest.OperationTimeout, + }) + require.NoError(t, err) + ms, err := membershipstore.New(membershipstore.Config{ + DB: pgtest.Ensure(t).Pool(), OperationTimeout: pgtest.OperationTimeout, + }) + require.NoError(t, err) + return gs, ms +} + +func seedGame(t *testing.T, gs *gamestore.Store, id string) game.Game { + t.Helper() + now := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC) + g, err := game.New(game.NewGameInput{ + GameID: common.GameID(id), + GameName: "G " + id, + GameType: game.GameTypePublic, + MinPlayers: 2, + MaxPlayers: 8, + StartGapHours: 12, + StartGapPlayers: 2, + EnrollmentEndsAt: now.Add(7 * 24 * time.Hour), + TurnSchedule: "0 18 * * *", + TargetEngineVersion: "v1.0.0", + Now: now, + }) + require.NoError(t, err) + require.NoError(t, gs.Save(context.Background(), g)) + return g +} + +func newMembership(t *testing.T, id, gameID, userID, race, canon string) membership.Membership { + t.Helper() 
+ now := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC) + rec, err := membership.New(membership.NewMembershipInput{ + MembershipID: common.MembershipID(id), + GameID: common.GameID(gameID), + UserID: userID, + RaceName: race, + CanonicalKey: canon, + Now: now, + }) + require.NoError(t, err) + return rec +} + +func TestSaveAndGet(t *testing.T) { + ctx := context.Background() + gs, ms := newStores(t) + seedGame(t, gs, "game-001") + + rec := newMembership(t, "membership-001", "game-001", "user-a", "Pilot Alpha", "pilot-alpha") + require.NoError(t, ms.Save(ctx, rec)) + + got, err := ms.Get(ctx, rec.MembershipID) + require.NoError(t, err) + assert.Equal(t, rec.MembershipID, got.MembershipID) + assert.Equal(t, "Pilot Alpha", got.RaceName) + assert.Equal(t, "pilot-alpha", got.CanonicalKey) + assert.Equal(t, membership.StatusActive, got.Status) + assert.Nil(t, got.RemovedAt) +} + +func TestSaveRejectsNonActive(t *testing.T) { + ctx := context.Background() + gs, ms := newStores(t) + seedGame(t, gs, "game-001") + + rec := newMembership(t, "membership-001", "game-001", "user-a", "Pilot", "pilot") + rec.Status = membership.StatusRemoved + require.Error(t, ms.Save(ctx, rec)) +} + +func TestSaveDuplicateReturnsConflict(t *testing.T) { + ctx := context.Background() + gs, ms := newStores(t) + seedGame(t, gs, "game-001") + + rec := newMembership(t, "membership-001", "game-001", "user-a", "Pilot", "pilot") + require.NoError(t, ms.Save(ctx, rec)) + err := ms.Save(ctx, rec) + require.ErrorIs(t, err, membership.ErrConflict) +} + +func TestUpdateStatusToRemovedSetsRemovedAt(t *testing.T) { + ctx := context.Background() + gs, ms := newStores(t) + seedGame(t, gs, "game-001") + + rec := newMembership(t, "membership-001", "game-001", "user-a", "Pilot", "pilot") + require.NoError(t, ms.Save(ctx, rec)) + at := rec.JoinedAt.Add(time.Minute) + require.NoError(t, ms.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{ + MembershipID: rec.MembershipID, + ExpectedFrom: membership.StatusActive, + 
To: membership.StatusRemoved, + At: at, + })) + got, err := ms.Get(ctx, rec.MembershipID) + require.NoError(t, err) + assert.Equal(t, membership.StatusRemoved, got.Status) + require.NotNil(t, got.RemovedAt) + assert.True(t, got.RemovedAt.Equal(at)) +} + +func TestUpdateStatusReturnsConflictOnExpectedFromMismatch(t *testing.T) { + ctx := context.Background() + gs, ms := newStores(t) + seedGame(t, gs, "game-001") + + rec := newMembership(t, "membership-001", "game-001", "user-a", "Pilot", "pilot") + require.NoError(t, ms.Save(ctx, rec)) + + // Move the row out of `active` first; the next attempt's + // `WHERE status = 'active'` then fails on persistence even though + // (active → blocked) is itself a valid transition in the domain table. + require.NoError(t, ms.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{ + MembershipID: rec.MembershipID, + ExpectedFrom: membership.StatusActive, + To: membership.StatusRemoved, + At: rec.JoinedAt.Add(time.Minute), + })) + err := ms.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{ + MembershipID: rec.MembershipID, + ExpectedFrom: membership.StatusActive, + To: membership.StatusBlocked, + At: rec.JoinedAt.Add(2 * time.Minute), + }) + require.ErrorIs(t, err, membership.ErrConflict) +} + +func TestUpdateStatusReturnsNotFoundForMissing(t *testing.T) { + ctx := context.Background() + _, ms := newStores(t) + err := ms.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{ + MembershipID: common.MembershipID("membership-missing"), + ExpectedFrom: membership.StatusActive, + To: membership.StatusRemoved, + At: time.Now().UTC(), + }) + require.ErrorIs(t, err, membership.ErrNotFound) +} + +func TestDeleteRemovesRecord(t *testing.T) { + ctx := context.Background() + gs, ms := newStores(t) + seedGame(t, gs, "game-001") + + rec := newMembership(t, "membership-001", "game-001", "user-a", "Pilot", "pilot") + require.NoError(t, ms.Save(ctx, rec)) + require.NoError(t, ms.Delete(ctx, rec.MembershipID)) + + _, err := ms.Get(ctx, 
rec.MembershipID) + require.ErrorIs(t, err, membership.ErrNotFound) +} + +func TestDeleteReturnsNotFoundForMissing(t *testing.T) { + ctx := context.Background() + _, ms := newStores(t) + err := ms.Delete(ctx, common.MembershipID("membership-missing")) + require.ErrorIs(t, err, membership.ErrNotFound) +} + +func TestGetByGameAndUser(t *testing.T) { + ctx := context.Background() + gs, ms := newStores(t) + seedGame(t, gs, "game-001") + seedGame(t, gs, "game-002") + + require.NoError(t, ms.Save(ctx, newMembership(t, "membership-001", "game-001", "user-a", "P-a", "p-a"))) + require.NoError(t, ms.Save(ctx, newMembership(t, "membership-002", "game-001", "user-b", "P-b", "p-b"))) + require.NoError(t, ms.Save(ctx, newMembership(t, "membership-003", "game-002", "user-a", "P-a2", "p-a2"))) + + g1, err := ms.GetByGame(ctx, common.GameID("game-001")) + require.NoError(t, err) + assert.Len(t, g1, 2) + + userA, err := ms.GetByUser(ctx, "user-a") + require.NoError(t, err) + assert.Len(t, userA, 2) +} + +func TestGetMissingReturnsNotFound(t *testing.T) { + ctx := context.Background() + _, ms := newStores(t) + _, err := ms.Get(ctx, common.MembershipID("membership-missing")) + require.ErrorIs(t, err, membership.ErrNotFound) +} diff --git a/lobby/internal/adapters/postgres/migrations/00001_init.sql b/lobby/internal/adapters/postgres/migrations/00001_init.sql new file mode 100644 index 0000000..119a9db --- /dev/null +++ b/lobby/internal/adapters/postgres/migrations/00001_init.sql @@ -0,0 +1,169 @@ +-- +goose Up +-- Initial Game Lobby PostgreSQL schema. +-- +-- Five tables cover the durable surface of the service: +-- * games, applications, invites, memberships — the four core +-- enrollment entities; +-- * race_names — the Race Name Directory, holding the registered / +-- reservation / pending_registration bindings keyed by canonical key. 
+-- +-- Schema and the matching `lobbyservice` role are provisioned outside +-- this script (in tests via +-- integration/internal/harness/postgres_container.go::EnsureRoleAndSchema; +-- in production via an ops init script). This migration runs as the +-- schema owner with `search_path=lobby` and only contains DDL for the +-- service-owned tables and indexes. + +-- games holds one durable record per platform game session. The status + +-- created_at index serves the listing/scheduler queries that previously +-- read `lobby:games_by_status:*`. The partial owner index serves the +-- per-owner listings used by user-lifecycle cascade and "my games" +-- listings; public games carry an empty owner_user_id and never enter +-- the index. +CREATE TABLE games ( + game_id text PRIMARY KEY, + game_name text NOT NULL, + description text NOT NULL DEFAULT '', + game_type text NOT NULL, + owner_user_id text NOT NULL DEFAULT '', + status text NOT NULL, + min_players integer NOT NULL, + max_players integer NOT NULL, + start_gap_hours integer NOT NULL, + start_gap_players integer NOT NULL, + enrollment_ends_at timestamptz NOT NULL, + turn_schedule text NOT NULL, + target_engine_version text NOT NULL, + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, + started_at timestamptz, + finished_at timestamptz, + runtime_snapshot jsonb NOT NULL DEFAULT '{}'::jsonb, + runtime_binding jsonb +); + +CREATE INDEX games_status_created_idx + ON games (status, created_at DESC, game_id DESC); + +CREATE INDEX games_owner_idx + ON games (owner_user_id) WHERE game_type = 'private'; + +-- applications carries one row per public-game enrollment request. The +-- partial UNIQUE on (applicant_user_id, game_id) WHERE status <> 'rejected' +-- replaces the Redis lookup key `lobby:user_game_application:*:*` and +-- enforces the single-active constraint at the database level. 
Rejected +-- applications are kept (one applicant may produce multiple rejected rows +-- before submitting a successful one). +CREATE TABLE applications ( + application_id text PRIMARY KEY, + game_id text NOT NULL REFERENCES games(game_id) ON DELETE CASCADE, + applicant_user_id text NOT NULL, + race_name text NOT NULL, + status text NOT NULL, + created_at timestamptz NOT NULL, + decided_at timestamptz +); + +CREATE INDEX applications_game_idx ON applications (game_id); + +CREATE INDEX applications_user_idx ON applications (applicant_user_id); + +CREATE UNIQUE INDEX applications_active_per_user_game_uidx + ON applications (applicant_user_id, game_id) + WHERE status <> 'rejected'; + +-- invites carries one row per private-game invitation. race_name is empty +-- until the invite transitions to redeemed. The (status, expires_at) index +-- serves the enrollment-automation expiration sweep; the per-game, +-- per-invitee, and per-inviter indexes serve listing queries from the +-- service layer. +CREATE TABLE invites ( + invite_id text PRIMARY KEY, + game_id text NOT NULL REFERENCES games(game_id) ON DELETE CASCADE, + inviter_user_id text NOT NULL, + invitee_user_id text NOT NULL, + race_name text NOT NULL DEFAULT '', + status text NOT NULL, + created_at timestamptz NOT NULL, + expires_at timestamptz NOT NULL, + decided_at timestamptz +); + +CREATE INDEX invites_game_idx ON invites (game_id); +CREATE INDEX invites_invitee_idx ON invites (invitee_user_id); +CREATE INDEX invites_inviter_idx ON invites (inviter_user_id); +CREATE INDEX invites_status_expires_idx ON invites (status, expires_at); + +-- memberships carries one row per platform roster entry. Both race_name +-- (original casing) and canonical_key are stored explicitly because +-- downstream readers (capability evaluation, cascade release) consume the +-- canonical form without re-deriving it from race_name. 
Race-name +-- uniqueness is enforced by the Race Name Directory (the race_names +-- table below) — this table intentionally has no unique constraint on +-- canonical_key. +CREATE TABLE memberships ( + membership_id text PRIMARY KEY, + game_id text NOT NULL REFERENCES games(game_id) ON DELETE CASCADE, + user_id text NOT NULL, + race_name text NOT NULL, + canonical_key text NOT NULL, + status text NOT NULL, + joined_at timestamptz NOT NULL, + removed_at timestamptz +); + +CREATE INDEX memberships_game_idx ON memberships (game_id); +CREATE INDEX memberships_user_idx ON memberships (user_id); + +-- race_names is the durable Race Name Directory store. One row covers one +-- of three bindings on a canonical key: a registered name (one per +-- canonical_key, immutable holder), a per-game reservation, or a +-- pending_registration that is waiting on lobby.race_name.register inside +-- the eligible_until_ms window. The composite primary key (canonical_key, +-- game_id) lets the same user hold reservations for the same race name +-- across multiple active games concurrently, matching the behaviour the +-- shared port test suite (lobby/internal/ports/racenamedirtest) covers. +-- Registered rows store game_id = '' and keep the source game in +-- source_game_id so the per-canonical uniqueness rule expresses cleanly +-- as a partial UNIQUE index. Cross-user uniqueness on canonical_key is +-- enforced at write time inside transactions guarded by +-- pg_advisory_xact_lock(hashtextextended(canonical_key, 0)). 
+CREATE TABLE race_names ( + canonical_key text NOT NULL, + game_id text NOT NULL DEFAULT '', + holder_user_id text NOT NULL, + race_name text NOT NULL, + binding_kind text NOT NULL, + source_game_id text NOT NULL DEFAULT '', + reserved_at_ms bigint NOT NULL DEFAULT 0, + eligible_until_ms bigint, + registered_at_ms bigint, + PRIMARY KEY (canonical_key, game_id), + CONSTRAINT race_names_binding_kind_chk + CHECK (binding_kind IN ('registered', 'reservation', 'pending_registration')) +); + +-- Exactly one registered binding per canonical_key. Reservations and +-- pending_registration entries are differentiated by game_id within the +-- primary key. +CREATE UNIQUE INDEX race_names_registered_uidx + ON race_names (canonical_key) + WHERE binding_kind = 'registered'; + +-- Per-user listings used by ListRegistered / ListReservations / +-- ListPendingRegistrations. +CREATE INDEX race_names_holder_idx + ON race_names (holder_user_id, binding_kind); + +-- Pending-registration expiration scanner reads only the pending subset +-- ordered by eligible_until_ms. +CREATE INDEX race_names_pending_eligible_idx + ON race_names (eligible_until_ms) + WHERE binding_kind = 'pending_registration'; + +-- +goose Down +DROP TABLE IF EXISTS race_names; +DROP TABLE IF EXISTS memberships; +DROP TABLE IF EXISTS invites; +DROP TABLE IF EXISTS applications; +DROP TABLE IF EXISTS games; diff --git a/lobby/internal/adapters/postgres/migrations/migrations.go b/lobby/internal/adapters/postgres/migrations/migrations.go new file mode 100644 index 0000000..f95d05f --- /dev/null +++ b/lobby/internal/adapters/postgres/migrations/migrations.go @@ -0,0 +1,19 @@ +// Package migrations exposes the embedded goose migration files used by +// Game Lobby Service to provision its `lobby` schema in PostgreSQL. 
+// +// The embedded filesystem is consumed by `pkg/postgres.RunMigrations` during +// lobby-service startup and by `cmd/jetgen` when regenerating the +// `internal/adapters/postgres/jet/` code against a transient PostgreSQL +// instance. +package migrations + +import "embed" + +//go:embed *.sql +var fs embed.FS + +// FS returns the embedded filesystem containing every numbered goose +// migration shipped with Game Lobby Service. +func FS() embed.FS { + return fs +} diff --git a/lobby/internal/adapters/postgres/racenamedir/directory.go b/lobby/internal/adapters/postgres/racenamedir/directory.go new file mode 100644 index 0000000..36d0467 --- /dev/null +++ b/lobby/internal/adapters/postgres/racenamedir/directory.go @@ -0,0 +1,1039 @@ +// Package racenamedir implements the PostgreSQL-backed adapter for +// `ports.RaceNameDirectory`. +// +// One row in the `race_names` table backs one of three bindings on a +// canonical key: a registered name (one per canonical_key, immutable +// holder), a per-game reservation, or a pending_registration created by a +// capable game finish. The composite primary key (canonical_key, game_id) +// matches the existing two-tier semantics, where the same user may hold +// reservations on the same canonical key across multiple active games. +// Registered rows store game_id = '' and keep the source game in +// source_game_id, so the per-canonical uniqueness rule reduces to a +// partial UNIQUE index. Cross-user collisions on canonical_key are +// arbitrated by serialising every write transaction with +// pg_advisory_xact_lock(hashtextextended(canonical_key, 0)). +// +// PG_PLAN.md §6B introduces this adapter; see +// `galaxy/lobby/docs/postgres-migration.md` for the full decision record. 
+package racenamedir + +import ( + "context" + "database/sql" + "errors" + "fmt" + "strings" + "time" + + "galaxy/lobby/internal/adapters/postgres/internal/sqlx" + pgtable "galaxy/lobby/internal/adapters/postgres/jet/lobby/table" + "galaxy/lobby/internal/domain/common" + "galaxy/lobby/internal/domain/racename" + "galaxy/lobby/internal/ports" + + pg "github.com/go-jet/jet/v2/postgres" +) + +// Binding kind values stored verbatim in `race_names.binding_kind`. They +// equal the corresponding `ports.Kind*` constants so adapter methods can +// surface them at the port boundary without translation. +const ( + bindingRegistered = ports.KindRegistered // "registered" + bindingReservation = ports.KindReservation // "reservation" + bindingPending = ports.KindPendingRegistration // "pending_registration" +) + +// registeredGameID is the sentinel value stored in the (canonical_key, +// game_id) primary key for binding_kind = 'registered' rows. The actual +// source game is kept in source_game_id; the empty `game_id` keeps +// registered rows distinct from per-game reservations under the same +// canonical_key. +const registeredGameID = "" + +// Config configures one PostgreSQL-backed Race Name Directory adapter. +// The adapter does not own the underlying *sql.DB lifecycle: the caller +// (typically the service runtime) opens, instruments, migrates, and +// closes the pool. +type Config struct { + // DB is the connection pool the directory uses for every query. + DB *sql.DB + + // OperationTimeout bounds one operation. Read-only methods derive a + // single context from it; write methods reuse the same bound across + // the BEGIN ... COMMIT transaction. + OperationTimeout time.Duration + + // Policy supplies the canonical-key derivation and ValidateName + // rules; the adapter owns no race-name policy of its own. 
+ Policy *racename.Policy + + // Clock supplies wall-clock time used to stamp reserved_at_ms, + // registered_at_ms and the cutoff passed to + // ExpirePendingRegistrations. Defaults to time.Now when nil. + Clock func() time.Time +} + +// Directory persists Race Name Directory bindings in PostgreSQL. +type Directory struct { + db *sql.DB + operationTimeout time.Duration + policy *racename.Policy + nowFn func() time.Time +} + +// New constructs one PostgreSQL-backed Race Name Directory from cfg. +func New(cfg Config) (*Directory, error) { + if cfg.DB == nil { + return nil, errors.New("new postgres race name directory: db must not be nil") + } + if cfg.OperationTimeout <= 0 { + return nil, errors.New("new postgres race name directory: operation timeout must be positive") + } + if cfg.Policy == nil { + return nil, errors.New("new postgres race name directory: policy must not be nil") + } + nowFn := cfg.Clock + if nowFn == nil { + nowFn = time.Now + } + return &Directory{ + db: cfg.DB, + operationTimeout: cfg.OperationTimeout, + policy: cfg.Policy, + nowFn: nowFn, + }, nil +} + +// Canonicalize returns the canonical uniqueness key for raceName as a +// plain string. Validation failures map to ports.ErrInvalidName. +func (directory *Directory) Canonicalize(raceName string) (string, error) { + if directory == nil { + return "", errors.New("canonicalize race name: nil directory") + } + canonical, err := directory.policy.Canonicalize(raceName) + if err != nil { + return "", fmt.Errorf("canonicalize race name: %w", ports.ErrInvalidName) + } + return canonical.String(), nil +} + +// Check reports whether raceName is taken for actorUserID. A concurrent +// Reserve may race against the result; service code that needs atomicity +// must rely on Reserve returning ErrNameTaken instead of pre-checking. 
+func (directory *Directory) Check( + ctx context.Context, + raceName, actorUserID string, +) (ports.Availability, error) { + if directory == nil { + return ports.Availability{}, errors.New("check race name: nil directory") + } + actor, err := normalizeNonEmpty(actorUserID, "check race name", "actor user id") + if err != nil { + return ports.Availability{}, err + } + canonical, err := directory.policy.Canonicalize(raceName) + if err != nil { + return ports.Availability{}, fmt.Errorf("check race name: %w", ports.ErrInvalidName) + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "check race name", directory.operationTimeout) + if err != nil { + return ports.Availability{}, err + } + defer cancel() + + stmt := pg.SELECT( + pgtable.RaceNames.HolderUserID, + pgtable.RaceNames.BindingKind, + ).FROM(pgtable.RaceNames). + WHERE(pgtable.RaceNames.CanonicalKey.EQ(pg.String(canonical.String()))) + + query, args := stmt.Sql() + rows, err := directory.db.QueryContext(operationCtx, query, args...) + if err != nil { + return ports.Availability{}, fmt.Errorf("check race name: %w", err) + } + defer rows.Close() + + var ( + bestHolder string + bestKind string + bestRank int + ) + for rows.Next() { + var holder, kind string + if err := rows.Scan(&holder, &kind); err != nil { + return ports.Availability{}, fmt.Errorf("check race name: scan: %w", err) + } + rank := bindingPriority(kind) + if bestKind == "" || rank < bestRank { + bestHolder = holder + bestKind = kind + bestRank = rank + } + } + if err := rows.Err(); err != nil { + return ports.Availability{}, fmt.Errorf("check race name: %w", err) + } + if bestKind == "" { + return ports.Availability{}, nil + } + return ports.Availability{ + Taken: bestHolder != actor, + HolderUserID: bestHolder, + Kind: bestKind, + }, nil +} + +// Reserve claims raceName for (gameID, userID). Repeating the call with +// the same tuple is idempotent; cross-user collisions on the canonical +// key surface ports.ErrNameTaken. 
+func (directory *Directory) Reserve( + ctx context.Context, + gameID, userID, raceName string, +) error { + if directory == nil { + return errors.New("reserve race name: nil directory") + } + game, err := normalizeGameID(gameID, "reserve race name") + if err != nil { + return err + } + user, err := normalizeNonEmpty(userID, "reserve race name", "user id") + if err != nil { + return err + } + displayName, err := racename.ValidateName(raceName) + if err != nil { + return fmt.Errorf("reserve race name: %w", ports.ErrInvalidName) + } + canonical, err := directory.policy.Canonical(displayName) + if err != nil { + return fmt.Errorf("reserve race name: %w", ports.ErrInvalidName) + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "reserve race name", directory.operationTimeout) + if err != nil { + return err + } + defer cancel() + + reservedAtMs := directory.nowFn().UTC().UnixMilli() + return directory.withCanonicalLock(operationCtx, canonical, "reserve race name", func(tx *sql.Tx) error { + existing, err := loadByCanonicalTx(operationCtx, tx, canonical) + if err != nil { + return fmt.Errorf("reserve race name: %w", err) + } + for _, r := range existing { + if r.holderUserID != user { + return ports.ErrNameTaken + } + } + // Same-user idempotency: a row already at this PK means the + // holder already binds this canonical for this game (whether as + // reservation or pending_registration). Skip the INSERT. 
+ for _, r := range existing { + if r.gameID == game.String() { + return nil + } + } + stmt := pgtable.RaceNames.INSERT( + pgtable.RaceNames.CanonicalKey, + pgtable.RaceNames.GameID, + pgtable.RaceNames.HolderUserID, + pgtable.RaceNames.RaceName, + pgtable.RaceNames.BindingKind, + pgtable.RaceNames.SourceGameID, + pgtable.RaceNames.ReservedAtMs, + ).VALUES( + canonical.String(), + game.String(), + user, + displayName, + bindingReservation, + game.String(), + reservedAtMs, + ) + query, args := stmt.Sql() + if _, err := tx.ExecContext(operationCtx, query, args...); err != nil { + return fmt.Errorf("reserve race name: %w", err) + } + return nil + }) +} + +// ReleaseReservation removes the reservation held by userID for raceName +// in gameID. Missing reservation, mismatched holder, and invalid raceName +// all resolve to a silent no-op per the port contract. +func (directory *Directory) ReleaseReservation( + ctx context.Context, + gameID, userID, raceName string, +) error { + if directory == nil { + return errors.New("release race name reservation: nil directory") + } + game, err := normalizeGameID(gameID, "release race name reservation") + if err != nil { + return err + } + user, err := normalizeNonEmpty(userID, "release race name reservation", "user id") + if err != nil { + return err + } + canonical, err := directory.policy.Canonicalize(raceName) + if err != nil { + // Invalid name is a silent no-op per the port contract. 
+ if ctxErr := contextAlive(ctx); ctxErr != nil { + return ctxErr + } + return nil + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "release race name reservation", directory.operationTimeout) + if err != nil { + return err + } + defer cancel() + + return directory.withCanonicalLock(operationCtx, canonical, "release race name reservation", func(tx *sql.Tx) error { + stmt := pgtable.RaceNames.DELETE().WHERE(pg.AND( + pgtable.RaceNames.CanonicalKey.EQ(pg.String(canonical.String())), + pgtable.RaceNames.GameID.EQ(pg.String(game.String())), + pgtable.RaceNames.HolderUserID.EQ(pg.String(user)), + )) + query, args := stmt.Sql() + if _, err := tx.ExecContext(operationCtx, query, args...); err != nil { + return fmt.Errorf("release race name reservation: %w", err) + } + return nil + }) +} + +// MarkPendingRegistration promotes the reservation for (gameID, userID) +// on raceName's canonical key to pending_registration status. +func (directory *Directory) MarkPendingRegistration( + ctx context.Context, + gameID, userID, raceName string, + eligibleUntil time.Time, +) error { + if directory == nil { + return errors.New("mark pending race name registration: nil directory") + } + game, err := normalizeGameID(gameID, "mark pending race name registration") + if err != nil { + return err + } + user, err := normalizeNonEmpty(userID, "mark pending race name registration", "user id") + if err != nil { + return err + } + if eligibleUntil.IsZero() { + return fmt.Errorf("mark pending race name registration: eligible until must be set") + } + displayName, err := racename.ValidateName(raceName) + if err != nil { + return fmt.Errorf("mark pending race name registration: %w", ports.ErrInvalidName) + } + canonical, err := directory.policy.Canonical(displayName) + if err != nil { + return fmt.Errorf("mark pending race name registration: %w", ports.ErrInvalidName) + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "mark pending race name registration", 
directory.operationTimeout) + if err != nil { + return err + } + defer cancel() + + eligibleUntilMs := eligibleUntil.UTC().UnixMilli() + return directory.withCanonicalLock(operationCtx, canonical, "mark pending race name registration", func(tx *sql.Tx) error { + existing, err := loadByCanonicalTx(operationCtx, tx, canonical) + if err != nil { + return fmt.Errorf("mark pending race name registration: %w", err) + } + var target *raceNameRow + for index, candidate := range existing { + if candidate.gameID == game.String() && candidate.holderUserID == user { + target = &existing[index] + break + } + } + if target == nil { + return fmt.Errorf("mark pending race name registration: reservation missing for game %q user %q", game, user) + } + switch target.bindingKind { + case bindingPending: + if target.eligibleUntilMs == nil || *target.eligibleUntilMs != eligibleUntilMs { + return fmt.Errorf("mark pending race name registration: %w", ports.ErrInvalidName) + } + return nil + case bindingReservation: + // promote + default: + return fmt.Errorf("mark pending race name registration: reservation missing for game %q user %q", game, user) + } + + stmt := pgtable.RaceNames.UPDATE( + pgtable.RaceNames.BindingKind, + pgtable.RaceNames.RaceName, + pgtable.RaceNames.EligibleUntilMs, + ).SET( + bindingPending, + displayName, + eligibleUntilMs, + ).WHERE(pg.AND( + pgtable.RaceNames.CanonicalKey.EQ(pg.String(canonical.String())), + pgtable.RaceNames.GameID.EQ(pg.String(game.String())), + )) + query, args := stmt.Sql() + if _, err := tx.ExecContext(operationCtx, query, args...); err != nil { + return fmt.Errorf("mark pending race name registration: %w", err) + } + return nil + }) +} + +// ExpirePendingRegistrations releases every pending registration whose +// eligible_until_ms is at or before now. Each released entry is returned +// so callers can emit telemetry; running the method twice over the same +// state returns an empty slice the second time. 
+func (directory *Directory) ExpirePendingRegistrations( + ctx context.Context, + now time.Time, +) ([]ports.ExpiredPending, error) { + if directory == nil { + return nil, errors.New("expire pending race name registrations: nil directory") + } + + cutoff := now.UTC().UnixMilli() + + scanCtx, cancel, err := sqlx.WithTimeout(ctx, "expire pending race name registrations", directory.operationTimeout) + if err != nil { + return nil, err + } + defer cancel() + + stmt := pg.SELECT( + pgtable.RaceNames.CanonicalKey, + pgtable.RaceNames.GameID, + ).FROM(pgtable.RaceNames).WHERE(pg.AND( + pgtable.RaceNames.BindingKind.EQ(pg.String(bindingPending)), + pgtable.RaceNames.EligibleUntilMs.LT_EQ(pg.Int(cutoff)), + )) + query, args := stmt.Sql() + rows, err := directory.db.QueryContext(scanCtx, query, args...) + if err != nil { + return nil, fmt.Errorf("expire pending race name registrations: %w", err) + } + type candidate struct { + canonical racename.CanonicalKey + gameID string + } + var candidates []candidate + for rows.Next() { + var canonicalKey, gameID string + if err := rows.Scan(&canonicalKey, &gameID); err != nil { + rows.Close() + return nil, fmt.Errorf("expire pending race name registrations: scan: %w", err) + } + candidates = append(candidates, candidate{ + canonical: racename.CanonicalKey(canonicalKey), + gameID: gameID, + }) + } + if err := rows.Close(); err != nil { + return nil, fmt.Errorf("expire pending race name registrations: %w", err) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("expire pending race name registrations: %w", err) + } + if len(candidates) == 0 { + return nil, nil + } + + expired := make([]ports.ExpiredPending, 0, len(candidates)) + for _, cand := range candidates { + entry, released, err := directory.expireOne(ctx, cand.canonical, cand.gameID, cutoff) + if err != nil { + return nil, fmt.Errorf("expire pending race name registrations: %w", err) + } + if released { + expired = append(expired, entry) + } + } + return expired, 
nil +} + +// expireOne re-reads the candidate row under an advisory lock, deletes it +// when still pending and at-or-before cutoff, and returns the matching +// ExpiredPending entry. Concurrent transitions (Register, ReleaseReservation, +// or a refreshed eligible_until_ms) cause expireOne to skip the row. +func (directory *Directory) expireOne( + ctx context.Context, + canonical racename.CanonicalKey, + gameID string, + cutoff int64, +) (ports.ExpiredPending, bool, error) { + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "expire pending race name registrations", directory.operationTimeout) + if err != nil { + return ports.ExpiredPending{}, false, err + } + defer cancel() + + var ( + entry ports.ExpiredPending + released bool + ) + err = directory.withCanonicalLock(operationCtx, canonical, "expire pending race name registrations", func(tx *sql.Tx) error { + row, found, err := loadOneByPKTx(operationCtx, tx, canonical, gameID) + if err != nil { + return err + } + if !found { + return nil + } + if row.bindingKind != bindingPending { + return nil + } + if row.eligibleUntilMs == nil || *row.eligibleUntilMs > cutoff { + return nil + } + stmt := pgtable.RaceNames.DELETE().WHERE(pg.AND( + pgtable.RaceNames.CanonicalKey.EQ(pg.String(canonical.String())), + pgtable.RaceNames.GameID.EQ(pg.String(gameID)), + )) + query, args := stmt.Sql() + if _, err := tx.ExecContext(operationCtx, query, args...); err != nil { + return err + } + entry = ports.ExpiredPending{ + CanonicalKey: canonical.String(), + RaceName: row.raceName, + GameID: gameID, + UserID: row.holderUserID, + EligibleUntilMs: *row.eligibleUntilMs, + } + released = true + return nil + }) + if err != nil { + return ports.ExpiredPending{}, false, err + } + return entry, released, nil +} + +// Register converts the pending registration identified by (gameID, +// userID) on raceName's canonical key into a registered race name. 
+func (directory *Directory) Register( + ctx context.Context, + gameID, userID, raceName string, +) error { + if directory == nil { + return errors.New("register race name: nil directory") + } + game, err := normalizeGameID(gameID, "register race name") + if err != nil { + return err + } + user, err := normalizeNonEmpty(userID, "register race name", "user id") + if err != nil { + return err + } + displayName, err := racename.ValidateName(raceName) + if err != nil { + return fmt.Errorf("register race name: %w", ports.ErrInvalidName) + } + canonical, err := directory.policy.Canonical(displayName) + if err != nil { + return fmt.Errorf("register race name: %w", ports.ErrInvalidName) + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "register race name", directory.operationTimeout) + if err != nil { + return err + } + defer cancel() + + nowMs := directory.nowFn().UTC().UnixMilli() + return directory.withCanonicalLock(operationCtx, canonical, "register race name", func(tx *sql.Tx) error { + existing, err := loadByCanonicalTx(operationCtx, tx, canonical) + if err != nil { + return fmt.Errorf("register race name: %w", err) + } + // Already registered: idempotent for the same holder, ErrNameTaken + // for any other user. 
+ for _, r := range existing { + if r.bindingKind == bindingRegistered { + if r.holderUserID == user { + return nil + } + return ports.ErrNameTaken + } + } + var pending *raceNameRow + for index, r := range existing { + if r.gameID == game.String() && r.holderUserID == user && r.bindingKind == bindingPending { + pending = &existing[index] + break + } + } + if pending == nil { + return ports.ErrPendingMissing + } + if pending.eligibleUntilMs == nil || *pending.eligibleUntilMs <= nowMs { + return ports.ErrPendingExpired + } + + del := pgtable.RaceNames.DELETE().WHERE(pg.AND( + pgtable.RaceNames.CanonicalKey.EQ(pg.String(canonical.String())), + pgtable.RaceNames.GameID.EQ(pg.String(game.String())), + )) + delQuery, delArgs := del.Sql() + if _, err := tx.ExecContext(operationCtx, delQuery, delArgs...); err != nil { + return fmt.Errorf("register race name: %w", err) + } + ins := pgtable.RaceNames.INSERT( + pgtable.RaceNames.CanonicalKey, + pgtable.RaceNames.GameID, + pgtable.RaceNames.HolderUserID, + pgtable.RaceNames.RaceName, + pgtable.RaceNames.BindingKind, + pgtable.RaceNames.SourceGameID, + pgtable.RaceNames.ReservedAtMs, + pgtable.RaceNames.RegisteredAtMs, + ).VALUES( + canonical.String(), + registeredGameID, + user, + pending.raceName, + bindingRegistered, + game.String(), + pending.reservedAtMs, + nowMs, + ) + insQuery, insArgs := ins.Sql() + if _, err := tx.ExecContext(operationCtx, insQuery, insArgs...); err != nil { + return fmt.Errorf("register race name: %w", err) + } + return nil + }) +} + +// ListRegistered returns every registered race name owned by userID. 
+func (directory *Directory) ListRegistered( + ctx context.Context, + userID string, +) ([]ports.RegisteredName, error) { + if directory == nil { + return nil, errors.New("list registered race names: nil directory") + } + user, err := normalizeNonEmpty(userID, "list registered race names", "user id") + if err != nil { + return nil, err + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "list registered race names", directory.operationTimeout) + if err != nil { + return nil, err + } + defer cancel() + + stmt := pg.SELECT( + pgtable.RaceNames.CanonicalKey, + pgtable.RaceNames.RaceName, + pgtable.RaceNames.SourceGameID, + pgtable.RaceNames.RegisteredAtMs, + ).FROM(pgtable.RaceNames).WHERE(pg.AND( + pgtable.RaceNames.HolderUserID.EQ(pg.String(user)), + pgtable.RaceNames.BindingKind.EQ(pg.String(bindingRegistered)), + )) + + query, args := stmt.Sql() + rows, err := directory.db.QueryContext(operationCtx, query, args...) + if err != nil { + return nil, fmt.Errorf("list registered race names: %w", err) + } + defer rows.Close() + var out []ports.RegisteredName + for rows.Next() { + var ( + canonical string + raceName string + sourceGameID string + regAt sql.NullInt64 + ) + if err := rows.Scan(&canonical, &raceName, &sourceGameID, ®At); err != nil { + return nil, fmt.Errorf("list registered race names: scan: %w", err) + } + var regAtMs int64 + if regAt.Valid { + regAtMs = regAt.Int64 + } + out = append(out, ports.RegisteredName{ + CanonicalKey: canonical, + RaceName: raceName, + SourceGameID: sourceGameID, + RegisteredAtMs: regAtMs, + }) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("list registered race names: %w", err) + } + return out, nil +} + +// ListReservations returns every active reservation owned by userID +// whose status has not yet been promoted to pending_registration. 
+func (directory *Directory) ListReservations( + ctx context.Context, + userID string, +) ([]ports.Reservation, error) { + if directory == nil { + return nil, errors.New("list race name reservations: nil directory") + } + user, err := normalizeNonEmpty(userID, "list race name reservations", "user id") + if err != nil { + return nil, err + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "list race name reservations", directory.operationTimeout) + if err != nil { + return nil, err + } + defer cancel() + + stmt := pg.SELECT( + pgtable.RaceNames.CanonicalKey, + pgtable.RaceNames.RaceName, + pgtable.RaceNames.GameID, + pgtable.RaceNames.ReservedAtMs, + ).FROM(pgtable.RaceNames).WHERE(pg.AND( + pgtable.RaceNames.HolderUserID.EQ(pg.String(user)), + pgtable.RaceNames.BindingKind.EQ(pg.String(bindingReservation)), + )) + + query, args := stmt.Sql() + rows, err := directory.db.QueryContext(operationCtx, query, args...) + if err != nil { + return nil, fmt.Errorf("list race name reservations: %w", err) + } + defer rows.Close() + var out []ports.Reservation + for rows.Next() { + var ( + canonical string + raceName string + gameID string + reservedAtMs int64 + ) + if err := rows.Scan(&canonical, &raceName, &gameID, &reservedAtMs); err != nil { + return nil, fmt.Errorf("list race name reservations: scan: %w", err) + } + out = append(out, ports.Reservation{ + CanonicalKey: canonical, + RaceName: raceName, + GameID: gameID, + ReservedAtMs: reservedAtMs, + }) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("list race name reservations: %w", err) + } + return out, nil +} + +// ListPendingRegistrations returns every pending registration owned by +// userID. 
+func (directory *Directory) ListPendingRegistrations( + ctx context.Context, + userID string, +) ([]ports.PendingRegistration, error) { + if directory == nil { + return nil, errors.New("list pending race name registrations: nil directory") + } + user, err := normalizeNonEmpty(userID, "list pending race name registrations", "user id") + if err != nil { + return nil, err + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "list pending race name registrations", directory.operationTimeout) + if err != nil { + return nil, err + } + defer cancel() + + stmt := pg.SELECT( + pgtable.RaceNames.CanonicalKey, + pgtable.RaceNames.RaceName, + pgtable.RaceNames.GameID, + pgtable.RaceNames.ReservedAtMs, + pgtable.RaceNames.EligibleUntilMs, + ).FROM(pgtable.RaceNames).WHERE(pg.AND( + pgtable.RaceNames.HolderUserID.EQ(pg.String(user)), + pgtable.RaceNames.BindingKind.EQ(pg.String(bindingPending)), + )) + + query, args := stmt.Sql() + rows, err := directory.db.QueryContext(operationCtx, query, args...) + if err != nil { + return nil, fmt.Errorf("list pending race name registrations: %w", err) + } + defer rows.Close() + var out []ports.PendingRegistration + for rows.Next() { + var ( + canonical string + raceName string + gameID string + reservedAtMs int64 + eligibleAt sql.NullInt64 + ) + if err := rows.Scan(&canonical, &raceName, &gameID, &reservedAtMs, &eligibleAt); err != nil { + return nil, fmt.Errorf("list pending race name registrations: scan: %w", err) + } + var eligibleAtMs int64 + if eligibleAt.Valid { + eligibleAtMs = eligibleAt.Int64 + } + out = append(out, ports.PendingRegistration{ + CanonicalKey: canonical, + RaceName: raceName, + GameID: gameID, + ReservedAtMs: reservedAtMs, + EligibleUntilMs: eligibleAtMs, + }) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("list pending race name registrations: %w", err) + } + return out, nil +} + +// ReleaseAllByUser atomically clears every binding owned by userID. 
The +// user-lifecycle consumer invokes the method on permanent_blocked and +// deleted events, so concurrent writes by the same user cannot race +// (the user is permanently disabled by the time the cascade runs). +func (directory *Directory) ReleaseAllByUser( + ctx context.Context, + userID string, +) error { + if directory == nil { + return errors.New("release all race names by user: nil directory") + } + user, err := normalizeNonEmpty(userID, "release all race names by user", "user id") + if err != nil { + return err + } + + operationCtx, cancel, err := sqlx.WithTimeout(ctx, "release all race names by user", directory.operationTimeout) + if err != nil { + return err + } + defer cancel() + + stmt := pgtable.RaceNames.DELETE(). + WHERE(pgtable.RaceNames.HolderUserID.EQ(pg.String(user))) + query, args := stmt.Sql() + if _, err := directory.db.ExecContext(operationCtx, query, args...); err != nil { + return fmt.Errorf("release all race names by user: %w", err) + } + return nil +} + +// raceNameRow mirrors one race_names row in adapter-internal code so +// transactional methods can read the row state under an advisory lock and +// branch without re-deriving column ordering at every call site. +type raceNameRow struct { + canonicalKey string + gameID string + holderUserID string + raceName string + bindingKind string + sourceGameID string + reservedAtMs int64 + eligibleUntilMs *int64 + registeredAtMs *int64 +} + +// raceNameAllColumns is the column list scanRow expects in order. 
+var raceNameAllColumns = pg.ColumnList{ + pgtable.RaceNames.CanonicalKey, + pgtable.RaceNames.GameID, + pgtable.RaceNames.HolderUserID, + pgtable.RaceNames.RaceName, + pgtable.RaceNames.BindingKind, + pgtable.RaceNames.SourceGameID, + pgtable.RaceNames.ReservedAtMs, + pgtable.RaceNames.EligibleUntilMs, + pgtable.RaceNames.RegisteredAtMs, +} + +func scanRow(scanner interface{ Scan(...any) error }) (raceNameRow, error) { + var ( + row raceNameRow + eligible sql.NullInt64 + registered sql.NullInt64 + ) + if err := scanner.Scan( + &row.canonicalKey, + &row.gameID, + &row.holderUserID, + &row.raceName, + &row.bindingKind, + &row.sourceGameID, + &row.reservedAtMs, + &eligible, + ®istered, + ); err != nil { + return raceNameRow{}, err + } + if eligible.Valid { + v := eligible.Int64 + row.eligibleUntilMs = &v + } + if registered.Valid { + v := registered.Int64 + row.registeredAtMs = &v + } + return row, nil +} + +// loadByCanonicalTx returns every race_names row for canonical_key. +func loadByCanonicalTx( + ctx context.Context, + tx *sql.Tx, + canonical racename.CanonicalKey, +) ([]raceNameRow, error) { + stmt := pg.SELECT(raceNameAllColumns). + FROM(pgtable.RaceNames). + WHERE(pgtable.RaceNames.CanonicalKey.EQ(pg.String(canonical.String()))) + query, args := stmt.Sql() + rows, err := tx.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + defer rows.Close() + var out []raceNameRow + for rows.Next() { + row, err := scanRow(rows) + if err != nil { + return nil, err + } + out = append(out, row) + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + +// loadOneByPKTx loads one row by its (canonical_key, game_id) primary +// key. The returned bool is false when no row matches. +func loadOneByPKTx( + ctx context.Context, + tx *sql.Tx, + canonical racename.CanonicalKey, + gameID string, +) (raceNameRow, bool, error) { + stmt := pg.SELECT(raceNameAllColumns). + FROM(pgtable.RaceNames). 
+ WHERE(pg.AND( + pgtable.RaceNames.CanonicalKey.EQ(pg.String(canonical.String())), + pgtable.RaceNames.GameID.EQ(pg.String(gameID)), + )) + query, args := stmt.Sql() + row := tx.QueryRowContext(ctx, query, args...) + out, err := scanRow(row) + if sqlx.IsNoRows(err) { + return raceNameRow{}, false, nil + } + if err != nil { + return raceNameRow{}, false, err + } + return out, true, nil +} + +// withCanonicalLock runs op inside a transaction guarded by +// pg_advisory_xact_lock(hashtextextended(canonical_key, 0)). The lock is +// released when the transaction terminates (commit or rollback). +func (directory *Directory) withCanonicalLock( + ctx context.Context, + canonical racename.CanonicalKey, + operation string, + op func(tx *sql.Tx) error, +) error { + tx, err := directory.db.BeginTx(ctx, nil) + if err != nil { + return fmt.Errorf("%s: begin tx: %w", operation, err) + } + committed := false + defer func() { + if !committed { + _ = tx.Rollback() + } + }() + if _, err := tx.ExecContext(ctx, "SELECT pg_advisory_xact_lock(hashtextextended($1, 0))", canonical.String()); err != nil { + return fmt.Errorf("%s: advisory lock: %w", operation, err) + } + if err := op(tx); err != nil { + return err + } + if err := tx.Commit(); err != nil { + return fmt.Errorf("%s: commit: %w", operation, err) + } + committed = true + return nil +} + +// bindingPriority maps a binding_kind value to a priority rank where +// lower numbers mean a stronger binding (registered > pending > reservation). +func bindingPriority(kind string) int { + switch kind { + case bindingRegistered: + return 1 + case bindingPending: + return 2 + case bindingReservation: + return 3 + default: + return 99 + } +} + +// normalizeNonEmpty trims value and rejects empty results with an error +// that mentions operation and field for traceability. 
+func normalizeNonEmpty(value, operation, field string) (string, error) { + trimmed := strings.TrimSpace(value) + if trimmed == "" { + return "", fmt.Errorf("%s: %s must not be empty", operation, field) + } + return trimmed, nil +} + +// normalizeGameID trims value and converts it into a typed common.GameID, +// rejecting empty input through normalizeNonEmpty. +func normalizeGameID(value, operation string) (common.GameID, error) { + trimmed, err := normalizeNonEmpty(value, operation, "game id") + if err != nil { + return "", err + } + return common.GameID(trimmed), nil +} + +// contextAlive surfaces ctx cancellation through a stable error path even +// when the calling method is a defensive no-op for invalid input. The +// shared port test suite expects every method to honour cancellation +// regardless of preceding validation. +func contextAlive(ctx context.Context) error { + if ctx == nil { + return errors.New("nil context") + } + if err := ctx.Err(); err != nil { + return err + } + return nil +} + +// Ensure *Directory satisfies the ports.RaceNameDirectory interface at +// compile time. +var _ ports.RaceNameDirectory = (*Directory)(nil) diff --git a/lobby/internal/adapters/postgres/racenamedir/directory_test.go b/lobby/internal/adapters/postgres/racenamedir/directory_test.go new file mode 100644 index 0000000..4e69d62 --- /dev/null +++ b/lobby/internal/adapters/postgres/racenamedir/directory_test.go @@ -0,0 +1,193 @@ +package racenamedir_test + +import ( + "context" + "database/sql" + "strconv" + "testing" + "time" + + "galaxy/lobby/internal/adapters/postgres/internal/pgtest" + "galaxy/lobby/internal/adapters/postgres/racenamedir" + "galaxy/lobby/internal/domain/racename" + "galaxy/lobby/internal/ports" + "galaxy/lobby/internal/ports/racenamedirtest" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestMain wires the per-package PostgreSQL container shared by every +// store test in this module. 
+func TestMain(m *testing.M) { pgtest.RunMain(m) } + +// newDirectory builds one Race Name Directory adapter against a freshly +// truncated lobby schema. now selects between the deterministic clock the +// shared suite supplies and the default time.Now. +func newDirectory(t *testing.T, now func() time.Time) *racenamedir.Directory { + t.Helper() + pgtest.TruncateAll(t) + policy, err := racename.NewPolicy() + require.NoError(t, err) + cfg := racenamedir.Config{ + DB: pgtest.Ensure(t).Pool(), + OperationTimeout: pgtest.OperationTimeout, + Policy: policy, + } + if now != nil { + cfg.Clock = now + } + directory, err := racenamedir.New(cfg) + require.NoError(t, err) + return directory +} + +// TestRaceNameDirectoryContract runs the shared behavioural suite that +// every ports.RaceNameDirectory implementation must pass. +func TestRaceNameDirectoryContract(t *testing.T) { + racenamedirtest.Run(t, func(now func() time.Time) ports.RaceNameDirectory { + return newDirectory(t, now) + }) +} + +func TestNewRejectsNilDB(t *testing.T) { + policy, err := racename.NewPolicy() + require.NoError(t, err) + + _, err = racenamedir.New(racenamedir.Config{ + OperationTimeout: pgtest.OperationTimeout, + Policy: policy, + }) + require.Error(t, err) +} + +func TestNewRejectsNilPolicy(t *testing.T) { + _, err := racenamedir.New(racenamedir.Config{ + DB: pgtest.Ensure(t).Pool(), + OperationTimeout: pgtest.OperationTimeout, + }) + require.Error(t, err) +} + +func TestNewRejectsNonPositiveTimeout(t *testing.T) { + policy, err := racename.NewPolicy() + require.NoError(t, err) + + _, err = racenamedir.New(racenamedir.Config{ + DB: pgtest.Ensure(t).Pool(), + Policy: policy, + }) + require.Error(t, err) +} + +// TestRegisteredRowShape validates the on-disk shape of a registered +// binding so future schema migrations have an explicit anchor. 
+func TestRegisteredRowShape(t *testing.T) { + now := time.Date(2026, 5, 1, 12, 0, 0, 0, time.UTC) + directory := newDirectory(t, func() time.Time { return now }) + ctx := context.Background() + + const ( + gameID = "game-shape-1" + userID = "user-shape-1" + raceName = "PilotNova" + ) + + require.NoError(t, directory.Reserve(ctx, gameID, userID, raceName)) + require.NoError(t, directory.MarkPendingRegistration(ctx, gameID, userID, raceName, now.Add(time.Hour))) + require.NoError(t, directory.Register(ctx, gameID, userID, raceName)) + + pool := pgtest.Ensure(t).Pool() + + canonical, err := directory.Canonicalize(raceName) + require.NoError(t, err) + + row := pool.QueryRowContext(ctx, ` + SELECT canonical_key, game_id, holder_user_id, race_name, binding_kind, + source_game_id, reserved_at_ms, eligible_until_ms, registered_at_ms + FROM race_names + WHERE canonical_key = $1 + `, canonical) + + var ( + canonicalKey string + storedGameID string + holderUserID string + raceNameCol string + bindingKind string + sourceGameID string + reservedAtMs int64 + eligibleAtMs sql.NullInt64 + registeredAtMs sql.NullInt64 + ) + require.NoError(t, row.Scan( + &canonicalKey, + &storedGameID, + &holderUserID, + &raceNameCol, + &bindingKind, + &sourceGameID, + &reservedAtMs, + &eligibleAtMs, + ®isteredAtMs, + )) + + assert.Equal(t, canonical, canonicalKey) + assert.Equal(t, "", storedGameID, "registered rows store game_id = ''") + assert.Equal(t, userID, holderUserID) + assert.Equal(t, raceName, raceNameCol) + assert.Equal(t, ports.KindRegistered, bindingKind) + assert.Equal(t, gameID, sourceGameID) + assert.True(t, registeredAtMs.Valid) + assert.Equal(t, now.UTC().UnixMilli(), registeredAtMs.Int64) + assert.False(t, eligibleAtMs.Valid, "registered rows null out eligible_until_ms") + assert.Equal(t, now.UTC().UnixMilli(), reservedAtMs, "reserved_at_ms is preserved across promote+register") +} + +// TestRegisteredPartialUniqueIndex confirms that a second user cannot +// register the same 
canonical key, even when they own a separate +// reservation row at a different (canonical_key, game_id) PK. +func TestRegisteredPartialUniqueIndex(t *testing.T) { + now := time.Date(2026, 5, 1, 12, 0, 0, 0, time.UTC) + directory := newDirectory(t, func() time.Time { return now }) + ctx := context.Background() + + const ( + raceName = "PilotNova" + gameA = "game-unique-a" + userA = "user-unique-a" + userB = "user-unique-b" + ) + + require.NoError(t, directory.Reserve(ctx, gameA, userA, raceName)) + require.NoError(t, directory.MarkPendingRegistration(ctx, gameA, userA, raceName, now.Add(time.Hour))) + require.NoError(t, directory.Register(ctx, gameA, userA, raceName)) + + err := directory.Reserve(ctx, gameA, userB, raceName) + require.ErrorIs(t, err, ports.ErrNameTaken) +} + +// TestExpirePendingRegistrationsBatched seeds two pending entries with +// distinct canonical keys and asserts both are released by a single pass +// even when the worker iterates via separate advisory locks. +func TestExpirePendingRegistrationsBatched(t *testing.T) { + now := time.Date(2026, 5, 1, 12, 0, 0, 0, time.UTC) + directory := newDirectory(t, func() time.Time { return now }) + ctx := context.Background() + + for index := range 3 { + gameID := "game-batch-" + strconv.Itoa(index) + userID := "user-batch-" + strconv.Itoa(index) + raceName := "PilotBatch" + strconv.Itoa(index) + require.NoError(t, directory.Reserve(ctx, gameID, userID, raceName)) + require.NoError(t, directory.MarkPendingRegistration(ctx, gameID, userID, raceName, now.Add(time.Hour))) + } + + expired, err := directory.ExpirePendingRegistrations(ctx, now.Add(2*time.Hour)) + require.NoError(t, err) + require.Len(t, expired, 3) + + expired, err = directory.ExpirePendingRegistrations(ctx, now.Add(2*time.Hour)) + require.NoError(t, err) + assert.Empty(t, expired, "second pass releases nothing") +} diff --git a/lobby/internal/adapters/redisstate/applicationstore.go b/lobby/internal/adapters/redisstate/applicationstore.go 
deleted file mode 100644 index 96e5571..0000000 --- a/lobby/internal/adapters/redisstate/applicationstore.go +++ /dev/null @@ -1,277 +0,0 @@ -package redisstate - -import ( - "context" - "errors" - "fmt" - "strings" - - "galaxy/lobby/internal/domain/application" - "galaxy/lobby/internal/domain/common" - "galaxy/lobby/internal/ports" - - "github.com/redis/go-redis/v9" -) - -// ApplicationStore provides Redis-backed durable storage for application -// records. -type ApplicationStore struct { - client *redis.Client - keys Keyspace -} - -// NewApplicationStore constructs one Redis-backed application store. It -// returns an error when client is nil. -func NewApplicationStore(client *redis.Client) (*ApplicationStore, error) { - if client == nil { - return nil, errors.New("new application store: nil redis client") - } - - return &ApplicationStore{ - client: client, - keys: Keyspace{}, - }, nil -} - -// Save persists a new submitted application record and enforces the -// single-active (non-rejected) constraint per (applicant, game) pair. 
-func (store *ApplicationStore) Save(ctx context.Context, record application.Application) error { - if store == nil || store.client == nil { - return errors.New("save application: nil store") - } - if ctx == nil { - return errors.New("save application: nil context") - } - if err := record.Validate(); err != nil { - return fmt.Errorf("save application: %w", err) - } - if record.Status != application.StatusSubmitted { - return fmt.Errorf( - "save application: status must be %q, got %q", - application.StatusSubmitted, record.Status, - ) - } - - payload, err := MarshalApplication(record) - if err != nil { - return fmt.Errorf("save application: %w", err) - } - - primaryKey := store.keys.Application(record.ApplicationID) - activeLookupKey := store.keys.UserGameApplication(record.ApplicantUserID, record.GameID) - gameIndexKey := store.keys.ApplicationsByGame(record.GameID) - userIndexKey := store.keys.ApplicationsByUser(record.ApplicantUserID) - member := record.ApplicationID.String() - - watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error { - existingPrimary, getErr := tx.Exists(ctx, primaryKey).Result() - if getErr != nil { - return fmt.Errorf("save application: %w", getErr) - } - if existingPrimary != 0 { - return fmt.Errorf("save application: %w", application.ErrConflict) - } - - existingActive, getErr := tx.Exists(ctx, activeLookupKey).Result() - if getErr != nil { - return fmt.Errorf("save application: %w", getErr) - } - if existingActive != 0 { - return fmt.Errorf("save application: %w", application.ErrConflict) - } - - _, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, primaryKey, payload, ApplicationRecordTTL) - pipe.Set(ctx, activeLookupKey, member, ApplicationRecordTTL) - pipe.SAdd(ctx, gameIndexKey, member) - pipe.SAdd(ctx, userIndexKey, member) - return nil - }) - return err - }, primaryKey, activeLookupKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("save application: %w", 
application.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// Get returns the record identified by applicationID. -func (store *ApplicationStore) Get(ctx context.Context, applicationID common.ApplicationID) (application.Application, error) { - if store == nil || store.client == nil { - return application.Application{}, errors.New("get application: nil store") - } - if ctx == nil { - return application.Application{}, errors.New("get application: nil context") - } - if err := applicationID.Validate(); err != nil { - return application.Application{}, fmt.Errorf("get application: %w", err) - } - - payload, err := store.client.Get(ctx, store.keys.Application(applicationID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return application.Application{}, application.ErrNotFound - case err != nil: - return application.Application{}, fmt.Errorf("get application: %w", err) - } - - record, err := UnmarshalApplication(payload) - if err != nil { - return application.Application{}, fmt.Errorf("get application: %w", err) - } - return record, nil -} - -// GetByGame returns every application attached to gameID. -func (store *ApplicationStore) GetByGame(ctx context.Context, gameID common.GameID) ([]application.Application, error) { - if store == nil || store.client == nil { - return nil, errors.New("get applications by game: nil store") - } - if ctx == nil { - return nil, errors.New("get applications by game: nil context") - } - if err := gameID.Validate(); err != nil { - return nil, fmt.Errorf("get applications by game: %w", err) - } - - return store.loadApplicationsBySet(ctx, - "get applications by game", - store.keys.ApplicationsByGame(gameID), - ) -} - -// GetByUser returns every application submitted by applicantUserID. 
-func (store *ApplicationStore) GetByUser(ctx context.Context, applicantUserID string) ([]application.Application, error) { - if store == nil || store.client == nil { - return nil, errors.New("get applications by user: nil store") - } - if ctx == nil { - return nil, errors.New("get applications by user: nil context") - } - trimmed := strings.TrimSpace(applicantUserID) - if trimmed == "" { - return nil, fmt.Errorf("get applications by user: applicant user id must not be empty") - } - - return store.loadApplicationsBySet(ctx, - "get applications by user", - store.keys.ApplicationsByUser(trimmed), - ) -} - -// loadApplicationsBySet materializes applications whose ids are stored in -// setKey. Stale set members (primary key removed out-of-band) are dropped -// silently, mirroring gamestore.GetByStatus. -func (store *ApplicationStore) loadApplicationsBySet(ctx context.Context, operation, setKey string) ([]application.Application, error) { - members, err := store.client.SMembers(ctx, setKey).Result() - if err != nil { - return nil, fmt.Errorf("%s: %w", operation, err) - } - if len(members) == 0 { - return nil, nil - } - - primaryKeys := make([]string, len(members)) - for index, member := range members { - primaryKeys[index] = store.keys.Application(common.ApplicationID(member)) - } - - payloads, err := store.client.MGet(ctx, primaryKeys...).Result() - if err != nil { - return nil, fmt.Errorf("%s: %w", operation, err) - } - - records := make([]application.Application, 0, len(payloads)) - for _, entry := range payloads { - if entry == nil { - continue - } - raw, ok := entry.(string) - if !ok { - return nil, fmt.Errorf("%s: unexpected payload type %T", operation, entry) - } - record, err := UnmarshalApplication([]byte(raw)) - if err != nil { - return nil, fmt.Errorf("%s: %w", operation, err) - } - records = append(records, record) - } - - return records, nil -} - -// UpdateStatus applies one status transition in a compare-and-swap fashion. 
-func (store *ApplicationStore) UpdateStatus(ctx context.Context, input ports.UpdateApplicationStatusInput) error { - if store == nil || store.client == nil { - return errors.New("update application status: nil store") - } - if ctx == nil { - return errors.New("update application status: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("update application status: %w", err) - } - - if err := application.Transition(input.ExpectedFrom, input.To); err != nil { - return err - } - - primaryKey := store.keys.Application(input.ApplicationID) - at := input.At.UTC() - - watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error { - payload, getErr := tx.Get(ctx, primaryKey).Bytes() - switch { - case errors.Is(getErr, redis.Nil): - return application.ErrNotFound - case getErr != nil: - return fmt.Errorf("update application status: %w", getErr) - } - - existing, err := UnmarshalApplication(payload) - if err != nil { - return fmt.Errorf("update application status: %w", err) - } - if existing.Status != input.ExpectedFrom { - return fmt.Errorf("update application status: %w", application.ErrConflict) - } - - existing.Status = input.To - decidedAt := at - existing.DecidedAt = &decidedAt - - encoded, err := MarshalApplication(existing) - if err != nil { - return fmt.Errorf("update application status: %w", err) - } - - activeLookupKey := store.keys.UserGameApplication(existing.ApplicantUserID, existing.GameID) - - _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, primaryKey, encoded, ApplicationRecordTTL) - if input.To == application.StatusRejected { - pipe.Del(ctx, activeLookupKey) - } - return nil - }) - return err - }, primaryKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("update application status: %w", application.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// Ensure ApplicationStore satisfies the ports.ApplicationStore interface -// 
at compile time. -var _ ports.ApplicationStore = (*ApplicationStore)(nil) diff --git a/lobby/internal/adapters/redisstate/applicationstore_test.go b/lobby/internal/adapters/redisstate/applicationstore_test.go deleted file mode 100644 index f6e2c58..0000000 --- a/lobby/internal/adapters/redisstate/applicationstore_test.go +++ /dev/null @@ -1,360 +0,0 @@ -package redisstate_test - -import ( - "context" - "errors" - "sort" - "sync" - "sync/atomic" - "testing" - "time" - - "galaxy/lobby/internal/adapters/redisstate" - "galaxy/lobby/internal/domain/application" - "galaxy/lobby/internal/domain/common" - "galaxy/lobby/internal/ports" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func newApplicationTestStore(t *testing.T) (*redisstate.ApplicationStore, *miniredis.Miniredis, *redis.Client) { - t.Helper() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { - _ = client.Close() - }) - - store, err := redisstate.NewApplicationStore(client) - require.NoError(t, err) - - return store, server, client -} - -func fixtureApplication(t *testing.T, id common.ApplicationID, userID string, gameID common.GameID) application.Application { - t.Helper() - - now := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC) - record, err := application.New(application.NewApplicationInput{ - ApplicationID: id, - GameID: gameID, - ApplicantUserID: userID, - RaceName: "Spring Racer", - Now: now, - }) - require.NoError(t, err) - return record -} - -func TestNewApplicationStoreRejectsNilClient(t *testing.T) { - _, err := redisstate.NewApplicationStore(nil) - require.Error(t, err) -} - -func TestApplicationStoreSaveAndGet(t *testing.T) { - ctx := context.Background() - store, _, client := newApplicationTestStore(t) - - record := fixtureApplication(t, "application-a", "user-1", "game-1") - require.NoError(t, store.Save(ctx, record)) - - 
got, err := store.Get(ctx, record.ApplicationID) - require.NoError(t, err) - assert.Equal(t, record.ApplicationID, got.ApplicationID) - assert.Equal(t, record.GameID, got.GameID) - assert.Equal(t, record.ApplicantUserID, got.ApplicantUserID) - assert.Equal(t, record.RaceName, got.RaceName) - assert.Equal(t, application.StatusSubmitted, got.Status) - assert.Nil(t, got.DecidedAt) - - byGame, err := client.SMembers(ctx, "lobby:game_applications:"+base64URL(record.GameID.String())).Result() - require.NoError(t, err) - assert.ElementsMatch(t, []string{record.ApplicationID.String()}, byGame) - - byUser, err := client.SMembers(ctx, "lobby:user_applications:"+base64URL(record.ApplicantUserID)).Result() - require.NoError(t, err) - assert.ElementsMatch(t, []string{record.ApplicationID.String()}, byUser) - - active, err := client.Get(ctx, - "lobby:user_game_application:"+base64URL(record.ApplicantUserID)+":"+base64URL(record.GameID.String()), - ).Result() - require.NoError(t, err) - assert.Equal(t, record.ApplicationID.String(), active) -} - -func TestApplicationStoreGetReturnsNotFound(t *testing.T) { - ctx := context.Background() - store, _, _ := newApplicationTestStore(t) - - _, err := store.Get(ctx, common.ApplicationID("application-missing")) - require.ErrorIs(t, err, application.ErrNotFound) -} - -func TestApplicationStoreSaveRejectsNonSubmitted(t *testing.T) { - ctx := context.Background() - store, _, _ := newApplicationTestStore(t) - - record := fixtureApplication(t, "application-a", "user-1", "game-1") - record.Status = application.StatusApproved - decidedAt := record.CreatedAt.Add(time.Minute) - record.DecidedAt = &decidedAt - - err := store.Save(ctx, record) - require.Error(t, err) - assert.False(t, errors.Is(err, application.ErrConflict)) -} - -func TestApplicationStoreSaveRejectsSecondActiveForSameUserGame(t *testing.T) { - ctx := context.Background() - store, _, _ := newApplicationTestStore(t) - - first := fixtureApplication(t, "application-a", "user-1", 
"game-1") - require.NoError(t, store.Save(ctx, first)) - - second := fixtureApplication(t, "application-b", "user-1", "game-1") - err := store.Save(ctx, second) - require.Error(t, err) - assert.True(t, errors.Is(err, application.ErrConflict)) - - _, err = store.Get(ctx, second.ApplicationID) - require.ErrorIs(t, err, application.ErrNotFound) -} - -func TestApplicationStoreSaveRejectsDuplicateApplicationID(t *testing.T) { - ctx := context.Background() - store, _, _ := newApplicationTestStore(t) - - first := fixtureApplication(t, "application-a", "user-1", "game-1") - require.NoError(t, store.Save(ctx, first)) - - err := store.Save(ctx, first) - require.Error(t, err) - assert.True(t, errors.Is(err, application.ErrConflict)) -} - -func TestApplicationStoreSaveAllowsSameUserDifferentGame(t *testing.T) { - ctx := context.Background() - store, _, _ := newApplicationTestStore(t) - - first := fixtureApplication(t, "application-a", "user-1", "game-1") - second := fixtureApplication(t, "application-b", "user-1", "game-2") - - require.NoError(t, store.Save(ctx, first)) - require.NoError(t, store.Save(ctx, second)) - - byUser, err := store.GetByUser(ctx, "user-1") - require.NoError(t, err) - require.Len(t, byUser, 2) -} - -func TestApplicationStoreUpdateStatusApproveKeepsActiveKey(t *testing.T) { - ctx := context.Background() - store, _, client := newApplicationTestStore(t) - - record := fixtureApplication(t, "application-a", "user-1", "game-1") - require.NoError(t, store.Save(ctx, record)) - - at := record.CreatedAt.Add(time.Hour) - require.NoError(t, store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{ - ApplicationID: record.ApplicationID, - ExpectedFrom: application.StatusSubmitted, - To: application.StatusApproved, - At: at, - })) - - got, err := store.Get(ctx, record.ApplicationID) - require.NoError(t, err) - assert.Equal(t, application.StatusApproved, got.Status) - require.NotNil(t, got.DecidedAt) - assert.True(t, got.DecidedAt.Equal(at.UTC())) - - activeKey := 
"lobby:user_game_application:" + base64URL(record.ApplicantUserID) + ":" + base64URL(record.GameID.String()) - stored, err := client.Get(ctx, activeKey).Result() - require.NoError(t, err) - assert.Equal(t, record.ApplicationID.String(), stored) -} - -func TestApplicationStoreUpdateStatusRejectClearsActiveKey(t *testing.T) { - ctx := context.Background() - store, _, client := newApplicationTestStore(t) - - record := fixtureApplication(t, "application-a", "user-1", "game-1") - require.NoError(t, store.Save(ctx, record)) - - at := record.CreatedAt.Add(time.Hour) - require.NoError(t, store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{ - ApplicationID: record.ApplicationID, - ExpectedFrom: application.StatusSubmitted, - To: application.StatusRejected, - At: at, - })) - - got, err := store.Get(ctx, record.ApplicationID) - require.NoError(t, err) - assert.Equal(t, application.StatusRejected, got.Status) - require.NotNil(t, got.DecidedAt) - - activeKey := "lobby:user_game_application:" + base64URL(record.ApplicantUserID) + ":" + base64URL(record.GameID.String()) - _, err = client.Get(ctx, activeKey).Result() - require.ErrorIs(t, err, redis.Nil) - - // After rejection, the same user may re-apply to the same game. 
- reapplied := fixtureApplication(t, "application-b", "user-1", "game-1") - require.NoError(t, store.Save(ctx, reapplied)) -} - -func TestApplicationStoreUpdateStatusRejectsInvalidTransitionWithoutMutation(t *testing.T) { - ctx := context.Background() - store, _, _ := newApplicationTestStore(t) - - record := fixtureApplication(t, "application-a", "user-1", "game-1") - require.NoError(t, store.Save(ctx, record)) - - err := store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{ - ApplicationID: record.ApplicationID, - ExpectedFrom: application.StatusApproved, - To: application.StatusSubmitted, - At: record.CreatedAt.Add(time.Minute), - }) - require.Error(t, err) - assert.True(t, errors.Is(err, application.ErrInvalidTransition)) - - got, err := store.Get(ctx, record.ApplicationID) - require.NoError(t, err) - assert.Equal(t, application.StatusSubmitted, got.Status) - assert.Nil(t, got.DecidedAt) -} - -func TestApplicationStoreUpdateStatusReturnsConflictOnExpectedFromMismatch(t *testing.T) { - ctx := context.Background() - store, _, _ := newApplicationTestStore(t) - - record := fixtureApplication(t, "application-a", "user-1", "game-1") - require.NoError(t, store.Save(ctx, record)) - - require.NoError(t, store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{ - ApplicationID: record.ApplicationID, - ExpectedFrom: application.StatusSubmitted, - To: application.StatusApproved, - At: record.CreatedAt.Add(time.Minute), - })) - - err := store.UpdateStatus(ctx, ports.UpdateApplicationStatusInput{ - ApplicationID: record.ApplicationID, - ExpectedFrom: application.StatusSubmitted, - To: application.StatusRejected, - At: record.CreatedAt.Add(2 * time.Minute), - }) - require.Error(t, err) - assert.True(t, errors.Is(err, application.ErrConflict)) -} - -func TestApplicationStoreUpdateStatusReturnsNotFoundForMissingRecord(t *testing.T) { - ctx := context.Background() - store, _, _ := newApplicationTestStore(t) - - err := store.UpdateStatus(ctx, 
ports.UpdateApplicationStatusInput{ - ApplicationID: common.ApplicationID("application-missing"), - ExpectedFrom: application.StatusSubmitted, - To: application.StatusApproved, - At: time.Now().UTC(), - }) - require.ErrorIs(t, err, application.ErrNotFound) -} - -func TestApplicationStoreGetByGameAndByUser(t *testing.T) { - ctx := context.Background() - store, _, _ := newApplicationTestStore(t) - - a1 := fixtureApplication(t, "application-a1", "user-1", "game-1") - a2 := fixtureApplication(t, "application-a2", "user-2", "game-1") - a3 := fixtureApplication(t, "application-a3", "user-1", "game-2") - - for _, record := range []application.Application{a1, a2, a3} { - require.NoError(t, store.Save(ctx, record)) - } - - byGame1, err := store.GetByGame(ctx, "game-1") - require.NoError(t, err) - require.Len(t, byGame1, 2) - - byUser1, err := store.GetByUser(ctx, "user-1") - require.NoError(t, err) - require.Len(t, byUser1, 2) - - ids := collectApplicationIDs(byUser1) - sort.Strings(ids) - assert.Equal(t, []string{"application-a1", "application-a3"}, ids) - - byUser3, err := store.GetByUser(ctx, "user-missing") - require.NoError(t, err) - assert.Empty(t, byUser3) -} - -func TestApplicationStoreGetByGameDropsStaleIndexEntries(t *testing.T) { - ctx := context.Background() - store, server, _ := newApplicationTestStore(t) - - record := fixtureApplication(t, "application-a", "user-1", "game-1") - require.NoError(t, store.Save(ctx, record)) - - server.Del("lobby:applications:" + base64URL(record.ApplicationID.String())) - - records, err := store.GetByGame(ctx, record.GameID) - require.NoError(t, err) - assert.Empty(t, records) -} - -func TestApplicationStoreConcurrentSaveHasExactlyOneWinner(t *testing.T) { - ctx := context.Background() - _, _, client := newApplicationTestStore(t) - - storeA, err := redisstate.NewApplicationStore(client) - require.NoError(t, err) - storeB, err := redisstate.NewApplicationStore(client) - require.NoError(t, err) - - recordA := fixtureApplication(t, 
"application-a", "user-1", "game-1") - recordB := fixtureApplication(t, "application-b", "user-1", "game-1") - - var ( - wg sync.WaitGroup - successes atomic.Int32 - conflicts atomic.Int32 - others atomic.Int32 - ) - - apply := func(target *redisstate.ApplicationStore, record application.Application) { - defer wg.Done() - err := target.Save(ctx, record) - switch { - case err == nil: - successes.Add(1) - case errors.Is(err, application.ErrConflict): - conflicts.Add(1) - default: - others.Add(1) - } - } - - wg.Add(2) - go apply(storeA, recordA) - go apply(storeB, recordB) - wg.Wait() - - assert.Equal(t, int32(0), others.Load(), "unexpected non-conflict error") - assert.Equal(t, int32(1), successes.Load(), "expected exactly one success") - assert.Equal(t, int32(1), conflicts.Load(), "expected exactly one conflict") -} - -func collectApplicationIDs(records []application.Application) []string { - ids := make([]string, len(records)) - for index, record := range records { - ids[index] = record.ApplicationID.String() - } - return ids -} diff --git a/lobby/internal/adapters/redisstate/codecs.go b/lobby/internal/adapters/redisstate/codecs.go deleted file mode 100644 index ce7d65b..0000000 --- a/lobby/internal/adapters/redisstate/codecs.go +++ /dev/null @@ -1,172 +0,0 @@ -package redisstate - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "time" - - "galaxy/lobby/internal/domain/common" - "galaxy/lobby/internal/domain/game" -) - -// gameRecord stores the strict Redis JSON shape used for one game record. 
-type gameRecord struct { - GameID string `json:"game_id"` - GameName string `json:"game_name"` - Description string `json:"description,omitempty"` - GameType game.GameType `json:"game_type"` - OwnerUserID string `json:"owner_user_id,omitempty"` - Status game.Status `json:"status"` - MinPlayers int `json:"min_players"` - MaxPlayers int `json:"max_players"` - StartGapHours int `json:"start_gap_hours"` - StartGapPlayers int `json:"start_gap_players"` - EnrollmentEndsAtSec int64 `json:"enrollment_ends_at_sec"` - TurnSchedule string `json:"turn_schedule"` - TargetEngineVersion string `json:"target_engine_version"` - CreatedAtMS int64 `json:"created_at_ms"` - UpdatedAtMS int64 `json:"updated_at_ms"` - StartedAtMS *int64 `json:"started_at_ms,omitempty"` - FinishedAtMS *int64 `json:"finished_at_ms,omitempty"` - CurrentTurn int `json:"current_turn"` - RuntimeStatus string `json:"runtime_status,omitempty"` - EngineHealthSummary string `json:"engine_health_summary,omitempty"` - RuntimeBinding *runtimeBindingRecord `json:"runtime_binding,omitempty"` -} - -// runtimeBindingRecord stores the strict Redis JSON shape used for the -// optional runtime binding object on one game record. -type runtimeBindingRecord struct { - ContainerID string `json:"container_id"` - EngineEndpoint string `json:"engine_endpoint"` - RuntimeJobID string `json:"runtime_job_id"` - BoundAtMS int64 `json:"bound_at_ms"` -} - -// MarshalGame encodes record into the strict Redis JSON shape used for -// game records. The record is re-validated before marshalling. 
-func MarshalGame(record game.Game) ([]byte, error) { - if err := record.Validate(); err != nil { - return nil, fmt.Errorf("marshal redis game record: %w", err) - } - - stored := gameRecord{ - GameID: record.GameID.String(), - GameName: record.GameName, - Description: record.Description, - GameType: record.GameType, - OwnerUserID: record.OwnerUserID, - Status: record.Status, - MinPlayers: record.MinPlayers, - MaxPlayers: record.MaxPlayers, - StartGapHours: record.StartGapHours, - StartGapPlayers: record.StartGapPlayers, - EnrollmentEndsAtSec: record.EnrollmentEndsAt.UTC().Unix(), - TurnSchedule: record.TurnSchedule, - TargetEngineVersion: record.TargetEngineVersion, - CreatedAtMS: record.CreatedAt.UTC().UnixMilli(), - UpdatedAtMS: record.UpdatedAt.UTC().UnixMilli(), - StartedAtMS: optionalUnixMilli(record.StartedAt), - FinishedAtMS: optionalUnixMilli(record.FinishedAt), - CurrentTurn: record.RuntimeSnapshot.CurrentTurn, - RuntimeStatus: record.RuntimeSnapshot.RuntimeStatus, - EngineHealthSummary: record.RuntimeSnapshot.EngineHealthSummary, - } - if record.RuntimeBinding != nil { - stored.RuntimeBinding = &runtimeBindingRecord{ - ContainerID: record.RuntimeBinding.ContainerID, - EngineEndpoint: record.RuntimeBinding.EngineEndpoint, - RuntimeJobID: record.RuntimeBinding.RuntimeJobID, - BoundAtMS: record.RuntimeBinding.BoundAt.UTC().UnixMilli(), - } - } - - payload, err := json.Marshal(stored) - if err != nil { - return nil, fmt.Errorf("marshal redis game record: %w", err) - } - - return payload, nil -} - -// UnmarshalGame decodes payload from the strict Redis JSON shape used for -// game records. The decoded record is validated before returning. 
-func UnmarshalGame(payload []byte) (game.Game, error) { - var stored gameRecord - if err := decodeStrictJSON("decode redis game record", payload, &stored); err != nil { - return game.Game{}, err - } - - record := game.Game{ - GameID: common.GameID(stored.GameID), - GameName: stored.GameName, - Description: stored.Description, - GameType: stored.GameType, - OwnerUserID: stored.OwnerUserID, - Status: stored.Status, - MinPlayers: stored.MinPlayers, - MaxPlayers: stored.MaxPlayers, - StartGapHours: stored.StartGapHours, - StartGapPlayers: stored.StartGapPlayers, - EnrollmentEndsAt: time.Unix(stored.EnrollmentEndsAtSec, 0).UTC(), - TurnSchedule: stored.TurnSchedule, - TargetEngineVersion: stored.TargetEngineVersion, - CreatedAt: time.UnixMilli(stored.CreatedAtMS).UTC(), - UpdatedAt: time.UnixMilli(stored.UpdatedAtMS).UTC(), - StartedAt: inflateOptionalTime(stored.StartedAtMS), - FinishedAt: inflateOptionalTime(stored.FinishedAtMS), - RuntimeSnapshot: game.RuntimeSnapshot{ - CurrentTurn: stored.CurrentTurn, - RuntimeStatus: stored.RuntimeStatus, - EngineHealthSummary: stored.EngineHealthSummary, - }, - } - if stored.RuntimeBinding != nil { - record.RuntimeBinding = &game.RuntimeBinding{ - ContainerID: stored.RuntimeBinding.ContainerID, - EngineEndpoint: stored.RuntimeBinding.EngineEndpoint, - RuntimeJobID: stored.RuntimeBinding.RuntimeJobID, - BoundAt: time.UnixMilli(stored.RuntimeBinding.BoundAtMS).UTC(), - } - } - if err := record.Validate(); err != nil { - return game.Game{}, fmt.Errorf("decode redis game record: %w", err) - } - - return record, nil -} - -func decodeStrictJSON(operation string, payload []byte, target any) error { - decoder := json.NewDecoder(bytes.NewReader(payload)) - decoder.DisallowUnknownFields() - - if err := decoder.Decode(target); err != nil { - return fmt.Errorf("%s: %w", operation, err) - } - if err := decoder.Decode(&struct{}{}); err != io.EOF { - if err == nil { - return fmt.Errorf("%s: unexpected trailing JSON input", operation) - } - 
return fmt.Errorf("%s: %w", operation, err) - } - - return nil -} - -func optionalUnixMilli(value *time.Time) *int64 { - if value == nil { - return nil - } - milliseconds := value.UTC().UnixMilli() - return &milliseconds -} - -func inflateOptionalTime(value *int64) *time.Time { - if value == nil { - return nil - } - converted := time.UnixMilli(*value).UTC() - return &converted -} diff --git a/lobby/internal/adapters/redisstate/codecs_application.go b/lobby/internal/adapters/redisstate/codecs_application.go deleted file mode 100644 index b82dd54..0000000 --- a/lobby/internal/adapters/redisstate/codecs_application.go +++ /dev/null @@ -1,73 +0,0 @@ -package redisstate - -import ( - "encoding/json" - "fmt" - "time" - - "galaxy/lobby/internal/domain/application" - "galaxy/lobby/internal/domain/common" -) - -// applicationRecord stores the strict Redis JSON shape used for one -// application record. -type applicationRecord struct { - ApplicationID string `json:"application_id"` - GameID string `json:"game_id"` - ApplicantUserID string `json:"applicant_user_id"` - RaceName string `json:"race_name"` - Status application.Status `json:"status"` - CreatedAtMS int64 `json:"created_at_ms"` - DecidedAtMS *int64 `json:"decided_at_ms,omitempty"` -} - -// MarshalApplication encodes record into the strict Redis JSON shape -// used for application records. The record is re-validated before -// marshalling. 
-func MarshalApplication(record application.Application) ([]byte, error) { - if err := record.Validate(); err != nil { - return nil, fmt.Errorf("marshal redis application record: %w", err) - } - - stored := applicationRecord{ - ApplicationID: record.ApplicationID.String(), - GameID: record.GameID.String(), - ApplicantUserID: record.ApplicantUserID, - RaceName: record.RaceName, - Status: record.Status, - CreatedAtMS: record.CreatedAt.UTC().UnixMilli(), - DecidedAtMS: optionalUnixMilli(record.DecidedAt), - } - - payload, err := json.Marshal(stored) - if err != nil { - return nil, fmt.Errorf("marshal redis application record: %w", err) - } - - return payload, nil -} - -// UnmarshalApplication decodes payload from the strict Redis JSON shape -// used for application records. The decoded record is validated before -// returning. -func UnmarshalApplication(payload []byte) (application.Application, error) { - var stored applicationRecord - if err := decodeStrictJSON("decode redis application record", payload, &stored); err != nil { - return application.Application{}, err - } - - record := application.Application{ - ApplicationID: common.ApplicationID(stored.ApplicationID), - GameID: common.GameID(stored.GameID), - ApplicantUserID: stored.ApplicantUserID, - RaceName: stored.RaceName, - Status: stored.Status, - CreatedAt: time.UnixMilli(stored.CreatedAtMS).UTC(), - DecidedAt: inflateOptionalTime(stored.DecidedAtMS), - } - if err := record.Validate(); err != nil { - return application.Application{}, fmt.Errorf("decode redis application record: %w", err) - } - - return record, nil -} diff --git a/lobby/internal/adapters/redisstate/codecs_invite.go b/lobby/internal/adapters/redisstate/codecs_invite.go deleted file mode 100644 index 34117d6..0000000 --- a/lobby/internal/adapters/redisstate/codecs_invite.go +++ /dev/null @@ -1,77 +0,0 @@ -package redisstate - -import ( - "encoding/json" - "fmt" - "time" - - "galaxy/lobby/internal/domain/common" - 
"galaxy/lobby/internal/domain/invite" -) - -// inviteRecord stores the strict Redis JSON shape used for one invite -// record. -type inviteRecord struct { - InviteID string `json:"invite_id"` - GameID string `json:"game_id"` - InviterUserID string `json:"inviter_user_id"` - InviteeUserID string `json:"invitee_user_id"` - RaceName string `json:"race_name,omitempty"` - Status invite.Status `json:"status"` - CreatedAtMS int64 `json:"created_at_ms"` - ExpiresAtMS int64 `json:"expires_at_ms"` - DecidedAtMS *int64 `json:"decided_at_ms,omitempty"` -} - -// MarshalInvite encodes record into the strict Redis JSON shape used for -// invite records. The record is re-validated before marshalling. -func MarshalInvite(record invite.Invite) ([]byte, error) { - if err := record.Validate(); err != nil { - return nil, fmt.Errorf("marshal redis invite record: %w", err) - } - - stored := inviteRecord{ - InviteID: record.InviteID.String(), - GameID: record.GameID.String(), - InviterUserID: record.InviterUserID, - InviteeUserID: record.InviteeUserID, - RaceName: record.RaceName, - Status: record.Status, - CreatedAtMS: record.CreatedAt.UTC().UnixMilli(), - ExpiresAtMS: record.ExpiresAt.UTC().UnixMilli(), - DecidedAtMS: optionalUnixMilli(record.DecidedAt), - } - - payload, err := json.Marshal(stored) - if err != nil { - return nil, fmt.Errorf("marshal redis invite record: %w", err) - } - - return payload, nil -} - -// UnmarshalInvite decodes payload from the strict Redis JSON shape used -// for invite records. The decoded record is validated before returning. 
-func UnmarshalInvite(payload []byte) (invite.Invite, error) { - var stored inviteRecord - if err := decodeStrictJSON("decode redis invite record", payload, &stored); err != nil { - return invite.Invite{}, err - } - - record := invite.Invite{ - InviteID: common.InviteID(stored.InviteID), - GameID: common.GameID(stored.GameID), - InviterUserID: stored.InviterUserID, - InviteeUserID: stored.InviteeUserID, - RaceName: stored.RaceName, - Status: stored.Status, - CreatedAt: time.UnixMilli(stored.CreatedAtMS).UTC(), - ExpiresAt: time.UnixMilli(stored.ExpiresAtMS).UTC(), - DecidedAt: inflateOptionalTime(stored.DecidedAtMS), - } - if err := record.Validate(); err != nil { - return invite.Invite{}, fmt.Errorf("decode redis invite record: %w", err) - } - - return record, nil -} diff --git a/lobby/internal/adapters/redisstate/codecs_membership.go b/lobby/internal/adapters/redisstate/codecs_membership.go deleted file mode 100644 index 81313ba..0000000 --- a/lobby/internal/adapters/redisstate/codecs_membership.go +++ /dev/null @@ -1,75 +0,0 @@ -package redisstate - -import ( - "encoding/json" - "fmt" - "time" - - "galaxy/lobby/internal/domain/common" - "galaxy/lobby/internal/domain/membership" -) - -// membershipRecord stores the strict Redis JSON shape used for one -// membership record. -type membershipRecord struct { - MembershipID string `json:"membership_id"` - GameID string `json:"game_id"` - UserID string `json:"user_id"` - RaceName string `json:"race_name"` - CanonicalKey string `json:"canonical_key"` - Status membership.Status `json:"status"` - JoinedAtMS int64 `json:"joined_at_ms"` - RemovedAtMS *int64 `json:"removed_at_ms,omitempty"` -} - -// MarshalMembership encodes record into the strict Redis JSON shape used -// for membership records. The record is re-validated before marshalling. 
-func MarshalMembership(record membership.Membership) ([]byte, error) { - if err := record.Validate(); err != nil { - return nil, fmt.Errorf("marshal redis membership record: %w", err) - } - - stored := membershipRecord{ - MembershipID: record.MembershipID.String(), - GameID: record.GameID.String(), - UserID: record.UserID, - RaceName: record.RaceName, - CanonicalKey: record.CanonicalKey, - Status: record.Status, - JoinedAtMS: record.JoinedAt.UTC().UnixMilli(), - RemovedAtMS: optionalUnixMilli(record.RemovedAt), - } - - payload, err := json.Marshal(stored) - if err != nil { - return nil, fmt.Errorf("marshal redis membership record: %w", err) - } - - return payload, nil -} - -// UnmarshalMembership decodes payload from the strict Redis JSON shape -// used for membership records. The decoded record is validated before -// returning. -func UnmarshalMembership(payload []byte) (membership.Membership, error) { - var stored membershipRecord - if err := decodeStrictJSON("decode redis membership record", payload, &stored); err != nil { - return membership.Membership{}, err - } - - record := membership.Membership{ - MembershipID: common.MembershipID(stored.MembershipID), - GameID: common.GameID(stored.GameID), - UserID: stored.UserID, - RaceName: stored.RaceName, - CanonicalKey: stored.CanonicalKey, - Status: stored.Status, - JoinedAt: time.UnixMilli(stored.JoinedAtMS).UTC(), - RemovedAt: inflateOptionalTime(stored.RemovedAtMS), - } - if err := record.Validate(); err != nil { - return membership.Membership{}, fmt.Errorf("decode redis membership record: %w", err) - } - - return record, nil -} diff --git a/lobby/internal/adapters/redisstate/codecs_racename.go b/lobby/internal/adapters/redisstate/codecs_racename.go deleted file mode 100644 index 3258982..0000000 --- a/lobby/internal/adapters/redisstate/codecs_racename.go +++ /dev/null @@ -1,111 +0,0 @@ -package redisstate - -import ( - "encoding/json" - "fmt" -) - -// registeredRecord stores the strict Redis JSON shape of one 
registered -// race name. The canonical key is stored only as the Redis key suffix and -// is not duplicated inside the blob. -type registeredRecord struct { - UserID string `json:"user_id"` - RaceName string `json:"race_name"` - SourceGameID string `json:"source_game_id"` - RegisteredAtMS int64 `json:"registered_at_ms"` -} - -// reservationStatusReserved marks a per-game race name reservation that -// has not yet been promoted by capability evaluation. -const reservationStatusReserved = "reserved" - -// reservationStatusPending marks a reservation that has been promoted to -// pending_registration by the capability evaluator at game_finished. -const reservationStatusPending = "pending_registration" - -// reservationRecord stores the strict Redis JSON shape of one per-game -// race name reservation. The game_id and canonical key are carried by the -// Redis key suffix; the blob never duplicates them. -type reservationRecord struct { - UserID string `json:"user_id"` - RaceName string `json:"race_name"` - ReservedAtMS int64 `json:"reserved_at_ms"` - Status string `json:"status"` - EligibleUntilMS *int64 `json:"eligible_until_ms,omitempty"` -} - -// canonicalLookupRecord stores the eager canonical-lookup cache entry -// used by Check to return availability without scanning the authoritative -// keys. GameID is populated only for reservation and pending_registration -// kinds; it is omitted for registered bindings. -type canonicalLookupRecord struct { - Kind string `json:"kind"` - HolderUserID string `json:"holder_user_id"` - GameID string `json:"game_id,omitempty"` -} - -// marshalRegisteredRecord encodes record into the strict Redis JSON shape -// used for registered race names. 
-func marshalRegisteredRecord(record registeredRecord) ([]byte, error) { - payload, err := json.Marshal(record) - if err != nil { - return nil, fmt.Errorf("marshal redis registered race name record: %w", err) - } - - return payload, nil -} - -// unmarshalRegisteredRecord decodes payload from the strict Redis JSON -// shape used for registered race names. -func unmarshalRegisteredRecord(payload []byte) (registeredRecord, error) { - var record registeredRecord - if err := decodeStrictJSON("decode redis registered race name record", payload, &record); err != nil { - return registeredRecord{}, err - } - - return record, nil -} - -// marshalReservationRecord encodes record into the strict Redis JSON -// shape used for per-game race name reservations. -func marshalReservationRecord(record reservationRecord) ([]byte, error) { - payload, err := json.Marshal(record) - if err != nil { - return nil, fmt.Errorf("marshal redis race name reservation record: %w", err) - } - - return payload, nil -} - -// unmarshalReservationRecord decodes payload from the strict Redis JSON -// shape used for per-game race name reservations. -func unmarshalReservationRecord(payload []byte) (reservationRecord, error) { - var record reservationRecord - if err := decodeStrictJSON("decode redis race name reservation record", payload, &record); err != nil { - return reservationRecord{}, err - } - - return record, nil -} - -// marshalCanonicalLookupRecord encodes record into the strict Redis JSON -// shape used for canonical-lookup cache entries. -func marshalCanonicalLookupRecord(record canonicalLookupRecord) ([]byte, error) { - payload, err := json.Marshal(record) - if err != nil { - return nil, fmt.Errorf("marshal redis race name canonical lookup record: %w", err) - } - - return payload, nil -} - -// unmarshalCanonicalLookupRecord decodes payload from the strict Redis -// JSON shape used for canonical-lookup cache entries. 
-func unmarshalCanonicalLookupRecord(payload []byte) (canonicalLookupRecord, error) { - var record canonicalLookupRecord - if err := decodeStrictJSON("decode redis race name canonical lookup record", payload, &record); err != nil { - return canonicalLookupRecord{}, err - } - - return record, nil -} diff --git a/lobby/internal/adapters/redisstate/doc.go b/lobby/internal/adapters/redisstate/doc.go index 2c4ede8..aab4ca4 100644 --- a/lobby/internal/adapters/redisstate/doc.go +++ b/lobby/internal/adapters/redisstate/doc.go @@ -1,10 +1,11 @@ -// Package redisstate defines the frozen Game Lobby Service Redis keyspace, -// strict JSON record shapes, and low-level mutation helpers used by the -// Game Lobby store adapters. +// Package redisstate defines the Game Lobby Service Redis keyspace and +// the adapters for the runtime-coordination state that intentionally +// stays on Redis after the PG_PLAN.md §6A and §6B migrations. // -// Adapters in this package implement ports.GameStore, -// ports.ApplicationStore, ports.InviteStore, and ports.MembershipStore on -// top of a `*redis.Client`. Every marshal and unmarshal round-trip calls -// the domain-level Validate method to guarantee that the store never -// exposes malformed records. +// Adapters in this package implement ports.GameTurnStatsStore, +// ports.GapActivationStore, ports.EvaluationGuardStore, and +// ports.StreamOffsetStore plus the StreamLagProbe used for telemetry. The +// durable enrollment entities (game, application, invite, membership) +// and the Race Name Directory live in PostgreSQL; their previous Redis +// adapters and codecs have been removed. 
package redisstate diff --git a/lobby/internal/adapters/redisstate/gamestore.go b/lobby/internal/adapters/redisstate/gamestore.go deleted file mode 100644 index 57426bc..0000000 --- a/lobby/internal/adapters/redisstate/gamestore.go +++ /dev/null @@ -1,454 +0,0 @@ -package redisstate - -import ( - "context" - "errors" - "fmt" - "strings" - - "galaxy/lobby/internal/domain/common" - "galaxy/lobby/internal/domain/game" - "galaxy/lobby/internal/ports" - - "github.com/redis/go-redis/v9" -) - -// GameStore provides Redis-backed durable storage for game records. -type GameStore struct { - client *redis.Client - keys Keyspace -} - -// NewGameStore constructs one Redis-backed game store. It returns an -// error when client is nil. -func NewGameStore(client *redis.Client) (*GameStore, error) { - if client == nil { - return nil, errors.New("new game store: nil redis client") - } - - return &GameStore{ - client: client, - keys: Keyspace{}, - }, nil -} - -// Save upserts record and rewrites the status secondary index when the -// status changes. 
-func (store *GameStore) Save(ctx context.Context, record game.Game) error { - if store == nil || store.client == nil { - return errors.New("save game: nil store") - } - if ctx == nil { - return errors.New("save game: nil context") - } - if err := record.Validate(); err != nil { - return fmt.Errorf("save game: %w", err) - } - - payload, err := MarshalGame(record) - if err != nil { - return fmt.Errorf("save game: %w", err) - } - - primaryKey := store.keys.Game(record.GameID) - newIndexKey := store.keys.GamesByStatus(record.Status) - member := record.GameID.String() - createdAtScore := CreatedAtScore(record.CreatedAt) - - watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error { - var previousStatus game.Status - existingPayload, getErr := tx.Get(ctx, primaryKey).Bytes() - switch { - case errors.Is(getErr, redis.Nil): - previousStatus = "" - case getErr != nil: - return fmt.Errorf("save game: %w", getErr) - default: - existing, err := UnmarshalGame(existingPayload) - if err != nil { - return fmt.Errorf("save game: %w", err) - } - previousStatus = existing.Status - } - - _, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, primaryKey, payload, GameRecordTTL) - if previousStatus != "" && previousStatus != record.Status { - pipe.ZRem(ctx, store.keys.GamesByStatus(previousStatus), member) - } - pipe.ZAdd(ctx, newIndexKey, redis.Z{ - Score: createdAtScore, - Member: member, - }) - if owner := strings.TrimSpace(record.OwnerUserID); owner != "" { - pipe.SAdd(ctx, store.keys.GamesByOwner(owner), member) - } - return nil - }) - return err - }, primaryKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("save game: %w", game.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// Get returns the record identified by gameID. 
-func (store *GameStore) Get(ctx context.Context, gameID common.GameID) (game.Game, error) { - if store == nil || store.client == nil { - return game.Game{}, errors.New("get game: nil store") - } - if ctx == nil { - return game.Game{}, errors.New("get game: nil context") - } - if err := gameID.Validate(); err != nil { - return game.Game{}, fmt.Errorf("get game: %w", err) - } - - payload, err := store.client.Get(ctx, store.keys.Game(gameID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return game.Game{}, game.ErrNotFound - case err != nil: - return game.Game{}, fmt.Errorf("get game: %w", err) - } - - record, err := UnmarshalGame(payload) - if err != nil { - return game.Game{}, fmt.Errorf("get game: %w", err) - } - - return record, nil -} - -// GetByStatus returns every record indexed under status. Stale index -// entries (primary key removed out-of-band) are dropped silently. -func (store *GameStore) GetByStatus(ctx context.Context, status game.Status) ([]game.Game, error) { - if store == nil || store.client == nil { - return nil, errors.New("get games by status: nil store") - } - if ctx == nil { - return nil, errors.New("get games by status: nil context") - } - if !status.IsKnown() { - return nil, fmt.Errorf("get games by status: status %q is unsupported", status) - } - - members, err := store.client.ZRange(ctx, store.keys.GamesByStatus(status), 0, -1).Result() - if err != nil { - return nil, fmt.Errorf("get games by status: %w", err) - } - if len(members) == 0 { - return nil, nil - } - - primaryKeys := make([]string, len(members)) - for index, member := range members { - primaryKeys[index] = store.keys.Game(common.GameID(member)) - } - - payloads, err := store.client.MGet(ctx, primaryKeys...).Result() - if err != nil { - return nil, fmt.Errorf("get games by status: %w", err) - } - - records := make([]game.Game, 0, len(payloads)) - for _, entry := range payloads { - if entry == nil { - continue - } - raw, ok := entry.(string) - if !ok { - return nil, 
fmt.Errorf("get games by status: unexpected payload type %T", entry) - } - record, err := UnmarshalGame([]byte(raw)) - if err != nil { - return nil, fmt.Errorf("get games by status: %w", err) - } - records = append(records, record) - } - - return records, nil -} - -// CountByStatus returns the number of game identifiers indexed under each -// known status. The map carries one entry per game.AllStatuses, with zero -// counts for empty buckets. The implementation issues one ZCARD per status -// in a single Redis pipeline so the cost stays O(number of statuses). -func (store *GameStore) CountByStatus(ctx context.Context) (map[game.Status]int, error) { - if store == nil || store.client == nil { - return nil, errors.New("count games by status: nil store") - } - if ctx == nil { - return nil, errors.New("count games by status: nil context") - } - - statuses := game.AllStatuses() - pipeline := store.client.Pipeline() - results := make([]*redis.IntCmd, len(statuses)) - for index, status := range statuses { - results[index] = pipeline.ZCard(ctx, store.keys.GamesByStatus(status)) - } - if _, err := pipeline.Exec(ctx); err != nil { - return nil, fmt.Errorf("count games by status: %w", err) - } - - counts := make(map[game.Status]int, len(statuses)) - for index, status := range statuses { - count, err := results[index].Result() - if err != nil { - return nil, fmt.Errorf("count games by status: %s: %w", status, err) - } - counts[status] = int(count) - } - return counts, nil -} - -// GetByOwner returns every record whose OwnerUserID equals userID. -// Stale index entries (primary key removed out-of-band) are dropped -// silently. The slice order is adapter-defined. 
-func (store *GameStore) GetByOwner(ctx context.Context, userID string) ([]game.Game, error) { - if store == nil || store.client == nil { - return nil, errors.New("get games by owner: nil store") - } - if ctx == nil { - return nil, errors.New("get games by owner: nil context") - } - trimmed := strings.TrimSpace(userID) - if trimmed == "" { - return nil, fmt.Errorf("get games by owner: user id must not be empty") - } - - members, err := store.client.SMembers(ctx, store.keys.GamesByOwner(trimmed)).Result() - if err != nil { - return nil, fmt.Errorf("get games by owner: %w", err) - } - if len(members) == 0 { - return nil, nil - } - - primaryKeys := make([]string, len(members)) - for index, member := range members { - primaryKeys[index] = store.keys.Game(common.GameID(member)) - } - - payloads, err := store.client.MGet(ctx, primaryKeys...).Result() - if err != nil { - return nil, fmt.Errorf("get games by owner: %w", err) - } - - records := make([]game.Game, 0, len(payloads)) - for _, entry := range payloads { - if entry == nil { - continue - } - raw, ok := entry.(string) - if !ok { - return nil, fmt.Errorf("get games by owner: unexpected payload type %T", entry) - } - record, err := UnmarshalGame([]byte(raw)) - if err != nil { - return nil, fmt.Errorf("get games by owner: %w", err) - } - records = append(records, record) - } - - return records, nil -} - -// UpdateStatus applies one status transition in a compare-and-swap -// fashion. 
-func (store *GameStore) UpdateStatus(ctx context.Context, input ports.UpdateStatusInput) error { - if store == nil || store.client == nil { - return errors.New("update game status: nil store") - } - if ctx == nil { - return errors.New("update game status: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("update game status: %w", err) - } - - if err := game.Transition(input.ExpectedFrom, input.To, input.Trigger); err != nil { - return err - } - - primaryKey := store.keys.Game(input.GameID) - member := input.GameID.String() - at := input.At.UTC() - - watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error { - payload, getErr := tx.Get(ctx, primaryKey).Bytes() - switch { - case errors.Is(getErr, redis.Nil): - return game.ErrNotFound - case getErr != nil: - return fmt.Errorf("update game status: %w", getErr) - } - - existing, err := UnmarshalGame(payload) - if err != nil { - return fmt.Errorf("update game status: %w", err) - } - if existing.Status != input.ExpectedFrom { - return fmt.Errorf("update game status: %w", game.ErrConflict) - } - - existing.Status = input.To - existing.UpdatedAt = at - if input.To == game.StatusRunning && existing.StartedAt == nil { - startedAt := at - existing.StartedAt = &startedAt - } - if input.To == game.StatusFinished && existing.FinishedAt == nil { - finishedAt := at - existing.FinishedAt = &finishedAt - } - - encoded, err := MarshalGame(existing) - if err != nil { - return fmt.Errorf("update game status: %w", err) - } - - _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, primaryKey, encoded, GameRecordTTL) - pipe.ZRem(ctx, store.keys.GamesByStatus(input.ExpectedFrom), member) - pipe.ZAdd(ctx, store.keys.GamesByStatus(input.To), redis.Z{ - Score: CreatedAtScore(existing.CreatedAt), - Member: member, - }) - return nil - }) - return err - }, primaryKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("update game status: %w", 
game.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// UpdateRuntimeSnapshot overwrites the denormalized runtime snapshot -// fields on the record identified by input.GameID. -func (store *GameStore) UpdateRuntimeSnapshot(ctx context.Context, input ports.UpdateRuntimeSnapshotInput) error { - if store == nil || store.client == nil { - return errors.New("update runtime snapshot: nil store") - } - if ctx == nil { - return errors.New("update runtime snapshot: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("update runtime snapshot: %w", err) - } - - primaryKey := store.keys.Game(input.GameID) - at := input.At.UTC() - - watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error { - payload, getErr := tx.Get(ctx, primaryKey).Bytes() - switch { - case errors.Is(getErr, redis.Nil): - return game.ErrNotFound - case getErr != nil: - return fmt.Errorf("update runtime snapshot: %w", getErr) - } - - existing, err := UnmarshalGame(payload) - if err != nil { - return fmt.Errorf("update runtime snapshot: %w", err) - } - - existing.RuntimeSnapshot = input.Snapshot - existing.UpdatedAt = at - - encoded, err := MarshalGame(existing) - if err != nil { - return fmt.Errorf("update runtime snapshot: %w", err) - } - - _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, primaryKey, encoded, GameRecordTTL) - return nil - }) - return err - }, primaryKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("update runtime snapshot: %w", game.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// UpdateRuntimeBinding overwrites the runtime binding metadata on the -// record identified by input.GameID. calls this method from -// the runtimejobresult worker after a successful container start. 
-func (store *GameStore) UpdateRuntimeBinding(ctx context.Context, input ports.UpdateRuntimeBindingInput) error { - if store == nil || store.client == nil { - return errors.New("update runtime binding: nil store") - } - if ctx == nil { - return errors.New("update runtime binding: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("update runtime binding: %w", err) - } - - primaryKey := store.keys.Game(input.GameID) - at := input.At.UTC() - - watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error { - payload, getErr := tx.Get(ctx, primaryKey).Bytes() - switch { - case errors.Is(getErr, redis.Nil): - return game.ErrNotFound - case getErr != nil: - return fmt.Errorf("update runtime binding: %w", getErr) - } - - existing, err := UnmarshalGame(payload) - if err != nil { - return fmt.Errorf("update runtime binding: %w", err) - } - - binding := input.Binding - existing.RuntimeBinding = &binding - existing.UpdatedAt = at - - encoded, err := MarshalGame(existing) - if err != nil { - return fmt.Errorf("update runtime binding: %w", err) - } - - _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, primaryKey, encoded, GameRecordTTL) - return nil - }) - return err - }, primaryKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("update runtime binding: %w", game.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// Ensure GameStore satisfies the ports.GameStore interface at compile -// time. 
-var _ ports.GameStore = (*GameStore)(nil) diff --git a/lobby/internal/adapters/redisstate/gamestore_test.go b/lobby/internal/adapters/redisstate/gamestore_test.go deleted file mode 100644 index 173e84f..0000000 --- a/lobby/internal/adapters/redisstate/gamestore_test.go +++ /dev/null @@ -1,557 +0,0 @@ -package redisstate_test - -import ( - "context" - "encoding/base64" - "errors" - "sync" - "sync/atomic" - "testing" - "time" - - "galaxy/lobby/internal/adapters/redisstate" - "galaxy/lobby/internal/domain/common" - "galaxy/lobby/internal/domain/game" - "galaxy/lobby/internal/ports" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func newTestStore(t *testing.T) (*redisstate.GameStore, *miniredis.Miniredis, *redis.Client) { - t.Helper() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { - _ = client.Close() - }) - - store, err := redisstate.NewGameStore(client) - require.NoError(t, err) - - return store, server, client -} - -func fixtureGame(t *testing.T) game.Game { - t.Helper() - - now := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC) - record, err := game.New(game.NewGameInput{ - GameID: common.GameID("game-1"), - GameName: "Spring Classic", - Description: "first public game", - GameType: game.GameTypePublic, - MinPlayers: 4, - MaxPlayers: 8, - StartGapHours: 24, - StartGapPlayers: 2, - EnrollmentEndsAt: now.Add(7 * 24 * time.Hour), - TurnSchedule: "0 18 * * *", - TargetEngineVersion: "v1.2.3", - Now: now, - }) - require.NoError(t, err) - - return record -} - -func statusIndexMembers(t *testing.T, client *redis.Client, status game.Status) []string { - t.Helper() - - members, err := client.ZRange(context.Background(), "lobby:games_by_status:"+base64URL(string(status)), 0, -1).Result() - require.NoError(t, err) - return members -} - -func TestNewGameStoreRejectsNilClient(t *testing.T) { - _, err 
:= redisstate.NewGameStore(nil) - require.Error(t, err) -} - -func TestGameStoreSaveAndGet(t *testing.T) { - ctx := context.Background() - store, _, client := newTestStore(t) - - record := fixtureGame(t) - require.NoError(t, store.Save(ctx, record)) - - got, err := store.Get(ctx, record.GameID) - require.NoError(t, err) - assert.Equal(t, record.GameID, got.GameID) - assert.Equal(t, record.Status, got.Status) - assert.Equal(t, record.GameName, got.GameName) - assert.Equal(t, record.MinPlayers, got.MinPlayers) - assert.Equal(t, record.MaxPlayers, got.MaxPlayers) - assert.Equal(t, record.EnrollmentEndsAt.Unix(), got.EnrollmentEndsAt.Unix()) - - members := statusIndexMembers(t, client, game.StatusDraft) - assert.Contains(t, members, record.GameID.String()) -} - -func TestGameStoreGetReturnsNotFound(t *testing.T) { - ctx := context.Background() - store, _, _ := newTestStore(t) - - _, err := store.Get(ctx, common.GameID("game-missing")) - require.ErrorIs(t, err, game.ErrNotFound) -} - -func TestGameStoreSaveRewritesStatusIndexOnStatusChange(t *testing.T) { - ctx := context.Background() - store, _, client := newTestStore(t) - - record := fixtureGame(t) - require.NoError(t, store.Save(ctx, record)) - - record.Status = game.StatusEnrollmentOpen - record.UpdatedAt = record.UpdatedAt.Add(time.Minute) - require.NoError(t, store.Save(ctx, record)) - - assert.Empty(t, statusIndexMembers(t, client, game.StatusDraft)) - assert.Contains(t, statusIndexMembers(t, client, game.StatusEnrollmentOpen), record.GameID.String()) -} - -func TestGameStoreCountByStatusReturnsAllBuckets(t *testing.T) { - ctx := context.Background() - store, _, _ := newTestStore(t) - - record1 := fixtureGame(t) - record1.GameID = common.GameID("game-count-a") - - record2 := fixtureGame(t) - record2.GameID = common.GameID("game-count-b") - record2.CreatedAt = record2.CreatedAt.Add(time.Second) - record2.UpdatedAt = record2.CreatedAt - - record3 := fixtureGame(t) - record3.GameID = common.GameID("game-count-c") - 
record3.Status = game.StatusEnrollmentOpen - - for _, record := range []game.Game{record1, record2, record3} { - require.NoError(t, store.Save(ctx, record)) - } - - counts, err := store.CountByStatus(ctx) - require.NoError(t, err) - - for _, status := range game.AllStatuses() { - _, present := counts[status] - require.True(t, present, "expected %s bucket", status) - } - require.Equal(t, 2, counts[game.StatusDraft]) - require.Equal(t, 1, counts[game.StatusEnrollmentOpen]) - require.Equal(t, 0, counts[game.StatusRunning]) -} - -func TestGameStoreGetByStatusReturnsMatchingRecords(t *testing.T) { - ctx := context.Background() - store, _, _ := newTestStore(t) - - record1 := fixtureGame(t) - record1.GameID = common.GameID("game-a") - - record2 := fixtureGame(t) - record2.GameID = common.GameID("game-b") - record2.CreatedAt = record2.CreatedAt.Add(time.Second) - record2.UpdatedAt = record2.CreatedAt - - record3 := fixtureGame(t) - record3.GameID = common.GameID("game-c") - record3.Status = game.StatusEnrollmentOpen - - for _, record := range []game.Game{record1, record2, record3} { - require.NoError(t, store.Save(ctx, record)) - } - - drafts, err := store.GetByStatus(ctx, game.StatusDraft) - require.NoError(t, err) - require.Len(t, drafts, 2) - gotIDs := []string{drafts[0].GameID.String(), drafts[1].GameID.String()} - assert.Contains(t, gotIDs, record1.GameID.String()) - assert.Contains(t, gotIDs, record2.GameID.String()) - - enrollment, err := store.GetByStatus(ctx, game.StatusEnrollmentOpen) - require.NoError(t, err) - require.Len(t, enrollment, 1) - assert.Equal(t, record3.GameID, enrollment[0].GameID) - - running, err := store.GetByStatus(ctx, game.StatusRunning) - require.NoError(t, err) - assert.Empty(t, running) -} - -func TestGameStoreGetByOwnerReturnsOwnedGames(t *testing.T) { - ctx := context.Background() - store, _, _ := newTestStore(t) - - now := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC) - record1, err := game.New(game.NewGameInput{ - GameID: 
common.GameID("game-priv-a"), - GameName: "Owner A first", - GameType: game.GameTypePrivate, - OwnerUserID: "user-owner-a", - MinPlayers: 2, - MaxPlayers: 4, - StartGapHours: 1, - StartGapPlayers: 1, - EnrollmentEndsAt: now.Add(48 * time.Hour), - TurnSchedule: "0 18 * * *", - TargetEngineVersion: "v1.0.0", - Now: now, - }) - require.NoError(t, err) - record2, err := game.New(game.NewGameInput{ - GameID: common.GameID("game-priv-b"), - GameName: "Owner A second", - GameType: game.GameTypePrivate, - OwnerUserID: "user-owner-a", - MinPlayers: 2, - MaxPlayers: 4, - StartGapHours: 1, - StartGapPlayers: 1, - EnrollmentEndsAt: now.Add(48 * time.Hour), - TurnSchedule: "0 18 * * *", - TargetEngineVersion: "v1.0.0", - Now: now.Add(time.Second), - }) - require.NoError(t, err) - record3, err := game.New(game.NewGameInput{ - GameID: common.GameID("game-priv-c"), - GameName: "Owner B", - GameType: game.GameTypePrivate, - OwnerUserID: "user-owner-b", - MinPlayers: 2, - MaxPlayers: 4, - StartGapHours: 1, - StartGapPlayers: 1, - EnrollmentEndsAt: now.Add(48 * time.Hour), - TurnSchedule: "0 18 * * *", - TargetEngineVersion: "v1.0.0", - Now: now, - }) - require.NoError(t, err) - publicRecord := fixtureGame(t) - - for _, record := range []game.Game{record1, record2, record3, publicRecord} { - require.NoError(t, store.Save(ctx, record)) - } - - ownerA, err := store.GetByOwner(ctx, "user-owner-a") - require.NoError(t, err) - require.Len(t, ownerA, 2) - - ownerB, err := store.GetByOwner(ctx, "user-owner-b") - require.NoError(t, err) - require.Len(t, ownerB, 1) - assert.Equal(t, record3.GameID, ownerB[0].GameID) - - ownerNone, err := store.GetByOwner(ctx, "user-owner-none") - require.NoError(t, err) - assert.Empty(t, ownerNone) -} - -func TestGameStoreGetByStatusDropsStaleIndexEntries(t *testing.T) { - ctx := context.Background() - store, server, _ := newTestStore(t) - - record := fixtureGame(t) - require.NoError(t, store.Save(ctx, record)) - - // Delete the primary key out-of-band, 
leaving the index entry stale. - server.Del("lobby:games:" + base64URL(record.GameID.String())) - - records, err := store.GetByStatus(ctx, game.StatusDraft) - require.NoError(t, err) - assert.Empty(t, records) -} - -func TestGameStoreUpdateStatusValidTransition(t *testing.T) { - ctx := context.Background() - store, _, client := newTestStore(t) - - record := fixtureGame(t) - require.NoError(t, store.Save(ctx, record)) - - at := record.CreatedAt.Add(time.Hour) - require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{ - GameID: record.GameID, - ExpectedFrom: game.StatusDraft, - To: game.StatusEnrollmentOpen, - Trigger: game.TriggerCommand, - At: at, - })) - - got, err := store.Get(ctx, record.GameID) - require.NoError(t, err) - assert.Equal(t, game.StatusEnrollmentOpen, got.Status) - assert.True(t, got.UpdatedAt.Equal(at.UTC())) - assert.Nil(t, got.StartedAt) - assert.Nil(t, got.FinishedAt) - - assert.Empty(t, statusIndexMembers(t, client, game.StatusDraft)) - assert.Contains(t, statusIndexMembers(t, client, game.StatusEnrollmentOpen), record.GameID.String()) -} - -func TestGameStoreUpdateStatusSetsStartedAtAndFinishedAt(t *testing.T) { - ctx := context.Background() - store, _, _ := newTestStore(t) - - record := fixtureGame(t) - record.Status = game.StatusStarting - require.NoError(t, store.Save(ctx, record)) - - startedAt := record.CreatedAt.Add(time.Hour) - require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{ - GameID: record.GameID, - ExpectedFrom: game.StatusStarting, - To: game.StatusRunning, - Trigger: game.TriggerRuntimeEvent, - At: startedAt, - })) - - got, err := store.Get(ctx, record.GameID) - require.NoError(t, err) - assert.Equal(t, game.StatusRunning, got.Status) - require.NotNil(t, got.StartedAt) - assert.True(t, got.StartedAt.Equal(startedAt.UTC())) - assert.Nil(t, got.FinishedAt) - - finishedAt := startedAt.Add(2 * time.Hour) - require.NoError(t, store.UpdateStatus(ctx, ports.UpdateStatusInput{ - GameID: record.GameID, - 
ExpectedFrom: game.StatusRunning, - To: game.StatusFinished, - Trigger: game.TriggerRuntimeEvent, - At: finishedAt, - })) - - got, err = store.Get(ctx, record.GameID) - require.NoError(t, err) - assert.Equal(t, game.StatusFinished, got.Status) - require.NotNil(t, got.StartedAt) - assert.True(t, got.StartedAt.Equal(startedAt.UTC())) - require.NotNil(t, got.FinishedAt) - assert.True(t, got.FinishedAt.Equal(finishedAt.UTC())) -} - -func TestGameStoreUpdateStatusRejectsInvalidTransitionWithoutMutation(t *testing.T) { - ctx := context.Background() - store, _, _ := newTestStore(t) - - record := fixtureGame(t) - require.NoError(t, store.Save(ctx, record)) - - err := store.UpdateStatus(ctx, ports.UpdateStatusInput{ - GameID: record.GameID, - ExpectedFrom: game.StatusDraft, - To: game.StatusRunning, - Trigger: game.TriggerCommand, - At: record.CreatedAt.Add(time.Minute), - }) - require.Error(t, err) - assert.True(t, errors.Is(err, game.ErrInvalidTransition)) - - got, err := store.Get(ctx, record.GameID) - require.NoError(t, err) - assert.Equal(t, game.StatusDraft, got.Status) - assert.True(t, got.UpdatedAt.Equal(record.UpdatedAt)) -} - -func TestGameStoreUpdateStatusRejectsWrongTrigger(t *testing.T) { - ctx := context.Background() - store, _, _ := newTestStore(t) - - record := fixtureGame(t) - require.NoError(t, store.Save(ctx, record)) - - err := store.UpdateStatus(ctx, ports.UpdateStatusInput{ - GameID: record.GameID, - ExpectedFrom: game.StatusDraft, - To: game.StatusEnrollmentOpen, - Trigger: game.TriggerDeadline, - At: record.CreatedAt.Add(time.Minute), - }) - require.Error(t, err) - assert.True(t, errors.Is(err, game.ErrInvalidTransition)) -} - -func TestGameStoreUpdateStatusReturnsConflictOnExpectedFromMismatch(t *testing.T) { - ctx := context.Background() - store, _, _ := newTestStore(t) - - record := fixtureGame(t) - require.NoError(t, store.Save(ctx, record)) - - err := store.UpdateStatus(ctx, ports.UpdateStatusInput{ - GameID: record.GameID, - ExpectedFrom: 
game.StatusEnrollmentOpen, - To: game.StatusReadyToStart, - Trigger: game.TriggerManual, - At: record.CreatedAt.Add(time.Minute), - }) - require.Error(t, err) - assert.True(t, errors.Is(err, game.ErrConflict)) -} - -func TestGameStoreUpdateStatusReturnsNotFoundForMissingRecord(t *testing.T) { - ctx := context.Background() - store, _, _ := newTestStore(t) - - err := store.UpdateStatus(ctx, ports.UpdateStatusInput{ - GameID: common.GameID("game-missing"), - ExpectedFrom: game.StatusDraft, - To: game.StatusEnrollmentOpen, - Trigger: game.TriggerCommand, - At: time.Now().UTC(), - }) - require.ErrorIs(t, err, game.ErrNotFound) -} - -func TestGameStoreUpdateRuntimeSnapshot(t *testing.T) { - ctx := context.Background() - store, _, client := newTestStore(t) - - record := fixtureGame(t) - record.Status = game.StatusRunning - startedAt := record.CreatedAt.Add(time.Hour) - record.StartedAt = &startedAt - require.NoError(t, store.Save(ctx, record)) - - at := startedAt.Add(10 * time.Minute) - require.NoError(t, store.UpdateRuntimeSnapshot(ctx, ports.UpdateRuntimeSnapshotInput{ - GameID: record.GameID, - Snapshot: game.RuntimeSnapshot{ - CurrentTurn: 5, - RuntimeStatus: "running_accepting_commands", - EngineHealthSummary: "ok", - }, - At: at, - })) - - got, err := store.Get(ctx, record.GameID) - require.NoError(t, err) - assert.Equal(t, 5, got.RuntimeSnapshot.CurrentTurn) - assert.Equal(t, "running_accepting_commands", got.RuntimeSnapshot.RuntimeStatus) - assert.Equal(t, "ok", got.RuntimeSnapshot.EngineHealthSummary) - assert.True(t, got.UpdatedAt.Equal(at.UTC())) - assert.Equal(t, game.StatusRunning, got.Status) - - assert.Contains(t, statusIndexMembers(t, client, game.StatusRunning), record.GameID.String()) -} - -func TestGameStoreUpdateRuntimeSnapshotReturnsNotFound(t *testing.T) { - ctx := context.Background() - store, _, _ := newTestStore(t) - - err := store.UpdateRuntimeSnapshot(ctx, ports.UpdateRuntimeSnapshotInput{ - GameID: common.GameID("game-missing"), - Snapshot: 
game.RuntimeSnapshot{}, - At: time.Now().UTC(), - }) - require.ErrorIs(t, err, game.ErrNotFound) -} - -func TestGameStoreUpdateRuntimeBinding(t *testing.T) { - ctx := context.Background() - store, _, _ := newTestStore(t) - - record := fixtureGame(t) - record.Status = game.StatusStarting - require.NoError(t, store.Save(ctx, record)) - - bound := record.CreatedAt.Add(time.Hour) - require.NoError(t, store.UpdateRuntimeBinding(ctx, ports.UpdateRuntimeBindingInput{ - GameID: record.GameID, - Binding: game.RuntimeBinding{ - ContainerID: "container-1", - EngineEndpoint: "engine.local:9000", - RuntimeJobID: "1700000000000-0", - BoundAt: bound, - }, - At: bound, - })) - - got, err := store.Get(ctx, record.GameID) - require.NoError(t, err) - require.NotNil(t, got.RuntimeBinding) - assert.Equal(t, "container-1", got.RuntimeBinding.ContainerID) - assert.Equal(t, "engine.local:9000", got.RuntimeBinding.EngineEndpoint) - assert.Equal(t, "1700000000000-0", got.RuntimeBinding.RuntimeJobID) - assert.True(t, got.RuntimeBinding.BoundAt.Equal(bound.UTC())) - assert.Equal(t, game.StatusStarting, got.Status, "binding update must not change status") - assert.True(t, got.UpdatedAt.Equal(bound.UTC())) -} - -func TestGameStoreUpdateRuntimeBindingReturnsNotFound(t *testing.T) { - ctx := context.Background() - store, _, _ := newTestStore(t) - - err := store.UpdateRuntimeBinding(ctx, ports.UpdateRuntimeBindingInput{ - GameID: common.GameID("game-missing"), - Binding: game.RuntimeBinding{ - ContainerID: "container-1", - EngineEndpoint: "engine.local:9000", - RuntimeJobID: "1700000000000-0", - BoundAt: time.Now().UTC(), - }, - At: time.Now().UTC(), - }) - require.ErrorIs(t, err, game.ErrNotFound) -} - -func TestGameStoreConcurrentUpdateStatusHasExactlyOneWinner(t *testing.T) { - ctx := context.Background() - store, _, client := newTestStore(t) - - record := fixtureGame(t) - require.NoError(t, store.Save(ctx, record)) - - storeA, err := redisstate.NewGameStore(client) - require.NoError(t, err) - 
storeB, err := redisstate.NewGameStore(client) - require.NoError(t, err) - - var ( - wg sync.WaitGroup - successes atomic.Int32 - conflicts atomic.Int32 - others atomic.Int32 - ) - - apply := func(target *redisstate.GameStore) { - defer wg.Done() - err := target.UpdateStatus(ctx, ports.UpdateStatusInput{ - GameID: record.GameID, - ExpectedFrom: game.StatusDraft, - To: game.StatusEnrollmentOpen, - Trigger: game.TriggerCommand, - At: record.CreatedAt.Add(time.Minute), - }) - switch { - case err == nil: - successes.Add(1) - case errors.Is(err, game.ErrConflict): - conflicts.Add(1) - default: - others.Add(1) - } - } - - wg.Add(2) - go apply(storeA) - go apply(storeB) - wg.Wait() - - assert.Equal(t, int32(0), others.Load(), "unexpected non-conflict error") - assert.Equal(t, int32(1), successes.Load(), "expected exactly one success") - assert.Equal(t, int32(1), conflicts.Load(), "expected exactly one conflict") -} - -// base64URL mirrors the private key-segment encoding used by Keyspace. -// The tests use it to assert on exact Redis key shapes. -func base64URL(value string) string { - return base64.RawURLEncoding.EncodeToString([]byte(value)) -} diff --git a/lobby/internal/adapters/redisstate/invitestore.go b/lobby/internal/adapters/redisstate/invitestore.go deleted file mode 100644 index 4b76ffa..0000000 --- a/lobby/internal/adapters/redisstate/invitestore.go +++ /dev/null @@ -1,284 +0,0 @@ -package redisstate - -import ( - "context" - "errors" - "fmt" - "strings" - - "galaxy/lobby/internal/domain/common" - "galaxy/lobby/internal/domain/invite" - "galaxy/lobby/internal/ports" - - "github.com/redis/go-redis/v9" -) - -// InviteStore provides Redis-backed durable storage for invite records. -type InviteStore struct { - client *redis.Client - keys Keyspace -} - -// NewInviteStore constructs one Redis-backed invite store. It returns an -// error when client is nil. 
-func NewInviteStore(client *redis.Client) (*InviteStore, error) { - if client == nil { - return nil, errors.New("new invite store: nil redis client") - } - - return &InviteStore{ - client: client, - keys: Keyspace{}, - }, nil -} - -// Save persists a new created invite record. Save is create-only; a -// second save against the same invite id returns invite.ErrConflict. -func (store *InviteStore) Save(ctx context.Context, record invite.Invite) error { - if store == nil || store.client == nil { - return errors.New("save invite: nil store") - } - if ctx == nil { - return errors.New("save invite: nil context") - } - if err := record.Validate(); err != nil { - return fmt.Errorf("save invite: %w", err) - } - if record.Status != invite.StatusCreated { - return fmt.Errorf( - "save invite: status must be %q, got %q", - invite.StatusCreated, record.Status, - ) - } - - payload, err := MarshalInvite(record) - if err != nil { - return fmt.Errorf("save invite: %w", err) - } - - primaryKey := store.keys.Invite(record.InviteID) - gameIndexKey := store.keys.InvitesByGame(record.GameID) - userIndexKey := store.keys.InvitesByUser(record.InviteeUserID) - inviterIndexKey := store.keys.InvitesByInviter(record.InviterUserID) - member := record.InviteID.String() - - watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error { - existing, getErr := tx.Exists(ctx, primaryKey).Result() - if getErr != nil { - return fmt.Errorf("save invite: %w", getErr) - } - if existing != 0 { - return fmt.Errorf("save invite: %w", invite.ErrConflict) - } - - _, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, primaryKey, payload, InviteRecordTTL) - pipe.SAdd(ctx, gameIndexKey, member) - pipe.SAdd(ctx, userIndexKey, member) - pipe.SAdd(ctx, inviterIndexKey, member) - return nil - }) - return err - }, primaryKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("save invite: %w", invite.ErrConflict) - case watchErr != nil: - return watchErr - 
default: - return nil - } -} - -// Get returns the record identified by inviteID. -func (store *InviteStore) Get(ctx context.Context, inviteID common.InviteID) (invite.Invite, error) { - if store == nil || store.client == nil { - return invite.Invite{}, errors.New("get invite: nil store") - } - if ctx == nil { - return invite.Invite{}, errors.New("get invite: nil context") - } - if err := inviteID.Validate(); err != nil { - return invite.Invite{}, fmt.Errorf("get invite: %w", err) - } - - payload, err := store.client.Get(ctx, store.keys.Invite(inviteID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return invite.Invite{}, invite.ErrNotFound - case err != nil: - return invite.Invite{}, fmt.Errorf("get invite: %w", err) - } - - record, err := UnmarshalInvite(payload) - if err != nil { - return invite.Invite{}, fmt.Errorf("get invite: %w", err) - } - return record, nil -} - -// GetByGame returns every invite attached to gameID. -func (store *InviteStore) GetByGame(ctx context.Context, gameID common.GameID) ([]invite.Invite, error) { - if store == nil || store.client == nil { - return nil, errors.New("get invites by game: nil store") - } - if ctx == nil { - return nil, errors.New("get invites by game: nil context") - } - if err := gameID.Validate(); err != nil { - return nil, fmt.Errorf("get invites by game: %w", err) - } - - return store.loadInvitesBySet(ctx, - "get invites by game", - store.keys.InvitesByGame(gameID), - ) -} - -// GetByUser returns every invite addressed to inviteeUserID. 
-func (store *InviteStore) GetByUser(ctx context.Context, inviteeUserID string) ([]invite.Invite, error) { - if store == nil || store.client == nil { - return nil, errors.New("get invites by user: nil store") - } - if ctx == nil { - return nil, errors.New("get invites by user: nil context") - } - trimmed := strings.TrimSpace(inviteeUserID) - if trimmed == "" { - return nil, fmt.Errorf("get invites by user: invitee user id must not be empty") - } - - return store.loadInvitesBySet(ctx, - "get invites by user", - store.keys.InvitesByUser(trimmed), - ) -} - -// GetByInviter returns every invite created by inviterUserID. -func (store *InviteStore) GetByInviter(ctx context.Context, inviterUserID string) ([]invite.Invite, error) { - if store == nil || store.client == nil { - return nil, errors.New("get invites by inviter: nil store") - } - if ctx == nil { - return nil, errors.New("get invites by inviter: nil context") - } - trimmed := strings.TrimSpace(inviterUserID) - if trimmed == "" { - return nil, fmt.Errorf("get invites by inviter: inviter user id must not be empty") - } - - return store.loadInvitesBySet(ctx, - "get invites by inviter", - store.keys.InvitesByInviter(trimmed), - ) -} - -// loadInvitesBySet materializes invites whose ids are stored in setKey. -// Stale set members (primary key removed out-of-band) are dropped silently. 
-func (store *InviteStore) loadInvitesBySet(ctx context.Context, operation, setKey string) ([]invite.Invite, error) { - members, err := store.client.SMembers(ctx, setKey).Result() - if err != nil { - return nil, fmt.Errorf("%s: %w", operation, err) - } - if len(members) == 0 { - return nil, nil - } - - primaryKeys := make([]string, len(members)) - for index, member := range members { - primaryKeys[index] = store.keys.Invite(common.InviteID(member)) - } - - payloads, err := store.client.MGet(ctx, primaryKeys...).Result() - if err != nil { - return nil, fmt.Errorf("%s: %w", operation, err) - } - - records := make([]invite.Invite, 0, len(payloads)) - for _, entry := range payloads { - if entry == nil { - continue - } - raw, ok := entry.(string) - if !ok { - return nil, fmt.Errorf("%s: unexpected payload type %T", operation, entry) - } - record, err := UnmarshalInvite([]byte(raw)) - if err != nil { - return nil, fmt.Errorf("%s: %w", operation, err) - } - records = append(records, record) - } - - return records, nil -} - -// UpdateStatus applies one status transition in a compare-and-swap fashion. 
-func (store *InviteStore) UpdateStatus(ctx context.Context, input ports.UpdateInviteStatusInput) error { - if store == nil || store.client == nil { - return errors.New("update invite status: nil store") - } - if ctx == nil { - return errors.New("update invite status: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("update invite status: %w", err) - } - - if err := invite.Transition(input.ExpectedFrom, input.To); err != nil { - return err - } - - primaryKey := store.keys.Invite(input.InviteID) - at := input.At.UTC() - - watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error { - payload, getErr := tx.Get(ctx, primaryKey).Bytes() - switch { - case errors.Is(getErr, redis.Nil): - return invite.ErrNotFound - case getErr != nil: - return fmt.Errorf("update invite status: %w", getErr) - } - - existing, err := UnmarshalInvite(payload) - if err != nil { - return fmt.Errorf("update invite status: %w", err) - } - if existing.Status != input.ExpectedFrom { - return fmt.Errorf("update invite status: %w", invite.ErrConflict) - } - - existing.Status = input.To - decidedAt := at - existing.DecidedAt = &decidedAt - if input.To == invite.StatusRedeemed { - existing.RaceName = strings.TrimSpace(input.RaceName) - } - - encoded, err := MarshalInvite(existing) - if err != nil { - return fmt.Errorf("update invite status: %w", err) - } - - _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, primaryKey, encoded, InviteRecordTTL) - return nil - }) - return err - }, primaryKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("update invite status: %w", invite.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// Ensure InviteStore satisfies the ports.InviteStore interface at -// compile time. 
-var _ ports.InviteStore = (*InviteStore)(nil) diff --git a/lobby/internal/adapters/redisstate/invitestore_test.go b/lobby/internal/adapters/redisstate/invitestore_test.go deleted file mode 100644 index ae2dabc..0000000 --- a/lobby/internal/adapters/redisstate/invitestore_test.go +++ /dev/null @@ -1,363 +0,0 @@ -package redisstate_test - -import ( - "context" - "errors" - "sort" - "testing" - "time" - - "galaxy/lobby/internal/adapters/redisstate" - "galaxy/lobby/internal/domain/common" - "galaxy/lobby/internal/domain/invite" - "galaxy/lobby/internal/ports" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func newInviteTestStore(t *testing.T) (*redisstate.InviteStore, *miniredis.Miniredis, *redis.Client) { - t.Helper() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { - _ = client.Close() - }) - - store, err := redisstate.NewInviteStore(client) - require.NoError(t, err) - - return store, server, client -} - -func fixtureInvite(t *testing.T, id common.InviteID, inviter, invitee string, gameID common.GameID) invite.Invite { - t.Helper() - - now := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC) - record, err := invite.New(invite.NewInviteInput{ - InviteID: id, - GameID: gameID, - InviterUserID: inviter, - InviteeUserID: invitee, - Now: now, - ExpiresAt: now.Add(7 * 24 * time.Hour), - }) - require.NoError(t, err) - return record -} - -func TestNewInviteStoreRejectsNilClient(t *testing.T) { - _, err := redisstate.NewInviteStore(nil) - require.Error(t, err) -} - -func TestInviteStoreSaveAndGet(t *testing.T) { - ctx := context.Background() - store, _, client := newInviteTestStore(t) - - record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1") - require.NoError(t, store.Save(ctx, record)) - - got, err := store.Get(ctx, record.InviteID) - require.NoError(t, err) - assert.Equal(t, 
record.InviteID, got.InviteID) - assert.Equal(t, record.InviteeUserID, got.InviteeUserID) - assert.Equal(t, invite.StatusCreated, got.Status) - assert.Equal(t, "", got.RaceName) - assert.Nil(t, got.DecidedAt) - assert.True(t, got.ExpiresAt.Equal(record.ExpiresAt)) - - byGame, err := client.SMembers(ctx, "lobby:game_invites:"+base64URL(record.GameID.String())).Result() - require.NoError(t, err) - assert.ElementsMatch(t, []string{record.InviteID.String()}, byGame) - - byUser, err := client.SMembers(ctx, "lobby:user_invites:"+base64URL(record.InviteeUserID)).Result() - require.NoError(t, err) - assert.ElementsMatch(t, []string{record.InviteID.String()}, byUser) -} - -func TestInviteStoreGetReturnsNotFound(t *testing.T) { - ctx := context.Background() - store, _, _ := newInviteTestStore(t) - - _, err := store.Get(ctx, common.InviteID("invite-missing")) - require.ErrorIs(t, err, invite.ErrNotFound) -} - -func TestInviteStoreSaveRejectsDuplicate(t *testing.T) { - ctx := context.Background() - store, _, _ := newInviteTestStore(t) - - record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1") - require.NoError(t, store.Save(ctx, record)) - - err := store.Save(ctx, record) - require.Error(t, err) - assert.True(t, errors.Is(err, invite.ErrConflict)) -} - -func TestInviteStoreSaveRejectsNonCreated(t *testing.T) { - ctx := context.Background() - store, _, _ := newInviteTestStore(t) - - record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1") - record.Status = invite.StatusRevoked - decidedAt := record.CreatedAt.Add(time.Minute) - record.DecidedAt = &decidedAt - - err := store.Save(ctx, record) - require.Error(t, err) - assert.False(t, errors.Is(err, invite.ErrConflict)) -} - -func TestInviteStoreUpdateStatusRedeemSetsRaceName(t *testing.T) { - ctx := context.Background() - store, _, _ := newInviteTestStore(t) - - record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1") - require.NoError(t, store.Save(ctx, record)) - - 
at := record.CreatedAt.Add(time.Hour) - require.NoError(t, store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{ - InviteID: record.InviteID, - ExpectedFrom: invite.StatusCreated, - To: invite.StatusRedeemed, - At: at, - RaceName: "Lunar Raider", - })) - - got, err := store.Get(ctx, record.InviteID) - require.NoError(t, err) - assert.Equal(t, invite.StatusRedeemed, got.Status) - assert.Equal(t, "Lunar Raider", got.RaceName) - require.NotNil(t, got.DecidedAt) - assert.True(t, got.DecidedAt.Equal(at.UTC())) -} - -func TestInviteStoreUpdateStatusTerminalTransitions(t *testing.T) { - cases := []struct { - name string - target invite.Status - }{ - {"declined", invite.StatusDeclined}, - {"revoked", invite.StatusRevoked}, - {"expired", invite.StatusExpired}, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - ctx := context.Background() - store, _, _ := newInviteTestStore(t) - - record := fixtureInvite(t, common.InviteID("invite-"+tc.name), "user-owner", "user-guest", "game-1") - require.NoError(t, store.Save(ctx, record)) - - at := record.CreatedAt.Add(30 * time.Minute) - require.NoError(t, store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{ - InviteID: record.InviteID, - ExpectedFrom: invite.StatusCreated, - To: tc.target, - At: at, - })) - - got, err := store.Get(ctx, record.InviteID) - require.NoError(t, err) - assert.Equal(t, tc.target, got.Status) - assert.Equal(t, "", got.RaceName) - require.NotNil(t, got.DecidedAt) - assert.True(t, got.DecidedAt.Equal(at.UTC())) - }) - } -} - -func TestInviteStoreUpdateStatusRejectsRedeemWithoutRaceName(t *testing.T) { - ctx := context.Background() - store, _, _ := newInviteTestStore(t) - - record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1") - require.NoError(t, store.Save(ctx, record)) - - err := store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{ - InviteID: record.InviteID, - ExpectedFrom: invite.StatusCreated, - To: invite.StatusRedeemed, - At: 
record.CreatedAt.Add(time.Minute), - }) - require.Error(t, err) - assert.False(t, errors.Is(err, invite.ErrInvalidTransition)) -} - -func TestInviteStoreUpdateStatusRejectsRaceNameOnNonRedeem(t *testing.T) { - ctx := context.Background() - store, _, _ := newInviteTestStore(t) - - record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1") - require.NoError(t, store.Save(ctx, record)) - - err := store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{ - InviteID: record.InviteID, - ExpectedFrom: invite.StatusCreated, - To: invite.StatusDeclined, - At: record.CreatedAt.Add(time.Minute), - RaceName: "Nope", - }) - require.Error(t, err) - assert.False(t, errors.Is(err, invite.ErrInvalidTransition)) -} - -func TestInviteStoreUpdateStatusRejectsInvalidTransitionWithoutMutation(t *testing.T) { - ctx := context.Background() - store, _, _ := newInviteTestStore(t) - - record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1") - require.NoError(t, store.Save(ctx, record)) - - err := store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{ - InviteID: record.InviteID, - ExpectedFrom: invite.StatusRedeemed, - To: invite.StatusExpired, - At: record.CreatedAt.Add(time.Minute), - }) - require.Error(t, err) - assert.True(t, errors.Is(err, invite.ErrInvalidTransition)) -} - -func TestInviteStoreUpdateStatusReturnsConflictOnExpectedFromMismatch(t *testing.T) { - ctx := context.Background() - store, _, _ := newInviteTestStore(t) - - record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1") - require.NoError(t, store.Save(ctx, record)) - - require.NoError(t, store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{ - InviteID: record.InviteID, - ExpectedFrom: invite.StatusCreated, - To: invite.StatusRevoked, - At: record.CreatedAt.Add(time.Minute), - })) - - err := store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{ - InviteID: record.InviteID, - ExpectedFrom: invite.StatusCreated, - To: invite.StatusDeclined, - At: 
record.CreatedAt.Add(2 * time.Minute), - }) - require.Error(t, err) - assert.True(t, errors.Is(err, invite.ErrConflict)) -} - -func TestInviteStoreUpdateStatusReturnsNotFoundForMissingRecord(t *testing.T) { - ctx := context.Background() - store, _, _ := newInviteTestStore(t) - - err := store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{ - InviteID: common.InviteID("invite-missing"), - ExpectedFrom: invite.StatusCreated, - To: invite.StatusDeclined, - At: time.Now().UTC(), - }) - require.ErrorIs(t, err, invite.ErrNotFound) -} - -func TestInviteStoreGetByGameAndByUser(t *testing.T) { - ctx := context.Background() - store, _, _ := newInviteTestStore(t) - - i1 := fixtureInvite(t, "invite-a1", "user-owner", "user-1", "game-1") - i2 := fixtureInvite(t, "invite-a2", "user-owner", "user-2", "game-1") - i3 := fixtureInvite(t, "invite-a3", "user-owner", "user-1", "game-2") - - for _, record := range []invite.Invite{i1, i2, i3} { - require.NoError(t, store.Save(ctx, record)) - } - - byGame1, err := store.GetByGame(ctx, "game-1") - require.NoError(t, err) - require.Len(t, byGame1, 2) - - byUser1, err := store.GetByUser(ctx, "user-1") - require.NoError(t, err) - require.Len(t, byUser1, 2) - - ids := collectInviteIDs(byUser1) - sort.Strings(ids) - assert.Equal(t, []string{"invite-a1", "invite-a3"}, ids) - - byGameMissing, err := store.GetByGame(ctx, "game-missing") - require.NoError(t, err) - assert.Empty(t, byGameMissing) -} - -func TestInviteStoreGetByInviter(t *testing.T) { - ctx := context.Background() - store, _, _ := newInviteTestStore(t) - - i1 := fixtureInvite(t, "invite-i1", "user-owner-a", "user-guest-1", "game-1") - i2 := fixtureInvite(t, "invite-i2", "user-owner-a", "user-guest-2", "game-2") - i3 := fixtureInvite(t, "invite-i3", "user-owner-b", "user-guest-1", "game-3") - - for _, record := range []invite.Invite{i1, i2, i3} { - require.NoError(t, store.Save(ctx, record)) - } - - byInviterA, err := store.GetByInviter(ctx, "user-owner-a") - require.NoError(t, err) - 
require.Len(t, byInviterA, 2) - idsA := collectInviteIDs(byInviterA) - sort.Strings(idsA) - assert.Equal(t, []string{"invite-i1", "invite-i2"}, idsA) - - byInviterB, err := store.GetByInviter(ctx, "user-owner-b") - require.NoError(t, err) - require.Len(t, byInviterB, 1) - assert.Equal(t, "invite-i3", byInviterB[0].InviteID.String()) - - byInviterMissing, err := store.GetByInviter(ctx, "user-owner-none") - require.NoError(t, err) - assert.Empty(t, byInviterMissing) -} - -func TestInviteStoreGetByInviterRetainsAfterStatusChange(t *testing.T) { - ctx := context.Background() - store, _, _ := newInviteTestStore(t) - - record := fixtureInvite(t, "invite-i", "user-owner-a", "user-guest", "game-1") - require.NoError(t, store.Save(ctx, record)) - - require.NoError(t, store.UpdateStatus(ctx, ports.UpdateInviteStatusInput{ - InviteID: record.InviteID, - ExpectedFrom: invite.StatusCreated, - To: invite.StatusRevoked, - At: record.CreatedAt.Add(time.Minute), - })) - - matches, err := store.GetByInviter(ctx, "user-owner-a") - require.NoError(t, err) - require.Len(t, matches, 1) - assert.Equal(t, invite.StatusRevoked, matches[0].Status) -} - -func TestInviteStoreGetByGameDropsStaleIndexEntries(t *testing.T) { - ctx := context.Background() - store, server, _ := newInviteTestStore(t) - - record := fixtureInvite(t, "invite-a", "user-owner", "user-guest", "game-1") - require.NoError(t, store.Save(ctx, record)) - - server.Del("lobby:invites:" + base64URL(record.InviteID.String())) - - records, err := store.GetByGame(ctx, record.GameID) - require.NoError(t, err) - assert.Empty(t, records) -} - -func collectInviteIDs(records []invite.Invite) []string { - ids := make([]string, len(records)) - for index, record := range records { - ids[index] = record.InviteID.String() - } - return ids -} diff --git a/lobby/internal/adapters/redisstate/keyspace.go b/lobby/internal/adapters/redisstate/keyspace.go index 3eea9dc..37c6bc3 100644 --- a/lobby/internal/adapters/redisstate/keyspace.go +++ 
b/lobby/internal/adapters/redisstate/keyspace.go @@ -2,178 +2,25 @@ package redisstate import ( "encoding/base64" - "time" "galaxy/lobby/internal/domain/common" - "galaxy/lobby/internal/domain/game" - "galaxy/lobby/internal/domain/racename" ) // defaultPrefix is the mandatory `lobby:` namespace prefix shared by every // Game Lobby Redis key. const defaultPrefix = "lobby:" -// GameRecordTTL is the Redis retention applied to game records. The -// value is zero (no expiry); a future stage will revisit this -// choice when the platform locks in archival/GDPR policy. -const GameRecordTTL time.Duration = 0 - -// ApplicationRecordTTL is the Redis retention applied to application -// records. uses zero (no expiry) to match game records; the -// archival policy will be revisited when the platform locks it in. -const ApplicationRecordTTL time.Duration = 0 - -// InviteRecordTTL is the Redis retention applied to invite records. -// uses zero (no expiry); the `expires_at` field is a business -// deadline enforced by the service layer, not a Redis TTL. -const InviteRecordTTL time.Duration = 0 - -// MembershipRecordTTL is the Redis retention applied to membership -// records. uses zero (no expiry) to match the other participant -// entities. -const MembershipRecordTTL time.Duration = 0 - -// Keyspace builds the frozen Game Lobby Redis keys. All dynamic key -// segments are encoded with base64url so raw key structure does not -// depend on user-provided or caller-provided characters. +// Keyspace builds the Game Lobby Redis keys that survive the PG_PLAN.md +// §6A and §6B migrations: per-game ephemeral runtime aggregates, +// capability-evaluation guards, gap activation timestamps, and stream +// consumer offsets. The four core enrollment entities (game, application, +// invite, membership) and the Race Name Directory live in PostgreSQL — +// their previous keyspace methods are gone. 
+// +// All dynamic key segments are encoded with base64url so raw key structure +// does not depend on user-provided or caller-provided characters. type Keyspace struct{} -// Game returns the primary Redis key for one game record. -func (Keyspace) Game(gameID common.GameID) string { - return defaultPrefix + "games:" + encodeKeyComponent(gameID.String()) -} - -// GamesByStatus returns the sorted-set key that stores game identifiers -// indexed by their current status. -func (Keyspace) GamesByStatus(status game.Status) string { - return defaultPrefix + "games_by_status:" + encodeKeyComponent(string(status)) -} - -// GamesByOwner returns the set key that stores game identifiers owned -// by one user. The set is maintained for private games whose -// OwnerUserID is non-empty (public games are admin-owned and carry an -// empty OwnerUserID, so they never enter the index). -func (Keyspace) GamesByOwner(userID string) string { - return defaultPrefix + "games_by_owner:" + encodeKeyComponent(userID) -} - -// Application returns the primary Redis key for one application record. -func (Keyspace) Application(applicationID common.ApplicationID) string { - return defaultPrefix + "applications:" + encodeKeyComponent(applicationID.String()) -} - -// ApplicationsByGame returns the set key that stores application -// identifiers attached to one game. -func (Keyspace) ApplicationsByGame(gameID common.GameID) string { - return defaultPrefix + "game_applications:" + encodeKeyComponent(gameID.String()) -} - -// ApplicationsByUser returns the set key that stores application -// identifiers submitted by one applicant. -func (Keyspace) ApplicationsByUser(applicantUserID string) string { - return defaultPrefix + "user_applications:" + encodeKeyComponent(applicantUserID) -} - -// UserGameApplication returns the lookup key that stores the single -// non-rejected application identifier for one (user, game) pair. 
Presence -// of this key blocks a second submitted/approved application for the -// same user and game. -func (Keyspace) UserGameApplication(applicantUserID string, gameID common.GameID) string { - return defaultPrefix + "user_game_application:" + - encodeKeyComponent(applicantUserID) + ":" + - encodeKeyComponent(gameID.String()) -} - -// Invite returns the primary Redis key for one invite record. -func (Keyspace) Invite(inviteID common.InviteID) string { - return defaultPrefix + "invites:" + encodeKeyComponent(inviteID.String()) -} - -// InvitesByGame returns the set key that stores invite identifiers -// attached to one game. -func (Keyspace) InvitesByGame(gameID common.GameID) string { - return defaultPrefix + "game_invites:" + encodeKeyComponent(gameID.String()) -} - -// InvitesByUser returns the set key that stores invite identifiers -// addressed to one invitee. -func (Keyspace) InvitesByUser(inviteeUserID string) string { - return defaultPrefix + "user_invites:" + encodeKeyComponent(inviteeUserID) -} - -// InvitesByInviter returns the set key that stores invite identifiers -// created by one inviter (private-game owner). The set retains -// invite_ids regardless of subsequent status transitions; callers -// filter by status when needed. -func (Keyspace) InvitesByInviter(inviterUserID string) string { - return defaultPrefix + "user_inviter_invites:" + encodeKeyComponent(inviterUserID) -} - -// Membership returns the primary Redis key for one membership record. -func (Keyspace) Membership(membershipID common.MembershipID) string { - return defaultPrefix + "memberships:" + encodeKeyComponent(membershipID.String()) -} - -// MembershipsByGame returns the set key that stores membership -// identifiers attached to one game. 
-func (Keyspace) MembershipsByGame(gameID common.GameID) string { - return defaultPrefix + "game_memberships:" + encodeKeyComponent(gameID.String()) -} - -// MembershipsByUser returns the set key that stores membership -// identifiers held by one user. -func (Keyspace) MembershipsByUser(userID string) string { - return defaultPrefix + "user_memberships:" + encodeKeyComponent(userID) -} - -// RegisteredRaceName returns the Redis key that stores the registered -// race name bound to canonical. -func (Keyspace) RegisteredRaceName(canonical racename.CanonicalKey) string { - return defaultPrefix + "race_names:registered:" + encodeKeyComponent(canonical.String()) -} - -// UserRegisteredRaceNames returns the set key that stores canonical keys -// of every registered race name owned by userID. -func (Keyspace) UserRegisteredRaceNames(userID string) string { - return defaultPrefix + "race_names:user_registered:" + encodeKeyComponent(userID) -} - -// RaceNameReservation returns the Redis key that stores the per-game race -// name reservation bound to (gameID, canonical). -func (Keyspace) RaceNameReservation(gameID common.GameID, canonical racename.CanonicalKey) string { - return defaultPrefix + "race_names:reservations:" + - encodeKeyComponent(gameID.String()) + ":" + - encodeKeyComponent(canonical.String()) -} - -// UserRaceNameReservations returns the set key that stores -// `:` tuples of every active reservation -// (including pending_registration) owned by userID. -func (Keyspace) UserRaceNameReservations(userID string) string { - return defaultPrefix + "race_names:user_reservations:" + encodeKeyComponent(userID) -} - -// RaceNameCanonicalLookup returns the Redis key that stores the eager -// canonical-lookup cache entry for canonical. The cache surfaces the -// strongest existing binding (registered > pending_registration > -// reservation) so Check remains an O(1) read. 
-func (Keyspace) RaceNameCanonicalLookup(canonical racename.CanonicalKey) string { - return defaultPrefix + "race_names:canonical_lookup:" + encodeKeyComponent(canonical.String()) -} - -// PendingRaceNameIndex returns the singleton sorted-set key that indexes -// pending registrations by eligible_until_ms for the expiration worker. -func (Keyspace) PendingRaceNameIndex() string { - return defaultPrefix + "race_names:pending_index" -} - -// RaceNameReservationMember returns the canonical member representation -// stored inside UserRaceNameReservations and PendingRaceNameIndex for -// (gameID, canonical). -func (Keyspace) RaceNameReservationMember(gameID common.GameID, canonical racename.CanonicalKey) string { - return encodeKeyComponent(gameID.String()) + ":" + encodeKeyComponent(canonical.String()) -} - // GapActivatedAt returns the Redis key that stores the gap-window // activation timestamp for one game. func (Keyspace) GapActivatedAt(gameID common.GameID) string { @@ -216,12 +63,6 @@ func (Keyspace) CapabilityEvaluationGuard(gameID common.GameID) string { encodeKeyComponent(gameID.String()) } -// CreatedAtScore returns the frozen sorted-set score representation for -// game creation timestamps stored in the status index. -func CreatedAtScore(createdAt time.Time) float64 { - return float64(createdAt.UTC().UnixMilli()) -} - func encodeKeyComponent(value string) string { return base64.RawURLEncoding.EncodeToString([]byte(value)) } diff --git a/lobby/internal/adapters/redisstate/keyspace_test_helpers_test.go b/lobby/internal/adapters/redisstate/keyspace_test_helpers_test.go new file mode 100644 index 0000000..f5992ca --- /dev/null +++ b/lobby/internal/adapters/redisstate/keyspace_test_helpers_test.go @@ -0,0 +1,10 @@ +package redisstate_test + +import "encoding/base64" + +// base64URL is the test helper that mirrors the encodeKeyComponent function +// inside Keyspace. Per-store tests use it to assert the exact Redis key +// shape the adapter writes. 
+func base64URL(value string) string { + return base64.RawURLEncoding.EncodeToString([]byte(value)) +} diff --git a/lobby/internal/adapters/redisstate/membershipstore.go b/lobby/internal/adapters/redisstate/membershipstore.go deleted file mode 100644 index 54e3d44..0000000 --- a/lobby/internal/adapters/redisstate/membershipstore.go +++ /dev/null @@ -1,317 +0,0 @@ -package redisstate - -import ( - "context" - "errors" - "fmt" - "strings" - - "galaxy/lobby/internal/domain/common" - "galaxy/lobby/internal/domain/membership" - "galaxy/lobby/internal/ports" - - "github.com/redis/go-redis/v9" -) - -// MembershipStore provides Redis-backed durable storage for membership -// records. -type MembershipStore struct { - client *redis.Client - keys Keyspace -} - -// NewMembershipStore constructs one Redis-backed membership store. It -// returns an error when client is nil. -func NewMembershipStore(client *redis.Client) (*MembershipStore, error) { - if client == nil { - return nil, errors.New("new membership store: nil redis client") - } - - return &MembershipStore{ - client: client, - keys: Keyspace{}, - }, nil -} - -// Save persists a new active membership record. Save is create-only; a -// second save against the same membership id returns -// membership.ErrConflict. 
-func (store *MembershipStore) Save(ctx context.Context, record membership.Membership) error { - if store == nil || store.client == nil { - return errors.New("save membership: nil store") - } - if ctx == nil { - return errors.New("save membership: nil context") - } - if err := record.Validate(); err != nil { - return fmt.Errorf("save membership: %w", err) - } - if record.Status != membership.StatusActive { - return fmt.Errorf( - "save membership: status must be %q, got %q", - membership.StatusActive, record.Status, - ) - } - - payload, err := MarshalMembership(record) - if err != nil { - return fmt.Errorf("save membership: %w", err) - } - - primaryKey := store.keys.Membership(record.MembershipID) - gameIndexKey := store.keys.MembershipsByGame(record.GameID) - userIndexKey := store.keys.MembershipsByUser(record.UserID) - member := record.MembershipID.String() - - watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error { - existing, getErr := tx.Exists(ctx, primaryKey).Result() - if getErr != nil { - return fmt.Errorf("save membership: %w", getErr) - } - if existing != 0 { - return fmt.Errorf("save membership: %w", membership.ErrConflict) - } - - _, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, primaryKey, payload, MembershipRecordTTL) - pipe.SAdd(ctx, gameIndexKey, member) - pipe.SAdd(ctx, userIndexKey, member) - return nil - }) - return err - }, primaryKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("save membership: %w", membership.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// Get returns the record identified by membershipID. 
-func (store *MembershipStore) Get(ctx context.Context, membershipID common.MembershipID) (membership.Membership, error) { - if store == nil || store.client == nil { - return membership.Membership{}, errors.New("get membership: nil store") - } - if ctx == nil { - return membership.Membership{}, errors.New("get membership: nil context") - } - if err := membershipID.Validate(); err != nil { - return membership.Membership{}, fmt.Errorf("get membership: %w", err) - } - - payload, err := store.client.Get(ctx, store.keys.Membership(membershipID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return membership.Membership{}, membership.ErrNotFound - case err != nil: - return membership.Membership{}, fmt.Errorf("get membership: %w", err) - } - - record, err := UnmarshalMembership(payload) - if err != nil { - return membership.Membership{}, fmt.Errorf("get membership: %w", err) - } - return record, nil -} - -// GetByGame returns every membership attached to gameID. -func (store *MembershipStore) GetByGame(ctx context.Context, gameID common.GameID) ([]membership.Membership, error) { - if store == nil || store.client == nil { - return nil, errors.New("get memberships by game: nil store") - } - if ctx == nil { - return nil, errors.New("get memberships by game: nil context") - } - if err := gameID.Validate(); err != nil { - return nil, fmt.Errorf("get memberships by game: %w", err) - } - - return store.loadMembershipsBySet(ctx, - "get memberships by game", - store.keys.MembershipsByGame(gameID), - ) -} - -// GetByUser returns every membership held by userID. 
-func (store *MembershipStore) GetByUser(ctx context.Context, userID string) ([]membership.Membership, error) { - if store == nil || store.client == nil { - return nil, errors.New("get memberships by user: nil store") - } - if ctx == nil { - return nil, errors.New("get memberships by user: nil context") - } - trimmed := strings.TrimSpace(userID) - if trimmed == "" { - return nil, fmt.Errorf("get memberships by user: user id must not be empty") - } - - return store.loadMembershipsBySet(ctx, - "get memberships by user", - store.keys.MembershipsByUser(trimmed), - ) -} - -// loadMembershipsBySet materializes memberships whose ids are stored in -// setKey. Stale set members are dropped silently. -func (store *MembershipStore) loadMembershipsBySet(ctx context.Context, operation, setKey string) ([]membership.Membership, error) { - members, err := store.client.SMembers(ctx, setKey).Result() - if err != nil { - return nil, fmt.Errorf("%s: %w", operation, err) - } - if len(members) == 0 { - return nil, nil - } - - primaryKeys := make([]string, len(members)) - for index, member := range members { - primaryKeys[index] = store.keys.Membership(common.MembershipID(member)) - } - - payloads, err := store.client.MGet(ctx, primaryKeys...).Result() - if err != nil { - return nil, fmt.Errorf("%s: %w", operation, err) - } - - records := make([]membership.Membership, 0, len(payloads)) - for _, entry := range payloads { - if entry == nil { - continue - } - raw, ok := entry.(string) - if !ok { - return nil, fmt.Errorf("%s: unexpected payload type %T", operation, entry) - } - record, err := UnmarshalMembership([]byte(raw)) - if err != nil { - return nil, fmt.Errorf("%s: %w", operation, err) - } - records = append(records, record) - } - - return records, nil -} - -// UpdateStatus applies one status transition in a compare-and-swap fashion. 
-func (store *MembershipStore) UpdateStatus(ctx context.Context, input ports.UpdateMembershipStatusInput) error { - if store == nil || store.client == nil { - return errors.New("update membership status: nil store") - } - if ctx == nil { - return errors.New("update membership status: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("update membership status: %w", err) - } - - if err := membership.Transition(input.ExpectedFrom, input.To); err != nil { - return err - } - - primaryKey := store.keys.Membership(input.MembershipID) - at := input.At.UTC() - - watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error { - payload, getErr := tx.Get(ctx, primaryKey).Bytes() - switch { - case errors.Is(getErr, redis.Nil): - return membership.ErrNotFound - case getErr != nil: - return fmt.Errorf("update membership status: %w", getErr) - } - - existing, err := UnmarshalMembership(payload) - if err != nil { - return fmt.Errorf("update membership status: %w", err) - } - if existing.Status != input.ExpectedFrom { - return fmt.Errorf("update membership status: %w", membership.ErrConflict) - } - - existing.Status = input.To - removedAt := at - existing.RemovedAt = &removedAt - - encoded, err := MarshalMembership(existing) - if err != nil { - return fmt.Errorf("update membership status: %w", err) - } - - _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, primaryKey, encoded, MembershipRecordTTL) - return nil - }) - return err - }, primaryKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("update membership status: %w", membership.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// Delete removes the membership record identified by membershipID from -// the primary store and from the per-game and per-user index sets in -// one transaction. 
It returns membership.ErrNotFound when no record -// exists for the id and membership.ErrConflict when a concurrent -// mutation invalidates the watched key. -func (store *MembershipStore) Delete(ctx context.Context, membershipID common.MembershipID) error { - if store == nil || store.client == nil { - return errors.New("delete membership: nil store") - } - if ctx == nil { - return errors.New("delete membership: nil context") - } - if err := membershipID.Validate(); err != nil { - return fmt.Errorf("delete membership: %w", err) - } - - primaryKey := store.keys.Membership(membershipID) - member := membershipID.String() - - watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error { - payload, getErr := tx.Get(ctx, primaryKey).Bytes() - switch { - case errors.Is(getErr, redis.Nil): - return membership.ErrNotFound - case getErr != nil: - return fmt.Errorf("delete membership: %w", getErr) - } - - existing, err := UnmarshalMembership(payload) - if err != nil { - return fmt.Errorf("delete membership: %w", err) - } - - gameIndexKey := store.keys.MembershipsByGame(existing.GameID) - userIndexKey := store.keys.MembershipsByUser(existing.UserID) - - _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Del(ctx, primaryKey) - pipe.SRem(ctx, gameIndexKey, member) - pipe.SRem(ctx, userIndexKey, member) - return nil - }) - return err - }, primaryKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("delete membership: %w", membership.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// Ensure MembershipStore satisfies the ports.MembershipStore interface at -// compile time. 
-var _ ports.MembershipStore = (*MembershipStore)(nil) diff --git a/lobby/internal/adapters/redisstate/membershipstore_test.go b/lobby/internal/adapters/redisstate/membershipstore_test.go deleted file mode 100644 index 60b503f..0000000 --- a/lobby/internal/adapters/redisstate/membershipstore_test.go +++ /dev/null @@ -1,299 +0,0 @@ -package redisstate_test - -import ( - "context" - "errors" - "sort" - "strings" - "testing" - "time" - - "galaxy/lobby/internal/adapters/redisstate" - "galaxy/lobby/internal/domain/common" - "galaxy/lobby/internal/domain/membership" - "galaxy/lobby/internal/ports" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func newMembershipTestStore(t *testing.T) (*redisstate.MembershipStore, *miniredis.Miniredis, *redis.Client) { - t.Helper() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { - _ = client.Close() - }) - - store, err := redisstate.NewMembershipStore(client) - require.NoError(t, err) - - return store, server, client -} - -func fixtureMembership(t *testing.T, id common.MembershipID, userID, raceName string, gameID common.GameID) membership.Membership { - t.Helper() - - now := time.Date(2026, 4, 23, 12, 0, 0, 0, time.UTC) - record, err := membership.New(membership.NewMembershipInput{ - MembershipID: id, - GameID: gameID, - UserID: userID, - RaceName: raceName, - CanonicalKey: strings.ToLower(strings.ReplaceAll(raceName, " ", "")), - Now: now, - }) - require.NoError(t, err) - return record -} - -func TestNewMembershipStoreRejectsNilClient(t *testing.T) { - _, err := redisstate.NewMembershipStore(nil) - require.Error(t, err) -} - -func TestMembershipStoreSaveAndGet(t *testing.T) { - ctx := context.Background() - store, _, client := newMembershipTestStore(t) - - record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1") - require.NoError(t, 
store.Save(ctx, record)) - - got, err := store.Get(ctx, record.MembershipID) - require.NoError(t, err) - assert.Equal(t, record.MembershipID, got.MembershipID) - assert.Equal(t, "Solar Pilot", got.RaceName) - assert.Equal(t, membership.StatusActive, got.Status) - assert.Nil(t, got.RemovedAt) - - byGame, err := client.SMembers(ctx, "lobby:game_memberships:"+base64URL(record.GameID.String())).Result() - require.NoError(t, err) - assert.ElementsMatch(t, []string{record.MembershipID.String()}, byGame) - - byUser, err := client.SMembers(ctx, "lobby:user_memberships:"+base64URL(record.UserID)).Result() - require.NoError(t, err) - assert.ElementsMatch(t, []string{record.MembershipID.String()}, byUser) -} - -func TestMembershipStoreGetReturnsNotFound(t *testing.T) { - ctx := context.Background() - store, _, _ := newMembershipTestStore(t) - - _, err := store.Get(ctx, common.MembershipID("membership-missing")) - require.ErrorIs(t, err, membership.ErrNotFound) -} - -func TestMembershipStoreSaveRejectsNonActive(t *testing.T) { - ctx := context.Background() - store, _, _ := newMembershipTestStore(t) - - record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1") - record.Status = membership.StatusRemoved - removedAt := record.JoinedAt.Add(time.Hour) - record.RemovedAt = &removedAt - - err := store.Save(ctx, record) - require.Error(t, err) - assert.False(t, errors.Is(err, membership.ErrConflict)) -} - -func TestMembershipStoreSaveRejectsDuplicate(t *testing.T) { - ctx := context.Background() - store, _, _ := newMembershipTestStore(t) - - record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1") - require.NoError(t, store.Save(ctx, record)) - - err := store.Save(ctx, record) - require.Error(t, err) - assert.True(t, errors.Is(err, membership.ErrConflict)) -} - -func TestMembershipStoreUpdateStatusSetsRemovedAt(t *testing.T) { - cases := []struct { - name string - target membership.Status - }{ - {"removed", membership.StatusRemoved}, - 
{"blocked", membership.StatusBlocked}, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - ctx := context.Background() - store, _, _ := newMembershipTestStore(t) - - record := fixtureMembership(t, common.MembershipID("membership-"+tc.name), "user-1", "Solar Pilot", "game-1") - require.NoError(t, store.Save(ctx, record)) - - at := record.JoinedAt.Add(2 * time.Hour) - require.NoError(t, store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{ - MembershipID: record.MembershipID, - ExpectedFrom: membership.StatusActive, - To: tc.target, - At: at, - })) - - got, err := store.Get(ctx, record.MembershipID) - require.NoError(t, err) - assert.Equal(t, tc.target, got.Status) - require.NotNil(t, got.RemovedAt) - assert.True(t, got.RemovedAt.Equal(at.UTC())) - }) - } -} - -func TestMembershipStoreUpdateStatusRejectsInvalidTransitionWithoutMutation(t *testing.T) { - ctx := context.Background() - store, _, _ := newMembershipTestStore(t) - - record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1") - require.NoError(t, store.Save(ctx, record)) - - err := store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{ - MembershipID: record.MembershipID, - ExpectedFrom: membership.StatusRemoved, - To: membership.StatusBlocked, - At: record.JoinedAt.Add(time.Minute), - }) - require.Error(t, err) - assert.True(t, errors.Is(err, membership.ErrInvalidTransition)) - - got, err := store.Get(ctx, record.MembershipID) - require.NoError(t, err) - assert.Equal(t, membership.StatusActive, got.Status) - assert.Nil(t, got.RemovedAt) -} - -func TestMembershipStoreUpdateStatusReturnsConflictWhenStatusDiverges(t *testing.T) { - ctx := context.Background() - store, _, _ := newMembershipTestStore(t) - - record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1") - require.NoError(t, store.Save(ctx, record)) - - require.NoError(t, store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{ - MembershipID: record.MembershipID, - 
ExpectedFrom: membership.StatusActive, - To: membership.StatusBlocked, - At: record.JoinedAt.Add(time.Minute), - })) - - err := store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{ - MembershipID: record.MembershipID, - ExpectedFrom: membership.StatusActive, - To: membership.StatusRemoved, - At: record.JoinedAt.Add(2 * time.Minute), - }) - require.Error(t, err) - assert.True(t, errors.Is(err, membership.ErrConflict)) -} - -func TestMembershipStoreUpdateStatusReturnsNotFoundForMissingRecord(t *testing.T) { - ctx := context.Background() - store, _, _ := newMembershipTestStore(t) - - err := store.UpdateStatus(ctx, ports.UpdateMembershipStatusInput{ - MembershipID: common.MembershipID("membership-missing"), - ExpectedFrom: membership.StatusActive, - To: membership.StatusRemoved, - At: time.Now().UTC(), - }) - require.ErrorIs(t, err, membership.ErrNotFound) -} - -func TestMembershipStoreGetByGameAndByUser(t *testing.T) { - ctx := context.Background() - store, _, _ := newMembershipTestStore(t) - - m1 := fixtureMembership(t, "membership-a1", "user-1", "Racer A", "game-1") - m2 := fixtureMembership(t, "membership-a2", "user-2", "Racer B", "game-1") - m3 := fixtureMembership(t, "membership-a3", "user-1", "Racer C", "game-2") - - for _, record := range []membership.Membership{m1, m2, m3} { - require.NoError(t, store.Save(ctx, record)) - } - - byGame1, err := store.GetByGame(ctx, "game-1") - require.NoError(t, err) - require.Len(t, byGame1, 2) - - byUser1, err := store.GetByUser(ctx, "user-1") - require.NoError(t, err) - require.Len(t, byUser1, 2) - - ids := collectMembershipIDs(byUser1) - sort.Strings(ids) - assert.Equal(t, []string{"membership-a1", "membership-a3"}, ids) - - byUserMissing, err := store.GetByUser(ctx, "user-missing") - require.NoError(t, err) - assert.Empty(t, byUserMissing) -} - -func TestMembershipStoreGetByUserDropsStaleIndexEntries(t *testing.T) { - ctx := context.Background() - store, server, _ := newMembershipTestStore(t) - - record := 
fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1") - require.NoError(t, store.Save(ctx, record)) - - server.Del("lobby:memberships:" + base64URL(record.MembershipID.String())) - - records, err := store.GetByUser(ctx, record.UserID) - require.NoError(t, err) - assert.Empty(t, records) -} - -func TestMembershipStoreDeleteRemovesPrimaryAndIndexes(t *testing.T) { - ctx := context.Background() - store, _, client := newMembershipTestStore(t) - - record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1") - require.NoError(t, store.Save(ctx, record)) - - require.NoError(t, store.Delete(ctx, record.MembershipID)) - - _, err := store.Get(ctx, record.MembershipID) - require.ErrorIs(t, err, membership.ErrNotFound) - - byGame, err := client.SMembers(ctx, "lobby:game_memberships:"+base64URL(record.GameID.String())).Result() - require.NoError(t, err) - assert.Empty(t, byGame) - - byUser, err := client.SMembers(ctx, "lobby:user_memberships:"+base64URL(record.UserID)).Result() - require.NoError(t, err) - assert.Empty(t, byUser) -} - -func TestMembershipStoreDeleteReturnsNotFoundForMissingRecord(t *testing.T) { - ctx := context.Background() - store, _, _ := newMembershipTestStore(t) - - err := store.Delete(ctx, common.MembershipID("membership-missing")) - require.ErrorIs(t, err, membership.ErrNotFound) -} - -func TestMembershipStoreDeleteIsIdempotentAfterFirstSuccess(t *testing.T) { - ctx := context.Background() - store, _, _ := newMembershipTestStore(t) - - record := fixtureMembership(t, "membership-a", "user-1", "Solar Pilot", "game-1") - require.NoError(t, store.Save(ctx, record)) - - require.NoError(t, store.Delete(ctx, record.MembershipID)) - - err := store.Delete(ctx, record.MembershipID) - require.ErrorIs(t, err, membership.ErrNotFound) -} - -func collectMembershipIDs(records []membership.Membership) []string { - ids := make([]string, len(records)) - for index, record := range records { - ids[index] = record.MembershipID.String() - 
} - return ids -} diff --git a/lobby/internal/adapters/redisstate/racenamedir.go b/lobby/internal/adapters/redisstate/racenamedir.go deleted file mode 100644 index 0bf853c..0000000 --- a/lobby/internal/adapters/redisstate/racenamedir.go +++ /dev/null @@ -1,1101 +0,0 @@ -package redisstate - -import ( - "context" - "encoding/base64" - "errors" - "fmt" - "strings" - "time" - - "galaxy/lobby/internal/domain/common" - "galaxy/lobby/internal/domain/racename" - "galaxy/lobby/internal/ports" - - "github.com/redis/go-redis/v9" -) - -// RaceNameDirectory is the Redis-backed implementation of -// ports.RaceNameDirectory. It persists the two-tier Race Name Directory -// state (registered, reservation, pending_registration) under the Redis -// key layout frozen in lobby/README.md §Redis Logical Model. -type RaceNameDirectory struct { - client *redis.Client - keys Keyspace - policy *racename.Policy - nowFn func() time.Time - releaseLua *redis.Script -} - -// RaceNameDirectoryOption tunes the Redis Race Name Directory adapter -// during construction. Options are evaluated in order. -type RaceNameDirectoryOption func(*RaceNameDirectory) - -// WithRaceNameDirectoryClock overrides the default time.Now clock used -// to stamp reserved_at_ms and registered_at_ms. It is intended for -// deterministic tests. -func WithRaceNameDirectoryClock(nowFn func() time.Time) RaceNameDirectoryOption { - return func(directory *RaceNameDirectory) { - if nowFn != nil { - directory.nowFn = nowFn - } - } -} - -// NewRaceNameDirectory constructs the Redis-backed Race Name Directory -// adapter. It returns an error when client or policy is nil. 
-func NewRaceNameDirectory( - client *redis.Client, - policy *racename.Policy, - opts ...RaceNameDirectoryOption, -) (*RaceNameDirectory, error) { - if client == nil { - return nil, errors.New("new race name directory: nil redis client") - } - if policy == nil { - return nil, errors.New("new race name directory: nil racename policy") - } - - directory := &RaceNameDirectory{ - client: client, - keys: Keyspace{}, - policy: policy, - nowFn: time.Now, - releaseLua: redis.NewScript(releaseAllByUserScript), - } - for _, opt := range opts { - opt(directory) - } - - return directory, nil -} - -// Canonicalize returns the canonical uniqueness key for raceName as a -// plain string. Callers map validation failures to the stable -// name_taken-adjacent error code via ports.ErrInvalidName. -func (directory *RaceNameDirectory) Canonicalize(raceName string) (string, error) { - canonical, err := directory.policy.Canonicalize(raceName) - if err != nil { - return "", fmt.Errorf("canonicalize race name: %w", ports.ErrInvalidName) - } - - return canonical.String(), nil -} - -// Check reports whether raceName is taken for actorUserID. Taken is -// false when no binding exists on the canonical key or when the -// existing binding is owned by actorUserID; the returned -// HolderUserID and Kind always mirror the underlying Redis state. 
-func (directory *RaceNameDirectory) Check( - ctx context.Context, - raceName, actorUserID string, -) (ports.Availability, error) { - if err := checkContext(ctx, "check race name"); err != nil { - return ports.Availability{}, err - } - actor, err := normalizeNonEmpty(actorUserID, "check race name", "actor user id") - if err != nil { - return ports.Availability{}, err - } - - canonical, err := directory.policy.Canonicalize(raceName) - if err != nil { - return ports.Availability{}, fmt.Errorf("check race name: %w", ports.ErrInvalidName) - } - - record, err := directory.loadCanonicalLookup(ctx, canonical) - switch { - case errors.Is(err, redis.Nil): - return ports.Availability{}, nil - case err != nil: - return ports.Availability{}, fmt.Errorf("check race name: %w", err) - } - - return ports.Availability{ - Taken: record.HolderUserID != actor, - HolderUserID: record.HolderUserID, - Kind: record.Kind, - }, nil -} - -// Reserve claims raceName for (gameID, userID). A second call by the -// same holder for the same tuple is a no-op; any cross-user collision on -// the canonical key returns ports.ErrNameTaken. 
-func (directory *RaceNameDirectory) Reserve( - ctx context.Context, - gameID, userID, raceName string, -) error { - if err := checkContext(ctx, "reserve race name"); err != nil { - return err - } - game, err := normalizeGameID(gameID, "reserve race name") - if err != nil { - return err - } - user, err := normalizeNonEmpty(userID, "reserve race name", "user id") - if err != nil { - return err - } - - displayName, err := racename.ValidateName(raceName) - if err != nil { - return fmt.Errorf("reserve race name: %w", ports.ErrInvalidName) - } - canonical, err := directory.policy.Canonical(displayName) - if err != nil { - return fmt.Errorf("reserve race name: %w", ports.ErrInvalidName) - } - - reservationKey := directory.keys.RaceNameReservation(game, canonical) - lookupKey := directory.keys.RaceNameCanonicalLookup(canonical) - userReservationsKey := directory.keys.UserRaceNameReservations(user) - - reservationMember := directory.keys.RaceNameReservationMember(game, canonical) - reservedAtMS := directory.nowFn().UTC().UnixMilli() - - watchErr := directory.client.Watch(ctx, func(tx *redis.Tx) error { - lookup, err := loadLookupTx(ctx, tx, lookupKey) - switch { - case errors.Is(err, redis.Nil): - lookup = canonicalLookupRecord{} - case err != nil: - return fmt.Errorf("reserve race name: %w", err) - } - if lookup.HolderUserID != "" && lookup.HolderUserID != user { - return ports.ErrNameTaken - } - - existing, err := loadReservationTx(ctx, tx, reservationKey) - switch { - case errors.Is(err, redis.Nil): - existing = reservationRecord{} - case err != nil: - return fmt.Errorf("reserve race name: %w", err) - } - if existing.UserID != "" { - if existing.UserID != user { - return ports.ErrNameTaken - } - // idempotent same-holder Reserve - return nil - } - - payload, err := marshalReservationRecord(reservationRecord{ - UserID: user, - RaceName: displayName, - ReservedAtMS: reservedAtMS, - Status: reservationStatusReserved, - }) - if err != nil { - return fmt.Errorf("reserve race 
name: %w", err) - } - - _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, reservationKey, payload, 0) - pipe.SAdd(ctx, userReservationsKey, reservationMember) - if lookup.HolderUserID == "" { - lookupPayload, err := marshalCanonicalLookupRecord(canonicalLookupRecord{ - Kind: ports.KindReservation, - HolderUserID: user, - GameID: game.String(), - }) - if err != nil { - return err - } - pipe.Set(ctx, lookupKey, lookupPayload, 0) - } - return nil - }) - return err - }, reservationKey, lookupKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("reserve race name: %w", ports.ErrNameTaken) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// ReleaseReservation removes the reservation held by userID for -// raceName in gameID. Missing reservation, mismatched holder, and -// invalid raceName all resolve to a silent no-op per the port contract. -func (directory *RaceNameDirectory) ReleaseReservation( - ctx context.Context, - gameID, userID, raceName string, -) error { - if err := checkContext(ctx, "release race name reservation"); err != nil { - return err - } - game, err := normalizeGameID(gameID, "release race name reservation") - if err != nil { - return err - } - user, err := normalizeNonEmpty(userID, "release race name reservation", "user id") - if err != nil { - return err - } - - canonical, err := directory.policy.Canonicalize(raceName) - if err != nil { - return nil - } - - reservationKey := directory.keys.RaceNameReservation(game, canonical) - lookupKey := directory.keys.RaceNameCanonicalLookup(canonical) - userReservationsKey := directory.keys.UserRaceNameReservations(user) - userRegisteredKey := directory.keys.UserRegisteredRaceNames(user) - reservationMember := directory.keys.RaceNameReservationMember(game, canonical) - - watchErr := directory.client.Watch(ctx, func(tx *redis.Tx) error { - existing, err := loadReservationTx(ctx, tx, reservationKey) - switch { - case 
errors.Is(err, redis.Nil): - return nil - case err != nil: - return fmt.Errorf("release race name reservation: %w", err) - } - if existing.UserID != user { - return nil - } - - remainingMember, remainingGame, remainingStatus, err := directory.findOtherReservationMember( - ctx, tx, userReservationsKey, canonical, reservationMember, - ) - if err != nil { - return fmt.Errorf("release race name reservation: %w", err) - } - - registeredPresent, err := registeredHeldBy(ctx, tx, directory.keys.RegisteredRaceName(canonical), user) - if err != nil { - return fmt.Errorf("release race name reservation: %w", err) - } - - _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Del(ctx, reservationKey) - pipe.SRem(ctx, userReservationsKey, reservationMember) - if existing.Status == reservationStatusPending { - pipe.ZRem(ctx, directory.keys.PendingRaceNameIndex(), reservationMember) - } - - switch { - case registeredPresent: - lookupPayload, err := marshalCanonicalLookupRecord(canonicalLookupRecord{ - Kind: ports.KindRegistered, - HolderUserID: user, - }) - if err != nil { - return err - } - pipe.Set(ctx, lookupKey, lookupPayload, 0) - case remainingMember != "": - kind := ports.KindReservation - if remainingStatus == reservationStatusPending { - kind = ports.KindPendingRegistration - } - lookupPayload, err := marshalCanonicalLookupRecord(canonicalLookupRecord{ - Kind: kind, - HolderUserID: user, - GameID: remainingGame.String(), - }) - if err != nil { - return err - } - pipe.Set(ctx, lookupKey, lookupPayload, 0) - default: - pipe.Del(ctx, lookupKey) - } - return nil - }) - return err - }, reservationKey, userReservationsKey, lookupKey, userRegisteredKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - // Concurrent mutation touched the reservation — reread the state - // on a retry to preserve the defensive no-op contract. 
- return directory.ReleaseReservation(ctx, gameID, userID, raceName) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// MarkPendingRegistration promotes the reservation for (gameID, userID) -// on raceName's canonical key to pending_registration status with the -// supplied eligibleUntil. A second call with the same eligibleUntil is -// a no-op; a call with a different eligibleUntil returns -// ports.ErrInvalidName. -func (directory *RaceNameDirectory) MarkPendingRegistration( - ctx context.Context, - gameID, userID, raceName string, - eligibleUntil time.Time, -) error { - if err := checkContext(ctx, "mark pending race name registration"); err != nil { - return err - } - game, err := normalizeGameID(gameID, "mark pending race name registration") - if err != nil { - return err - } - user, err := normalizeNonEmpty(userID, "mark pending race name registration", "user id") - if err != nil { - return err - } - if eligibleUntil.IsZero() { - return fmt.Errorf("mark pending race name registration: eligible until must be set") - } - - displayName, err := racename.ValidateName(raceName) - if err != nil { - return fmt.Errorf("mark pending race name registration: %w", ports.ErrInvalidName) - } - canonical, err := directory.policy.Canonical(displayName) - if err != nil { - return fmt.Errorf("mark pending race name registration: %w", ports.ErrInvalidName) - } - - reservationKey := directory.keys.RaceNameReservation(game, canonical) - lookupKey := directory.keys.RaceNameCanonicalLookup(canonical) - pendingIndexKey := directory.keys.PendingRaceNameIndex() - reservationMember := directory.keys.RaceNameReservationMember(game, canonical) - eligibleUntilMS := eligibleUntil.UTC().UnixMilli() - - watchErr := directory.client.Watch(ctx, func(tx *redis.Tx) error { - existing, err := loadReservationTx(ctx, tx, reservationKey) - switch { - case errors.Is(err, redis.Nil): - return fmt.Errorf("mark pending race name registration: reservation missing for game %q 
user %q", game, user) - case err != nil: - return fmt.Errorf("mark pending race name registration: %w", err) - } - if existing.UserID != user { - return fmt.Errorf("mark pending race name registration: reservation held by different user") - } - if existing.Status == reservationStatusPending { - if existing.EligibleUntilMS == nil || *existing.EligibleUntilMS != eligibleUntilMS { - return fmt.Errorf("mark pending race name registration: %w", ports.ErrInvalidName) - } - // idempotent: same eligible_until already stored. - return nil - } - - lookup, err := loadLookupTx(ctx, tx, lookupKey) - switch { - case errors.Is(err, redis.Nil): - lookup = canonicalLookupRecord{} - case err != nil: - return fmt.Errorf("mark pending race name registration: %w", err) - } - - existing.Status = reservationStatusPending - existing.RaceName = displayName - eligibleUntilCopy := eligibleUntilMS - existing.EligibleUntilMS = &eligibleUntilCopy - - payload, err := marshalReservationRecord(existing) - if err != nil { - return fmt.Errorf("mark pending race name registration: %w", err) - } - - _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, reservationKey, payload, 0) - pipe.ZAdd(ctx, pendingIndexKey, redis.Z{ - Score: float64(eligibleUntilMS), - Member: reservationMember, - }) - if lookup.Kind != ports.KindRegistered { - lookupPayload, err := marshalCanonicalLookupRecord(canonicalLookupRecord{ - Kind: ports.KindPendingRegistration, - HolderUserID: user, - GameID: game.String(), - }) - if err != nil { - return err - } - pipe.Set(ctx, lookupKey, lookupPayload, 0) - } - return nil - }) - return err - }, reservationKey, lookupKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("mark pending race name registration: %w", ports.ErrInvalidName) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// ExpirePendingRegistrations releases every pending registration whose -// eligibleUntil is at or before now. 
Expired entries are returned so -// callers can emit telemetry. Running twice is safe. -func (directory *RaceNameDirectory) ExpirePendingRegistrations( - ctx context.Context, - now time.Time, -) ([]ports.ExpiredPending, error) { - if err := checkContext(ctx, "expire pending race name registrations"); err != nil { - return nil, err - } - - cutoff := now.UTC().UnixMilli() - members, err := directory.client.ZRangeArgs(ctx, redis.ZRangeArgs{ - Key: directory.keys.PendingRaceNameIndex(), - ByScore: true, - Start: "-inf", - Stop: fmt.Sprintf("%d", cutoff), - }).Result() - if err != nil { - return nil, fmt.Errorf("expire pending race name registrations: %w", err) - } - if len(members) == 0 { - return nil, nil - } - - expired := make([]ports.ExpiredPending, 0, len(members)) - for _, member := range members { - game, canonical, err := splitReservationMember(member) - if err != nil { - return nil, fmt.Errorf("expire pending race name registrations: %w", err) - } - entry, released, err := directory.expireOnePending(ctx, game, canonical, member, cutoff) - if err != nil { - return nil, fmt.Errorf("expire pending race name registrations: %w", err) - } - if released { - expired = append(expired, entry) - } - } - - return expired, nil -} - -// Register converts the pending registration identified by (gameID, -// userID) on raceName's canonical key into a permanent registered name. -// Missing pending returns ports.ErrPendingMissing; expired pending -// returns ports.ErrPendingExpired; a repeated success is a no-op. 
-func (directory *RaceNameDirectory) Register( - ctx context.Context, - gameID, userID, raceName string, -) error { - if err := checkContext(ctx, "register race name"); err != nil { - return err - } - game, err := normalizeGameID(gameID, "register race name") - if err != nil { - return err - } - user, err := normalizeNonEmpty(userID, "register race name", "user id") - if err != nil { - return err - } - - displayName, err := racename.ValidateName(raceName) - if err != nil { - return fmt.Errorf("register race name: %w", ports.ErrInvalidName) - } - canonical, err := directory.policy.Canonical(displayName) - if err != nil { - return fmt.Errorf("register race name: %w", ports.ErrInvalidName) - } - - registeredKey := directory.keys.RegisteredRaceName(canonical) - reservationKey := directory.keys.RaceNameReservation(game, canonical) - lookupKey := directory.keys.RaceNameCanonicalLookup(canonical) - userRegisteredKey := directory.keys.UserRegisteredRaceNames(user) - userReservationsKey := directory.keys.UserRaceNameReservations(user) - pendingIndexKey := directory.keys.PendingRaceNameIndex() - reservationMember := directory.keys.RaceNameReservationMember(game, canonical) - - nowMS := directory.nowFn().UTC().UnixMilli() - - watchErr := directory.client.Watch(ctx, func(tx *redis.Tx) error { - registered, err := loadRegisteredTx(ctx, tx, registeredKey) - switch { - case errors.Is(err, redis.Nil): - registered = registeredRecord{} - case err != nil: - return fmt.Errorf("register race name: %w", err) - } - if registered.UserID != "" { - if registered.UserID == user { - // idempotent repeat - return nil - } - return ports.ErrNameTaken - } - - pending, err := loadReservationTx(ctx, tx, reservationKey) - switch { - case errors.Is(err, redis.Nil): - return ports.ErrPendingMissing - case err != nil: - return fmt.Errorf("register race name: %w", err) - } - if pending.UserID != user || pending.Status != reservationStatusPending { - return ports.ErrPendingMissing - } - if 
pending.EligibleUntilMS == nil || *pending.EligibleUntilMS <= nowMS { - return ports.ErrPendingExpired - } - - payload, err := marshalRegisteredRecord(registeredRecord{ - UserID: user, - RaceName: displayName, - SourceGameID: game.String(), - RegisteredAtMS: nowMS, - }) - if err != nil { - return fmt.Errorf("register race name: %w", err) - } - lookupPayload, err := marshalCanonicalLookupRecord(canonicalLookupRecord{ - Kind: ports.KindRegistered, - HolderUserID: user, - }) - if err != nil { - return fmt.Errorf("register race name: %w", err) - } - - _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, registeredKey, payload, 0) - pipe.SAdd(ctx, userRegisteredKey, encodeKeyComponent(canonical.String())) - pipe.Del(ctx, reservationKey) - pipe.SRem(ctx, userReservationsKey, reservationMember) - pipe.ZRem(ctx, pendingIndexKey, reservationMember) - pipe.Set(ctx, lookupKey, lookupPayload, 0) - return nil - }) - return err - }, registeredKey, reservationKey, lookupKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("register race name: %w", ports.ErrPendingMissing) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// ListRegistered returns every registered race name owned by userID. 
-func (directory *RaceNameDirectory) ListRegistered( - ctx context.Context, - userID string, -) ([]ports.RegisteredName, error) { - if err := checkContext(ctx, "list registered race names"); err != nil { - return nil, err - } - user, err := normalizeNonEmpty(userID, "list registered race names", "user id") - if err != nil { - return nil, err - } - - members, err := directory.client.SMembers(ctx, directory.keys.UserRegisteredRaceNames(user)).Result() - if err != nil { - return nil, fmt.Errorf("list registered race names: %w", err) - } - if len(members) == 0 { - return nil, nil - } - - keys := make([]string, len(members)) - canonicals := make([]racename.CanonicalKey, len(members)) - for index, encoded := range members { - decodedBytes, err := base64.RawURLEncoding.DecodeString(encoded) - if err != nil { - return nil, fmt.Errorf("list registered race names: decode canonical %q: %w", encoded, err) - } - canonical := racename.CanonicalKey(string(decodedBytes)) - canonicals[index] = canonical - keys[index] = directory.keys.RegisteredRaceName(canonical) - } - payloads, err := directory.client.MGet(ctx, keys...).Result() - if err != nil { - return nil, fmt.Errorf("list registered race names: %w", err) - } - - results := make([]ports.RegisteredName, 0, len(payloads)) - for index, entry := range payloads { - if entry == nil { - continue - } - raw, ok := entry.(string) - if !ok { - return nil, fmt.Errorf("list registered race names: unexpected payload type %T", entry) - } - record, err := unmarshalRegisteredRecord([]byte(raw)) - if err != nil { - return nil, fmt.Errorf("list registered race names: %w", err) - } - results = append(results, ports.RegisteredName{ - CanonicalKey: canonicals[index].String(), - RaceName: record.RaceName, - SourceGameID: record.SourceGameID, - RegisteredAtMs: record.RegisteredAtMS, - }) - } - - return results, nil -} - -// ListPendingRegistrations returns every pending registration owned by -// userID. 
-func (directory *RaceNameDirectory) ListPendingRegistrations( - ctx context.Context, - userID string, -) ([]ports.PendingRegistration, error) { - if err := checkContext(ctx, "list pending race name registrations"); err != nil { - return nil, err - } - user, err := normalizeNonEmpty(userID, "list pending race name registrations", "user id") - if err != nil { - return nil, err - } - - entries, err := directory.loadUserReservations(ctx, user, "list pending race name registrations") - if err != nil { - return nil, err - } - - pending := make([]ports.PendingRegistration, 0, len(entries)) - for _, entry := range entries { - if entry.record.Status != reservationStatusPending { - continue - } - eligibleUntilMS := int64(0) - if entry.record.EligibleUntilMS != nil { - eligibleUntilMS = *entry.record.EligibleUntilMS - } - pending = append(pending, ports.PendingRegistration{ - CanonicalKey: entry.canonical.String(), - RaceName: entry.record.RaceName, - GameID: entry.game.String(), - ReservedAtMs: entry.record.ReservedAtMS, - EligibleUntilMs: eligibleUntilMS, - }) - } - - return pending, nil -} - -// ListReservations returns every active reservation owned by userID -// whose status has not yet been promoted to pending_registration. 
-func (directory *RaceNameDirectory) ListReservations( - ctx context.Context, - userID string, -) ([]ports.Reservation, error) { - if err := checkContext(ctx, "list race name reservations"); err != nil { - return nil, err - } - user, err := normalizeNonEmpty(userID, "list race name reservations", "user id") - if err != nil { - return nil, err - } - - entries, err := directory.loadUserReservations(ctx, user, "list race name reservations") - if err != nil { - return nil, err - } - - reservations := make([]ports.Reservation, 0, len(entries)) - for _, entry := range entries { - if entry.record.Status != reservationStatusReserved { - continue - } - reservations = append(reservations, ports.Reservation{ - CanonicalKey: entry.canonical.String(), - RaceName: entry.record.RaceName, - GameID: entry.game.String(), - ReservedAtMs: entry.record.ReservedAtMS, - }) - } - - return reservations, nil -} - -// ReleaseAllByUser clears every registered, reservation, and -// pending_registration binding owned by userID via a single Lua script -// invocation, so the cascade is atomic relative to concurrent readers. -func (directory *RaceNameDirectory) ReleaseAllByUser( - ctx context.Context, - userID string, -) error { - if err := checkContext(ctx, "release all race names by user"); err != nil { - return err - } - user, err := normalizeNonEmpty(userID, "release all race names by user", "user id") - if err != nil { - return err - } - - _, err = directory.releaseLua.Run( - ctx, - directory.client, - []string{ - directory.keys.UserRegisteredRaceNames(user), - directory.keys.UserRaceNameReservations(user), - directory.keys.PendingRaceNameIndex(), - }, - defaultPrefix, - ).Result() - if err != nil && !errors.Is(err, redis.Nil) { - return fmt.Errorf("release all race names by user: %w", err) - } - - return nil -} - -// expireOneMaxRetries caps retry attempts when Watch optimistic -// concurrency fails during pending expiration, so transient contention -// cannot livelock the worker. 
-const expireOneMaxRetries = 8 - -// expireOnePending atomically releases one pending entry by -// reservationMember at-or-before cutoff, returning the entry for -// telemetry when the release commits. -func (directory *RaceNameDirectory) expireOnePending( - ctx context.Context, - game common.GameID, - canonical racename.CanonicalKey, - reservationMember string, - cutoff int64, -) (ports.ExpiredPending, bool, error) { - reservationKey := directory.keys.RaceNameReservation(game, canonical) - lookupKey := directory.keys.RaceNameCanonicalLookup(canonical) - pendingIndexKey := directory.keys.PendingRaceNameIndex() - - for range expireOneMaxRetries { - var ( - resultEntry ports.ExpiredPending - resultReleased bool - ) - - watchErr := directory.client.Watch(ctx, func(tx *redis.Tx) error { - existing, err := loadReservationTx(ctx, tx, reservationKey) - switch { - case errors.Is(err, redis.Nil): - // Lost the race to another release path; drop the index - // member defensively and continue. - _, pipeErr := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.ZRem(ctx, pendingIndexKey, reservationMember) - return nil - }) - return pipeErr - case err != nil: - return err - } - if existing.Status != reservationStatusPending || existing.EligibleUntilMS == nil { - _, pipeErr := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.ZRem(ctx, pendingIndexKey, reservationMember) - return nil - }) - return pipeErr - } - if *existing.EligibleUntilMS > cutoff { - // Extended between ZRANGEBYSCORE and now; skip. 
- return nil - } - - userReservationsKey := directory.keys.UserRaceNameReservations(existing.UserID) - registeredPresent, err := registeredHeldBy(ctx, tx, directory.keys.RegisteredRaceName(canonical), existing.UserID) - if err != nil { - return err - } - remainingMember, remainingGame, remainingStatus, err := directory.findOtherReservationMember( - ctx, tx, userReservationsKey, canonical, reservationMember, - ) - if err != nil { - return err - } - - _, pipeErr := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Del(ctx, reservationKey) - pipe.SRem(ctx, userReservationsKey, reservationMember) - pipe.ZRem(ctx, pendingIndexKey, reservationMember) - switch { - case registeredPresent: - lookupPayload, err := marshalCanonicalLookupRecord(canonicalLookupRecord{ - Kind: ports.KindRegistered, - HolderUserID: existing.UserID, - }) - if err != nil { - return err - } - pipe.Set(ctx, lookupKey, lookupPayload, 0) - case remainingMember != "": - kind := ports.KindReservation - if remainingStatus == reservationStatusPending { - kind = ports.KindPendingRegistration - } - lookupPayload, err := marshalCanonicalLookupRecord(canonicalLookupRecord{ - Kind: kind, - HolderUserID: existing.UserID, - GameID: remainingGame.String(), - }) - if err != nil { - return err - } - pipe.Set(ctx, lookupKey, lookupPayload, 0) - default: - pipe.Del(ctx, lookupKey) - } - return nil - }) - if pipeErr != nil { - return pipeErr - } - - resultEntry = ports.ExpiredPending{ - CanonicalKey: canonical.String(), - RaceName: existing.RaceName, - GameID: game.String(), - UserID: existing.UserID, - EligibleUntilMs: *existing.EligibleUntilMS, - } - resultReleased = true - return nil - }, reservationKey, lookupKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - continue - case watchErr != nil: - return ports.ExpiredPending{}, false, watchErr - default: - return resultEntry, resultReleased, nil - } - } - - return ports.ExpiredPending{}, false, fmt.Errorf("expire pending: Watch contention 
exceeded %d retries", expireOneMaxRetries) -} - -// reservationEntry bundles a decoded reservation record with its key -// components for list-style methods. -type reservationEntry struct { - game common.GameID - canonical racename.CanonicalKey - record reservationRecord -} - -// loadUserReservations resolves every reservation (including pending) -// owned by userID by expanding UserRaceNameReservations members. -func (directory *RaceNameDirectory) loadUserReservations( - ctx context.Context, - userID, operation string, -) ([]reservationEntry, error) { - members, err := directory.client.SMembers(ctx, directory.keys.UserRaceNameReservations(userID)).Result() - if err != nil { - return nil, fmt.Errorf("%s: %w", operation, err) - } - if len(members) == 0 { - return nil, nil - } - - keys := make([]string, 0, len(members)) - decodedMembers := make([]struct { - game common.GameID - canonical racename.CanonicalKey - }, 0, len(members)) - for _, member := range members { - game, canonical, err := splitReservationMember(member) - if err != nil { - return nil, fmt.Errorf("%s: %w", operation, err) - } - keys = append(keys, directory.keys.RaceNameReservation(game, canonical)) - decodedMembers = append(decodedMembers, struct { - game common.GameID - canonical racename.CanonicalKey - }{game, canonical}) - } - - payloads, err := directory.client.MGet(ctx, keys...).Result() - if err != nil { - return nil, fmt.Errorf("%s: %w", operation, err) - } - - entries := make([]reservationEntry, 0, len(payloads)) - for index, entry := range payloads { - if entry == nil { - continue - } - raw, ok := entry.(string) - if !ok { - return nil, fmt.Errorf("%s: unexpected payload type %T", operation, entry) - } - record, err := unmarshalReservationRecord([]byte(raw)) - if err != nil { - return nil, fmt.Errorf("%s: %w", operation, err) - } - entries = append(entries, reservationEntry{ - game: decodedMembers[index].game, - canonical: decodedMembers[index].canonical, - record: record, - }) - } - - 
return entries, nil -} - -// loadCanonicalLookup loads the canonical-lookup cache entry for -// canonical. -func (directory *RaceNameDirectory) loadCanonicalLookup( - ctx context.Context, - canonical racename.CanonicalKey, -) (canonicalLookupRecord, error) { - payload, err := directory.client.Get(ctx, directory.keys.RaceNameCanonicalLookup(canonical)).Bytes() - if err != nil { - return canonicalLookupRecord{}, err - } - - return unmarshalCanonicalLookupRecord(payload) -} - -// findOtherReservationMember scans user_reservations for any member -// other than skip whose canonical suffix matches. It returns the raw -// member, decoded game id, and the reservation's current status when a -// match is found. -func (directory *RaceNameDirectory) findOtherReservationMember( - ctx context.Context, - tx *redis.Tx, - userReservationsKey string, - canonical racename.CanonicalKey, - skip string, -) (string, common.GameID, string, error) { - members, err := tx.SMembers(ctx, userReservationsKey).Result() - if err != nil { - return "", "", "", err - } - - canonicalEncoded := encodeKeyComponent(canonical.String()) - for _, member := range members { - if member == skip { - continue - } - sepIndex := strings.Index(member, ":") - if sepIndex <= 0 { - continue - } - if member[sepIndex+1:] != canonicalEncoded { - continue - } - game, parsedCanonical, err := splitReservationMember(member) - if err != nil { - return "", "", "", err - } - record, err := loadReservationTx(ctx, tx, directory.keys.RaceNameReservation(game, parsedCanonical)) - switch { - case errors.Is(err, redis.Nil): - continue - case err != nil: - return "", "", "", err - } - return member, game, record.Status, nil - } - - return "", "", "", nil -} - -// loadReservationTx reads the reservation blob for reservationKey within -// a Redis transaction. redis.Nil is propagated so callers can branch. 
-func loadReservationTx(ctx context.Context, tx *redis.Tx, reservationKey string) (reservationRecord, error) { - payload, err := tx.Get(ctx, reservationKey).Bytes() - if err != nil { - return reservationRecord{}, err - } - return unmarshalReservationRecord(payload) -} - -// loadLookupTx reads the canonical-lookup cache entry within a -// transaction. -func loadLookupTx(ctx context.Context, tx *redis.Tx, lookupKey string) (canonicalLookupRecord, error) { - payload, err := tx.Get(ctx, lookupKey).Bytes() - if err != nil { - return canonicalLookupRecord{}, err - } - return unmarshalCanonicalLookupRecord(payload) -} - -// loadRegisteredTx reads the registered blob for registeredKey within a -// transaction. -func loadRegisteredTx(ctx context.Context, tx *redis.Tx, registeredKey string) (registeredRecord, error) { - payload, err := tx.Get(ctx, registeredKey).Bytes() - if err != nil { - return registeredRecord{}, err - } - return unmarshalRegisteredRecord(payload) -} - -// registeredHeldBy reports whether registeredKey stores a registered -// race name owned by user within a transaction. -func registeredHeldBy(ctx context.Context, tx *redis.Tx, registeredKey, user string) (bool, error) { - record, err := loadRegisteredTx(ctx, tx, registeredKey) - switch { - case errors.Is(err, redis.Nil): - return false, nil - case err != nil: - return false, err - } - return record.UserID == user, nil -} - -// splitReservationMember decodes a : -// member back into its typed components. 
-func splitReservationMember(member string) (common.GameID, racename.CanonicalKey, error) { - sepIndex := strings.Index(member, ":") - if sepIndex <= 0 || sepIndex >= len(member)-1 { - return "", "", fmt.Errorf("invalid reservation member %q", member) - } - gameBytes, err := base64.RawURLEncoding.DecodeString(member[:sepIndex]) - if err != nil { - return "", "", fmt.Errorf("decode game component of %q: %w", member, err) - } - canonicalBytes, err := base64.RawURLEncoding.DecodeString(member[sepIndex+1:]) - if err != nil { - return "", "", fmt.Errorf("decode canonical component of %q: %w", member, err) - } - return common.GameID(string(gameBytes)), racename.CanonicalKey(string(canonicalBytes)), nil -} - -// checkContext rejects nil or already-canceled contexts up front, so -// adapter methods always surface cancellation consistently regardless of -// whether a Redis round-trip was attempted. -func checkContext(ctx context.Context, operation string) error { - if ctx == nil { - return fmt.Errorf("%s: nil context", operation) - } - if err := ctx.Err(); err != nil { - return fmt.Errorf("%s: %w", operation, err) - } - return nil -} - -// normalizeNonEmpty trims value and rejects empty results with a -// descriptive error including operation and field names. -func normalizeNonEmpty(value, operation, field string) (string, error) { - trimmed := strings.TrimSpace(value) - if trimmed == "" { - return "", fmt.Errorf("%s: %s must not be empty", operation, field) - } - return trimmed, nil -} - -// normalizeGameID trims and converts a user-supplied game id into a -// typed common.GameID, rejecting empty input. -func normalizeGameID(value, operation string) (common.GameID, error) { - trimmed, err := normalizeNonEmpty(value, operation, "game id") - if err != nil { - return "", err - } - return common.GameID(trimmed), nil -} - -// Ensure RaceNameDirectory satisfies the ports.RaceNameDirectory -// interface at compile time. 
-var _ ports.RaceNameDirectory = (*RaceNameDirectory)(nil) diff --git a/lobby/internal/adapters/redisstate/racenamedir_lua.go b/lobby/internal/adapters/redisstate/racenamedir_lua.go deleted file mode 100644 index 6978bf7..0000000 --- a/lobby/internal/adapters/redisstate/racenamedir_lua.go +++ /dev/null @@ -1,52 +0,0 @@ -package redisstate - -// releaseAllByUserScript atomically clears every registered, reservation, -// and pending_registration binding owned by one user. Inputs: -// -// KEYS[1] — user_registered set key -// KEYS[2] — user_reservations set key -// KEYS[3] — pending_index sorted-set key -// ARGV[1] — Lobby Redis key prefix (e.g. "lobby:") -// -// The script returns a three-entry table `{registeredCount, -// reservationsTotal, pendingCount}` so callers can emit telemetry without -// a second round-trip. reservationsTotal includes both reserved and -// pending_registration entries; pendingCount is the pending-only subset. -const releaseAllByUserScript = ` -local userRegisteredKey = KEYS[1] -local userReservationsKey = KEYS[2] -local pendingIndexKey = KEYS[3] -local prefix = ARGV[1] - -local registered = redis.call('SMEMBERS', userRegisteredKey) -for _, canonical in ipairs(registered) do - redis.call('DEL', prefix .. 'race_names:registered:' .. canonical) - redis.call('DEL', prefix .. 'race_names:canonical_lookup:' .. canonical) -end -local registeredCount = #registered -if registeredCount > 0 then - redis.call('DEL', userRegisteredKey) -end - -local reservations = redis.call('SMEMBERS', userReservationsKey) -local pendingCount = 0 -for _, member in ipairs(reservations) do - local sep = string.find(member, ':', 1, true) - if sep then - local encGame = string.sub(member, 1, sep - 1) - local encCanonical = string.sub(member, sep + 1) - redis.call('DEL', prefix .. 'race_names:reservations:' .. encGame .. ':' .. 
encCanonical) - local pendingRemoved = redis.call('ZREM', pendingIndexKey, member) - if pendingRemoved == 1 then - pendingCount = pendingCount + 1 - end - redis.call('DEL', prefix .. 'race_names:canonical_lookup:' .. encCanonical) - end -end -local reservationsTotal = #reservations -if reservationsTotal > 0 then - redis.call('DEL', userReservationsKey) -end - -return {registeredCount, reservationsTotal, pendingCount} -` diff --git a/lobby/internal/adapters/redisstate/racenamedir_test.go b/lobby/internal/adapters/redisstate/racenamedir_test.go deleted file mode 100644 index 94d6d24..0000000 --- a/lobby/internal/adapters/redisstate/racenamedir_test.go +++ /dev/null @@ -1,244 +0,0 @@ -package redisstate_test - -import ( - "context" - "encoding/base64" - "encoding/json" - "errors" - "testing" - "time" - - "galaxy/lobby/internal/adapters/redisstate" - "galaxy/lobby/internal/domain/racename" - "galaxy/lobby/internal/ports" - "galaxy/lobby/internal/ports/racenamedirtest" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func newRaceNameDirectoryAdapter( - t *testing.T, - now func() time.Time, -) (*redisstate.RaceNameDirectory, *miniredis.Miniredis, *redis.Client) { - t.Helper() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { - _ = client.Close() - }) - - policy, err := racename.NewPolicy() - require.NoError(t, err) - - var opts []redisstate.RaceNameDirectoryOption - if now != nil { - opts = append(opts, redisstate.WithRaceNameDirectoryClock(now)) - } - directory, err := redisstate.NewRaceNameDirectory(client, policy, opts...) 
- require.NoError(t, err) - - return directory, server, client -} - -func TestRaceNameDirectoryContract(t *testing.T) { - racenamedirtest.Run(t, func(now func() time.Time) ports.RaceNameDirectory { - directory, _, _ := newRaceNameDirectoryAdapter(t, now) - return directory - }) -} - -func TestNewRaceNameDirectoryRejectsNilClient(t *testing.T) { - policy, err := racename.NewPolicy() - require.NoError(t, err) - - _, err = redisstate.NewRaceNameDirectory(nil, policy) - require.Error(t, err) -} - -func TestNewRaceNameDirectoryRejectsNilPolicy(t *testing.T) { - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { _ = client.Close() }) - - _, err := redisstate.NewRaceNameDirectory(client, nil) - require.Error(t, err) -} - -func TestRaceNameDirectoryPersistsExactKeyShapes(t *testing.T) { - ctx := context.Background() - directory, server, _ := newRaceNameDirectoryAdapter(t, nil) - - const ( - gameID = "game-shape" - userID = "user-shape" - raceName = "PilotNova" - ) - - require.NoError(t, directory.Reserve(ctx, gameID, userID, raceName)) - - canonical, err := directory.Canonicalize(raceName) - require.NoError(t, err) - - encGame := base64URL(gameID) - encUser := base64URL(userID) - encCanonical := base64URL(canonical) - - require.True(t, server.Exists("lobby:race_names:reservations:"+encGame+":"+encCanonical)) - require.True(t, server.Exists("lobby:race_names:canonical_lookup:"+encCanonical)) - require.True(t, server.Exists("lobby:race_names:user_reservations:"+encUser)) - - members, err := server.SMembers("lobby:race_names:user_reservations:" + encUser) - require.NoError(t, err) - require.Contains(t, members, encGame+":"+encCanonical) - - lookupPayload, err := server.Get("lobby:race_names:canonical_lookup:" + encCanonical) - require.NoError(t, err) - var lookup map[string]any - require.NoError(t, json.Unmarshal([]byte(lookupPayload), &lookup)) - assert.Equal(t, ports.KindReservation, lookup["kind"]) - 
assert.Equal(t, userID, lookup["holder_user_id"]) - assert.Equal(t, gameID, lookup["game_id"]) -} - -func TestRaceNameDirectoryCanonicalLookupUpgradesOnPendingAndRegistered(t *testing.T) { - now, _ := fixedNow(t) - directory, server, _ := newRaceNameDirectoryAdapter(t, now) - ctx := context.Background() - - const ( - gameID = "game-upgrade" - userID = "user-upgrade" - raceName = "UpgradePilot" - ) - - require.NoError(t, directory.Reserve(ctx, gameID, userID, raceName)) - - canonical, err := directory.Canonicalize(raceName) - require.NoError(t, err) - lookupKey := "lobby:race_names:canonical_lookup:" + base64URL(canonical) - - lookupAfterReserve, err := server.Get(lookupKey) - require.NoError(t, err) - require.Contains(t, lookupAfterReserve, `"kind":"`+ports.KindReservation+`"`) - - eligibleUntil := now().Add(time.Hour) - require.NoError(t, directory.MarkPendingRegistration(ctx, gameID, userID, raceName, eligibleUntil)) - - lookupAfterPending, err := server.Get(lookupKey) - require.NoError(t, err) - require.Contains(t, lookupAfterPending, `"kind":"`+ports.KindPendingRegistration+`"`) - - require.NoError(t, directory.Register(ctx, gameID, userID, raceName)) - - lookupAfterRegister, err := server.Get(lookupKey) - require.NoError(t, err) - require.Contains(t, lookupAfterRegister, `"kind":"`+ports.KindRegistered+`"`) - require.NotContains(t, lookupAfterRegister, `"game_id"`, "registered lookup omits the game id") -} - -func TestRaceNameDirectoryCanonicalLookupDowngradesOnReleaseCrossGame(t *testing.T) { - directory, server, _ := newRaceNameDirectoryAdapter(t, nil) - ctx := context.Background() - - const ( - gameA = "game-keep-a" - gameB = "game-keep-b" - userID = "user-keep" - raceNam = "KeepPilot" - ) - - require.NoError(t, directory.Reserve(ctx, gameA, userID, raceNam)) - require.NoError(t, directory.Reserve(ctx, gameB, userID, raceNam)) - - canonical, err := directory.Canonicalize(raceNam) - require.NoError(t, err) - lookupKey := "lobby:race_names:canonical_lookup:" 
+ base64URL(canonical) - - require.NoError(t, directory.ReleaseReservation(ctx, gameA, userID, raceNam)) - - payload, err := server.Get(lookupKey) - require.NoError(t, err) - require.Contains(t, payload, `"kind":"`+ports.KindReservation+`"`) - require.Contains(t, payload, `"game_id":"`+gameB+`"`) - - require.NoError(t, directory.ReleaseReservation(ctx, gameB, userID, raceNam)) - require.False(t, server.Exists(lookupKey)) -} - -func TestRaceNameDirectoryReleaseAllByUserLua(t *testing.T) { - now, _ := fixedNow(t) - directory, server, _ := newRaceNameDirectoryAdapter(t, now) - ctx := context.Background() - - const ( - userID = "user-lua" - otherID = "user-lua-other" - raceName = "LuaPilot" - otherRN = "LuaVanguard" - gameA = "game-lua-a" - gameB = "game-lua-b" - ) - - require.NoError(t, directory.Reserve(ctx, gameA, userID, raceName)) - require.NoError(t, directory.MarkPendingRegistration(ctx, gameA, userID, raceName, now().Add(time.Hour))) - require.NoError(t, directory.Register(ctx, gameA, userID, raceName)) - require.NoError(t, directory.Reserve(ctx, gameB, userID, otherRN)) - require.NoError(t, directory.MarkPendingRegistration(ctx, gameB, userID, otherRN, now().Add(2*time.Hour))) - - const isolatedRN = "LuaGoldenChain" - require.NoError(t, directory.Reserve(ctx, gameA, otherID, isolatedRN)) - - require.NoError(t, directory.ReleaseAllByUser(ctx, userID)) - - require.False(t, server.Exists("lobby:race_names:user_registered:"+base64URL(userID))) - require.False(t, server.Exists("lobby:race_names:user_reservations:"+base64URL(userID))) - pendingMembers, err := server.ZMembers("lobby:race_names:pending_index") - if err != nil { - require.ErrorContains(t, err, "ERR no such key") - } else { - require.Empty(t, pendingMembers) - } - - otherCanonical, err := directory.Canonicalize(isolatedRN) - require.NoError(t, err) - require.True(t, server.Exists("lobby:race_names:canonical_lookup:"+base64URL(otherCanonical))) - - reservations, err := directory.ListReservations(ctx, 
otherID) - require.NoError(t, err) - require.Len(t, reservations, 1) -} - -func TestRaceNameDirectoryReleaseAllByUserIsSafeOnEmpty(t *testing.T) { - directory, _, _ := newRaceNameDirectoryAdapter(t, nil) - ctx := context.Background() - - require.NoError(t, directory.ReleaseAllByUser(ctx, "unknown-user")) -} - -func TestRaceNameDirectoryCheckRejectsInvalidName(t *testing.T) { - directory, _, _ := newRaceNameDirectoryAdapter(t, nil) - - _, err := directory.Check(context.Background(), "Pilot Nova", "user-x") - require.Error(t, err) - require.True(t, errors.Is(err, ports.ErrInvalidName)) -} - -func fixedNow(t *testing.T) (func() time.Time, func(delta time.Duration)) { - t.Helper() - - instant := time.Date(2026, 5, 1, 12, 0, 0, 0, time.UTC) - var mu struct { - value time.Time - } - mu.value = instant - return func() time.Time { return mu.value }, - func(delta time.Duration) { mu.value = mu.value.Add(delta) } -} - -// base64URL is the package-level helper defined in gamestore_test.go; -// race-name adapter tests reuse it via the same test package. -var _ = base64.RawURLEncoding diff --git a/lobby/internal/app/bootstrap.go b/lobby/internal/app/bootstrap.go index dded641..9cc38bb 100644 --- a/lobby/internal/app/bootstrap.go +++ b/lobby/internal/app/bootstrap.go @@ -6,28 +6,23 @@ import ( "galaxy/lobby/internal/config" "galaxy/lobby/internal/telemetry" + "galaxy/redisconn" - "github.com/redis/go-redis/extra/redisotel/v9" "github.com/redis/go-redis/v9" ) -// newRedisClient builds a Redis client wired with the configured timeouts -// and TLS settings taken from cfg. +// newRedisClient builds the master Redis client from cfg via the shared +// `pkg/redisconn` helper. Replica clients are not opened in this iteration +// per ARCHITECTURE.md §Persistence Backends; they will be wired when read +// routing is introduced. 
func newRedisClient(cfg config.RedisConfig) *redis.Client { - return redis.NewClient(&redis.Options{ - Addr: cfg.Addr, - Username: cfg.Username, - Password: cfg.Password, - DB: cfg.DB, - TLSConfig: cfg.TLSConfig(), - DialTimeout: cfg.OperationTimeout, - ReadTimeout: cfg.OperationTimeout, - WriteTimeout: cfg.OperationTimeout, - }) + return redisconn.NewMasterClient(cfg.Conn) } // instrumentRedisClient attaches the OpenTelemetry tracing and metrics -// instrumentation to client when telemetryRuntime is available. +// instrumentation to client when telemetryRuntime is available. The actual +// instrumentation lives in `pkg/redisconn` so every Galaxy service shares one +// surface. func instrumentRedisClient(client *redis.Client, telemetryRuntime *telemetry.Runtime) error { if client == nil { return fmt.Errorf("instrument redis client: nil client") @@ -35,37 +30,14 @@ func instrumentRedisClient(client *redis.Client, telemetryRuntime *telemetry.Run if telemetryRuntime == nil { return nil } - - if err := redisotel.InstrumentTracing( - client, - redisotel.WithTracerProvider(telemetryRuntime.TracerProvider()), - redisotel.WithDBStatement(false), - ); err != nil { - return fmt.Errorf("instrument redis client tracing: %w", err) - } - if err := redisotel.InstrumentMetrics( - client, - redisotel.WithMeterProvider(telemetryRuntime.MeterProvider()), - ); err != nil { - return fmt.Errorf("instrument redis client metrics: %w", err) - } - - return nil + return redisconn.Instrument(client, + redisconn.WithTracerProvider(telemetryRuntime.TracerProvider()), + redisconn.WithMeterProvider(telemetryRuntime.MeterProvider()), + ) } -// pingRedis performs a single Redis PING bounded by cfg.OperationTimeout to -// confirm that the configured Redis endpoint is reachable at startup. +// pingRedis performs a single Redis PING bounded by cfg.Conn.OperationTimeout +// to confirm that the configured Redis endpoint is reachable at startup. 
func pingRedis(ctx context.Context, cfg config.RedisConfig, client *redis.Client) error { - if client == nil { - return fmt.Errorf("ping redis: nil client") - } - - pingCtx, cancel := context.WithTimeout(ctx, cfg.OperationTimeout) - defer cancel() - - if err := client.Ping(pingCtx).Err(); err != nil { - return fmt.Errorf("ping redis: %w", err) - } - - return nil + return redisconn.Ping(ctx, client, cfg.Conn.OperationTimeout) } diff --git a/lobby/internal/app/bootstrap_test.go b/lobby/internal/app/bootstrap_test.go index df7b1d1..ba45e7a 100644 --- a/lobby/internal/app/bootstrap_test.go +++ b/lobby/internal/app/bootstrap_test.go @@ -6,20 +6,28 @@ import ( "time" "galaxy/lobby/internal/config" + "galaxy/redisconn" "github.com/alicebob/miniredis/v2" "github.com/stretchr/testify/require" ) +func newTestRedisCfg(addr string) config.RedisConfig { + return config.RedisConfig{ + Conn: redisconn.Config{ + MasterAddr: addr, + Password: "test", + OperationTimeout: time.Second, + }, + } +} + func TestPingRedisSucceedsAgainstMiniredis(t *testing.T) { t.Parallel() server := miniredis.RunT(t) - redisCfg := config.RedisConfig{ - Addr: server.Addr(), - OperationTimeout: time.Second, - } + redisCfg := newTestRedisCfg(server.Addr()) client := newRedisClient(redisCfg) t.Cleanup(func() { _ = client.Close() }) @@ -31,10 +39,7 @@ func TestPingRedisReturnsErrorWhenClosed(t *testing.T) { server := miniredis.RunT(t) - redisCfg := config.RedisConfig{ - Addr: server.Addr(), - OperationTimeout: time.Second, - } + redisCfg := newTestRedisCfg(server.Addr()) client := newRedisClient(redisCfg) require.NoError(t, client.Close()) @@ -45,7 +50,7 @@ func TestPingRedisReturnsErrorWhenClosed(t *testing.T) { func TestPingRedisNilClient(t *testing.T) { t.Parallel() - err := pingRedis(context.Background(), config.RedisConfig{OperationTimeout: time.Second}, nil) + err := pingRedis(context.Background(), newTestRedisCfg("127.0.0.1:0"), nil) require.Error(t, err) require.Contains(t, err.Error(), "nil client") 
} @@ -62,10 +67,7 @@ func TestInstrumentRedisClientNilTelemetryIsNoop(t *testing.T) { t.Parallel() server := miniredis.RunT(t) - client := newRedisClient(config.RedisConfig{ - Addr: server.Addr(), - OperationTimeout: time.Second, - }) + client := newRedisClient(newTestRedisCfg(server.Addr())) t.Cleanup(func() { _ = client.Close() }) require.NoError(t, instrumentRedisClient(client, nil)) diff --git a/lobby/internal/app/runtime.go b/lobby/internal/app/runtime.go index cc9bc84..e9fcf17 100644 --- a/lobby/internal/app/runtime.go +++ b/lobby/internal/app/runtime.go @@ -7,6 +7,7 @@ import ( "log/slog" "time" + "galaxy/lobby/internal/adapters/postgres/migrations" "galaxy/lobby/internal/adapters/redisstate" "galaxy/lobby/internal/api/internalhttp" "galaxy/lobby/internal/api/publichttp" @@ -14,6 +15,7 @@ import ( "galaxy/lobby/internal/domain/game" "galaxy/lobby/internal/ports" "galaxy/lobby/internal/telemetry" + "galaxy/postgres" ) // activeGamesProbe adapts ports.GameStore to telemetry.ActiveGamesProbe by @@ -110,7 +112,31 @@ func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*R return cleanupOnError(fmt.Errorf("new lobby runtime: %w", err)) } - wiring, err := newWiring(cfg, redisClient, time.Now, logger, telemetryRuntime) + pgPool, err := postgres.OpenPrimary(ctx, cfg.Postgres.Conn, + postgres.WithTracerProvider(telemetryRuntime.TracerProvider()), + postgres.WithMeterProvider(telemetryRuntime.MeterProvider()), + ) + if err != nil { + return cleanupOnError(fmt.Errorf("new lobby runtime: open postgres: %w", err)) + } + runtime.cleanupFns = append(runtime.cleanupFns, pgPool.Close) + unregisterPGStats, err := postgres.InstrumentDBStats(pgPool, + postgres.WithMeterProvider(telemetryRuntime.MeterProvider()), + ) + if err != nil { + return cleanupOnError(fmt.Errorf("new lobby runtime: instrument postgres: %w", err)) + } + runtime.cleanupFns = append(runtime.cleanupFns, func() error { + return unregisterPGStats() + }) + if err := postgres.Ping(ctx, 
pgPool, cfg.Postgres.Conn.OperationTimeout); err != nil { + return cleanupOnError(fmt.Errorf("new lobby runtime: ping postgres: %w", err)) + } + if err := postgres.RunMigrations(ctx, pgPool, migrations.FS(), "."); err != nil { + return cleanupOnError(fmt.Errorf("new lobby runtime: run postgres migrations: %w", err)) + } + + wiring, err := newWiring(cfg, redisClient, pgPool, time.Now, logger, telemetryRuntime) if err != nil { return cleanupOnError(fmt.Errorf("new lobby runtime: wiring: %w", err)) } diff --git a/lobby/internal/app/runtime_smoke_test.go b/lobby/internal/app/runtime_smoke_test.go deleted file mode 100644 index 634df23..0000000 --- a/lobby/internal/app/runtime_smoke_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package app - -import ( - "context" - "io" - "log/slog" - "net" - "net/http" - "os" - "testing" - "time" - - "galaxy/lobby/internal/api/internalhttp" - "galaxy/lobby/internal/api/publichttp" - "galaxy/lobby/internal/config" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - testcontainers "github.com/testcontainers/testcontainers-go" - rediscontainer "github.com/testcontainers/testcontainers-go/modules/redis" -) - -const ( - realRuntimeSmokeEnv = "LOBBY_REAL_RUNTIME_SMOKE" - realRuntimeRedisImage = "redis:7" -) - -// TestRealRuntimeCompatibility boots the full Runtime against a real Redis -// container, verifies that both HTTP listeners serve /healthz and /readyz, -// and asserts graceful shutdown on context cancellation. The test is skipped -// unless LOBBY_REAL_RUNTIME_SMOKE=1 because it depends on Docker. 
-func TestRealRuntimeCompatibility(t *testing.T) { - if os.Getenv(realRuntimeSmokeEnv) != "1" { - t.Skipf("set %s=1 to run the real runtime smoke suite", realRuntimeSmokeEnv) - } - - ctx := context.Background() - - redisContainer, err := rediscontainer.Run(ctx, realRuntimeRedisImage) - require.NoError(t, err) - testcontainers.CleanupContainer(t, redisContainer) - - redisAddr, err := redisContainer.Endpoint(ctx, "") - require.NoError(t, err) - - cfg := config.DefaultConfig() - cfg.Redis.Addr = redisAddr - cfg.UserService.BaseURL = "http://127.0.0.1:1" - cfg.GM.BaseURL = "http://127.0.0.1:1" - cfg.PublicHTTP.Addr = mustFreeAddr(t) - cfg.InternalHTTP.Addr = mustFreeAddr(t) - cfg.ShutdownTimeout = 2 * time.Second - cfg.Telemetry.TracesExporter = "none" - cfg.Telemetry.MetricsExporter = "none" - - runtime, err := NewRuntime(context.Background(), cfg, testLogger()) - require.NoError(t, err) - defer func() { - require.NoError(t, runtime.Close()) - }() - - runCtx, cancel := context.WithCancel(context.Background()) - defer cancel() - - runErrCh := make(chan error, 1) - go func() { - runErrCh <- runtime.Run(runCtx) - }() - - client := newTestHTTPClient(t) - - waitForRuntimeReady(t, client, cfg.PublicHTTP.Addr, publichttp.ReadyzPath) - waitForRuntimeReady(t, client, cfg.InternalHTTP.Addr, internalhttp.ReadyzPath) - - assertHTTPStatus(t, client, "http://"+cfg.PublicHTTP.Addr+publichttp.HealthzPath, http.StatusOK) - assertHTTPStatus(t, client, "http://"+cfg.PublicHTTP.Addr+publichttp.ReadyzPath, http.StatusOK) - assertHTTPStatus(t, client, "http://"+cfg.InternalHTTP.Addr+internalhttp.HealthzPath, http.StatusOK) - assertHTTPStatus(t, client, "http://"+cfg.InternalHTTP.Addr+internalhttp.ReadyzPath, http.StatusOK) - - cancel() - waitForRunResult(t, runErrCh, cfg.ShutdownTimeout+2*time.Second) -} - -func testLogger() *slog.Logger { - return slog.New(slog.NewTextHandler(io.Discard, nil)) -} - -func newTestHTTPClient(t *testing.T) *http.Client { - t.Helper() - - transport := 
&http.Transport{DisableKeepAlives: true} - t.Cleanup(transport.CloseIdleConnections) - - return &http.Client{ - Timeout: 500 * time.Millisecond, - Transport: transport, - } -} - -func waitForRuntimeReady(t *testing.T, client *http.Client, addr string, path string) { - t.Helper() - - require.Eventually(t, func() bool { - request, err := http.NewRequest(http.MethodGet, "http://"+addr+path, nil) - if err != nil { - return false - } - - response, err := client.Do(request) - if err != nil { - return false - } - defer response.Body.Close() - _, _ = io.Copy(io.Discard, response.Body) - - return response.StatusCode == http.StatusOK - }, 5*time.Second, 25*time.Millisecond, "lobby runtime did not become reachable on %s", addr) -} - -func waitForRunResult(t *testing.T, runErrCh <-chan error, waitTimeout time.Duration) { - t.Helper() - - var err error - require.Eventually(t, func() bool { - select { - case err = <-runErrCh: - return true - default: - return false - } - }, waitTimeout, 10*time.Millisecond, "lobby runtime did not stop") - require.NoError(t, err) -} - -func assertHTTPStatus(t *testing.T, client *http.Client, target string, want int) { - t.Helper() - - request, err := http.NewRequest(http.MethodGet, target, nil) - require.NoError(t, err) - - response, err := client.Do(request) - require.NoError(t, err) - defer response.Body.Close() - _, _ = io.Copy(io.Discard, response.Body) - - require.Equal(t, want, response.StatusCode) -} - -func mustFreeAddr(t *testing.T) string { - t.Helper() - - listener, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - defer func() { - assert.NoError(t, listener.Close()) - }() - - return listener.Addr().String() -} diff --git a/lobby/internal/app/runtime_test.go b/lobby/internal/app/runtime_test.go deleted file mode 100644 index 3ed7941..0000000 --- a/lobby/internal/app/runtime_test.go +++ /dev/null @@ -1,151 +0,0 @@ -package app - -import ( - "context" - "net" - "net/http" - "testing" - "time" - - 
"galaxy/lobby/internal/api/internalhttp" - "galaxy/lobby/internal/api/publichttp" - "galaxy/lobby/internal/config" - - "github.com/alicebob/miniredis/v2" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// newTestConfig builds a valid Config that listens on ephemeral ports and a -// miniredis instance provided by redisServer. -func newTestConfig(t *testing.T, redisAddr string) config.Config { - t.Helper() - - reserve := func() string { - listener, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - addr := listener.Addr().String() - require.NoError(t, listener.Close()) - return addr - } - - cfg := config.DefaultConfig() - cfg.Redis.Addr = redisAddr - cfg.UserService.BaseURL = "http://127.0.0.1:1" - cfg.GM.BaseURL = "http://127.0.0.1:1" - cfg.PublicHTTP.Addr = reserve() - cfg.InternalHTTP.Addr = reserve() - - return cfg -} - -func TestNewRuntimeValidatesContext(t *testing.T) { - t.Parallel() - - _, err := NewRuntime(nil, config.Config{}, nil) //nolint:staticcheck // test exercises the nil-context guard. 
- require.Error(t, err) - require.Contains(t, err.Error(), "nil context") -} - -func TestNewRuntimeRejectsInvalidConfig(t *testing.T) { - t.Parallel() - - _, err := NewRuntime(context.Background(), config.Config{}, nil) - require.Error(t, err) - require.Contains(t, err.Error(), "new lobby runtime") -} - -func TestNewRuntimeSucceedsWithMiniredis(t *testing.T) { - redisServer := miniredis.RunT(t) - - runtime, err := NewRuntime(context.Background(), newTestConfig(t, redisServer.Addr()), nil) - require.NoError(t, err) - require.NotNil(t, runtime) - t.Cleanup(func() { _ = runtime.Close() }) - - assert.NotNil(t, runtime.PublicServer()) - assert.NotNil(t, runtime.InternalServer()) -} - -func TestNewRuntimeWiresRaceNameDirectory(t *testing.T) { - redisServer := miniredis.RunT(t) - - runtime, err := NewRuntime(context.Background(), newTestConfig(t, redisServer.Addr()), nil) - require.NoError(t, err) - t.Cleanup(func() { _ = runtime.Close() }) - - require.NotNil(t, runtime.wiring) - assert.NotNil(t, runtime.wiring.raceNameDirectory) -} - -func TestNewRuntimeFailsWhenRedisUnreachable(t *testing.T) { - t.Parallel() - - cfg := newTestConfig(t, "127.0.0.1:1") // guaranteed unreachable - cfg.Redis.OperationTimeout = 100 * time.Millisecond - - _, err := NewRuntime(context.Background(), cfg, nil) - require.Error(t, err) - require.Contains(t, err.Error(), "ping redis") -} - -func TestRuntimeCloseIsIdempotent(t *testing.T) { - redisServer := miniredis.RunT(t) - runtime, err := NewRuntime(context.Background(), newTestConfig(t, redisServer.Addr()), nil) - require.NoError(t, err) - - require.NoError(t, runtime.Close()) - require.NoError(t, runtime.Close()) -} - -func TestRuntimeRunServesProbesAndStopsOnCancel(t *testing.T) { - redisServer := miniredis.RunT(t) - cfg := newTestConfig(t, redisServer.Addr()) - - runtime, err := NewRuntime(context.Background(), cfg, nil) - require.NoError(t, err) - t.Cleanup(func() { _ = runtime.Close() }) - - ctx, cancel := 
context.WithCancel(context.Background()) - t.Cleanup(cancel) - - runErr := make(chan error, 1) - go func() { - runErr <- runtime.Run(ctx) - }() - - require.Eventually(t, func() bool { - return runtime.PublicServer().Addr() != "" && runtime.InternalServer().Addr() != "" - }, 2*time.Second, 10*time.Millisecond) - - for _, probe := range []struct { - label string - url string - }{ - {"public healthz", "http://" + runtime.PublicServer().Addr() + publichttp.HealthzPath}, - {"public readyz", "http://" + runtime.PublicServer().Addr() + publichttp.ReadyzPath}, - {"internal healthz", "http://" + runtime.InternalServer().Addr() + internalhttp.HealthzPath}, - {"internal readyz", "http://" + runtime.InternalServer().Addr() + internalhttp.ReadyzPath}, - } { - resp, err := http.Get(probe.url) - require.NoError(t, err, probe.label) - _ = resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode, probe.label) - } - - cancel() - - select { - case err := <-runErr: - require.NoError(t, err) - case <-time.After(3 * time.Second): - t.Fatal("runtime did not stop after cancel") - } -} - -func TestRuntimeRunNilContext(t *testing.T) { - t.Parallel() - - var runtime *Runtime - require.Error(t, runtime.Run(context.Background())) -} diff --git a/lobby/internal/app/wiring.go b/lobby/internal/app/wiring.go index d3fb0cf..641f680 100644 --- a/lobby/internal/app/wiring.go +++ b/lobby/internal/app/wiring.go @@ -1,6 +1,7 @@ package app import ( + "database/sql" "errors" "fmt" "log/slog" @@ -10,6 +11,11 @@ import ( "galaxy/lobby/internal/adapters/idgen" "galaxy/lobby/internal/adapters/metricsintentpub" "galaxy/lobby/internal/adapters/metricsracenamedir" + pgapplicationstore "galaxy/lobby/internal/adapters/postgres/applicationstore" + pggamestore "galaxy/lobby/internal/adapters/postgres/gamestore" + pginvitestore "galaxy/lobby/internal/adapters/postgres/invitestore" + pgmembershipstore "galaxy/lobby/internal/adapters/postgres/membershipstore" + pgracenamedir 
"galaxy/lobby/internal/adapters/postgres/racenamedir" "galaxy/lobby/internal/adapters/racenameintents" "galaxy/lobby/internal/adapters/racenamestub" "galaxy/lobby/internal/adapters/redisstate" @@ -234,6 +240,7 @@ type wiring struct { func newWiring( cfg config.Config, redisClient *redis.Client, + pgPool *sql.DB, clock func() time.Time, logger *slog.Logger, telemetryRuntime *telemetry.Runtime, @@ -249,29 +256,47 @@ func newWiring( logger = slog.Default() } - rawDirectory, err := buildRaceNameDirectory(cfg, redisClient, policy, clock) + if redisClient == nil { + return nil, errors.New("new lobby wiring: nil redis client") + } + if pgPool == nil { + return nil, errors.New("new lobby wiring: nil postgres pool") + } + + rawDirectory, err := buildRaceNameDirectory(cfg, pgPool, policy, clock) if err != nil { return nil, fmt.Errorf("new lobby wiring: %w", err) } directory := metricsracenamedir.New(rawDirectory, telemetryRuntime) - if redisClient == nil { - return nil, errors.New("new lobby wiring: nil redis client") + pgStoreCfg := struct { + DB *sql.DB + OperationTimeout time.Duration + }{ + DB: pgPool, + OperationTimeout: cfg.Postgres.Conn.OperationTimeout, } - - gameStore, err := redisstate.NewGameStore(redisClient) + gameStore, err := pggamestore.New(pggamestore.Config{ + DB: pgStoreCfg.DB, OperationTimeout: pgStoreCfg.OperationTimeout, + }) if err != nil { return nil, fmt.Errorf("new lobby wiring: %w", err) } - applicationStore, err := redisstate.NewApplicationStore(redisClient) + applicationStore, err := pgapplicationstore.New(pgapplicationstore.Config{ + DB: pgStoreCfg.DB, OperationTimeout: pgStoreCfg.OperationTimeout, + }) if err != nil { return nil, fmt.Errorf("new lobby wiring: %w", err) } - inviteStore, err := redisstate.NewInviteStore(redisClient) + inviteStore, err := pginvitestore.New(pginvitestore.Config{ + DB: pgStoreCfg.DB, OperationTimeout: pgStoreCfg.OperationTimeout, + }) if err != nil { return nil, fmt.Errorf("new lobby wiring: %w", err) } - 
membershipStore, err := redisstate.NewMembershipStore(redisClient) + membershipStore, err := pgmembershipstore.New(pgmembershipstore.Config{ + DB: pgStoreCfg.DB, OperationTimeout: pgStoreCfg.OperationTimeout, + }) if err != nil { return nil, fmt.Errorf("new lobby wiring: %w", err) } @@ -763,20 +788,21 @@ func newWiring( // selected by cfg.RaceNameDirectory.Backend. func buildRaceNameDirectory( cfg config.Config, - redisClient *redis.Client, + pgPool *sql.DB, policy *racename.Policy, clock func() time.Time, ) (ports.RaceNameDirectory, error) { switch cfg.RaceNameDirectory.Backend { - case config.RaceNameDirectoryBackendRedis: - if redisClient == nil { - return nil, errors.New("redis race name directory backend requires a Redis client") + case config.RaceNameDirectoryBackendPostgres: + if pgPool == nil { + return nil, errors.New("postgres race name directory backend requires a Postgres pool") } - return redisstate.NewRaceNameDirectory( - redisClient, - policy, - redisstate.WithRaceNameDirectoryClock(clock), - ) + return pgracenamedir.New(pgracenamedir.Config{ + DB: pgPool, + OperationTimeout: cfg.Postgres.Conn.OperationTimeout, + Policy: policy, + Clock: clock, + }) case config.RaceNameDirectoryBackendStub: return racenamestub.NewDirectory(racenamestub.WithClock(clock)) default: diff --git a/lobby/internal/config/config.go b/lobby/internal/config/config.go index b3de522..ff4be98 100644 --- a/lobby/internal/config/config.go +++ b/lobby/internal/config/config.go @@ -3,15 +3,18 @@ package config import ( - "crypto/tls" "fmt" "strings" "time" "galaxy/lobby/internal/telemetry" + "galaxy/postgres" + "galaxy/redisconn" ) const ( + envPrefix = "LOBBY" + shutdownTimeoutEnvVar = "LOBBY_SHUTDOWN_TIMEOUT" logLevelEnvVar = "LOBBY_LOG_LEVEL" @@ -25,13 +28,6 @@ const ( internalHTTPReadTimeoutEnvVar = "LOBBY_INTERNAL_HTTP_READ_TIMEOUT" internalHTTPIdleTimeoutEnvVar = "LOBBY_INTERNAL_HTTP_IDLE_TIMEOUT" - redisAddrEnvVar = "LOBBY_REDIS_ADDR" - redisUsernameEnvVar = 
"LOBBY_REDIS_USERNAME" - redisPasswordEnvVar = "LOBBY_REDIS_PASSWORD" - redisDBEnvVar = "LOBBY_REDIS_DB" - redisTLSEnabledEnvVar = "LOBBY_REDIS_TLS_ENABLED" - redisOperationTimeoutEnvVar = "LOBBY_REDIS_OPERATION_TIMEOUT" - gmEventsStreamEnvVar = "LOBBY_GM_EVENTS_STREAM" gmEventsReadBlockTimeoutEnvVar = "LOBBY_GM_EVENTS_READ_BLOCK_TIMEOUT" userLifecycleStreamEnvVar = "LOBBY_USER_LIFECYCLE_STREAM" @@ -69,8 +65,6 @@ const ( defaultReadHeaderTimeout = 2 * time.Second defaultReadTimeout = 10 * time.Second defaultIdleTimeout = time.Minute - defaultRedisDB = 0 - defaultRedisOperationTimeout = 2 * time.Second defaultGMEventsStream = "gm:lobby_events" defaultGMEventsReadBlockTimeout = 2 * time.Second defaultUserLifecycleStream = "user:lifecycle_events" @@ -86,12 +80,13 @@ const ( defaultRaceNameExpirationInterval = time.Hour defaultOTelServiceName = "galaxy-lobby" - // RaceNameDirectoryBackendRedis selects the Redis-backed Race Name - // Directory adapter. It is the default production backend. - RaceNameDirectoryBackendRedis = "redis" + // RaceNameDirectoryBackendPostgres selects the PostgreSQL-backed + // Race Name Directory adapter. It is the default production backend + // after PG_PLAN.md §6B. + RaceNameDirectoryBackendPostgres = "postgres" // RaceNameDirectoryBackendStub selects the in-process Race Name - // Directory stub used by unit tests that do not need Redis. + // Directory stub used by unit tests that do not need PostgreSQL. RaceNameDirectoryBackendStub = "stub" ) @@ -115,6 +110,10 @@ type Config struct { // consumed by the runnable service skeleton and its future workers. Redis RedisConfig + // Postgres configures the PostgreSQL-backed durable store consumed via + // `pkg/postgres`. + Postgres PostgresConfig + // UserService configures the synchronous User Service eligibility client. UserService UserServiceConfig @@ -143,7 +142,7 @@ type Config struct { // is wired into the runtime. 
type RaceNameDirectoryConfig struct { // Backend selects the Race Name Directory adapter. Accepted values - // are RaceNameDirectoryBackendRedis and RaceNameDirectoryBackendStub. + // are RaceNameDirectoryBackendPostgres and RaceNameDirectoryBackendStub. Backend string } @@ -151,14 +150,14 @@ type RaceNameDirectoryConfig struct { // backend selector. func (cfg RaceNameDirectoryConfig) Validate() error { switch cfg.Backend { - case RaceNameDirectoryBackendRedis, RaceNameDirectoryBackendStub: + case RaceNameDirectoryBackendPostgres, RaceNameDirectoryBackendStub: return nil case "": return fmt.Errorf("race name directory backend must not be empty") default: return fmt.Errorf("race name directory backend %q must be one of %q or %q", cfg.Backend, - RaceNameDirectoryBackendRedis, + RaceNameDirectoryBackendPostgres, RaceNameDirectoryBackendStub) } } @@ -237,26 +236,15 @@ func (cfg InternalHTTPConfig) Validate() error { } } -// RedisConfig configures the shared Redis client and the Redis-owned -// Streams keys consumed by the runnable service skeleton. +// RedisConfig configures the Game Lobby Redis connection topology and the +// Redis Stream names Lobby reads from / writes to. Per-call timeouts and +// connection topology live inside `Conn`. type RedisConfig struct { - // Addr stores the Redis network address. - Addr string - - // Username stores the optional Redis ACL username. - Username string - - // Password stores the optional Redis ACL password. - Password string - - // DB stores the Redis logical database index. - DB int - - // TLSEnabled reports whether TLS must be used for Redis connections. - TLSEnabled bool - - // OperationTimeout bounds one Redis round trip including the startup PING. - OperationTimeout time.Duration + // Conn carries the connection topology (master, replicas, password, db, + // per-call timeout). Loaded via redisconn.LoadFromEnv("LOBBY"); rejects + // the deprecated LOBBY_REDIS_TLS_ENABLED / LOBBY_REDIS_USERNAME env vars + // at startup. 
+ Conn redisconn.Config // GMEventsStream stores the Redis Streams key for Game Master runtime // events consumed by Lobby. @@ -297,27 +285,12 @@ type RedisConfig struct { UserLifecycleReadBlockTimeout time.Duration } -// TLSConfig returns the conservative TLS configuration used by the Redis -// client when TLSEnabled is true. -func (cfg RedisConfig) TLSConfig() *tls.Config { - if !cfg.TLSEnabled { - return nil - } - - return &tls.Config{MinVersion: tls.VersionTLS12} -} - // Validate reports whether cfg stores a usable Redis configuration. func (cfg RedisConfig) Validate() error { + if err := cfg.Conn.Validate(); err != nil { + return err + } switch { - case strings.TrimSpace(cfg.Addr) == "": - return fmt.Errorf("redis addr must not be empty") - case !isTCPAddr(cfg.Addr): - return fmt.Errorf("redis addr %q must use host:port form", cfg.Addr) - case cfg.DB < 0: - return fmt.Errorf("redis db must not be negative") - case cfg.OperationTimeout <= 0: - return fmt.Errorf("redis operation timeout must be positive") case strings.TrimSpace(cfg.GMEventsStream) == "": return fmt.Errorf("redis gm events stream must not be empty") case cfg.GMEventsReadBlockTimeout <= 0: @@ -341,6 +314,19 @@ func (cfg RedisConfig) Validate() error { } } +// PostgresConfig configures the PostgreSQL-backed durable store consumed via +// `pkg/postgres`. Topology and pool tuning live in `Conn`; loaded via +// `postgres.LoadFromEnv("LOBBY")`. +type PostgresConfig struct { + // Conn carries the primary plus replica DSN topology and pool tuning. + Conn postgres.Config +} + +// Validate reports whether cfg stores a usable PostgreSQL configuration. +func (cfg PostgresConfig) Validate() error { + return cfg.Conn.Validate() +} + // UserServiceConfig configures the synchronous User Service eligibility // client used by the application flow. 
type UserServiceConfig struct { @@ -489,8 +475,7 @@ func DefaultConfig() Config { IdleTimeout: defaultIdleTimeout, }, Redis: RedisConfig{ - DB: defaultRedisDB, - OperationTimeout: defaultRedisOperationTimeout, + Conn: redisconn.DefaultConfig(), GMEventsStream: defaultGMEventsStream, GMEventsReadBlockTimeout: defaultGMEventsReadBlockTimeout, RuntimeStartJobsStream: defaultRuntimeStartJobsStream, @@ -501,6 +486,9 @@ func DefaultConfig() Config { UserLifecycleStream: defaultUserLifecycleStream, UserLifecycleReadBlockTimeout: defaultUserLifecycleReadBlockTimeout, }, + Postgres: PostgresConfig{ + Conn: postgres.DefaultConfig(), + }, UserService: UserServiceConfig{ Timeout: defaultUserServiceTimeout, }, @@ -511,7 +499,7 @@ func DefaultConfig() Config { Interval: defaultEnrollmentAutomationInterval, }, RaceNameDirectory: RaceNameDirectoryConfig{ - Backend: RaceNameDirectoryBackendRedis, + Backend: RaceNameDirectoryBackendPostgres, }, PendingRegistration: PendingRegistrationConfig{ Interval: defaultRaceNameExpirationInterval, diff --git a/lobby/internal/config/config_test.go b/lobby/internal/config/config_test.go index 666a7d2..ddd85c5 100644 --- a/lobby/internal/config/config_test.go +++ b/lobby/internal/config/config_test.go @@ -5,10 +5,21 @@ import ( "testing" "time" + "galaxy/postgres" + "galaxy/redisconn" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +const ( + testDSN = "postgres://lobbyservice:lobbyservice@127.0.0.1:5432/galaxy?search_path=lobby&sslmode=disable" + testRedisAddr = "127.0.0.1:6379" + testRedisSecret = "secret" + testUserBaseURL = "http://user.internal:8090" + testGMBaseURL = "http://gm.internal:8091" +) + func TestDefaultConfig(t *testing.T) { t.Parallel() @@ -18,7 +29,8 @@ func TestDefaultConfig(t *testing.T) { assert.Equal(t, "info", cfg.Logging.Level) assert.Equal(t, ":8094", cfg.PublicHTTP.Addr) assert.Equal(t, ":8095", cfg.InternalHTTP.Addr) - assert.Equal(t, 2*time.Second, cfg.Redis.OperationTimeout) + 
assert.Equal(t, redisconn.DefaultOperationTimeout, cfg.Redis.Conn.OperationTimeout) + assert.Equal(t, postgres.DefaultOperationTimeout, cfg.Postgres.Conn.OperationTimeout) assert.Equal(t, "gm:lobby_events", cfg.Redis.GMEventsStream) assert.Equal(t, "runtime:start_jobs", cfg.Redis.RuntimeStartJobsStream) assert.Equal(t, "runtime:stop_jobs", cfg.Redis.RuntimeStopJobsStream) @@ -35,16 +47,20 @@ func TestDefaultConfig(t *testing.T) { func TestLoadFromEnvAppliesRequiredFields(t *testing.T) { clearAllEnv(t) - t.Setenv("LOBBY_REDIS_ADDR", "127.0.0.1:6379") - t.Setenv("LOBBY_USER_SERVICE_BASE_URL", "http://user.internal:8090") - t.Setenv("LOBBY_GM_BASE_URL", "http://gm.internal:8091") + t.Setenv("LOBBY_REDIS_MASTER_ADDR", testRedisAddr) + t.Setenv("LOBBY_REDIS_PASSWORD", testRedisSecret) + t.Setenv("LOBBY_POSTGRES_PRIMARY_DSN", testDSN) + t.Setenv("LOBBY_USER_SERVICE_BASE_URL", testUserBaseURL) + t.Setenv("LOBBY_GM_BASE_URL", testGMBaseURL) cfg, err := LoadFromEnv() require.NoError(t, err) - assert.Equal(t, "127.0.0.1:6379", cfg.Redis.Addr) - assert.Equal(t, "http://user.internal:8090", cfg.UserService.BaseURL) - assert.Equal(t, "http://gm.internal:8091", cfg.GM.BaseURL) + assert.Equal(t, testRedisAddr, cfg.Redis.Conn.MasterAddr) + assert.Equal(t, testRedisSecret, cfg.Redis.Conn.Password) + assert.Equal(t, testDSN, cfg.Postgres.Conn.PrimaryDSN) + assert.Equal(t, testUserBaseURL, cfg.UserService.BaseURL) + assert.Equal(t, testGMBaseURL, cfg.GM.BaseURL) } func TestLoadFromEnvMissingRequiredFields(t *testing.T) { @@ -52,21 +68,48 @@ func TestLoadFromEnvMissingRequiredFields(t *testing.T) { _, err := LoadFromEnv() require.Error(t, err) - require.Contains(t, err.Error(), "redis addr must not be empty") + require.Contains(t, err.Error(), "LOBBY_REDIS_MASTER_ADDR") +} + +func TestLoadFromEnvRejectsDeprecatedRedisVars(t *testing.T) { + tests := []struct { + name string + envName string + }{ + {name: "TLS_ENABLED", envName: "LOBBY_REDIS_TLS_ENABLED"}, + {name: "USERNAME", envName: 
"LOBBY_REDIS_USERNAME"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + clearAllEnv(t) + t.Setenv("LOBBY_REDIS_MASTER_ADDR", testRedisAddr) + t.Setenv("LOBBY_REDIS_PASSWORD", testRedisSecret) + t.Setenv("LOBBY_POSTGRES_PRIMARY_DSN", testDSN) + t.Setenv("LOBBY_USER_SERVICE_BASE_URL", testUserBaseURL) + t.Setenv("LOBBY_GM_BASE_URL", testGMBaseURL) + t.Setenv(tt.envName, "anything") + + _, err := LoadFromEnv() + require.Error(t, err) + require.Contains(t, err.Error(), tt.envName) + }) + } } func TestLoadFromEnvOverrides(t *testing.T) { clearAllEnv(t) - t.Setenv("LOBBY_REDIS_ADDR", "127.0.0.1:6379") - t.Setenv("LOBBY_USER_SERVICE_BASE_URL", "http://user.internal:8090") - t.Setenv("LOBBY_GM_BASE_URL", "http://gm.internal:8091") + t.Setenv("LOBBY_REDIS_MASTER_ADDR", testRedisAddr) + t.Setenv("LOBBY_REDIS_PASSWORD", testRedisSecret) + t.Setenv("LOBBY_POSTGRES_PRIMARY_DSN", testDSN) + t.Setenv("LOBBY_USER_SERVICE_BASE_URL", testUserBaseURL) + t.Setenv("LOBBY_GM_BASE_URL", testGMBaseURL) t.Setenv("LOBBY_SHUTDOWN_TIMEOUT", "12s") t.Setenv("LOBBY_LOG_LEVEL", "debug") t.Setenv("LOBBY_PUBLIC_HTTP_ADDR", "127.0.0.1:9001") t.Setenv("LOBBY_INTERNAL_HTTP_ADDR", "127.0.0.1:9002") t.Setenv("LOBBY_REDIS_DB", "5") - t.Setenv("LOBBY_REDIS_TLS_ENABLED", "true") + t.Setenv("LOBBY_REDIS_OPERATION_TIMEOUT", "300ms") t.Setenv("LOBBY_GM_EVENTS_STREAM", "alt:gm_events") t.Setenv("LOBBY_NOTIFICATION_INTENTS_STREAM", "alt:intents") t.Setenv("LOBBY_ENROLLMENT_AUTOMATION_INTERVAL", "45s") @@ -80,21 +123,22 @@ func TestLoadFromEnvOverrides(t *testing.T) { assert.Equal(t, "debug", cfg.Logging.Level) assert.Equal(t, "127.0.0.1:9001", cfg.PublicHTTP.Addr) assert.Equal(t, "127.0.0.1:9002", cfg.InternalHTTP.Addr) - assert.Equal(t, 5, cfg.Redis.DB) - assert.True(t, cfg.Redis.TLSEnabled) + assert.Equal(t, 5, cfg.Redis.Conn.DB) + assert.Equal(t, 300*time.Millisecond, cfg.Redis.Conn.OperationTimeout) assert.Equal(t, "alt:gm_events", cfg.Redis.GMEventsStream) assert.Equal(t, 
"alt:intents", cfg.Redis.NotificationIntentsStream) assert.Equal(t, 45*time.Second, cfg.EnrollmentAutomation.Interval) assert.Equal(t, 15*time.Minute, cfg.PendingRegistration.Interval) assert.Equal(t, "galaxy-lobby-test", cfg.Telemetry.ServiceName) - assert.NotNil(t, cfg.Redis.TLSConfig()) } func TestLoadFromEnvInvalidDuration(t *testing.T) { clearAllEnv(t) - t.Setenv("LOBBY_REDIS_ADDR", "127.0.0.1:6379") - t.Setenv("LOBBY_USER_SERVICE_BASE_URL", "http://user.internal:8090") - t.Setenv("LOBBY_GM_BASE_URL", "http://gm.internal:8091") + t.Setenv("LOBBY_REDIS_MASTER_ADDR", testRedisAddr) + t.Setenv("LOBBY_REDIS_PASSWORD", testRedisSecret) + t.Setenv("LOBBY_POSTGRES_PRIMARY_DSN", testDSN) + t.Setenv("LOBBY_USER_SERVICE_BASE_URL", testUserBaseURL) + t.Setenv("LOBBY_GM_BASE_URL", testGMBaseURL) t.Setenv("LOBBY_SHUTDOWN_TIMEOUT", "not-a-duration") _, err := LoadFromEnv() @@ -153,7 +197,8 @@ func TestRedisConfigValidate(t *testing.T) { t.Parallel() base := DefaultConfig().Redis - base.Addr = "127.0.0.1:6379" + base.Conn.MasterAddr = testRedisAddr + base.Conn.Password = testRedisSecret require.NoError(t, base.Validate()) tests := []struct { @@ -161,10 +206,10 @@ func TestRedisConfigValidate(t *testing.T) { mutate func(*RedisConfig) wantErr string }{ - {name: "empty addr", mutate: func(cfg *RedisConfig) { cfg.Addr = "" }, wantErr: "addr must not be empty"}, - {name: "bad addr", mutate: func(cfg *RedisConfig) { cfg.Addr = "weird" }, wantErr: "must use host:port"}, - {name: "negative db", mutate: func(cfg *RedisConfig) { cfg.DB = -1 }, wantErr: "must not be negative"}, - {name: "zero op timeout", mutate: func(cfg *RedisConfig) { cfg.OperationTimeout = 0 }, wantErr: "operation timeout"}, + {name: "empty master addr", mutate: func(cfg *RedisConfig) { cfg.Conn.MasterAddr = "" }, wantErr: "master addr"}, + {name: "empty password", mutate: func(cfg *RedisConfig) { cfg.Conn.Password = "" }, wantErr: "password"}, + {name: "negative db", mutate: func(cfg *RedisConfig) { cfg.Conn.DB = 
-1 }, wantErr: "must not be negative"}, + {name: "zero op timeout", mutate: func(cfg *RedisConfig) { cfg.Conn.OperationTimeout = 0 }, wantErr: "operation timeout"}, {name: "empty gm stream", mutate: func(cfg *RedisConfig) { cfg.GMEventsStream = "" }, wantErr: "gm events stream"}, {name: "zero gm block", mutate: func(cfg *RedisConfig) { cfg.GMEventsReadBlockTimeout = 0 }, wantErr: "gm events read block timeout"}, {name: "empty start jobs", mutate: func(cfg *RedisConfig) { cfg.RuntimeStartJobsStream = "" }, wantErr: "runtime start jobs"}, @@ -188,6 +233,18 @@ func TestRedisConfigValidate(t *testing.T) { } } +func TestPostgresConfigValidate(t *testing.T) { + t.Parallel() + + base := DefaultConfig().Postgres + base.Conn.PrimaryDSN = testDSN + require.NoError(t, base.Validate()) + + bad := base + bad.Conn.PrimaryDSN = "" + require.ErrorContains(t, bad.Validate(), "primary DSN") +} + func TestUserServiceConfigValidate(t *testing.T) { t.Parallel() @@ -255,7 +312,9 @@ func TestConfigValidateLogLevel(t *testing.T) { t.Parallel() cfg := DefaultConfig() - cfg.Redis.Addr = "127.0.0.1:6379" + cfg.Redis.Conn.MasterAddr = testRedisAddr + cfg.Redis.Conn.Password = testRedisSecret + cfg.Postgres.Conn.PrimaryDSN = testDSN cfg.UserService.BaseURL = "http://u:1" cfg.GM.BaseURL = "http://gm:1" require.NoError(t, cfg.Validate()) @@ -266,18 +325,6 @@ func TestConfigValidateLogLevel(t *testing.T) { require.Contains(t, err.Error(), "slog level") } -func TestLoadFromEnvBoolParseError(t *testing.T) { - clearAllEnv(t) - t.Setenv("LOBBY_REDIS_ADDR", "127.0.0.1:6379") - t.Setenv("LOBBY_USER_SERVICE_BASE_URL", "http://u:1") - t.Setenv("LOBBY_GM_BASE_URL", "http://gm:1") - t.Setenv("LOBBY_REDIS_TLS_ENABLED", "not-bool") - - _, err := LoadFromEnv() - require.Error(t, err) - require.Contains(t, err.Error(), "LOBBY_REDIS_TLS_ENABLED") -} - // clearAllEnv unsets every environment variable the config package reads so // tests can configure their expected values explicitly. 
func clearAllEnv(t *testing.T) { @@ -294,12 +341,19 @@ func clearAllEnv(t *testing.T) { internalHTTPReadHeaderTimeoutEnvVar, internalHTTPReadTimeoutEnvVar, internalHTTPIdleTimeoutEnvVar, - redisAddrEnvVar, - redisUsernameEnvVar, - redisPasswordEnvVar, - redisDBEnvVar, - redisTLSEnabledEnvVar, - redisOperationTimeoutEnvVar, + "LOBBY_REDIS_MASTER_ADDR", + "LOBBY_REDIS_REPLICA_ADDRS", + "LOBBY_REDIS_PASSWORD", + "LOBBY_REDIS_DB", + "LOBBY_REDIS_OPERATION_TIMEOUT", + "LOBBY_REDIS_TLS_ENABLED", + "LOBBY_REDIS_USERNAME", + "LOBBY_POSTGRES_PRIMARY_DSN", + "LOBBY_POSTGRES_REPLICA_DSNS", + "LOBBY_POSTGRES_OPERATION_TIMEOUT", + "LOBBY_POSTGRES_MAX_OPEN_CONNS", + "LOBBY_POSTGRES_MAX_IDLE_CONNS", + "LOBBY_POSTGRES_CONN_MAX_LIFETIME", gmEventsStreamEnvVar, gmEventsReadBlockTimeoutEnvVar, runtimeStartJobsStreamEnvVar, diff --git a/lobby/internal/config/env.go b/lobby/internal/config/env.go index 308a50c..4fa52b4 100644 --- a/lobby/internal/config/env.go +++ b/lobby/internal/config/env.go @@ -6,6 +6,9 @@ import ( "strconv" "strings" "time" + + "galaxy/postgres" + "galaxy/redisconn" ) // LoadFromEnv builds Config from environment variables and validates the @@ -50,21 +53,18 @@ func LoadFromEnv() (Config, error) { return Config{}, err } - cfg.Redis.Addr = stringEnv(redisAddrEnvVar, cfg.Redis.Addr) - cfg.Redis.Username = stringEnv(redisUsernameEnvVar, cfg.Redis.Username) - cfg.Redis.Password = stringEnv(redisPasswordEnvVar, cfg.Redis.Password) - cfg.Redis.DB, err = intEnv(redisDBEnvVar, cfg.Redis.DB) + redisConn, err := redisconn.LoadFromEnv(envPrefix) if err != nil { return Config{}, err } - cfg.Redis.TLSEnabled, err = boolEnv(redisTLSEnabledEnvVar, cfg.Redis.TLSEnabled) - if err != nil { - return Config{}, err - } - cfg.Redis.OperationTimeout, err = durationEnv(redisOperationTimeoutEnvVar, cfg.Redis.OperationTimeout) + cfg.Redis.Conn = redisConn + + pgConn, err := postgres.LoadFromEnv(envPrefix) if err != nil { return Config{}, err } + cfg.Postgres.Conn = pgConn + 
cfg.Redis.GMEventsStream = stringEnv(gmEventsStreamEnvVar, cfg.Redis.GMEventsStream) cfg.Redis.GMEventsReadBlockTimeout, err = durationEnv(gmEventsReadBlockTimeoutEnvVar, cfg.Redis.GMEventsReadBlockTimeout) if err != nil { diff --git a/lobby/internal/ports/racenamedirtest/suite.go b/lobby/internal/ports/racenamedirtest/suite.go index 9c4caa1..db5ed34 100644 --- a/lobby/internal/ports/racenamedirtest/suite.go +++ b/lobby/internal/ports/racenamedirtest/suite.go @@ -1,7 +1,11 @@ // Package racenamedirtest exposes the shared behavioural test suite that -// every ports.RaceNameDirectory implementation must pass. The Redis +// every ports.RaceNameDirectory implementation must pass. The PostgreSQL // adapter and the in-process stub run the same cases so both back ends // stay behaviourally equivalent. +// +// Subtests run sequentially: the PostgreSQL adapter shares one +// testcontainers instance across the suite and relies on TruncateAll +// between factory invocations, which would race under t.Parallel. 
package racenamedirtest import ( @@ -29,144 +33,111 @@ func Run(t *testing.T, factory Factory) { t.Helper() t.Run("Canonicalize rejects invalid input", func(t *testing.T) { - t.Parallel() testCanonicalizeRejectsInvalid(t, factory) }) t.Run("Canonicalize is deterministic", func(t *testing.T) { - t.Parallel() testCanonicalizeDeterministic(t, factory) }) t.Run("Check empty directory", func(t *testing.T) { - t.Parallel() testCheckEmpty(t, factory) }) t.Run("Check treats actor as own holder", func(t *testing.T) { - t.Parallel() testCheckActorNotTaken(t, factory) }) t.Run("Check exposes holder and kind to other users", func(t *testing.T) { - t.Parallel() testCheckHolderAndKind(t, factory) }) t.Run("Reserve records new holding", func(t *testing.T) { - t.Parallel() testReserveRecords(t, factory) }) t.Run("Reserve idempotent for same holder same game", func(t *testing.T) { - t.Parallel() testReserveIdempotent(t, factory) }) t.Run("Reserve allows same user across games", func(t *testing.T) { - t.Parallel() testReserveCrossGame(t, factory) }) t.Run("Reserve rejects cross-user same game", func(t *testing.T) { - t.Parallel() testReserveCrossUserSameGame(t, factory) }) t.Run("Reserve rejects cross-user different games", func(t *testing.T) { - t.Parallel() testReserveCrossUserDifferentGames(t, factory) }) t.Run("Reserve rejects invalid name", func(t *testing.T) { - t.Parallel() testReserveInvalidName(t, factory) }) t.Run("ReleaseReservation missing", func(t *testing.T) { - t.Parallel() testReleaseReservationMissing(t, factory) }) t.Run("ReleaseReservation wrong holder", func(t *testing.T) { - t.Parallel() testReleaseReservationWrongHolder(t, factory) }) t.Run("ReleaseReservation clears sole binding", func(t *testing.T) { - t.Parallel() testReleaseReservationClears(t, factory) }) t.Run("ReleaseReservation swallows invalid name", func(t *testing.T) { - t.Parallel() testReleaseReservationInvalidName(t, factory) }) t.Run("ReleaseReservation keeps cross-game holding visible", func(t 
*testing.T) { - t.Parallel() testReleaseReservationKeepsCrossGame(t, factory) }) t.Run("MarkPendingRegistration promotes reservation", func(t *testing.T) { - t.Parallel() testMarkPendingPromotes(t, factory) }) t.Run("MarkPendingRegistration idempotent same eligible", func(t *testing.T) { - t.Parallel() testMarkPendingIdempotent(t, factory) }) t.Run("MarkPendingRegistration rejects different eligible", func(t *testing.T) { - t.Parallel() testMarkPendingDifferentEligible(t, factory) }) t.Run("MarkPendingRegistration rejects missing reservation", func(t *testing.T) { - t.Parallel() testMarkPendingMissing(t, factory) }) t.Run("ExpirePendingRegistrations empty", func(t *testing.T) { - t.Parallel() testExpirePendingEmpty(t, factory) }) t.Run("ExpirePendingRegistrations releases expired entries", func(t *testing.T) { - t.Parallel() testExpirePendingReleasesExpired(t, factory) }) t.Run("ExpirePendingRegistrations skips future entries", func(t *testing.T) { - t.Parallel() testExpirePendingSkipsFuture(t, factory) }) t.Run("ExpirePendingRegistrations idempotent replay", func(t *testing.T) { - t.Parallel() testExpirePendingIdempotent(t, factory) }) t.Run("Register converts pending to registered", func(t *testing.T) { - t.Parallel() testRegisterConverts(t, factory) }) t.Run("Register idempotent on repeat", func(t *testing.T) { - t.Parallel() testRegisterIdempotent(t, factory) }) t.Run("Register rejects missing pending", func(t *testing.T) { - t.Parallel() testRegisterMissingPending(t, factory) }) t.Run("Register rejects expired pending", func(t *testing.T) { - t.Parallel() testRegisterExpiredPending(t, factory) }) t.Run("List methods partition correctly", func(t *testing.T) { - t.Parallel() testListsPartition(t, factory) }) t.Run("ReleaseAllByUser clears every kind", func(t *testing.T) { - t.Parallel() testReleaseAllByUserClears(t, factory) }) t.Run("ReleaseAllByUser leaves other users intact", func(t *testing.T) { - t.Parallel() testReleaseAllByUserIsolated(t, factory) }) 
t.Run("ReleaseAllByUser idempotent", func(t *testing.T) { - t.Parallel() testReleaseAllByUserIdempotent(t, factory) }) t.Run("Honors canceled context", func(t *testing.T) { - t.Parallel() testContextCancellation(t, factory) }) } diff --git a/mail/Makefile b/mail/Makefile new file mode 100644 index 0000000..b2187f0 --- /dev/null +++ b/mail/Makefile @@ -0,0 +1,10 @@ +# Makefile for galaxy/mail. +# +# The `jet` target regenerates the go-jet/v2 query-builder code under +# internal/adapters/postgres/jet/ against a transient PostgreSQL container +# brought up by cmd/jetgen. Generated code is committed. + +.PHONY: jet + +jet: + go run ./cmd/jetgen diff --git a/mail/README.md b/mail/README.md index 53afc9c..fc1655d 100644 --- a/mail/README.md +++ b/mail/README.md @@ -50,13 +50,21 @@ Cross-service routing rules: `cmd/mail` starts one internal-only process with: - one trusted internal HTTP listener on `MAIL_INTERNAL_HTTP_ADDR` -- one async command consumer -- one attempt scheduler +- one async command consumer reading from `MAIL_REDIS_COMMAND_STREAM` +- one attempt scheduler driven by Postgres `FOR UPDATE SKIP LOCKED` - one attempt worker pool -- one cleanup worker +- one SQL retention worker The service has no public ingress and no dedicated admin listener. +Persistence split (steady state, see `docs/postgres-migration.md`): + +- PostgreSQL is the source of truth for durable mail state — accepted + deliveries, attempts, dead letters, payload bundles, malformed-command + audit records, and idempotency reservations. +- Redis is the source of truth only for the inbound `mail:delivery_commands` + stream and its persisted consumer offset. 
+ Intentional runtime omissions: - no `/healthz` @@ -65,8 +73,10 @@ Intentional runtime omissions: Operational behavior: -- startup performs bounded Redis connectivity checks and fails fast on invalid - runtime configuration +- startup performs bounded Redis and PostgreSQL connectivity checks and fails + fast on invalid runtime configuration +- embedded goose migrations are applied strictly before any HTTP listener + opens; a migration failure exits with non-zero status - the template catalog is parsed once at startup and kept immutable for the lifetime of the process - template changes require process restart @@ -76,7 +86,9 @@ Operational behavior: Required for all starts: -- `MAIL_REDIS_ADDR` +- `MAIL_REDIS_MASTER_ADDR` +- `MAIL_REDIS_PASSWORD` +- `MAIL_POSTGRES_PRIMARY_DSN` Primary configuration groups: @@ -88,13 +100,21 @@ Primary configuration groups: - `MAIL_INTERNAL_HTTP_READ_HEADER_TIMEOUT` - `MAIL_INTERNAL_HTTP_READ_TIMEOUT` - `MAIL_INTERNAL_HTTP_IDLE_TIMEOUT` -- Redis connectivity: - - `MAIL_REDIS_USERNAME` +- Redis connectivity (`pkg/redisconn` shape): + - `MAIL_REDIS_MASTER_ADDR` + - `MAIL_REDIS_REPLICA_ADDRS` (comma-separated, optional) - `MAIL_REDIS_PASSWORD` - `MAIL_REDIS_DB` - - `MAIL_REDIS_TLS_ENABLED` - `MAIL_REDIS_OPERATION_TIMEOUT` - `MAIL_REDIS_COMMAND_STREAM` +- PostgreSQL connectivity (`pkg/postgres` shape): + - `MAIL_POSTGRES_PRIMARY_DSN` + - `MAIL_POSTGRES_REPLICA_DSNS` (comma-separated, optional; reserved for + future read routing) + - `MAIL_POSTGRES_OPERATION_TIMEOUT` + - `MAIL_POSTGRES_MAX_OPEN_CONNS` + - `MAIL_POSTGRES_MAX_IDLE_CONNS` + - `MAIL_POSTGRES_CONN_MAX_LIFETIME` - SMTP provider: - `MAIL_SMTP_MODE=stub|smtp` - `MAIL_SMTP_ADDR` @@ -110,6 +130,11 @@ Primary configuration groups: - `MAIL_ATTEMPT_WORKER_CONCURRENCY` - `MAIL_STREAM_BLOCK_TIMEOUT` - `MAIL_OPERATOR_REQUEST_TIMEOUT` + - `MAIL_IDEMPOTENCY_TTL` +- SQL retention worker: + - `MAIL_DELIVERY_RETENTION` (default `30d`) + - `MAIL_MALFORMED_COMMAND_RETENTION` (default `90d`) + 
- `MAIL_CLEANUP_INTERVAL` (default `1h`) - OpenTelemetry: - `OTEL_SERVICE_NAME` - `OTEL_TRACES_EXPORTER` @@ -125,26 +150,27 @@ Defaults worth knowing: - `MAIL_INTERNAL_HTTP_ADDR=:8080` - `MAIL_SMTP_MODE=stub` - `MAIL_SMTP_TIMEOUT=15s` - -Additional SMTP note: - -- `MAIL_SMTP_INSECURE_SKIP_VERIFY=false` by default and is intended only for - local self-signed SMTP capture or similar non-production environments - `MAIL_TEMPLATE_DIR=templates` - `MAIL_ATTEMPT_WORKER_CONCURRENCY=4` - `MAIL_STREAM_BLOCK_TIMEOUT=2s` - `MAIL_OPERATOR_REQUEST_TIMEOUT=5s` - `MAIL_SHUTDOWN_TIMEOUT=5s` +- `MAIL_IDEMPOTENCY_TTL=168h` (`7d`) +- `MAIL_DELIVERY_RETENTION=720h` (`30d`) +- `MAIL_MALFORMED_COMMAND_RETENTION=2160h` (`90d`) +- `MAIL_CLEANUP_INTERVAL=1h` -Current implementation caveats: +Additional SMTP note: -- `MAIL_REDIS_COMMAND_STREAM` is effective for the async command consumer -- `MAIL_REDIS_ATTEMPT_SCHEDULE_KEY` and `MAIL_REDIS_DEAD_LETTER_PREFIX` are - parsed but the Redis adapters still use the fixed keys - `mail:attempt_schedule` and `mail:dead_letters:` -- `MAIL_IDEMPOTENCY_TTL`, `MAIL_DELIVERY_TTL`, and `MAIL_ATTEMPT_TTL` are - parsed but the Redis adapters still enforce fixed retentions of `7d`, `30d`, - and `90d` +- `MAIL_SMTP_INSECURE_SKIP_VERIFY=false` by default and is intended only for + local self-signed SMTP capture or similar non-production environments + +Retired (Stage 4 of `PG_PLAN.md`): `MAIL_REDIS_ADDR`, `MAIL_REDIS_USERNAME`, +`MAIL_REDIS_TLS_ENABLED`, `MAIL_REDIS_ATTEMPT_SCHEDULE_KEY`, +`MAIL_REDIS_DEAD_LETTER_PREFIX`, `MAIL_DELIVERY_TTL`, `MAIL_ATTEMPT_TTL`. +The new connection envelope is supplied by `pkg/redisconn` and `pkg/postgres`, +and durable retention is enforced by the SQL retention worker against the +PostgreSQL-backed source of truth (see `docs/postgres-migration.md`). 
## Stable Input Contracts @@ -370,47 +396,48 @@ Rendering rules: - missing required variables and template lookup failures are classified into stable render-failure codes -## Redis Logical Model +## Persistence Layout -Primary keys: +PostgreSQL `mail` schema (source of truth — see +[`docs/postgres-migration.md`](docs/postgres-migration.md)): -- `mail:deliveries:` -- `mail:attempts::` -- `mail:idempotency::` -- `mail:dead_letters:` -- `mail:delivery_payloads:` -- `mail:malformed_commands:` -- `mail:stream_offsets:` +- `deliveries(delivery_id PK, source, status, payload_mode, …, + idempotency_key, request_fingerprint, idempotency_expires_at, + attempt_count, next_attempt_at, created_at, updated_at, …)` with + `UNIQUE (source, idempotency_key)` and a partial scheduler index on + `next_attempt_at` +- `delivery_recipients(delivery_id FK, kind, position, email)` with + `kind ∈ {'to','cc','bcc','reply_to'}` and an `email` index that excludes + `reply_to` +- `attempts(delivery_id FK, attempt_no, status, scheduled_for, started_at, + finished_at, provider_classification, provider_summary)`, + `PRIMARY KEY (delivery_id, attempt_no)` +- `dead_letters(delivery_id PK FK, final_attempt_no, failure_classification, + provider_summary, recovery_hint, created_at)` +- `delivery_payloads(delivery_id PK FK, payload jsonb)` for raw attachment + bundles +- `malformed_commands(stream_entry_id PK, delivery_id, source, + idempotency_key, failure_code, failure_message, raw_fields jsonb, + recorded_at)` -Scheduling and ingress keys: +Redis surface (intake stream + offset only): -- `mail:delivery_commands` -- `mail:attempt_schedule` - -Operator indexes: - -- `mail:idx:recipient:` -- `mail:idx:status:` -- `mail:idx:source:` -- `mail:idx:template:` -- `mail:idx:idempotency::` -- `mail:idx:created_at` -- `mail:idx:malformed_command:created_at` +- `mail:delivery_commands` — async ingress Redis Stream +- `mail:stream_offsets:` — persisted consumer offset for the + intake stream Storage rules: -- 
dynamic Redis key segments are base64url-encoded -- durable records are stored as strict JSON blobs -- timestamps are stored in Unix milliseconds -- raw attachment payloads are separated from audit metadata +- timestamps are stored as PostgreSQL `timestamptz` and normalised to UTC + at the adapter boundary - malformed async commands are stored idempotently by `stream_entry_id` - -Current fixed retentions: - -- idempotency: `7d` -- deliveries and payload audit: `30d` -- attempts and dead letters: `90d` -- malformed commands: `90d` +- the `idempotency_expires_at` column is set per acceptance from + `MAIL_IDEMPOTENCY_TTL` (default `7d`); resends store an empty fingerprint + and a synthetic far-future expiry that the read helper treats as + non-idempotent +- the SQL retention worker periodically deletes deliveries older than + `MAIL_DELIVERY_RETENTION` (cascade) and malformed commands older than + `MAIL_MALFORMED_COMMAND_RETENTION` ## Provider, Retry, and Failure Policy diff --git a/mail/cmd/jetgen/main.go b/mail/cmd/jetgen/main.go new file mode 100644 index 0000000..efcbfb5 --- /dev/null +++ b/mail/cmd/jetgen/main.go @@ -0,0 +1,236 @@ +// Command jetgen regenerates the go-jet/v2 query-builder code under +// galaxy/mail/internal/adapters/postgres/jet/ against a transient PostgreSQL +// instance. +// +// The program is intended to be invoked as `go run ./cmd/jetgen` (or via the +// `make jet` Makefile target) from within `galaxy/mail`. It is not part of +// the runtime binary. +// +// Steps: +// +// 1. start a postgres:16-alpine container via testcontainers-go +// 2. open it through pkg/postgres as the superuser +// 3. CREATE ROLE mailservice and CREATE SCHEMA "mail" AUTHORIZATION +// mailservice +// 4. open a second pool as mailservice with search_path=mail and apply the +// embedded goose migrations +// 5. 
run jet's PostgreSQL generator against schema=mail, writing into +// ../internal/adapters/postgres/jet +package main + +import ( + "context" + "errors" + "fmt" + "log" + "net/url" + "os" + "path/filepath" + "runtime" + "time" + + "galaxy/mail/internal/adapters/postgres/migrations" + "galaxy/postgres" + + jetpostgres "github.com/go-jet/jet/v2/generator/postgres" + testcontainers "github.com/testcontainers/testcontainers-go" + tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres" + "github.com/testcontainers/testcontainers-go/wait" +) + +const ( + postgresImage = "postgres:16-alpine" + superuserName = "galaxy" + superuserPassword = "galaxy" + superuserDatabase = "galaxy_mail" + serviceRole = "mailservice" + servicePassword = "mailservice" + serviceSchema = "mail" + containerStartup = 90 * time.Second + defaultOpTimeout = 10 * time.Second + jetOutputDirSuffix = "internal/adapters/postgres/jet" +) + +func main() { + if err := run(context.Background()); err != nil { + log.Fatalf("jetgen: %v", err) + } +} + +func run(ctx context.Context) error { + outputDir, err := jetOutputDir() + if err != nil { + return err + } + + container, err := tcpostgres.Run(ctx, postgresImage, + tcpostgres.WithDatabase(superuserDatabase), + tcpostgres.WithUsername(superuserName), + tcpostgres.WithPassword(superuserPassword), + testcontainers.WithWaitStrategy( + wait.ForLog("database system is ready to accept connections"). + WithOccurrence(2). 
+ WithStartupTimeout(containerStartup), + ), + ) + if err != nil { + return fmt.Errorf("start postgres container: %w", err) + } + defer func() { + if termErr := testcontainers.TerminateContainer(container); termErr != nil { + log.Printf("jetgen: terminate container: %v", termErr) + } + }() + + baseDSN, err := container.ConnectionString(ctx, "sslmode=disable") + if err != nil { + return fmt.Errorf("resolve container dsn: %w", err) + } + + if err := provisionRoleAndSchema(ctx, baseDSN); err != nil { + return err + } + + scopedDSN, err := dsnForServiceRole(baseDSN) + if err != nil { + return err + } + if err := applyMigrations(ctx, scopedDSN); err != nil { + return err + } + + if err := os.RemoveAll(outputDir); err != nil { + return fmt.Errorf("remove existing jet output %q: %w", outputDir, err) + } + if err := os.MkdirAll(filepath.Dir(outputDir), 0o755); err != nil { + return fmt.Errorf("ensure jet output parent: %w", err) + } + + jetCfg := postgres.DefaultConfig() + jetCfg.PrimaryDSN = scopedDSN + jetCfg.OperationTimeout = defaultOpTimeout + jetDB, err := postgres.OpenPrimary(ctx, jetCfg) + if err != nil { + return fmt.Errorf("open scoped pool for jet generation: %w", err) + } + defer func() { _ = jetDB.Close() }() + + if err := jetpostgres.GenerateDB(jetDB, serviceSchema, outputDir); err != nil { + return fmt.Errorf("jet generate: %w", err) + } + + log.Printf("jetgen: generated jet code into %s (schema=%s)", outputDir, serviceSchema) + return nil +} + +func provisionRoleAndSchema(ctx context.Context, baseDSN string) error { + cfg := postgres.DefaultConfig() + cfg.PrimaryDSN = baseDSN + cfg.OperationTimeout = defaultOpTimeout + db, err := postgres.OpenPrimary(ctx, cfg) + if err != nil { + return fmt.Errorf("open admin pool: %w", err) + } + defer func() { _ = db.Close() }() + + statements := []string{ + fmt.Sprintf(`DO $$ BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = %s) THEN + CREATE ROLE %s LOGIN PASSWORD %s; + END IF; + END $$;`, 
sqlLiteral(serviceRole), sqlIdentifier(serviceRole), sqlLiteral(servicePassword)), + fmt.Sprintf(`CREATE SCHEMA IF NOT EXISTS %s AUTHORIZATION %s;`, + sqlIdentifier(serviceSchema), sqlIdentifier(serviceRole)), + fmt.Sprintf(`GRANT USAGE ON SCHEMA %s TO %s;`, + sqlIdentifier(serviceSchema), sqlIdentifier(serviceRole)), + } + for _, statement := range statements { + if _, err := db.ExecContext(ctx, statement); err != nil { + return fmt.Errorf("provision %q/%q: %w", serviceSchema, serviceRole, err) + } + } + return nil +} + +func dsnForServiceRole(baseDSN string) (string, error) { + parsed, err := url.Parse(baseDSN) + if err != nil { + return "", fmt.Errorf("parse base dsn: %w", err) + } + values := url.Values{} + values.Set("search_path", serviceSchema) + values.Set("sslmode", "disable") + scoped := url.URL{ + Scheme: parsed.Scheme, + User: url.UserPassword(serviceRole, servicePassword), + Host: parsed.Host, + Path: parsed.Path, + RawQuery: values.Encode(), + } + return scoped.String(), nil +} + +func applyMigrations(ctx context.Context, dsn string) error { + cfg := postgres.DefaultConfig() + cfg.PrimaryDSN = dsn + cfg.OperationTimeout = defaultOpTimeout + db, err := postgres.OpenPrimary(ctx, cfg) + if err != nil { + return fmt.Errorf("open scoped pool: %w", err) + } + defer func() { _ = db.Close() }() + + if err := postgres.Ping(ctx, db, defaultOpTimeout); err != nil { + return err + } + if err := postgres.RunMigrations(ctx, db, migrations.FS(), "."); err != nil { + return fmt.Errorf("run migrations: %w", err) + } + return nil +} + +// jetOutputDir returns the absolute path that jet should write into. We rely +// on the runtime caller info to anchor it to galaxy/mail regardless of the +// invoking working directory. 
+func jetOutputDir() (string, error) { + _, file, _, ok := runtime.Caller(0) + if !ok { + return "", errors.New("resolve runtime caller for jet output path") + } + dir := filepath.Dir(file) + // dir = .../galaxy/mail/cmd/jetgen + moduleRoot := filepath.Clean(filepath.Join(dir, "..", "..")) + return filepath.Join(moduleRoot, jetOutputDirSuffix), nil +} + +func sqlIdentifier(name string) string { + return `"` + escapeDoubleQuotes(name) + `"` +} + +func sqlLiteral(value string) string { + return "'" + escapeSingleQuotes(value) + "'" +} + +func escapeDoubleQuotes(value string) string { + out := make([]byte, 0, len(value)) + for index := 0; index < len(value); index++ { + if value[index] == '"' { + out = append(out, '"', '"') + continue + } + out = append(out, value[index]) + } + return string(out) +} + +func escapeSingleQuotes(value string) string { + out := make([]byte, 0, len(value)) + for index := 0; index < len(value); index++ { + if value[index] == '\'' { + out = append(out, '\'', '\'') + continue + } + out = append(out, value[index]) + } + return string(out) +} diff --git a/mail/docs/README.md b/mail/docs/README.md index 0db6456..af709d5 100644 --- a/mail/docs/README.md +++ b/mail/docs/README.md @@ -9,6 +9,7 @@ Sections: - [Main flows](flows.md) - [Configuration and contract examples](examples.md) - [Operator runbook](runbook.md) +- [PostgreSQL migration decisions (Stage 4 of `PG_PLAN.md`)](postgres-migration.md) Primary references: diff --git a/mail/docs/examples.md b/mail/docs/examples.md index d9a489c..7b706e5 100644 --- a/mail/docs/examples.md +++ b/mail/docs/examples.md @@ -8,7 +8,9 @@ unless explicitly stated otherwise. 
Minimal local runtime with stub provider: ```dotenv -MAIL_REDIS_ADDR=127.0.0.1:6379 +MAIL_REDIS_MASTER_ADDR=127.0.0.1:6379 +MAIL_REDIS_PASSWORD=local +MAIL_POSTGRES_PRIMARY_DSN=postgres://mailservice:mailservice@127.0.0.1:5432/galaxy?search_path=mail&sslmode=disable MAIL_INTERNAL_HTTP_ADDR=:8080 MAIL_TEMPLATE_DIR=templates MAIL_SMTP_MODE=stub @@ -20,7 +22,9 @@ OTEL_METRICS_EXPORTER=none SMTP-backed shape: ```dotenv -MAIL_REDIS_ADDR=127.0.0.1:6379 +MAIL_REDIS_MASTER_ADDR=127.0.0.1:6379 +MAIL_REDIS_PASSWORD=local +MAIL_POSTGRES_PRIMARY_DSN=postgres://mailservice:mailservice@127.0.0.1:5432/galaxy?search_path=mail&sslmode=disable MAIL_INTERNAL_HTTP_ADDR=:8080 MAIL_TEMPLATE_DIR=templates diff --git a/mail/docs/flows.md b/mail/docs/flows.md index 3cd9c07..3adf273 100644 --- a/mail/docs/flows.md +++ b/mail/docs/flows.md @@ -6,22 +6,22 @@ sequenceDiagram participant Auth as Auth / Session Service participant Mail as Mail Service - participant Redis + participant Postgres participant Scheduler participant SMTP as Provider Auth->>Mail: POST /api/v1/internal/login-code-deliveries + Idempotency-Key Mail->>Mail: validate request and idempotency scope alt MAIL_SMTP_MODE = stub - Mail->>Redis: persist delivery as suppressed + Mail->>Postgres: persist delivery as suppressed Mail-->>Auth: 200 {outcome=suppressed} else MAIL_SMTP_MODE = smtp - Mail->>Redis: persist delivery as queued + attempt #1 scheduled + Mail->>Postgres: persist delivery as queued + attempt #1 scheduled Mail-->>Auth: 200 {outcome=sent} - Scheduler->>Redis: claim due attempt + Scheduler->>Postgres: claim due attempt (FOR UPDATE SKIP LOCKED) Scheduler->>SMTP: send rendered auth mail SMTP-->>Scheduler: accepted or classified failure - Scheduler->>Redis: commit sent / retry / failed / dead_letter + Scheduler->>Postgres: commit sent / retry / failed / dead_letter end ``` @@ -36,16 +36,17 @@ sequenceDiagram participant Stream as Redis Stream mail:delivery_commands participant Consumer as Command consumer participant 
Mail as Mail Service + participant Postgres participant Redis Notify->>Stream: XADD generic command Consumer->>Stream: XREAD from last stored offset Consumer->>Mail: decode and validate command alt malformed or conflicting command - Mail->>Redis: record malformed command entry + Mail->>Postgres: record malformed command entry Consumer->>Redis: save stream offset else valid command - Mail->>Redis: persist delivery + first attempt + optional payload bundle + Mail->>Postgres: persist delivery + first attempt + optional payload bundle Consumer->>Redis: save stream offset end ``` @@ -55,29 +56,29 @@ sequenceDiagram ```mermaid sequenceDiagram participant Scheduler - participant Redis + participant Postgres participant Worker as Attempt worker participant SMTP as Provider - Scheduler->>Redis: find next due delivery - Scheduler->>Redis: load work item + Scheduler->>Postgres: find next due delivery (next_attempt_at <= now) + Scheduler->>Postgres: load work item (delivery + active attempt) alt template delivery not yet rendered - Scheduler->>Redis: render and store materialized content + Scheduler->>Postgres: render and store materialized content end - Scheduler->>Redis: claim scheduled attempt + Scheduler->>Postgres: claim scheduled attempt (FOR UPDATE SKIP LOCKED) Scheduler->>Worker: enqueue claimed work Worker->>SMTP: send materialized message SMTP-->>Worker: accepted / suppressed / transient_failure / permanent_failure alt accepted - Worker->>Redis: commit sent + provider_accepted + Worker->>Postgres: commit sent + provider_accepted else suppressed - Worker->>Redis: commit suppressed + provider_rejected + Worker->>Postgres: commit suppressed + provider_rejected else transient failure before retry budget ends - Worker->>Redis: commit transport_failed|timed_out + next scheduled attempt + Worker->>Postgres: commit transport_failed|timed_out + next scheduled attempt else retry budget exhausted - Worker->>Redis: commit dead_letter + dead-letter entry + Worker->>Postgres: 
commit dead_letter + dead-letter entry else permanent failure - Worker->>Redis: commit failed + provider_rejected + Worker->>Postgres: commit failed + provider_rejected end ``` @@ -87,12 +88,12 @@ sequenceDiagram sequenceDiagram participant Ops as Trusted operator participant Mail as Mail Service - participant Redis + participant Postgres Ops->>Mail: POST /api/v1/internal/deliveries/{delivery_id}/resend - Mail->>Redis: load original delivery and optional payload bundle + Mail->>Postgres: load original delivery and optional payload bundle Mail->>Mail: verify original status is terminal - Mail->>Redis: create clone delivery with source=operator_resend + Mail->>Postgres: create clone delivery with source=operator_resend Mail-->>Ops: 200 {delivery_id=<new_delivery_id>} ``` diff --git a/mail/docs/postgres-migration.md b/mail/docs/postgres-migration.md new file mode 100644 index 0000000..904bf0d --- /dev/null +++ b/mail/docs/postgres-migration.md @@ -0,0 +1,236 @@ +# PostgreSQL Migration + +PG_PLAN.md §4 migrated `galaxy/mail` from a Redis-only durable store to the +steady-state split codified in `ARCHITECTURE.md §Persistence Backends`: +PostgreSQL is the source of truth for table-shaped business state, and Redis +keeps only the inbound `mail:delivery_commands` stream and its persisted +consumer offset. + +This document records the schema decisions and the non-obvious agreements +behind them. Use it together with the migration script +(`internal/adapters/postgres/migrations/00001_init.sql`) and the runtime +wiring (`internal/app/runtime.go`). + +## Outcomes + +- Schema `mail` (provisioned externally) holds the durable state: + `deliveries`, `delivery_recipients`, `attempts`, `dead_letters`, + `delivery_payloads`, `malformed_commands`. +- The runtime opens one PostgreSQL pool via `pkg/postgres.OpenPrimary`, + applies embedded goose migrations strictly before any HTTP listener + becomes ready, and exits non-zero when migration or ping fails.
+- The runtime opens one shared `*redis.Client` via + `pkg/redisconn.NewMasterClient` and passes it to the command consumer and + the stream offset store; both stores no longer hold their own connection + topology fields. +- The Redis adapter package (`internal/adapters/redisstate/`) is reduced to + the surviving `StreamOffsetStore` plus a slim `Keyspace` exposing only + `StreamOffset(stream)` and `DeliveryCommands()`. The Lua-backed atomic + writer, the secondary index keys, the recipient/template/status indexes, + the idempotency keyspace, and the per-record TTL constants are gone. +- Configuration drops `MAIL_REDIS_USERNAME`, `MAIL_REDIS_TLS_ENABLED`, + `MAIL_REDIS_ATTEMPT_SCHEDULE_KEY`, `MAIL_REDIS_DEAD_LETTER_PREFIX`, + `MAIL_DELIVERY_TTL`, and `MAIL_ATTEMPT_TTL`. `MAIL_REDIS_ADDR` becomes + `MAIL_REDIS_MASTER_ADDR` + optional `MAIL_REDIS_REPLICA_ADDRS`. + PostgreSQL-specific knobs live under `MAIL_POSTGRES_*`. New retention + knobs (`MAIL_DELIVERY_RETENTION`, `MAIL_MALFORMED_COMMAND_RETENTION`, + `MAIL_CLEANUP_INTERVAL`) drive a periodic SQL retention worker. + +## Decisions + +### 1. One schema, externally-provisioned role + +**Decision.** The `mail` schema and the matching `mailservice` role are +created outside the migration sequence (in tests, by +`integration/internal/harness/postgres_container.go::EnsureRoleAndSchema`; +in production, by an ops init script not in scope for this stage). The +embedded migration `00001_init.sql` only contains DDL for tables and +indexes and assumes it runs as the schema owner with `search_path=mail`. + +**Why.** Mixing role creation, schema creation, and table DDL into one +script forces every consumer of the migration to run as a superuser. The +schema-per-service architectural rule +(`ARCHITECTURE.md §Persistence Backends`) lines up neatly with the +operational split: ops provisions roles and schemas, the service applies +schema-scoped migrations. + +### 2. 
Idempotency record IS the deliveries row + +**Decision.** The deliveries table carries `source`, +`idempotency_key`, `request_fingerprint`, and `idempotency_expires_at` +columns and a `UNIQUE (source, idempotency_key)` constraint. Acceptance +flows insert the row directly; a duplicate request races on the UNIQUE +constraint and surfaces as `acceptauthdelivery.ErrConflict` / +`acceptgenericdelivery.ErrConflict`. There is no separate idempotency +table. + +**Why.** PG_PLAN.md §3 fixed this rule for every PG-backed service. With +the reservation living on the durable record, recovery is a single fact +("the row either exists or it does not"); no Redis-loss window can make a +duplicate sneak through. Resend deliveries store an empty +`request_fingerprint` and a synthetic far-future `idempotency_expires_at`; +the read helper treats those rows as non-idempotent so future operator +queries cannot mistake a clone for a hit. + +### 3. Recipients live in a normalised side table + +**Decision.** A `delivery_recipients(delivery_id, kind, position, email)` +table stores envelope addresses with a `kind` CHECK constraint +(`'to'|'cc'|'bcc'|'reply_to'`) and an `email` index that excludes +`reply_to`. The deliveries row does not embed envelope JSON. + +**Why.** PG_PLAN.md §4 prescribed `INDEX on … recipient as needed`. A +normalised table makes future recipient-filtered listing slot in without +schema work and lets the existing operator listing implement the +recipient filter as `delivery_id IN (SELECT … FROM delivery_recipients +WHERE … lower(email) = lower($1))`. The Redis adapter previously +maintained one index key per recipient — the same observable behaviour +now comes for free from the PostgreSQL row layout plus a single index. + +### 4. Timestamps are uniformly `timestamptz` and always UTC at the boundary + +**Decision.** Every time-valued column on every Stage 4 table uses +PostgreSQL's `timestamptz`. 
The domain model continues to use +`time.Time` / `*time.Time`; the adapter normalises every `time.Time` +parameter to UTC at the binding site (`record.X.UTC()` or the +`nullableTime` helper that wraps `*time.Time`), and re-wraps every +scanned `time.Time` with `.UTC()` (directly or via `timeFromNullable`) +before it leaves the adapter. The architecture-wide form of this rule +lives in `ARCHITECTURE.md §Persistence Backends → Timestamp handling`. + +**Why.** PG_PLAN.md §4 originally specified mixed naming +(`timestamptz` on deliveries, `bigint` epoch-ms on attempts/dead_letters/ +malformed_commands). User Service Stage 3 already uses `timestamptz` for +every table and the runtime contract tests expect Go-level `time.Time` +semantics throughout. Keeping the same shape across services reduces +adapter-layer complexity and avoids two parallel encoding paths in the +mailstore. The deviation from the literal plan is intentional and is +documented here. The defensive UTC rule on both sides eliminates the +class of bug where the pgx driver returns scanned values in +`time.Local`, which silently breaks equality tests, JSON formatting, +and comparison against pointer fields. + +### 5. Attempt scheduler reads via `FOR UPDATE SKIP LOCKED` + +**Decision.** The attempt scheduler uses two indexed predicates: + +- `SELECT delivery_id FROM deliveries WHERE next_attempt_at IS NOT NULL + AND next_attempt_at <= $now ORDER BY next_attempt_at ASC LIMIT $n` to + surface due deliveries (partial index `deliveries_due_idx`). +- `SELECT … FROM deliveries WHERE delivery_id = $id AND status IN + ('queued','rendered') AND next_attempt_at IS NOT NULL AND next_attempt_at + <= $now FOR UPDATE SKIP LOCKED` inside the claim transaction. 
+ +The `next_attempt_at` column is maintained explicitly: acceptance and +attempt-commit transactions write it from the active scheduled attempt; +claim sets it to NULL (the row is `sending` and the row stops being a +scheduling candidate); a recovery commit re-populates it for the next +attempt. + +**Why.** `FOR UPDATE SKIP LOCKED` lets multiple scheduler instances run +concurrently without serialising work on a single sorted set. Maintaining +`next_attempt_at` in lockstep with the active attempt keeps the partial +index small and avoids reading attempt rows during the hot-path schedule +query. The previous Redis ZSET sort key was implicit; the SQL column is +explicit, which removes a class of "the index is out of sync with the +record" bugs that Lua-coordinated mutations made possible. + +### 6. Recovery uses the most-recent attempt by exact `attempt_no` + +**Decision.** `LoadWorkItem(deliveryID)` reads the delivery row and then +the attempt row whose `attempt_no = delivery.attempt_count`. Concurrent +commits that update the count and insert a new attempt are tolerated: +the load lookup uses an exact key and never observes a partial state. + +**Why.** A naive `ORDER BY attempt_no DESC LIMIT 1` racing against a +commit that already wrote the next attempt but had not yet committed +the parent delivery row could observe `attempt_no=count+1` while the +delivery still reports `count`. Keying the read by the count +deterministically returns the delivery's view of its own active attempt +even under concurrent worker progress. + +### 7. Periodic SQL retention replaces Redis index cleanup + +**Decision.** A new `worker.SQLRetentionWorker` runs the two DELETE +statements driven by config: + +- `DELETE FROM deliveries WHERE created_at < now() - $delivery_retention` + cascades to `attempts`, `dead_letters`, `delivery_payloads`, and + `delivery_recipients` via `ON DELETE CASCADE`. 
+- `DELETE FROM malformed_commands WHERE recorded_at < now() - $malformed_retention` + is a standalone retention pass. + +Three new env vars (`MAIL_DELIVERY_RETENTION`, `MAIL_MALFORMED_COMMAND_RETENTION`, +`MAIL_CLEANUP_INTERVAL`) drive the worker. `MAIL_IDEMPOTENCY_TTL` survives +unchanged: it controls the per-acceptance `idempotency_expires_at` column +the service layer materialises on each row. + +**Why.** PostgreSQL maintains its own indexes; the previous +`redisstate.IndexCleaner` had nothing to do once secondary index keys +were gone. A per-table retention worker is the simplest model that keeps +the mail database from accumulating audit history forever, while leaving +the per-acceptance idempotency window controlled by its existing knob. + +### 8. Shared Redis client with consumer-driven shutdown + +**Decision.** `internal/app/runtime.go` constructs one +`redisconn.NewMasterClient(cfg.Redis.Conn)` and passes it to both the +stream offset store and the command consumer. The consumer's `Shutdown` +closes the shared client to break the in-flight blocking `XREAD`; the +runtime's cleanup function tolerates `redis.ErrClosed` so a double-close +is benign. + +**Why.** Each subsequent PG_PLAN stage (Notification, Lobby) ships a +similar pattern; sharing one client is the shape we want all stages to +converge on. The dedicated client for the consumer was an artefact of +the Redis-only architecture and multiplied TCP connections, ping points, +and OpenTelemetry instrumentation hooks for no functional benefit. + +### 9. Query layer is `go-jet/jet/v2` + +**Decision.** All `mailstore` packages build SQL through the jet +builder API (`pgtable.
<Table>.INSERT/SELECT/UPDATE/DELETE` plus the +`pg.AND/OR/SET/IN/...` DSL). `cmd/jetgen` (invoked via `make jet`) +brings up a transient PostgreSQL container, applies the embedded +migrations, and runs +`github.com/go-jet/jet/v2/generator/postgres.GenerateDB` against the +provisioned schema; the generated table/model code lives under +`internal/adapters/postgres/jet/mail/{model,table}/*.go` and is +committed to the repo, so build consumers do not need Docker. +Statements are run through the `database/sql` API +(`stmt.Sql() → db/tx.Exec/Query/QueryRow`); manual scanners preserve +the codecs.go boundary translations and domain-type mapping. + +**Why.** Aligns with `PG_PLAN.md` §Library stack ("Query layer: +`github.com/go-jet/jet/v2` (PostgreSQL dialect). Generated code lives +under each service `internal/adapters/postgres/jet/`, regenerated via +a `make jet` target and committed to the repo"). Constructs the jet +builder does not cover natively (`FOR UPDATE`, `FOR UPDATE SKIP +LOCKED`, keyset-pagination row-comparison, JSONB params, +`LOWER(...)` on subselects) are expressed through the per-DSL helpers +(`.FOR(pg.UPDATE())`, `.FOR(pg.UPDATE().SKIP_LOCKED())`, `pg.LOWER`, +`OR/AND` expansion of cursor predicates). + +## Cross-References + +- `PG_PLAN.md §4` (Stage 4 — Mail Service migration). +- `ARCHITECTURE.md §Persistence Backends`. +- `internal/adapters/postgres/migrations/00001_init.sql` and + `internal/adapters/postgres/migrations/migrations.go`. +- `internal/adapters/postgres/mailstore/{store,deliveries, + auth_acceptance,generic_acceptance,render,operator, + attempt_execution,malformed_command,codecs,helpers}.go` plus the + testcontainers-backed unit suite under + `mailstore/{harness,store}_test.go`. +- `internal/adapters/postgres/jet/mail/{model,table}/*.go` (committed + generated code) plus `cmd/jetgen/main.go` and the `make jet` + Makefile target that regenerate it.
+- `internal/config/{config,env,validation}.go` (PostgresConfig + the + `redisconn.Config`-shaped Redis envelope). +- `internal/app/{runtime,bootstrap}.go` (shared Redis client + PG pool + open + migration + mailstore wiring). +- `internal/worker/sqlretention.go` (periodic SQL retention worker). +- `internal/adapters/redisstate/{keyspace,offset_codec,stream_offset_store}.go` + (surviving slim Redis surface). +- `integration/internal/harness/mailservice.go` (per-suite Postgres + container + `mail`/`mailservice` provisioning). diff --git a/mail/docs/runbook.md b/mail/docs/runbook.md index 8ec4166..2dc3e82 100644 --- a/mail/docs/runbook.md +++ b/mail/docs/runbook.md @@ -7,21 +7,25 @@ verification, shutdown, and common `Mail Service` incidents. Before starting the process, confirm: -- `MAIL_REDIS_ADDR` points to the Redis deployment that stores deliveries, - attempts, idempotency reservations, malformed commands, and stream offsets -- the configured Redis ACL, DB, TLS, and timeout settings match the target - environment +- `MAIL_REDIS_MASTER_ADDR` and `MAIL_REDIS_PASSWORD` point to the Redis + deployment that hosts the inbound `mail:delivery_commands` Stream and the + persisted consumer offset +- `MAIL_POSTGRES_PRIMARY_DSN` points to the PostgreSQL deployment whose + `mail` schema (provisioned externally for the `mailservice` role) holds the + durable mail state — deliveries, attempts, dead letters, payloads, + idempotency reservations, malformed commands - `MAIL_TEMPLATE_DIR` points to the intended immutable template catalog - if `MAIL_SMTP_MODE=smtp`, the SMTP address, sender identity, and optional credentials are configured together - the OpenTelemetry exporter settings point at the intended collector when traces or metrics are expected outside the process -At startup the process performs bounded `PING` checks for both Redis clients -used by the runtime and parses the full template catalog. 
+At startup the process pings the shared Redis master client, opens the +PostgreSQL pool, applies embedded goose migrations strictly before any HTTP +listener opens, parses the full template catalog, and only then starts the +internal HTTP listener and background workers. -Startup fails fast if those checks fail or if the template catalog cannot be -loaded. +Startup fails fast if any of those steps fail. Known startup caveats: @@ -36,11 +40,13 @@ Known startup caveats: Practical readiness verification is: 1. confirm the process emitted startup logs for the internal HTTP listener, - command consumer, scheduler, and worker pool + command consumer, scheduler, attempt worker pool, and SQL retention + worker 2. open a TCP connection to `MAIL_INTERNAL_HTTP_ADDR` 3. issue one trusted smoke request such as `GET /api/v1/internal/deliveries/does-not-exist` -4. verify Redis connectivity and OpenTelemetry exporter health out of band +4. verify Redis and PostgreSQL connectivity, plus OpenTelemetry exporter + health, out of band Expected steady-state signals: @@ -58,14 +64,15 @@ Shutdown behavior: - coordinated shutdown is bounded by `MAIL_SHUTDOWN_TIMEOUT` - the internal HTTP listener is stopped before process resources are closed -- Redis clients are closed after the app stops +- the Redis master client and PostgreSQL pool are closed after the app stops - OpenTelemetry providers are flushed during runtime cleanup During a planned restart: 1. send `SIGTERM` 2. wait for listener and worker shutdown logs -3. restart the process with the same Redis and template configuration +3. restart the process with the same Redis, PostgreSQL, and template + configuration 4. repeat the steady-state verification steps ## Incident Triage @@ -81,7 +88,9 @@ Symptoms: Checks: 1. confirm the scheduler is still logging regular activity -2. confirm Redis connectivity and latency for attempt-schedule keys +2. 
confirm PostgreSQL connectivity and latency on the `deliveries` + `(next_attempt_at)` partial index — scheduler claims rely on + `FOR UPDATE SKIP LOCKED`, so contention here surfaces as backlog 3. confirm attempt workers are running and not blocked on SMTP 4. inspect `mail.provider.send.duration_ms` for elevated latency 5. verify `MAIL_ATTEMPT_WORKER_CONCURRENCY` is appropriate for the workload diff --git a/mail/docs/runtime.md b/mail/docs/runtime.md index 57c47bb..1ea5885 100644 --- a/mail/docs/runtime.md +++ b/mail/docs/runtime.md @@ -104,17 +104,21 @@ configuration or unavailable Redis. - processes only already claimed work items - concurrency is controlled by `MAIL_ATTEMPT_WORKER_CONCURRENCY` -### Cleanup worker +### SQL retention worker -- removes stale delivery-index members after primary delivery expiry -- does not clean `mail:attempt_schedule` -- does not clean malformed-command index entries +- periodically deletes expired `deliveries` rows whose retention window has + elapsed; cascades to `attempts`, `dead_letters`, `delivery_payloads`, and + `delivery_recipients` +- periodically deletes expired `malformed_commands` rows +- runs an immediate first pass at startup, then on `MAIL_CLEANUP_INTERVAL` ## Configuration Groups Required for all starts: -- `MAIL_REDIS_ADDR` +- `MAIL_REDIS_MASTER_ADDR` +- `MAIL_REDIS_PASSWORD` +- `MAIL_POSTGRES_PRIMARY_DSN` Core process config: @@ -128,16 +132,23 @@ Internal HTTP config: - `MAIL_INTERNAL_HTTP_READ_TIMEOUT` - `MAIL_INTERNAL_HTTP_IDLE_TIMEOUT` -Redis connectivity: +Redis connectivity (`pkg/redisconn` shape): -- `MAIL_REDIS_USERNAME` +- `MAIL_REDIS_MASTER_ADDR` +- `MAIL_REDIS_REPLICA_ADDRS` - `MAIL_REDIS_PASSWORD` - `MAIL_REDIS_DB` -- `MAIL_REDIS_TLS_ENABLED` - `MAIL_REDIS_OPERATION_TIMEOUT` - `MAIL_REDIS_COMMAND_STREAM` -- `MAIL_REDIS_ATTEMPT_SCHEDULE_KEY` -- `MAIL_REDIS_DEAD_LETTER_PREFIX` + +PostgreSQL connectivity (`pkg/postgres` shape): + +- `MAIL_POSTGRES_PRIMARY_DSN` +- `MAIL_POSTGRES_REPLICA_DSNS` +- 
`MAIL_POSTGRES_OPERATION_TIMEOUT` +- `MAIL_POSTGRES_MAX_OPEN_CONNS` +- `MAIL_POSTGRES_MAX_IDLE_CONNS` +- `MAIL_POSTGRES_CONN_MAX_LIFETIME` SMTP provider: @@ -157,8 +168,9 @@ Templates and workers: - `MAIL_STREAM_BLOCK_TIMEOUT` - `MAIL_OPERATOR_REQUEST_TIMEOUT` - `MAIL_IDEMPOTENCY_TTL` -- `MAIL_DELIVERY_TTL` -- `MAIL_ATTEMPT_TTL` +- `MAIL_DELIVERY_RETENTION` +- `MAIL_MALFORMED_COMMAND_RETENTION` +- `MAIL_CLEANUP_INTERVAL` Telemetry: @@ -174,13 +186,11 @@ Telemetry: ## Runtime Notes - `MAIL_REDIS_COMMAND_STREAM` is the only Redis key override that currently - changes runtime behavior + changes runtime behavior; durable mail state otherwise lives in PostgreSQL - `MAIL_SMTP_INSECURE_SKIP_VERIFY` is a local-development escape hatch for self-signed SMTP capture only and should remain disabled in production -- attempt-schedule and dead-letter key overrides are parsed but not yet wired - into Redis adapters -- retention overrides are parsed but storage still uses the fixed `7d`, `30d`, - and `90d` values +- the SQL retention worker is the only periodic durable cleanup; PostgreSQL + indexes are maintained by the engine - template catalog parsing is eager and immutable - auth deliveries in `MAIL_SMTP_MODE=stub` surface as `suppressed` - auth deliveries in `MAIL_SMTP_MODE=smtp` surface as `queued` and later move diff --git a/mail/go.mod b/mail/go.mod index 7e813ed..14f3133 100644 --- a/mail/go.mod +++ b/mail/go.mod @@ -3,13 +3,17 @@ module galaxy/mail go 1.26.1 require ( + galaxy/postgres v0.0.0-00010101000000-000000000000 + galaxy/redisconn v0.0.0-00010101000000-000000000000 github.com/alicebob/miniredis/v2 v2.37.0 github.com/getkin/kin-openapi v0.135.0 + github.com/go-jet/jet/v2 v2.14.1 github.com/google/uuid v1.6.0 - github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 + github.com/jackc/pgx/v5 v5.9.2 github.com/redis/go-redis/v9 v9.18.0 github.com/stretchr/testify v1.11.1 github.com/testcontainers/testcontainers-go v0.42.0 + 
github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0 github.com/testcontainers/testcontainers-go/modules/redis v0.42.0 github.com/wneessen/go-mail v0.7.2 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 @@ -32,6 +36,7 @@ require ( dario.cat/mergo v1.0.2 // indirect github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/XSAM/otelsql v0.42.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -43,7 +48,7 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/go-connections v0.6.0 // indirect + github.com/docker/go-connections v0.7.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/ebitengine/purego v0.10.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -53,17 +58,26 @@ require ( github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.14.3 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.3 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/pgtype v1.14.4 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/klauspost/compress v1.18.5 // indirect - github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/lib/pq v1.10.9 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect 
github.com/magiconair/properties v1.8.10 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mdelapenya/tlscert v0.2.0 // indirect + github.com/mfridman/interpolate v0.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/go-archive v0.2.0 // indirect - github.com/moby/moby/api v1.54.1 // indirect - github.com/moby/moby/client v0.4.0 // indirect + github.com/moby/moby/api v1.54.2 // indirect + github.com/moby/moby/client v0.4.1 // indirect github.com/moby/patternmatcher v0.6.1 // indirect github.com/moby/sys/sequential v0.6.0 // indirect github.com/moby/sys/user v0.4.0 // indirect @@ -77,7 +91,10 @@ require ( github.com/perimeterx/marshmallow v1.1.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/pressly/goose/v3 v3.27.1 // indirect github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 // indirect + github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 // indirect + github.com/sethvargo/go-retry v0.3.0 // indirect github.com/shirou/gopsutil/v4 v4.26.3 // indirect github.com/sirupsen/logrus v1.9.4 // indirect github.com/tklauser/go-sysconf v0.3.16 // indirect @@ -90,11 +107,17 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect go.opentelemetry.io/proto/otlp v1.10.0 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/crypto v0.49.0 // indirect - golang.org/x/net v0.52.0 // indirect - golang.org/x/sys v0.42.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.50.0 // indirect + golang.org/x/net v0.53.0 // indirect + golang.org/x/sync v0.20.0 // indirect + golang.org/x/sys v0.43.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect + google.golang.org/genproto/googleapis/rpc 
v0.0.0-20260420184626-e10c466a9529 // indirect google.golang.org/grpc v1.80.0 // indirect google.golang.org/protobuf v1.36.11 // indirect ) + +replace galaxy/postgres => ../pkg/postgres + +replace galaxy/redisconn => ../pkg/redisconn diff --git a/mail/go.sum b/mail/go.sum index e7f565b..0ffa8a0 100644 --- a/mail/go.sum +++ b/mail/go.sum @@ -4,8 +4,12 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8af github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/XSAM/otelsql v0.42.0 h1:Li0xF4eJUxG2e0x3D4rvRlys1f27yJKvjTh7ljkUP5o= +github.com/XSAM/otelsql v0.42.0/go.mod h1:4mOrEv+cS1KmKzrvTktvJnstr5GtKSAK+QHvFR9OcpI= github.com/alicebob/miniredis/v2 v2.37.0 h1:RheObYW32G1aiJIj81XVt78ZHJpHonHLHW7OLIshq68= github.com/alicebob/miniredis/v2 v2.37.0/go.mod h1:TcL7YfarKPGDAthEtl5NBeHZfeUQj6OXMm/+iu5cLMM= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= @@ -18,6 +22,7 @@ github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1x github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cockroachdb/apd 
v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= @@ -26,26 +31,37 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod 
h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= -github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-connections v0.7.0 h1:6SsRfJddP22WMrCkj19x9WKjEDTB+ahsdiGYf0mN39c= +github.com/docker/go-connections v0.7.0/go.mod h1:no1qkHdjq7kLMGUXYAduOhYPSJxxvgWBh7ogVvptn3Q= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU= github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/getkin/kin-openapi v0.135.0 h1:751SjYfbiwqukYuVjwYEIKNfrSwS5YpA7DZnKSwQgtg= github.com/getkin/kin-openapi v0.135.0/go.mod h1:6dd5FJl6RdX4usBtFBaQhk9q62Yb2J0Mk5IhUO/QqFI= +github.com/go-jet/jet/v2 v2.14.1 h1:wsfD9e7CGP9h46+IFNlftfncBcmVnKddikbTtapQM3M= +github.com/go-jet/jet/v2 v2.14.1/go.mod h1:dqTAECV2Mo3S2NFjbm4vJ1aDruZjhaJ1RAAR8rGUkkc= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= 
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -57,43 +73,123 @@ github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1 github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs= github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 
v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod 
h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.14.4 h1:fKuNiCumbKTAIxQwXfB/nsrnkEI6bPJrrSiMKgbJ2j8= +github.com/jackc/pgtype v1.14.4/go.mod h1:aKeozOde08iifGosdJpz9MBZonJOUJxqNpPBcMJTlVA= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 
v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= +github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA= +github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= +github.com/jackc/pgx/v5 v5.9.2 h1:3ZhOzMWnR4yJ+RW1XImIPsD1aNSz4T4fyP7zlQb56hw= +github.com/jackc/pgx/v5 v5.9.2/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE= github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ= github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= 
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.1/go.mod 
h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.21 h1:xYae+lCNBP7QuW4PUnNG61ffM4hVIfm+zUzDuSzYLGs= +github.com/mattn/go-isatty v0.0.21/go.mod h1:ZXfXG4SQHsB/w3ZeOYbR0PrPwLy+n6xiMrJlRFqopa4= github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI= github.com/mdelapenya/tlscert v0.2.0/go.mod h1:O4njj3ELLnJjGdkN7M/vIVCpZ+Cf0L6muqOG4tLSl8o= +github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY= +github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/go-archive v0.2.0 h1:zg5QDUM2mi0JIM9fdQZWC7U8+2ZfixfTYoHL7rWUcP8= github.com/moby/go-archive v0.2.0/go.mod h1:mNeivT14o8xU+5q1YnNrkQVpK+dnNe/K6fHqnTg4qPU= -github.com/moby/moby/api v1.54.1 h1:TqVzuJkOLsgLDDwNLmYqACUuTehOHRGKiPhvH8V3Nn4= -github.com/moby/moby/api v1.54.1/go.mod h1:+RQ6wluLwtYaTd1WnPLykIDPekkuyD/ROWQClE83pzs= -github.com/moby/moby/client v0.4.0 h1:S+2XegzHQrrvTCvF6s5HFzcrywWQmuVnhOXe2kiWjIw= -github.com/moby/moby/client v0.4.0/go.mod h1:QWPbvWchQbxBNdaLSpoKpCdf5E+WxFAgNHogCWDoa7g= +github.com/moby/moby/api v1.54.2 h1:wiat9QAhnDQjA7wk1kh/TqHz2I1uUA7M7t9SAl/JNXg= +github.com/moby/moby/api v1.54.2/go.mod h1:+RQ6wluLwtYaTd1WnPLykIDPekkuyD/ROWQClE83pzs= +github.com/moby/moby/client v0.4.1 h1:DMQgisVoMkmMs7fp3ROSdiBnoAu8+vo3GggFl06M/wY= +github.com/moby/moby/client v0.4.1/go.mod h1:z52C9O2POPOsnxZAy//WtKcQ32P+jT/NGeXu/7nfjGQ= 
github.com/moby/patternmatcher v0.6.1 h1:qlhtafmr6kgMIJjKJMDmMWq7WLkKIo23hsrpR3x084U= github.com/moby/patternmatcher v0.6.1/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= @@ -106,6 +202,8 @@ github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= +github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/oasdiff/yaml v0.0.9 h1:zQOvd2UKoozsSsAknnWoDJlSK4lC0mpmjfDsfqNwX48= github.com/oasdiff/yaml v0.0.9/go.mod h1:8lvhgJG4xiKPj3HN5lDow4jZHPlx1i7dIwzkdAo6oAM= github.com/oasdiff/yaml3 v0.0.9 h1:rWPrKccrdUm8J0F3sGuU+fuh9+1K/RdJlWF7O/9yw2g= @@ -116,28 +214,60 @@ github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJw github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 
h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pressly/goose/v3 v3.27.1 h1:6uEvcprBybDmW4hcz3gYujhARhye+GoWKhEWyzD5sh4= +github.com/pressly/goose/v3 v3.27.1/go.mod h1:maruOxsPnIG2yHHyo8UqKWXYKFcH7Q76csUV7+7KYoM= github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 h1:QY4nmPHLFAJjtT5O4OMUEOxP8WVaRNOFpcbmxT2NLZU= github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0/go.mod h1:WH8cY/0fT41Bsf341qzo8v4nx0GCE8FykAA23IVbVmo= github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 h1:2dKdoEYBJ0CZCLPiCdvvc7luz3DPwY6hKdzjL6m1eHE= github.com/redis/go-redis/extra/redisotel/v9 v9.18.0/go.mod h1:WzkrVG9ro9BwCQD0eJOWn6AGL4Z1CleGflM45w1hu10= github.com/redis/go-redis/v9 v9.18.0 h1:pMkxYPkEbMPwRdenAzUNyFNrDgHx9U+DrBabWNfSRQs= github.com/redis/go-redis/v9 v9.18.0/go.mod h1:k3ufPphLU5YXwNTUcCRXGxUoF1fqxnhFQmscfkCoDA0= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE= +github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas= 
github.com/shirou/gopsutil/v4 v4.26.3 h1:2ESdQt90yU3oXF/CdOlRCJxrP+Am1aBYubTMTfxJ1qc= github.com/shirou/gopsutil/v4 v4.26.3/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.3 h1:jmXUvGomnU1o3W/V5h2VEradbpJDwGrzugQQvL0POH4= github.com/stretchr/objx v0.5.3/go.mod h1:rDQraq+vQZU7Fde9LOZLr8Tax6zZvy4kuNKF+QYS+U0= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/testcontainers/testcontainers-go v0.42.0 h1:He3IhTzTZOygSXLJPMX7n44XtK+qhjat1nI9cneBbUY= github.com/testcontainers/testcontainers-go v0.42.0/go.mod h1:vZjdY1YmUA1qEForxOIOazfsrdyORJAbhi0bp8plN30= +github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0 h1:GCbb1ndrF7OTDiIvxXyItaDab4qkzTFJ48LKFdM7EIo= +github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0/go.mod h1:IRPBaI8jXdrNfD0e4Zm7Fbcgaz5shKxOQv4axiL09xs= github.com/testcontainers/testcontainers-go/modules/redis v0.42.0 h1:id/6LH8ZeDrtAUVSuNvZUAJ1kVpb82y1pr9yweAWsRg= github.com/testcontainers/testcontainers-go/modules/redis v0.42.0/go.mod h1:uF0jI8FITagQpBNOgweGBmPf6rP4K0SeL1XFPbsZSSY= github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA= @@ -150,12 +280,14 @@ github.com/wneessen/go-mail v0.7.2 h1:xxPnhZ6IZLSgxShebmZ6DPKh1b6OJcoHfzy7UjOkzS github.com/wneessen/go-mail v0.7.2/go.mod h1:+TkW6QP3EVkgTEqHtVmnAE/1MRhmzb8Y9/W3pweuS+k= github.com/woodsbury/decimal128 v1.3.0 h1:8pffMNWIlC0O5vbyHWFZAt5yWvWcrHA+3ovIIjVWss0= github.com/woodsbury/decimal128 v1.3.0/go.mod h1:C5UTmyTjW3JftjUFzOVhC20BEQa2a4ZKOB5I6Zjb+ds= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod 
h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 h1:CqXxU8VOmDefoh0+ztfGaymYbhdB/tT3zs79QaZTNGY= @@ -186,40 +318,146 @@ go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09 go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g= go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= -golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= -golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= -golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod 
h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= +golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= 
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA= +golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod 
h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= -golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= -golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU= -golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A= +golang.org/x/sys 
v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY= +golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text 
v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA= google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 h1:m8qni9SQFH0tJc1X0vmnpw/0t+AImlSvp30sEupozUg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 h1:XF8+t6QQiS0o9ArVan/HW8Q7cycNPGsJf6GA2nXxYAg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM= google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +modernc.org/libc v1.72.1 h1:db1xwJ6u1kE3KHTFTTbe2GCrczHPKzlURP0aDC4NGD0= +modernc.org/libc v1.72.1/go.mod h1:HRMiC/PhPGLIPM7GzAFCbI+oSgE3dhZ8FWftmRrHVlY= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= +modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/sqlite v1.49.1 h1:dYGHTKcX1sJ+EQDnUzvz4TJ5GbuvhNJa8Fg6ElGx73U= +modernc.org/sqlite v1.49.1/go.mod h1:m0w8xhwYUVY3H6pSDwc3gkJ/irZT/0YEXwBlhaxQEew= pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= diff --git a/mail/internal/adapters/postgres/jet/mail/model/attempts.go b/mail/internal/adapters/postgres/jet/mail/model/attempts.go new file mode 100644 index 0000000..e20fd52 --- /dev/null +++ b/mail/internal/adapters/postgres/jet/mail/model/attempts.go @@ -0,0 +1,23 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type Attempts struct { + DeliveryID string `sql:"primary_key"` + AttemptNo int32 `sql:"primary_key"` + Status string + ScheduledFor time.Time + StartedAt *time.Time + FinishedAt *time.Time + ProviderClassification string + ProviderSummary string +} diff --git a/mail/internal/adapters/postgres/jet/mail/model/dead_letters.go b/mail/internal/adapters/postgres/jet/mail/model/dead_letters.go new file mode 100644 index 0000000..270eb64 --- /dev/null +++ b/mail/internal/adapters/postgres/jet/mail/model/dead_letters.go @@ -0,0 +1,21 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type DeadLetters struct { + DeliveryID string `sql:"primary_key"` + FinalAttemptNo int32 + FailureClassification string + ProviderSummary string + RecoveryHint string + CreatedAt time.Time +} diff --git a/mail/internal/adapters/postgres/jet/mail/model/deliveries.go b/mail/internal/adapters/postgres/jet/mail/model/deliveries.go new file mode 100644 index 0000000..c303bf4 --- /dev/null +++ b/mail/internal/adapters/postgres/jet/mail/model/deliveries.go @@ -0,0 +1,41 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type Deliveries struct { + DeliveryID string `sql:"primary_key"` + ResendParentDeliveryID string + Source string + Status string + PayloadMode string + TemplateID string + Locale string + LocaleFallbackUsed bool + TemplateVariables *string + Attachments *string + Subject string + TextBody string + HTMLBody string + IdempotencyKey string + RequestFingerprint string + IdempotencyExpiresAt time.Time + AttemptCount int32 + LastAttemptStatus string + ProviderSummary string + NextAttemptAt *time.Time + CreatedAt time.Time + UpdatedAt time.Time + SentAt *time.Time + SuppressedAt *time.Time + FailedAt *time.Time + DeadLetteredAt *time.Time +} diff --git a/mail/internal/adapters/postgres/jet/mail/model/delivery_payloads.go b/mail/internal/adapters/postgres/jet/mail/model/delivery_payloads.go new file mode 100644 index 0000000..c3ee61c --- /dev/null +++ b/mail/internal/adapters/postgres/jet/mail/model/delivery_payloads.go @@ -0,0 +1,13 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +type DeliveryPayloads struct { + DeliveryID string `sql:"primary_key"` + Payload string +} diff --git a/mail/internal/adapters/postgres/jet/mail/model/delivery_recipients.go b/mail/internal/adapters/postgres/jet/mail/model/delivery_recipients.go new file mode 100644 index 0000000..2f4cf74 --- /dev/null +++ b/mail/internal/adapters/postgres/jet/mail/model/delivery_recipients.go @@ -0,0 +1,15 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +type DeliveryRecipients struct { + DeliveryID string `sql:"primary_key"` + Kind string `sql:"primary_key"` + Position int32 `sql:"primary_key"` + Email string +} diff --git a/mail/internal/adapters/postgres/jet/mail/model/goose_db_version.go b/mail/internal/adapters/postgres/jet/mail/model/goose_db_version.go new file mode 100644 index 0000000..c7f68e8 --- /dev/null +++ b/mail/internal/adapters/postgres/jet/mail/model/goose_db_version.go @@ -0,0 +1,19 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type GooseDbVersion struct { + ID int32 `sql:"primary_key"` + VersionID int64 + IsApplied bool + Tstamp time.Time +} diff --git a/mail/internal/adapters/postgres/jet/mail/model/malformed_commands.go b/mail/internal/adapters/postgres/jet/mail/model/malformed_commands.go new file mode 100644 index 0000000..f4be065 --- /dev/null +++ b/mail/internal/adapters/postgres/jet/mail/model/malformed_commands.go @@ -0,0 +1,23 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type MalformedCommands struct { + StreamEntryID string `sql:"primary_key"` + DeliveryID string + Source string + IdempotencyKey string + FailureCode string + FailureMessage string + RawFields string + RecordedAt time.Time +} diff --git a/mail/internal/adapters/postgres/jet/mail/table/attempts.go b/mail/internal/adapters/postgres/jet/mail/table/attempts.go new file mode 100644 index 0000000..27f89a7 --- /dev/null +++ b/mail/internal/adapters/postgres/jet/mail/table/attempts.go @@ -0,0 +1,99 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Attempts = newAttemptsTable("mail", "attempts", "") + +type attemptsTable struct { + postgres.Table + + // Columns + DeliveryID postgres.ColumnString + AttemptNo postgres.ColumnInteger + Status postgres.ColumnString + ScheduledFor postgres.ColumnTimestampz + StartedAt postgres.ColumnTimestampz + FinishedAt postgres.ColumnTimestampz + ProviderClassification postgres.ColumnString + ProviderSummary postgres.ColumnString + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type AttemptsTable struct { + attemptsTable + + EXCLUDED attemptsTable +} + +// AS creates new AttemptsTable with assigned alias +func (a AttemptsTable) AS(alias string) *AttemptsTable { + return newAttemptsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new AttemptsTable with assigned schema name +func (a AttemptsTable) FromSchema(schemaName string) *AttemptsTable { + return newAttemptsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new AttemptsTable with assigned table prefix +func (a AttemptsTable) WithPrefix(prefix string) *AttemptsTable { + return newAttemptsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new AttemptsTable with assigned table suffix +func (a AttemptsTable) WithSuffix(suffix string) *AttemptsTable { + return newAttemptsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newAttemptsTable(schemaName, tableName, alias string) *AttemptsTable { + return &AttemptsTable{ + attemptsTable: newAttemptsTableImpl(schemaName, tableName, alias), + EXCLUDED: newAttemptsTableImpl("", "excluded", ""), + } +} + +func newAttemptsTableImpl(schemaName, tableName, alias string) attemptsTable { + var ( + DeliveryIDColumn = 
postgres.StringColumn("delivery_id") + AttemptNoColumn = postgres.IntegerColumn("attempt_no") + StatusColumn = postgres.StringColumn("status") + ScheduledForColumn = postgres.TimestampzColumn("scheduled_for") + StartedAtColumn = postgres.TimestampzColumn("started_at") + FinishedAtColumn = postgres.TimestampzColumn("finished_at") + ProviderClassificationColumn = postgres.StringColumn("provider_classification") + ProviderSummaryColumn = postgres.StringColumn("provider_summary") + allColumns = postgres.ColumnList{DeliveryIDColumn, AttemptNoColumn, StatusColumn, ScheduledForColumn, StartedAtColumn, FinishedAtColumn, ProviderClassificationColumn, ProviderSummaryColumn} + mutableColumns = postgres.ColumnList{StatusColumn, ScheduledForColumn, StartedAtColumn, FinishedAtColumn, ProviderClassificationColumn, ProviderSummaryColumn} + defaultColumns = postgres.ColumnList{ProviderClassificationColumn, ProviderSummaryColumn} + ) + + return attemptsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + DeliveryID: DeliveryIDColumn, + AttemptNo: AttemptNoColumn, + Status: StatusColumn, + ScheduledFor: ScheduledForColumn, + StartedAt: StartedAtColumn, + FinishedAt: FinishedAtColumn, + ProviderClassification: ProviderClassificationColumn, + ProviderSummary: ProviderSummaryColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/mail/internal/adapters/postgres/jet/mail/table/dead_letters.go b/mail/internal/adapters/postgres/jet/mail/table/dead_letters.go new file mode 100644 index 0000000..6ee9e6b --- /dev/null +++ b/mail/internal/adapters/postgres/jet/mail/table/dead_letters.go @@ -0,0 +1,93 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var DeadLetters = newDeadLettersTable("mail", "dead_letters", "") + +type deadLettersTable struct { + postgres.Table + + // Columns + DeliveryID postgres.ColumnString + FinalAttemptNo postgres.ColumnInteger + FailureClassification postgres.ColumnString + ProviderSummary postgres.ColumnString + RecoveryHint postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type DeadLettersTable struct { + deadLettersTable + + EXCLUDED deadLettersTable +} + +// AS creates new DeadLettersTable with assigned alias +func (a DeadLettersTable) AS(alias string) *DeadLettersTable { + return newDeadLettersTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new DeadLettersTable with assigned schema name +func (a DeadLettersTable) FromSchema(schemaName string) *DeadLettersTable { + return newDeadLettersTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new DeadLettersTable with assigned table prefix +func (a DeadLettersTable) WithPrefix(prefix string) *DeadLettersTable { + return newDeadLettersTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new DeadLettersTable with assigned table suffix +func (a DeadLettersTable) WithSuffix(suffix string) *DeadLettersTable { + return newDeadLettersTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newDeadLettersTable(schemaName, tableName, alias string) *DeadLettersTable { + return &DeadLettersTable{ + deadLettersTable: newDeadLettersTableImpl(schemaName, tableName, alias), + EXCLUDED: newDeadLettersTableImpl("", "excluded", ""), + } +} + +func newDeadLettersTableImpl(schemaName, tableName, alias string) deadLettersTable { + var ( + DeliveryIDColumn = 
postgres.StringColumn("delivery_id") + FinalAttemptNoColumn = postgres.IntegerColumn("final_attempt_no") + FailureClassificationColumn = postgres.StringColumn("failure_classification") + ProviderSummaryColumn = postgres.StringColumn("provider_summary") + RecoveryHintColumn = postgres.StringColumn("recovery_hint") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + allColumns = postgres.ColumnList{DeliveryIDColumn, FinalAttemptNoColumn, FailureClassificationColumn, ProviderSummaryColumn, RecoveryHintColumn, CreatedAtColumn} + mutableColumns = postgres.ColumnList{FinalAttemptNoColumn, FailureClassificationColumn, ProviderSummaryColumn, RecoveryHintColumn, CreatedAtColumn} + defaultColumns = postgres.ColumnList{ProviderSummaryColumn, RecoveryHintColumn} + ) + + return deadLettersTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + DeliveryID: DeliveryIDColumn, + FinalAttemptNo: FinalAttemptNoColumn, + FailureClassification: FailureClassificationColumn, + ProviderSummary: ProviderSummaryColumn, + RecoveryHint: RecoveryHintColumn, + CreatedAt: CreatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/mail/internal/adapters/postgres/jet/mail/table/deliveries.go b/mail/internal/adapters/postgres/jet/mail/table/deliveries.go new file mode 100644 index 0000000..8d85c5b --- /dev/null +++ b/mail/internal/adapters/postgres/jet/mail/table/deliveries.go @@ -0,0 +1,153 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Deliveries = newDeliveriesTable("mail", "deliveries", "") + +type deliveriesTable struct { + postgres.Table + + // Columns + DeliveryID postgres.ColumnString + ResendParentDeliveryID postgres.ColumnString + Source postgres.ColumnString + Status postgres.ColumnString + PayloadMode postgres.ColumnString + TemplateID postgres.ColumnString + Locale postgres.ColumnString + LocaleFallbackUsed postgres.ColumnBool + TemplateVariables postgres.ColumnString + Attachments postgres.ColumnString + Subject postgres.ColumnString + TextBody postgres.ColumnString + HTMLBody postgres.ColumnString + IdempotencyKey postgres.ColumnString + RequestFingerprint postgres.ColumnString + IdempotencyExpiresAt postgres.ColumnTimestampz + AttemptCount postgres.ColumnInteger + LastAttemptStatus postgres.ColumnString + ProviderSummary postgres.ColumnString + NextAttemptAt postgres.ColumnTimestampz + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + SentAt postgres.ColumnTimestampz + SuppressedAt postgres.ColumnTimestampz + FailedAt postgres.ColumnTimestampz + DeadLetteredAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type DeliveriesTable struct { + deliveriesTable + + EXCLUDED deliveriesTable +} + +// AS creates new DeliveriesTable with assigned alias +func (a DeliveriesTable) AS(alias string) *DeliveriesTable { + return newDeliveriesTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new DeliveriesTable with assigned schema name +func (a DeliveriesTable) FromSchema(schemaName string) *DeliveriesTable { + return newDeliveriesTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new DeliveriesTable with assigned table prefix +func (a DeliveriesTable) 
WithPrefix(prefix string) *DeliveriesTable { + return newDeliveriesTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new DeliveriesTable with assigned table suffix +func (a DeliveriesTable) WithSuffix(suffix string) *DeliveriesTable { + return newDeliveriesTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newDeliveriesTable(schemaName, tableName, alias string) *DeliveriesTable { + return &DeliveriesTable{ + deliveriesTable: newDeliveriesTableImpl(schemaName, tableName, alias), + EXCLUDED: newDeliveriesTableImpl("", "excluded", ""), + } +} + +func newDeliveriesTableImpl(schemaName, tableName, alias string) deliveriesTable { + var ( + DeliveryIDColumn = postgres.StringColumn("delivery_id") + ResendParentDeliveryIDColumn = postgres.StringColumn("resend_parent_delivery_id") + SourceColumn = postgres.StringColumn("source") + StatusColumn = postgres.StringColumn("status") + PayloadModeColumn = postgres.StringColumn("payload_mode") + TemplateIDColumn = postgres.StringColumn("template_id") + LocaleColumn = postgres.StringColumn("locale") + LocaleFallbackUsedColumn = postgres.BoolColumn("locale_fallback_used") + TemplateVariablesColumn = postgres.StringColumn("template_variables") + AttachmentsColumn = postgres.StringColumn("attachments") + SubjectColumn = postgres.StringColumn("subject") + TextBodyColumn = postgres.StringColumn("text_body") + HTMLBodyColumn = postgres.StringColumn("html_body") + IdempotencyKeyColumn = postgres.StringColumn("idempotency_key") + RequestFingerprintColumn = postgres.StringColumn("request_fingerprint") + IdempotencyExpiresAtColumn = postgres.TimestampzColumn("idempotency_expires_at") + AttemptCountColumn = postgres.IntegerColumn("attempt_count") + LastAttemptStatusColumn = postgres.StringColumn("last_attempt_status") + ProviderSummaryColumn = postgres.StringColumn("provider_summary") + NextAttemptAtColumn = postgres.TimestampzColumn("next_attempt_at") + CreatedAtColumn = 
postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + SentAtColumn = postgres.TimestampzColumn("sent_at") + SuppressedAtColumn = postgres.TimestampzColumn("suppressed_at") + FailedAtColumn = postgres.TimestampzColumn("failed_at") + DeadLetteredAtColumn = postgres.TimestampzColumn("dead_lettered_at") + allColumns = postgres.ColumnList{DeliveryIDColumn, ResendParentDeliveryIDColumn, SourceColumn, StatusColumn, PayloadModeColumn, TemplateIDColumn, LocaleColumn, LocaleFallbackUsedColumn, TemplateVariablesColumn, AttachmentsColumn, SubjectColumn, TextBodyColumn, HTMLBodyColumn, IdempotencyKeyColumn, RequestFingerprintColumn, IdempotencyExpiresAtColumn, AttemptCountColumn, LastAttemptStatusColumn, ProviderSummaryColumn, NextAttemptAtColumn, CreatedAtColumn, UpdatedAtColumn, SentAtColumn, SuppressedAtColumn, FailedAtColumn, DeadLetteredAtColumn} + mutableColumns = postgres.ColumnList{ResendParentDeliveryIDColumn, SourceColumn, StatusColumn, PayloadModeColumn, TemplateIDColumn, LocaleColumn, LocaleFallbackUsedColumn, TemplateVariablesColumn, AttachmentsColumn, SubjectColumn, TextBodyColumn, HTMLBodyColumn, IdempotencyKeyColumn, RequestFingerprintColumn, IdempotencyExpiresAtColumn, AttemptCountColumn, LastAttemptStatusColumn, ProviderSummaryColumn, NextAttemptAtColumn, CreatedAtColumn, UpdatedAtColumn, SentAtColumn, SuppressedAtColumn, FailedAtColumn, DeadLetteredAtColumn} + defaultColumns = postgres.ColumnList{ResendParentDeliveryIDColumn, TemplateIDColumn, LocaleColumn, LocaleFallbackUsedColumn, SubjectColumn, TextBodyColumn, HTMLBodyColumn, AttemptCountColumn, LastAttemptStatusColumn, ProviderSummaryColumn} + ) + + return deliveriesTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + DeliveryID: DeliveryIDColumn, + ResendParentDeliveryID: ResendParentDeliveryIDColumn, + Source: SourceColumn, + Status: StatusColumn, + PayloadMode: PayloadModeColumn, + TemplateID: TemplateIDColumn, + 
Locale: LocaleColumn, + LocaleFallbackUsed: LocaleFallbackUsedColumn, + TemplateVariables: TemplateVariablesColumn, + Attachments: AttachmentsColumn, + Subject: SubjectColumn, + TextBody: TextBodyColumn, + HTMLBody: HTMLBodyColumn, + IdempotencyKey: IdempotencyKeyColumn, + RequestFingerprint: RequestFingerprintColumn, + IdempotencyExpiresAt: IdempotencyExpiresAtColumn, + AttemptCount: AttemptCountColumn, + LastAttemptStatus: LastAttemptStatusColumn, + ProviderSummary: ProviderSummaryColumn, + NextAttemptAt: NextAttemptAtColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + SentAt: SentAtColumn, + SuppressedAt: SuppressedAtColumn, + FailedAt: FailedAtColumn, + DeadLetteredAt: DeadLetteredAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/mail/internal/adapters/postgres/jet/mail/table/delivery_payloads.go b/mail/internal/adapters/postgres/jet/mail/table/delivery_payloads.go new file mode 100644 index 0000000..2c83e46 --- /dev/null +++ b/mail/internal/adapters/postgres/jet/mail/table/delivery_payloads.go @@ -0,0 +1,81 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var DeliveryPayloads = newDeliveryPayloadsTable("mail", "delivery_payloads", "") + +type deliveryPayloadsTable struct { + postgres.Table + + // Columns + DeliveryID postgres.ColumnString + Payload postgres.ColumnString + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type DeliveryPayloadsTable struct { + deliveryPayloadsTable + + EXCLUDED deliveryPayloadsTable +} + +// AS creates new DeliveryPayloadsTable with assigned alias +func (a DeliveryPayloadsTable) AS(alias string) *DeliveryPayloadsTable { + return newDeliveryPayloadsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new DeliveryPayloadsTable with assigned schema name +func (a DeliveryPayloadsTable) FromSchema(schemaName string) *DeliveryPayloadsTable { + return newDeliveryPayloadsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new DeliveryPayloadsTable with assigned table prefix +func (a DeliveryPayloadsTable) WithPrefix(prefix string) *DeliveryPayloadsTable { + return newDeliveryPayloadsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new DeliveryPayloadsTable with assigned table suffix +func (a DeliveryPayloadsTable) WithSuffix(suffix string) *DeliveryPayloadsTable { + return newDeliveryPayloadsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newDeliveryPayloadsTable(schemaName, tableName, alias string) *DeliveryPayloadsTable { + return &DeliveryPayloadsTable{ + deliveryPayloadsTable: newDeliveryPayloadsTableImpl(schemaName, tableName, alias), + EXCLUDED: newDeliveryPayloadsTableImpl("", "excluded", ""), + } +} + +func newDeliveryPayloadsTableImpl(schemaName, tableName, alias string) deliveryPayloadsTable { + var ( + DeliveryIDColumn = 
postgres.StringColumn("delivery_id") + PayloadColumn = postgres.StringColumn("payload") + allColumns = postgres.ColumnList{DeliveryIDColumn, PayloadColumn} + mutableColumns = postgres.ColumnList{PayloadColumn} + defaultColumns = postgres.ColumnList{} + ) + + return deliveryPayloadsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + DeliveryID: DeliveryIDColumn, + Payload: PayloadColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/mail/internal/adapters/postgres/jet/mail/table/delivery_recipients.go b/mail/internal/adapters/postgres/jet/mail/table/delivery_recipients.go new file mode 100644 index 0000000..3be128b --- /dev/null +++ b/mail/internal/adapters/postgres/jet/mail/table/delivery_recipients.go @@ -0,0 +1,87 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var DeliveryRecipients = newDeliveryRecipientsTable("mail", "delivery_recipients", "") + +type deliveryRecipientsTable struct { + postgres.Table + + // Columns + DeliveryID postgres.ColumnString + Kind postgres.ColumnString + Position postgres.ColumnInteger + Email postgres.ColumnString + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type DeliveryRecipientsTable struct { + deliveryRecipientsTable + + EXCLUDED deliveryRecipientsTable +} + +// AS creates new DeliveryRecipientsTable with assigned alias +func (a DeliveryRecipientsTable) AS(alias string) *DeliveryRecipientsTable { + return newDeliveryRecipientsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new DeliveryRecipientsTable with assigned schema name +func (a DeliveryRecipientsTable) FromSchema(schemaName string) *DeliveryRecipientsTable { + return 
newDeliveryRecipientsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new DeliveryRecipientsTable with assigned table prefix +func (a DeliveryRecipientsTable) WithPrefix(prefix string) *DeliveryRecipientsTable { + return newDeliveryRecipientsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new DeliveryRecipientsTable with assigned table suffix +func (a DeliveryRecipientsTable) WithSuffix(suffix string) *DeliveryRecipientsTable { + return newDeliveryRecipientsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newDeliveryRecipientsTable(schemaName, tableName, alias string) *DeliveryRecipientsTable { + return &DeliveryRecipientsTable{ + deliveryRecipientsTable: newDeliveryRecipientsTableImpl(schemaName, tableName, alias), + EXCLUDED: newDeliveryRecipientsTableImpl("", "excluded", ""), + } +} + +func newDeliveryRecipientsTableImpl(schemaName, tableName, alias string) deliveryRecipientsTable { + var ( + DeliveryIDColumn = postgres.StringColumn("delivery_id") + KindColumn = postgres.StringColumn("kind") + PositionColumn = postgres.IntegerColumn("position") + EmailColumn = postgres.StringColumn("email") + allColumns = postgres.ColumnList{DeliveryIDColumn, KindColumn, PositionColumn, EmailColumn} + mutableColumns = postgres.ColumnList{EmailColumn} + defaultColumns = postgres.ColumnList{} + ) + + return deliveryRecipientsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + DeliveryID: DeliveryIDColumn, + Kind: KindColumn, + Position: PositionColumn, + Email: EmailColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/mail/internal/adapters/postgres/jet/mail/table/goose_db_version.go b/mail/internal/adapters/postgres/jet/mail/table/goose_db_version.go new file mode 100644 index 0000000..81037d6 --- /dev/null +++ b/mail/internal/adapters/postgres/jet/mail/table/goose_db_version.go @@ 
-0,0 +1,87 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var GooseDbVersion = newGooseDbVersionTable("mail", "goose_db_version", "") + +type gooseDbVersionTable struct { + postgres.Table + + // Columns + ID postgres.ColumnInteger + VersionID postgres.ColumnInteger + IsApplied postgres.ColumnBool + Tstamp postgres.ColumnTimestamp + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type GooseDbVersionTable struct { + gooseDbVersionTable + + EXCLUDED gooseDbVersionTable +} + +// AS creates new GooseDbVersionTable with assigned alias +func (a GooseDbVersionTable) AS(alias string) *GooseDbVersionTable { + return newGooseDbVersionTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new GooseDbVersionTable with assigned schema name +func (a GooseDbVersionTable) FromSchema(schemaName string) *GooseDbVersionTable { + return newGooseDbVersionTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new GooseDbVersionTable with assigned table prefix +func (a GooseDbVersionTable) WithPrefix(prefix string) *GooseDbVersionTable { + return newGooseDbVersionTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new GooseDbVersionTable with assigned table suffix +func (a GooseDbVersionTable) WithSuffix(suffix string) *GooseDbVersionTable { + return newGooseDbVersionTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newGooseDbVersionTable(schemaName, tableName, alias string) *GooseDbVersionTable { + return &GooseDbVersionTable{ + gooseDbVersionTable: newGooseDbVersionTableImpl(schemaName, tableName, alias), + EXCLUDED: newGooseDbVersionTableImpl("", "excluded", ""), + } +} + +func newGooseDbVersionTableImpl(schemaName, tableName, alias string) 
gooseDbVersionTable { + var ( + IDColumn = postgres.IntegerColumn("id") + VersionIDColumn = postgres.IntegerColumn("version_id") + IsAppliedColumn = postgres.BoolColumn("is_applied") + TstampColumn = postgres.TimestampColumn("tstamp") + allColumns = postgres.ColumnList{IDColumn, VersionIDColumn, IsAppliedColumn, TstampColumn} + mutableColumns = postgres.ColumnList{VersionIDColumn, IsAppliedColumn, TstampColumn} + defaultColumns = postgres.ColumnList{TstampColumn} + ) + + return gooseDbVersionTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ID: IDColumn, + VersionID: VersionIDColumn, + IsApplied: IsAppliedColumn, + Tstamp: TstampColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/mail/internal/adapters/postgres/jet/mail/table/malformed_commands.go b/mail/internal/adapters/postgres/jet/mail/table/malformed_commands.go new file mode 100644 index 0000000..2877ce0 --- /dev/null +++ b/mail/internal/adapters/postgres/jet/mail/table/malformed_commands.go @@ -0,0 +1,99 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var MalformedCommands = newMalformedCommandsTable("mail", "malformed_commands", "") + +type malformedCommandsTable struct { + postgres.Table + + // Columns + StreamEntryID postgres.ColumnString + DeliveryID postgres.ColumnString + Source postgres.ColumnString + IdempotencyKey postgres.ColumnString + FailureCode postgres.ColumnString + FailureMessage postgres.ColumnString + RawFields postgres.ColumnString + RecordedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type MalformedCommandsTable struct { + malformedCommandsTable + + EXCLUDED malformedCommandsTable +} + +// AS creates new MalformedCommandsTable with assigned alias +func (a MalformedCommandsTable) AS(alias string) *MalformedCommandsTable { + return newMalformedCommandsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new MalformedCommandsTable with assigned schema name +func (a MalformedCommandsTable) FromSchema(schemaName string) *MalformedCommandsTable { + return newMalformedCommandsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new MalformedCommandsTable with assigned table prefix +func (a MalformedCommandsTable) WithPrefix(prefix string) *MalformedCommandsTable { + return newMalformedCommandsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new MalformedCommandsTable with assigned table suffix +func (a MalformedCommandsTable) WithSuffix(suffix string) *MalformedCommandsTable { + return newMalformedCommandsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newMalformedCommandsTable(schemaName, tableName, alias string) *MalformedCommandsTable { + return &MalformedCommandsTable{ + malformedCommandsTable: 
newMalformedCommandsTableImpl(schemaName, tableName, alias), + EXCLUDED: newMalformedCommandsTableImpl("", "excluded", ""), + } +} + +func newMalformedCommandsTableImpl(schemaName, tableName, alias string) malformedCommandsTable { + var ( + StreamEntryIDColumn = postgres.StringColumn("stream_entry_id") + DeliveryIDColumn = postgres.StringColumn("delivery_id") + SourceColumn = postgres.StringColumn("source") + IdempotencyKeyColumn = postgres.StringColumn("idempotency_key") + FailureCodeColumn = postgres.StringColumn("failure_code") + FailureMessageColumn = postgres.StringColumn("failure_message") + RawFieldsColumn = postgres.StringColumn("raw_fields") + RecordedAtColumn = postgres.TimestampzColumn("recorded_at") + allColumns = postgres.ColumnList{StreamEntryIDColumn, DeliveryIDColumn, SourceColumn, IdempotencyKeyColumn, FailureCodeColumn, FailureMessageColumn, RawFieldsColumn, RecordedAtColumn} + mutableColumns = postgres.ColumnList{DeliveryIDColumn, SourceColumn, IdempotencyKeyColumn, FailureCodeColumn, FailureMessageColumn, RawFieldsColumn, RecordedAtColumn} + defaultColumns = postgres.ColumnList{DeliveryIDColumn, SourceColumn, IdempotencyKeyColumn} + ) + + return malformedCommandsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + StreamEntryID: StreamEntryIDColumn, + DeliveryID: DeliveryIDColumn, + Source: SourceColumn, + IdempotencyKey: IdempotencyKeyColumn, + FailureCode: FailureCodeColumn, + FailureMessage: FailureMessageColumn, + RawFields: RawFieldsColumn, + RecordedAt: RecordedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/mail/internal/adapters/postgres/jet/mail/table/table_use_schema.go b/mail/internal/adapters/postgres/jet/mail/table/table_use_schema.go new file mode 100644 index 0000000..3afe07d --- /dev/null +++ b/mail/internal/adapters/postgres/jet/mail/table/table_use_schema.go @@ -0,0 +1,20 @@ +// +// Code generated by go-jet 
DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +// UseSchema sets a new schema name for all generated table SQL builder types. It is recommended to invoke +// this method only once at the beginning of the program. +func UseSchema(schema string) { + Attempts = Attempts.FromSchema(schema) + DeadLetters = DeadLetters.FromSchema(schema) + Deliveries = Deliveries.FromSchema(schema) + DeliveryPayloads = DeliveryPayloads.FromSchema(schema) + DeliveryRecipients = DeliveryRecipients.FromSchema(schema) + GooseDbVersion = GooseDbVersion.FromSchema(schema) + MalformedCommands = MalformedCommands.FromSchema(schema) +} diff --git a/mail/internal/adapters/postgres/mailstore/attempt_execution.go b/mail/internal/adapters/postgres/mailstore/attempt_execution.go new file mode 100644 index 0000000..bd583e2 --- /dev/null +++ b/mail/internal/adapters/postgres/mailstore/attempt_execution.go @@ -0,0 +1,354 @@ +package mailstore + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table" + "galaxy/mail/internal/domain/attempt" + "galaxy/mail/internal/domain/common" + deliverydomain "galaxy/mail/internal/domain/delivery" + "galaxy/mail/internal/service/acceptgenericdelivery" + "galaxy/mail/internal/service/executeattempt" + "galaxy/mail/internal/telemetry" + + pg "github.com/go-jet/jet/v2/postgres" +) + +// LoadPayload returns the raw attachment payload bundle for deliveryID. It +// satisfies executeattempt.PayloadLoader. +func (store *Store) LoadPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) { + return store.GetDeliveryPayload(ctx, deliveryID) +} + +// AttemptExecution returns a handle that satisfies executeattempt.Store and +// the worker.AttemptExecutionStore contract used by the scheduler. 
+func (store *Store) AttemptExecution() *AttemptExecutionStore { + return &AttemptExecutionStore{store: store} +} + +// AttemptExecutionStore is the executeattempt.Store handle returned by +// Store.AttemptExecution. +type AttemptExecutionStore struct { + store *Store +} + +var _ executeattempt.Store = (*AttemptExecutionStore)(nil) + +// Commit applies one complete durable attempt outcome mutation: the +// terminal current attempt, an optional next scheduled retry attempt, and an +// optional dead-letter row. +func (handle *AttemptExecutionStore) Commit(ctx context.Context, input executeattempt.CommitStateInput) error { + if handle == nil || handle.store == nil { + return errors.New("commit attempt: nil store") + } + if ctx == nil { + return errors.New("commit attempt: nil context") + } + if err := input.Validate(); err != nil { + return fmt.Errorf("commit attempt: %w", err) + } + + return handle.store.withTx(ctx, "commit attempt", func(ctx context.Context, tx *sql.Tx) error { + if err := lockDelivery(ctx, tx, input.Delivery.DeliveryID); err != nil { + return fmt.Errorf("commit attempt: %w", err) + } + if err := updateAttempt(ctx, tx, input.Attempt); err != nil { + return fmt.Errorf("commit attempt: update current attempt: %w", err) + } + if input.NextAttempt != nil { + if err := insertAttempt(ctx, tx, *input.NextAttempt); err != nil { + return fmt.Errorf("commit attempt: insert next attempt: %w", err) + } + } + if input.DeadLetter != nil { + if err := insertDeadLetter(ctx, tx, *input.DeadLetter); err != nil { + return fmt.Errorf("commit attempt: insert dead-letter: %w", err) + } + } + if err := updateDelivery(ctx, tx, input.Delivery, input.NextAttempt); err != nil { + return fmt.Errorf("commit attempt: update delivery: %w", err) + } + return nil + }) +} + +// NextDueDeliveryIDs returns up to limit due delivery identifiers ordered by +// next_attempt_at. 
The read itself takes no row locks; the actual claim with +// `FOR UPDATE SKIP LOCKED` happens in ClaimDueAttempt. +func (handle *AttemptExecutionStore) NextDueDeliveryIDs(ctx context.Context, now time.Time, limit int64) ([]common.DeliveryID, error) { + if handle == nil || handle.store == nil { + return nil, errors.New("next due delivery ids: nil store") + } + if ctx == nil { + return nil, errors.New("next due delivery ids: nil context") + } + if limit <= 0 { + return nil, errors.New("next due delivery ids: non-positive limit") + } + operationCtx, cancel, err := handle.store.operationContext(ctx, "next due delivery ids") + if err != nil { + return nil, err + } + defer cancel() + + stmt := pg.SELECT(pgtable.Deliveries.DeliveryID). + FROM(pgtable.Deliveries). + WHERE(pg.AND( + pgtable.Deliveries.NextAttemptAt.IS_NOT_NULL(), + pgtable.Deliveries.NextAttemptAt.LT_EQ(pg.TimestampzT(now.UTC())), + )). + ORDER_BY(pgtable.Deliveries.NextAttemptAt.ASC()). + LIMIT(limit) + + query, args := stmt.Sql() + rows, err := handle.store.db.QueryContext(operationCtx, query, args...) + if err != nil { + return nil, fmt.Errorf("next due delivery ids: %w", err) + } + defer rows.Close() + + out := make([]common.DeliveryID, 0, limit) + for rows.Next() { + var id string + if err := rows.Scan(&id); err != nil { + return nil, fmt.Errorf("next due delivery ids: scan: %w", err) + } + out = append(out, common.DeliveryID(id)) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("next due delivery ids: %w", err) + } + return out, nil +} + +// SendingDeliveryIDs returns every delivery currently held by an in-progress +// attempt. The recovery loop uses the result to identify rows whose claim +// might have expired. 
+func (handle *AttemptExecutionStore) SendingDeliveryIDs(ctx context.Context) ([]common.DeliveryID, error) { + if handle == nil || handle.store == nil { + return nil, errors.New("sending delivery ids: nil store") + } + if ctx == nil { + return nil, errors.New("sending delivery ids: nil context") + } + operationCtx, cancel, err := handle.store.operationContext(ctx, "sending delivery ids") + if err != nil { + return nil, err + } + defer cancel() + + stmt := pg.SELECT(pgtable.Deliveries.DeliveryID). + FROM(pgtable.Deliveries). + WHERE(pgtable.Deliveries.Status.EQ(pg.String(string(deliverydomain.StatusSending)))) + + query, args := stmt.Sql() + rows, err := handle.store.db.QueryContext(operationCtx, query, args...) + if err != nil { + return nil, fmt.Errorf("sending delivery ids: %w", err) + } + defer rows.Close() + + out := []common.DeliveryID{} + for rows.Next() { + var id string + if err := rows.Scan(&id); err != nil { + return nil, fmt.Errorf("sending delivery ids: scan: %w", err) + } + out = append(out, common.DeliveryID(id)) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("sending delivery ids: %w", err) + } + return out, nil +} + +// LoadWorkItem returns the active attempt and delivery row for deliveryID. +// found is false when the delivery row does not exist. 
+func (handle *AttemptExecutionStore) LoadWorkItem(ctx context.Context, deliveryID common.DeliveryID) (executeattempt.WorkItem, bool, error) { + if handle == nil || handle.store == nil { + return executeattempt.WorkItem{}, false, errors.New("load work item: nil store") + } + if ctx == nil { + return executeattempt.WorkItem{}, false, errors.New("load work item: nil context") + } + if err := deliveryID.Validate(); err != nil { + return executeattempt.WorkItem{}, false, fmt.Errorf("load work item: %w", err) + } + operationCtx, cancel, err := handle.store.operationContext(ctx, "load work item") + if err != nil { + return executeattempt.WorkItem{}, false, err + } + defer cancel() + + delivery, ok, err := loadDeliveryByID(operationCtx, handle.store.db, deliveryID) + if err != nil { + return executeattempt.WorkItem{}, false, fmt.Errorf("load work item: %w", err) + } + if !ok { + return executeattempt.WorkItem{}, false, nil + } + if delivery.AttemptCount == 0 { + return executeattempt.WorkItem{}, false, fmt.Errorf("load work item %q: zero attempt count", deliveryID) + } + active, err := loadActiveAttempt(operationCtx, handle.store.db, deliveryID, delivery.AttemptCount) + if err != nil { + return executeattempt.WorkItem{}, false, fmt.Errorf("load work item: load active attempt: %w", err) + } + return executeattempt.WorkItem{Delivery: delivery, Attempt: active}, true, nil +} + +// ClaimDueAttempt atomically claims the due scheduled attempt for deliveryID +// inside one transaction. The delivery transitions to `sending`, the active +// attempt to `in_progress`. found is false when no claimable row exists at +// now. 
+func (handle *AttemptExecutionStore) ClaimDueAttempt(ctx context.Context, deliveryID common.DeliveryID, now time.Time) (executeattempt.WorkItem, bool, error) { + if handle == nil || handle.store == nil { + return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil store") + } + if ctx == nil { + return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil context") + } + if err := deliveryID.Validate(); err != nil { + return executeattempt.WorkItem{}, false, fmt.Errorf("claim due attempt: %w", err) + } + + var ( + claimed executeattempt.WorkItem + found bool + ) + err := handle.store.withTx(ctx, "claim due attempt", func(ctx context.Context, tx *sql.Tx) error { + stmt := pg.SELECT(deliverySelectColumns). + FROM(pgtable.Deliveries). + WHERE(pg.AND( + pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String())), + pgtable.Deliveries.Status.IN( + pg.String(string(deliverydomain.StatusQueued)), + pg.String(string(deliverydomain.StatusRendered)), + ), + pgtable.Deliveries.NextAttemptAt.IS_NOT_NULL(), + pgtable.Deliveries.NextAttemptAt.LT_EQ(pg.TimestampzT(now.UTC())), + )). + FOR(pg.UPDATE().SKIP_LOCKED()) + + query, args := stmt.Sql() + row := tx.QueryRowContext(ctx, query, args...) 
+ delivery, _, err := scanDelivery(row) + if errors.Is(err, sql.ErrNoRows) { + return nil + } + if err != nil { + return fmt.Errorf("claim due attempt: load delivery: %w", err) + } + + envelope, err := loadEnvelope(ctx, tx, deliveryID) + if err != nil { + return fmt.Errorf("claim due attempt: load envelope: %w", err) + } + delivery.Envelope = envelope + + active, err := loadActiveAttempt(ctx, tx, deliveryID, delivery.AttemptCount) + if err != nil { + return fmt.Errorf("claim due attempt: load active attempt: %w", err) + } + if active.Status != attempt.StatusScheduled { + return nil + } + + nowUTC := now.UTC().Truncate(time.Millisecond) + active.Status = attempt.StatusInProgress + active.StartedAt = &nowUTC + + delivery.Status = deliverydomain.StatusSending + delivery.LastAttemptStatus = attempt.StatusInProgress + delivery.UpdatedAt = nowUTC + + if err := updateAttempt(ctx, tx, active); err != nil { + return fmt.Errorf("claim due attempt: update attempt: %w", err) + } + if err := updateDelivery(ctx, tx, delivery, nil); err != nil { + return fmt.Errorf("claim due attempt: update delivery: %w", err) + } + + claimed = executeattempt.WorkItem{Delivery: delivery, Attempt: active} + found = true + return nil + }) + if err != nil { + return executeattempt.WorkItem{}, false, err + } + return claimed, found, nil +} + +// RemoveScheduledDelivery clears next_attempt_at for deliveryID. The +// scheduler calls this when it discovers a stale schedule entry that no +// longer points to a claimable delivery. 
+func (handle *AttemptExecutionStore) RemoveScheduledDelivery(ctx context.Context, deliveryID common.DeliveryID) error { + if handle == nil || handle.store == nil { + return errors.New("remove scheduled delivery: nil store") + } + if ctx == nil { + return errors.New("remove scheduled delivery: nil context") + } + if err := deliveryID.Validate(); err != nil { + return fmt.Errorf("remove scheduled delivery: %w", err) + } + operationCtx, cancel, err := handle.store.operationContext(ctx, "remove scheduled delivery") + if err != nil { + return err + } + defer cancel() + + stmt := pgtable.Deliveries.UPDATE(pgtable.Deliveries.NextAttemptAt). + SET(pg.NULL). + WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String()))) + + query, args := stmt.Sql() + if _, err := handle.store.db.ExecContext(operationCtx, query, args...); err != nil { + return fmt.Errorf("remove scheduled delivery: %w", err) + } + return nil +} + +// ReadAttemptScheduleSnapshot returns the current attempt-schedule depth and +// oldest scheduled timestamp. The runtime exposes this via the telemetry +// snapshot reader contract. +func (handle *AttemptExecutionStore) ReadAttemptScheduleSnapshot(ctx context.Context) (telemetry.AttemptScheduleSnapshot, error) { + if handle == nil || handle.store == nil { + return telemetry.AttemptScheduleSnapshot{}, errors.New("read attempt schedule snapshot: nil store") + } + if ctx == nil { + return telemetry.AttemptScheduleSnapshot{}, errors.New("read attempt schedule snapshot: nil context") + } + operationCtx, cancel, err := handle.store.operationContext(ctx, "read attempt schedule snapshot") + if err != nil { + return telemetry.AttemptScheduleSnapshot{}, err + } + defer cancel() + + stmt := pg.SELECT( + pg.COUNT(pg.STAR), + pg.MIN(pgtable.Deliveries.NextAttemptAt), + ).FROM(pgtable.Deliveries). + WHERE(pgtable.Deliveries.NextAttemptAt.IS_NOT_NULL()) + + query, args := stmt.Sql() + row := handle.store.db.QueryRowContext(operationCtx, query, args...) 
+ var ( + count int64 + oldest sql.NullTime + summary telemetry.AttemptScheduleSnapshot + ) + if err := row.Scan(&count, &oldest); err != nil { + return telemetry.AttemptScheduleSnapshot{}, fmt.Errorf("read attempt schedule snapshot: %w", err) + } + summary.Depth = count + if oldest.Valid { + oldestUTC := oldest.Time.UTC() + summary.OldestScheduledFor = &oldestUTC + } + return summary, nil +} diff --git a/mail/internal/adapters/postgres/mailstore/auth_acceptance.go b/mail/internal/adapters/postgres/mailstore/auth_acceptance.go new file mode 100644 index 0000000..4ea156b --- /dev/null +++ b/mail/internal/adapters/postgres/mailstore/auth_acceptance.go @@ -0,0 +1,63 @@ +package mailstore + +import ( + "context" + "database/sql" + "errors" + "fmt" + + "galaxy/mail/internal/domain/common" + deliverydomain "galaxy/mail/internal/domain/delivery" + "galaxy/mail/internal/service/acceptauthdelivery" +) + +var _ acceptauthdelivery.Store = (*Store)(nil) + +// CreateAcceptance writes one auth-delivery acceptance write set inside one +// BEGIN … COMMIT transaction. Idempotency races surface as +// acceptauthdelivery.ErrConflict. 
+func (store *Store) CreateAcceptance(ctx context.Context, input acceptauthdelivery.CreateAcceptanceInput) error { + if store == nil { + return errors.New("create auth acceptance: nil store") + } + if ctx == nil { + return errors.New("create auth acceptance: nil context") + } + if err := input.Validate(); err != nil { + return fmt.Errorf("create auth acceptance: %w", err) + } + + return store.withTx(ctx, "create auth acceptance", func(ctx context.Context, tx *sql.Tx) error { + if err := insertDelivery(ctx, tx, input.Delivery, input.Idempotency, input.Idempotency.ExpiresAt, input.FirstAttempt); err != nil { + if isUniqueViolation(err) { + return acceptauthdelivery.ErrConflict + } + return fmt.Errorf("create auth acceptance: insert delivery: %w", err) + } + + if input.FirstAttempt != nil { + if err := insertAttempt(ctx, tx, *input.FirstAttempt); err != nil { + return fmt.Errorf("create auth acceptance: insert first attempt: %w", err) + } + } + return nil + }) +} + +// GetDelivery loads one accepted delivery by its identifier. 
+func (store *Store) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) { + if store == nil { + return deliverydomain.Delivery{}, false, errors.New("get delivery: nil store") + } + operationCtx, cancel, err := store.operationContext(ctx, "get delivery") + if err != nil { + return deliverydomain.Delivery{}, false, err + } + defer cancel() + + record, ok, err := loadDeliveryByID(operationCtx, store.db, deliveryID) + if err != nil { + return deliverydomain.Delivery{}, false, fmt.Errorf("get delivery: %w", err) + } + return record, ok, nil +} diff --git a/mail/internal/adapters/postgres/mailstore/codecs.go b/mail/internal/adapters/postgres/mailstore/codecs.go new file mode 100644 index 0000000..de673d0 --- /dev/null +++ b/mail/internal/adapters/postgres/mailstore/codecs.go @@ -0,0 +1,176 @@ +package mailstore + +import ( + "encoding/json" + "fmt" + + "galaxy/mail/internal/domain/common" + "galaxy/mail/internal/service/acceptgenericdelivery" +) + +// attachmentRow stores the on-disk JSONB encoding of one +// `common.AttachmentMetadata` entry. The encoding is intentionally explicit +// (named JSON keys) so the on-disk shape stays decoupled from accidental Go +// struct renames. +type attachmentRow struct { + Filename string `json:"filename"` + ContentType string `json:"content_type"` + SizeBytes int64 `json:"size_bytes"` +} + +// marshalAttachments returns the JSONB bytes for the attachments column. A +// nil/empty slice round-trips as `[]` to keep the column NOT NULL across +// equality tests. 
+func marshalAttachments(attachments []common.AttachmentMetadata) ([]byte, error) { + rows := make([]attachmentRow, 0, len(attachments)) + for _, attachment := range attachments { + rows = append(rows, attachmentRow{ + Filename: attachment.Filename, + ContentType: attachment.ContentType, + SizeBytes: attachment.SizeBytes, + }) + } + payload, err := json.Marshal(rows) + if err != nil { + return nil, fmt.Errorf("marshal attachments: %w", err) + } + return payload, nil +} + +// unmarshalAttachments decodes the attachments JSONB column into a +// domain-friendly slice. nil/empty payloads decode to a nil slice. +func unmarshalAttachments(payload []byte) ([]common.AttachmentMetadata, error) { + if len(payload) == 0 { + return nil, nil + } + var rows []attachmentRow + if err := json.Unmarshal(payload, &rows); err != nil { + return nil, fmt.Errorf("unmarshal attachments: %w", err) + } + if len(rows) == 0 { + return nil, nil + } + out := make([]common.AttachmentMetadata, 0, len(rows)) + for _, row := range rows { + out = append(out, common.AttachmentMetadata{ + Filename: row.Filename, + ContentType: row.ContentType, + SizeBytes: row.SizeBytes, + }) + } + return out, nil +} + +// marshalTemplateVariables returns the JSONB bytes for the template_variables +// column. nil maps round-trip as SQL NULL. +func marshalTemplateVariables(variables map[string]any) ([]byte, error) { + if variables == nil { + return nil, nil + } + payload, err := json.Marshal(variables) + if err != nil { + return nil, fmt.Errorf("marshal template variables: %w", err) + } + return payload, nil +} + +// unmarshalTemplateVariables decodes the template_variables JSONB column. +// SQL NULL payloads decode to a nil map. 
+func unmarshalTemplateVariables(payload []byte) (map[string]any, error) { + if len(payload) == 0 { + return nil, nil + } + var variables map[string]any + if err := json.Unmarshal(payload, &variables); err != nil { + return nil, fmt.Errorf("unmarshal template variables: %w", err) + } + return variables, nil +} + +// payloadAttachmentRow stores the on-disk JSONB encoding of one +// `acceptgenericdelivery.AttachmentPayload`. The base64 body stays inline so +// the entire payload bundle round-trips as one JSONB value. +type payloadAttachmentRow struct { + Filename string `json:"filename"` + ContentType string `json:"content_type"` + ContentBase64 string `json:"content_base64"` + SizeBytes int64 `json:"size_bytes"` +} + +// payloadRow stores the on-disk JSONB encoding of one +// `acceptgenericdelivery.DeliveryPayload`. delivery_id is intentionally +// excluded — the row is keyed by it via the `delivery_payloads` PRIMARY KEY. +type payloadRow struct { + Attachments []payloadAttachmentRow `json:"attachments"` +} + +// marshalDeliveryPayload returns the JSONB bytes for the delivery_payloads +// row. +func marshalDeliveryPayload(payload acceptgenericdelivery.DeliveryPayload) ([]byte, error) { + rows := make([]payloadAttachmentRow, 0, len(payload.Attachments)) + for _, attachment := range payload.Attachments { + rows = append(rows, payloadAttachmentRow{ + Filename: attachment.Filename, + ContentType: attachment.ContentType, + ContentBase64: attachment.ContentBase64, + SizeBytes: attachment.SizeBytes, + }) + } + encoded, err := json.Marshal(payloadRow{Attachments: rows}) + if err != nil { + return nil, fmt.Errorf("marshal delivery payload: %w", err) + } + return encoded, nil +} + +// unmarshalDeliveryPayload decodes the delivery_payloads row into a +// domain-friendly DeliveryPayload using deliveryID as the owning identifier. 
+func unmarshalDeliveryPayload(deliveryID common.DeliveryID, encoded []byte) (acceptgenericdelivery.DeliveryPayload, error) { + if len(encoded) == 0 { + return acceptgenericdelivery.DeliveryPayload{}, fmt.Errorf("unmarshal delivery payload: empty") + } + var row payloadRow + if err := json.Unmarshal(encoded, &row); err != nil { + return acceptgenericdelivery.DeliveryPayload{}, fmt.Errorf("unmarshal delivery payload: %w", err) + } + out := acceptgenericdelivery.DeliveryPayload{DeliveryID: deliveryID} + if len(row.Attachments) == 0 { + return out, nil + } + out.Attachments = make([]acceptgenericdelivery.AttachmentPayload, 0, len(row.Attachments)) + for _, attachment := range row.Attachments { + out.Attachments = append(out.Attachments, acceptgenericdelivery.AttachmentPayload{ + Filename: attachment.Filename, + ContentType: attachment.ContentType, + ContentBase64: attachment.ContentBase64, + SizeBytes: attachment.SizeBytes, + }) + } + return out, nil +} + +// marshalRawFields returns the JSONB bytes for the malformed_commands.raw_fields +// column. The map is serialised verbatim so future operator queries can match +// arbitrary keys. +func marshalRawFields(fields map[string]any) ([]byte, error) { + if fields == nil { + fields = map[string]any{} + } + payload, err := json.Marshal(fields) + if err != nil { + return nil, fmt.Errorf("marshal raw fields: %w", err) + } + return payload, nil +} + +// unmarshalRawFields decodes the malformed_commands.raw_fields column. 
+func unmarshalRawFields(payload []byte) (map[string]any, error) { + out := map[string]any{} + if len(payload) == 0 { + return out, nil + } + if err := json.Unmarshal(payload, &out); err != nil { + return nil, fmt.Errorf("unmarshal raw fields: %w", err) + } + return out, nil +} diff --git a/mail/internal/adapters/postgres/mailstore/deliveries.go b/mail/internal/adapters/postgres/mailstore/deliveries.go new file mode 100644 index 0000000..6d21a94 --- /dev/null +++ b/mail/internal/adapters/postgres/mailstore/deliveries.go @@ -0,0 +1,806 @@ +package mailstore + +import ( + "context" + "database/sql" + "errors" + "fmt" + "strings" + "time" + + pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table" + "galaxy/mail/internal/domain/attempt" + "galaxy/mail/internal/domain/common" + deliverydomain "galaxy/mail/internal/domain/delivery" + "galaxy/mail/internal/domain/idempotency" + + pg "github.com/go-jet/jet/v2/postgres" +) + +// queryable is satisfied by both *sql.DB and *sql.Tx so the row read/write +// helpers below run inside or outside an explicit transaction. +type queryable interface { + ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) + QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) + QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row +} + +// recipientKind enumerates the supported delivery_recipients.kind values. +const ( + recipientKindTo = "to" + recipientKindCc = "cc" + recipientKindBcc = "bcc" + recipientKindReplyTo = "reply_to" +) + +// nextAttemptStatuses lists the delivery statuses for which next_attempt_at is +// kept populated. Other statuses store NULL so the partial scheduler index +// stays small. +var nextAttemptStatuses = map[deliverydomain.Status]struct{}{ + deliverydomain.StatusQueued: {}, + deliverydomain.StatusRendered: {}, +} + +// deliverySelectColumns is the canonical SELECT list for the deliveries +// table, matching scanDelivery's column order. 
+var deliverySelectColumns = pg.ColumnList{ + pgtable.Deliveries.DeliveryID, + pgtable.Deliveries.ResendParentDeliveryID, + pgtable.Deliveries.Source, + pgtable.Deliveries.Status, + pgtable.Deliveries.PayloadMode, + pgtable.Deliveries.TemplateID, + pgtable.Deliveries.Locale, + pgtable.Deliveries.LocaleFallbackUsed, + pgtable.Deliveries.TemplateVariables, + pgtable.Deliveries.Attachments, + pgtable.Deliveries.Subject, + pgtable.Deliveries.TextBody, + pgtable.Deliveries.HTMLBody, + pgtable.Deliveries.IdempotencyKey, + pgtable.Deliveries.RequestFingerprint, + pgtable.Deliveries.IdempotencyExpiresAt, + pgtable.Deliveries.AttemptCount, + pgtable.Deliveries.LastAttemptStatus, + pgtable.Deliveries.ProviderSummary, + pgtable.Deliveries.NextAttemptAt, + pgtable.Deliveries.CreatedAt, + pgtable.Deliveries.UpdatedAt, + pgtable.Deliveries.SentAt, + pgtable.Deliveries.SuppressedAt, + pgtable.Deliveries.FailedAt, + pgtable.Deliveries.DeadLetteredAt, +} + +// insertDelivery writes one delivery record together with its recipient rows. +// idem supplies the request_fingerprint and idempotency_expires_at fields; if +// zero-valued (resend), the helper stores an empty fingerprint and uses +// fallbackExpiresAt for the idempotency expiry. activeAttempt — when non-nil +// and the delivery is queued/rendered — drives the initial next_attempt_at. 
+func insertDelivery(ctx context.Context, q queryable, record deliverydomain.Delivery, idem idempotency.Record, fallbackExpiresAt time.Time, activeAttempt *attempt.Attempt) error { + templateVariables, err := marshalTemplateVariables(record.TemplateVariables) + if err != nil { + return err + } + attachments, err := marshalAttachments(record.Attachments) + if err != nil { + return err + } + + requestFingerprint := idem.RequestFingerprint + idemExpires := idem.ExpiresAt + if idem.IdempotencyKey.IsZero() && idem.Source == "" { + requestFingerprint = "" + idemExpires = fallbackExpiresAt + } + + stmt := pgtable.Deliveries.INSERT( + pgtable.Deliveries.DeliveryID, + pgtable.Deliveries.ResendParentDeliveryID, + pgtable.Deliveries.Source, + pgtable.Deliveries.Status, + pgtable.Deliveries.PayloadMode, + pgtable.Deliveries.TemplateID, + pgtable.Deliveries.Locale, + pgtable.Deliveries.LocaleFallbackUsed, + pgtable.Deliveries.TemplateVariables, + pgtable.Deliveries.Attachments, + pgtable.Deliveries.Subject, + pgtable.Deliveries.TextBody, + pgtable.Deliveries.HTMLBody, + pgtable.Deliveries.IdempotencyKey, + pgtable.Deliveries.RequestFingerprint, + pgtable.Deliveries.IdempotencyExpiresAt, + pgtable.Deliveries.AttemptCount, + pgtable.Deliveries.LastAttemptStatus, + pgtable.Deliveries.ProviderSummary, + pgtable.Deliveries.NextAttemptAt, + pgtable.Deliveries.CreatedAt, + pgtable.Deliveries.UpdatedAt, + pgtable.Deliveries.SentAt, + pgtable.Deliveries.SuppressedAt, + pgtable.Deliveries.FailedAt, + pgtable.Deliveries.DeadLetteredAt, + ).VALUES( + record.DeliveryID.String(), + record.ResendParentDeliveryID.String(), + string(record.Source), + string(record.Status), + string(record.PayloadMode), + record.TemplateID.String(), + record.Locale.String(), + record.LocaleFallbackUsed, + templateVariables, + attachments, + record.Content.Subject, + record.Content.TextBody, + record.Content.HTMLBody, + record.IdempotencyKey.String(), + requestFingerprint, + idemExpires.UTC(), + 
record.AttemptCount, + string(record.LastAttemptStatus), + record.ProviderSummary, + nextAttemptValue(record, activeAttempt), + record.CreatedAt.UTC(), + record.UpdatedAt.UTC(), + nullableTime(record.SentAt), + nullableTime(record.SuppressedAt), + nullableTime(record.FailedAt), + nullableTime(record.DeadLetteredAt), + ) + + query, args := stmt.Sql() + if _, err := q.ExecContext(ctx, query, args...); err != nil { + return err + } + + return insertRecipients(ctx, q, record.DeliveryID, record.Envelope) +} + +// insertRecipients writes one row per envelope address, preserving the +// caller's slice ordering through the position column. +func insertRecipients(ctx context.Context, q queryable, deliveryID common.DeliveryID, envelope deliverydomain.Envelope) error { + groups := []struct { + kind string + emails []common.Email + }{ + {recipientKindTo, envelope.To}, + {recipientKindCc, envelope.Cc}, + {recipientKindBcc, envelope.Bcc}, + {recipientKindReplyTo, envelope.ReplyTo}, + } + + for _, group := range groups { + for index, email := range group.emails { + stmt := pgtable.DeliveryRecipients.INSERT( + pgtable.DeliveryRecipients.DeliveryID, + pgtable.DeliveryRecipients.Kind, + pgtable.DeliveryRecipients.Position, + pgtable.DeliveryRecipients.Email, + ).VALUES( + deliveryID.String(), + group.kind, + index, + email.String(), + ) + query, args := stmt.Sql() + if _, err := q.ExecContext(ctx, query, args...); err != nil { + return fmt.Errorf("insert delivery recipient (%s[%d]): %w", group.kind, index, err) + } + } + } + return nil +} + +// updateDelivery writes mutated delivery columns. The set of columns covers +// every field that the domain model can change after acceptance: status, +// rendered content, attempt metadata, terminal timestamps, plus +// next_attempt_at. activeAttempt — when non-nil and the delivery is +// queued/rendered — drives the next_attempt_at column; otherwise NULL. 
+func updateDelivery(ctx context.Context, q queryable, record deliverydomain.Delivery, activeAttempt *attempt.Attempt) error { + templateVariables, err := marshalTemplateVariables(record.TemplateVariables) + if err != nil { + return err + } + attachments, err := marshalAttachments(record.Attachments) + if err != nil { + return err + } + + stmt := pgtable.Deliveries.UPDATE( + pgtable.Deliveries.Status, + pgtable.Deliveries.TemplateVariables, + pgtable.Deliveries.Attachments, + pgtable.Deliveries.Subject, + pgtable.Deliveries.TextBody, + pgtable.Deliveries.HTMLBody, + pgtable.Deliveries.Locale, + pgtable.Deliveries.LocaleFallbackUsed, + pgtable.Deliveries.AttemptCount, + pgtable.Deliveries.LastAttemptStatus, + pgtable.Deliveries.ProviderSummary, + pgtable.Deliveries.NextAttemptAt, + pgtable.Deliveries.UpdatedAt, + pgtable.Deliveries.SentAt, + pgtable.Deliveries.SuppressedAt, + pgtable.Deliveries.FailedAt, + pgtable.Deliveries.DeadLetteredAt, + ).SET( + string(record.Status), + templateVariables, + attachments, + record.Content.Subject, + record.Content.TextBody, + record.Content.HTMLBody, + record.Locale.String(), + record.LocaleFallbackUsed, + record.AttemptCount, + string(record.LastAttemptStatus), + record.ProviderSummary, + nextAttemptValue(record, activeAttempt), + record.UpdatedAt.UTC(), + nullableTime(record.SentAt), + nullableTime(record.SuppressedAt), + nullableTime(record.FailedAt), + nullableTime(record.DeadLetteredAt), + ).WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(record.DeliveryID.String()))) + + query, args := stmt.Sql() + result, err := q.ExecContext(ctx, query, args...) 
+ if err != nil { + return err + } + rows, err := result.RowsAffected() + if err != nil { + return err + } + if rows == 0 { + return fmt.Errorf("update delivery %q: row not found", record.DeliveryID) + } + return nil +} + +// nextAttemptValue resolves the next_attempt_at column value: the active +// attempt's scheduled_for when the delivery is queued/rendered, otherwise +// NULL. Other statuses (sending/sent/suppressed/failed/dead_letter/accepted) +// store NULL so the partial scheduler index excludes the row. +func nextAttemptValue(record deliverydomain.Delivery, activeAttempt *attempt.Attempt) any { + if activeAttempt == nil { + return nil + } + if _, ok := nextAttemptStatuses[record.Status]; !ok { + return nil + } + if activeAttempt.Status != attempt.StatusScheduled { + return nil + } + return activeAttempt.ScheduledFor.UTC() +} + +// insertAttempt writes one attempt row. +func insertAttempt(ctx context.Context, q queryable, record attempt.Attempt) error { + stmt := pgtable.Attempts.INSERT( + pgtable.Attempts.DeliveryID, + pgtable.Attempts.AttemptNo, + pgtable.Attempts.Status, + pgtable.Attempts.ScheduledFor, + pgtable.Attempts.StartedAt, + pgtable.Attempts.FinishedAt, + pgtable.Attempts.ProviderClassification, + pgtable.Attempts.ProviderSummary, + ).VALUES( + record.DeliveryID.String(), + record.AttemptNo, + string(record.Status), + record.ScheduledFor.UTC(), + nullableTime(record.StartedAt), + nullableTime(record.FinishedAt), + record.ProviderClassification, + record.ProviderSummary, + ) + + query, args := stmt.Sql() + _, err := q.ExecContext(ctx, query, args...) + return err +} + +// updateAttempt writes mutated attempt fields keyed by (delivery_id, +// attempt_no). 
+func updateAttempt(ctx context.Context, q queryable, record attempt.Attempt) error { + stmt := pgtable.Attempts.UPDATE( + pgtable.Attempts.Status, + pgtable.Attempts.ScheduledFor, + pgtable.Attempts.StartedAt, + pgtable.Attempts.FinishedAt, + pgtable.Attempts.ProviderClassification, + pgtable.Attempts.ProviderSummary, + ).SET( + string(record.Status), + record.ScheduledFor.UTC(), + nullableTime(record.StartedAt), + nullableTime(record.FinishedAt), + record.ProviderClassification, + record.ProviderSummary, + ).WHERE(pg.AND( + pgtable.Attempts.DeliveryID.EQ(pg.String(record.DeliveryID.String())), + pgtable.Attempts.AttemptNo.EQ(pg.Int(int64(record.AttemptNo))), + )) + + query, args := stmt.Sql() + result, err := q.ExecContext(ctx, query, args...) + if err != nil { + return err + } + rows, err := result.RowsAffected() + if err != nil { + return err + } + if rows == 0 { + return fmt.Errorf("update attempt %q/%d: row not found", record.DeliveryID, record.AttemptNo) + } + return nil +} + +// insertDeadLetter writes the dead_letters row for a delivery that exhausted +// retries. +func insertDeadLetter(ctx context.Context, q queryable, entry deliverydomain.DeadLetterEntry) error { + stmt := pgtable.DeadLetters.INSERT( + pgtable.DeadLetters.DeliveryID, + pgtable.DeadLetters.FinalAttemptNo, + pgtable.DeadLetters.FailureClassification, + pgtable.DeadLetters.ProviderSummary, + pgtable.DeadLetters.RecoveryHint, + pgtable.DeadLetters.CreatedAt, + ).VALUES( + entry.DeliveryID.String(), + entry.FinalAttemptNo, + entry.FailureClassification, + entry.ProviderSummary, + entry.RecoveryHint, + entry.CreatedAt.UTC(), + ) + + query, args := stmt.Sql() + _, err := q.ExecContext(ctx, query, args...) + return err +} + +// scanDeliveryRow scans the columns produced by selectColumns into a +// deliverydomain.Delivery + the auxiliary idempotency fingerprint/expiry +// values. The auxiliary fields are returned alongside so callers can +// translate them into idempotency.Record where needed. 
type deliveryAux struct {
	RequestFingerprint   string
	IdempotencyExpiresAt time.Time
	NextAttemptAt        *time.Time
}

// scanDelivery scans one deliveries row into a domain Delivery plus the
// auxiliary fingerprint/expiry/scheduler values. The envelope is NOT loaded
// here; callers combine this with loadEnvelope.
func scanDelivery(row interface {
	Scan(dest ...any) error
}) (deliverydomain.Delivery, deliveryAux, error) {
	var (
		record               deliverydomain.Delivery
		resendParent         string
		source               string
		status               string
		payloadMode          string
		templateID           string
		locale               string
		templateVariables    []byte
		attachments          []byte
		idempotencyKey       string
		lastAttemptStatusStr string
		nextAttemptAt        *time.Time
		sentAt               *time.Time
		suppressedAt         *time.Time
		failedAt             *time.Time
		deadLetteredAt       *time.Time
		idemExpiresAt        time.Time
		requestFingerprint   string
	)

	// Destination order must mirror deliverySelectColumns exactly.
	// DeliveryID's underlying type is string, so it is scanned in place.
	if err := row.Scan(
		(*string)(&record.DeliveryID),
		&resendParent,
		&source,
		&status,
		&payloadMode,
		&templateID,
		&locale,
		&record.LocaleFallbackUsed,
		&templateVariables,
		&attachments,
		&record.Content.Subject,
		&record.Content.TextBody,
		&record.Content.HTMLBody,
		&idempotencyKey,
		&requestFingerprint,
		&idemExpiresAt,
		&record.AttemptCount,
		&lastAttemptStatusStr,
		&record.ProviderSummary,
		&nextAttemptAt,
		&record.CreatedAt,
		&record.UpdatedAt,
		&sentAt,
		&suppressedAt,
		&failedAt,
		&deadLetteredAt,
	); err != nil {
		return deliverydomain.Delivery{}, deliveryAux{}, err
	}

	record.ResendParentDeliveryID = common.DeliveryID(resendParent)
	record.Source = deliverydomain.Source(source)
	record.Status = deliverydomain.Status(status)
	record.PayloadMode = deliverydomain.PayloadMode(payloadMode)
	record.TemplateID = common.TemplateID(templateID)
	record.Locale = common.Locale(locale)
	record.IdempotencyKey = common.IdempotencyKey(idempotencyKey)
	record.LastAttemptStatus = attempt.Status(lastAttemptStatusStr)
	// Timestamps are normalised to UTC so comparisons are location-free.
	record.CreatedAt = record.CreatedAt.UTC()
	record.UpdatedAt = record.UpdatedAt.UTC()
	record.SentAt = timeFromNullable(sentAt)
	record.SuppressedAt = timeFromNullable(suppressedAt)
	record.FailedAt = timeFromNullable(failedAt)
	record.DeadLetteredAt = timeFromNullable(deadLetteredAt)

	// NOTE(review): a NULL template_variables column leaves
	// record.TemplateVariables nil, whereas attachments are always
	// unmarshalled — presumably unmarshalAttachments tolerates nil input;
	// confirm.
	if templateVariables != nil {
		variables, err := unmarshalTemplateVariables(templateVariables)
		if err != nil {
			return deliverydomain.Delivery{}, deliveryAux{}, err
		}
		record.TemplateVariables = variables
	}
	atts, err := unmarshalAttachments(attachments)
	if err != nil {
		return deliverydomain.Delivery{}, deliveryAux{}, err
	}
	record.Attachments = atts

	return record, deliveryAux{
		RequestFingerprint:   requestFingerprint,
		IdempotencyExpiresAt: idemExpiresAt.UTC(),
		NextAttemptAt:        timeFromNullable(nextAttemptAt),
	}, nil
}

// loadEnvelope materialises the four envelope groups for one delivery.
func loadEnvelope(ctx context.Context, q queryable, deliveryID common.DeliveryID) (deliverydomain.Envelope, error) {
	stmt := pg.SELECT(
		pgtable.DeliveryRecipients.Kind,
		pgtable.DeliveryRecipients.Position,
		pgtable.DeliveryRecipients.Email,
	).FROM(pgtable.DeliveryRecipients).
		WHERE(pgtable.DeliveryRecipients.DeliveryID.EQ(pg.String(deliveryID.String()))).
		ORDER_BY(pgtable.DeliveryRecipients.Kind.ASC(), pgtable.DeliveryRecipients.Position.ASC())

	query, args := stmt.Sql()
	rows, err := q.QueryContext(ctx, query, args...)
	if err != nil {
		return deliverydomain.Envelope{}, err
	}
	defer rows.Close()

	var envelope deliverydomain.Envelope
	for rows.Next() {
		var (
			kind     string
			position int
			email    string
		)
		if err := rows.Scan(&kind, &position, &email); err != nil {
			return deliverydomain.Envelope{}, err
		}
		switch kind {
		case recipientKindTo:
			envelope.To = append(envelope.To, common.Email(email))
		case recipientKindCc:
			envelope.Cc = append(envelope.Cc, common.Email(email))
		case recipientKindBcc:
			envelope.Bcc = append(envelope.Bcc, common.Email(email))
		case recipientKindReplyTo:
			envelope.ReplyTo = append(envelope.ReplyTo, common.Email(email))
		default:
			// Fail closed on unexpected kinds rather than dropping rows.
			return deliverydomain.Envelope{}, fmt.Errorf("load envelope: unknown recipient kind %q", kind)
		}
	}
	if err := rows.Err(); err != nil {
		return deliverydomain.Envelope{}, err
	}
	return envelope, nil
}

// loadDeliveryByID returns the delivery referenced by deliveryID along with
// its full envelope. Returns (Delivery{}, false, nil) when the row does not
// exist.
func loadDeliveryByID(ctx context.Context, q queryable, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	stmt := pg.SELECT(deliverySelectColumns).
		FROM(pgtable.Deliveries).
		WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String())))

	query, args := stmt.Sql()
	row := q.QueryRowContext(ctx, query, args...)
	record, _, err := scanDelivery(row)
	switch {
	case errors.Is(err, sql.ErrNoRows):
		return deliverydomain.Delivery{}, false, nil
	case err != nil:
		return deliverydomain.Delivery{}, false, err
	}
	envelope, err := loadEnvelope(ctx, q, deliveryID)
	if err != nil {
		return deliverydomain.Delivery{}, false, err
	}
	record.Envelope = envelope
	return record, true, nil
}

// loadIdempotencyByScope returns the idempotency.Record for (source, key).
// Returns (Record{}, false, nil) when no delivery owns the scope.
func loadIdempotencyByScope(ctx context.Context, q queryable, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
	stmt := pg.SELECT(
		pgtable.Deliveries.DeliveryID,
		pgtable.Deliveries.RequestFingerprint,
		pgtable.Deliveries.IdempotencyExpiresAt,
		pgtable.Deliveries.CreatedAt,
	).FROM(pgtable.Deliveries).
		WHERE(pg.AND(
			pgtable.Deliveries.Source.EQ(pg.String(string(source))),
			pgtable.Deliveries.IdempotencyKey.EQ(pg.String(key.String())),
		))

	query, args := stmt.Sql()
	row := q.QueryRowContext(ctx, query, args...)

	var (
		deliveryID         string
		requestFingerprint string
		expiresAt          time.Time
		createdAt          time.Time
	)
	if err := row.Scan(&deliveryID, &requestFingerprint, &expiresAt, &createdAt); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return idempotency.Record{}, false, nil
		}
		return idempotency.Record{}, false, err
	}
	if strings.TrimSpace(requestFingerprint) == "" {
		// Resend / non-idempotent rows expose an empty fingerprint; the
		// reservation is not idempotency-scoped and must not surface as a hit.
		return idempotency.Record{}, false, nil
	}
	return idempotency.Record{
		Source:             source,
		IdempotencyKey:     key,
		DeliveryID:         common.DeliveryID(deliveryID),
		RequestFingerprint: requestFingerprint,
		CreatedAt:          createdAt.UTC(),
		ExpiresAt:          expiresAt.UTC(),
	}, true, nil
}

// loadAttempts returns the attempts of deliveryID in attempt_no ASC order.
// expectedCount lets the caller fail closed when the stored sequence has a
// gap.
func loadAttempts(ctx context.Context, q queryable, deliveryID common.DeliveryID, expectedCount int) ([]attempt.Attempt, error) {
	stmt := pg.SELECT(
		pgtable.Attempts.AttemptNo,
		pgtable.Attempts.Status,
		pgtable.Attempts.ScheduledFor,
		pgtable.Attempts.StartedAt,
		pgtable.Attempts.FinishedAt,
		pgtable.Attempts.ProviderClassification,
		pgtable.Attempts.ProviderSummary,
	).FROM(pgtable.Attempts).
		WHERE(pgtable.Attempts.DeliveryID.EQ(pg.String(deliveryID.String()))).
		ORDER_BY(pgtable.Attempts.AttemptNo.ASC())

	query, args := stmt.Sql()
	rows, err := q.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	out := make([]attempt.Attempt, 0, expectedCount)
	for rows.Next() {
		var (
			attemptNo              int
			status                 string
			scheduledFor           time.Time
			startedAt              *time.Time
			finishedAt             *time.Time
			providerClassification string
			providerSummary        string
		)
		if err := rows.Scan(
			&attemptNo, &status, &scheduledFor, &startedAt, &finishedAt,
			&providerClassification, &providerSummary,
		); err != nil {
			return nil, err
		}
		out = append(out, attempt.Attempt{
			DeliveryID:             deliveryID,
			AttemptNo:              attemptNo,
			Status:                 attempt.Status(status),
			ScheduledFor:           scheduledFor.UTC(),
			StartedAt:              timeFromNullable(startedAt),
			FinishedAt:             timeFromNullable(finishedAt),
			ProviderClassification: providerClassification,
			ProviderSummary:        providerSummary,
		})
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	// A negative expectedCount skips the count check (caller does not know
	// the count yet); a gap in the 1-based sequence always fails closed.
	if expectedCount >= 0 && len(out) != expectedCount {
		return nil, fmt.Errorf("load attempts %q: expected %d, got %d", deliveryID, expectedCount, len(out))
	}
	for index, record := range out {
		if record.AttemptNo != index+1 {
			return nil, fmt.Errorf("load attempts %q: gap at attempt %d", deliveryID, index+1)
		}
	}
	return out, nil
}

// loadDeadLetter returns the dead_letters row keyed by deliveryID.
func loadDeadLetter(ctx context.Context, q queryable, deliveryID common.DeliveryID) (deliverydomain.DeadLetterEntry, bool, error) {
	stmt := pg.SELECT(
		pgtable.DeadLetters.FinalAttemptNo,
		pgtable.DeadLetters.FailureClassification,
		pgtable.DeadLetters.ProviderSummary,
		pgtable.DeadLetters.RecoveryHint,
		pgtable.DeadLetters.CreatedAt,
	).FROM(pgtable.DeadLetters).
		WHERE(pgtable.DeadLetters.DeliveryID.EQ(pg.String(deliveryID.String())))

	query, args := stmt.Sql()
	row := q.QueryRowContext(ctx, query, args...)
	var (
		finalAttemptNo        int
		failureClassification string
		providerSummary       string
		recoveryHint          string
		createdAt             time.Time
	)
	if err := row.Scan(&finalAttemptNo, &failureClassification, &providerSummary, &recoveryHint, &createdAt); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return deliverydomain.DeadLetterEntry{}, false, nil
		}
		return deliverydomain.DeadLetterEntry{}, false, err
	}
	return deliverydomain.DeadLetterEntry{
		DeliveryID:            deliveryID,
		FinalAttemptNo:        finalAttemptNo,
		FailureClassification: failureClassification,
		ProviderSummary:       providerSummary,
		RecoveryHint:          recoveryHint,
		CreatedAt:             createdAt.UTC(),
	}, true, nil
}

// lockDelivery acquires a row-level lock (SELECT ... FOR UPDATE) on the
// deliveries row keyed by deliveryID for the lifetime of the surrounding
// transaction. A missing row is an error: callers lock before mutating.
func lockDelivery(ctx context.Context, q queryable, deliveryID common.DeliveryID) error {
	stmt := pg.SELECT(pgtable.Deliveries.DeliveryID).
		FROM(pgtable.Deliveries).
		WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(deliveryID.String()))).
		FOR(pg.UPDATE())

	query, args := stmt.Sql()
	row := q.QueryRowContext(ctx, query, args...)
	var ignored string
	if err := row.Scan(&ignored); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return fmt.Errorf("lock delivery %q: not found", deliveryID)
		}
		return fmt.Errorf("lock delivery %q: %w", deliveryID, err)
	}
	return nil
}

// loadActiveAttempt returns the attempt row identified by expectedAttemptNo.
// When expectedAttemptNo is zero, the helper falls back to the most-recent
// attempt (used by call sites that do not yet know the count).
+func loadActiveAttempt(ctx context.Context, q queryable, deliveryID common.DeliveryID, expectedAttemptNo int) (attempt.Attempt, error) { + selectColumns := []pg.Projection{ + pgtable.Attempts.AttemptNo, + pgtable.Attempts.Status, + pgtable.Attempts.ScheduledFor, + pgtable.Attempts.StartedAt, + pgtable.Attempts.FinishedAt, + pgtable.Attempts.ProviderClassification, + pgtable.Attempts.ProviderSummary, + } + + var stmt pg.SelectStatement + if expectedAttemptNo > 0 { + stmt = pg.SELECT(selectColumns[0], selectColumns[1:]...). + FROM(pgtable.Attempts). + WHERE(pg.AND( + pgtable.Attempts.DeliveryID.EQ(pg.String(deliveryID.String())), + pgtable.Attempts.AttemptNo.EQ(pg.Int(int64(expectedAttemptNo))), + )) + } else { + stmt = pg.SELECT(selectColumns[0], selectColumns[1:]...). + FROM(pgtable.Attempts). + WHERE(pgtable.Attempts.DeliveryID.EQ(pg.String(deliveryID.String()))). + ORDER_BY(pgtable.Attempts.AttemptNo.DESC()). + LIMIT(1) + } + + query, args := stmt.Sql() + row := q.QueryRowContext(ctx, query, args...) + + var ( + attemptNo int + status string + scheduledFor time.Time + startedAt *time.Time + finishedAt *time.Time + providerClassification string + providerSummary string + ) + if err := row.Scan(&attemptNo, &status, &scheduledFor, &startedAt, &finishedAt, &providerClassification, &providerSummary); err != nil { + return attempt.Attempt{}, err + } + return attempt.Attempt{ + DeliveryID: deliveryID, + AttemptNo: attemptNo, + Status: attempt.Status(status), + ScheduledFor: scheduledFor.UTC(), + StartedAt: timeFromNullable(startedAt), + FinishedAt: timeFromNullable(finishedAt), + ProviderClassification: providerClassification, + ProviderSummary: providerSummary, + }, nil +} + +// DeleteDeliveriesOlderThan removes deliveries whose created_at predates +// cutoff. Cascading FKs drop the related attempts/dead_letters/payloads/ +// recipients automatically. The helper satisfies SQLRetentionStore. 
func (store *Store) DeleteDeliveriesOlderThan(ctx context.Context, cutoff time.Time) (int64, error) {
	if store == nil {
		return 0, errors.New("delete deliveries: nil store")
	}
	operationCtx, cancel, err := store.operationContext(ctx, "delete deliveries")
	if err != nil {
		return 0, err
	}
	defer cancel()

	stmt := pgtable.Deliveries.DELETE().
		WHERE(pgtable.Deliveries.CreatedAt.LT(pg.TimestampzT(cutoff.UTC())))

	query, args := stmt.Sql()
	result, err := store.db.ExecContext(operationCtx, query, args...)
	if err != nil {
		return 0, fmt.Errorf("delete deliveries: %w", err)
	}
	rows, err := result.RowsAffected()
	if err != nil {
		return 0, fmt.Errorf("delete deliveries: rows affected: %w", err)
	}
	return rows, nil
}

// loadDeliveryPayload returns the payload bundle for deliveryID.
// Returns (nil, false, nil) when no payload row exists.
func loadDeliveryPayload(ctx context.Context, q queryable, deliveryID common.DeliveryID) ([]byte, bool, error) {
	stmt := pg.SELECT(pgtable.DeliveryPayloads.Payload).
		FROM(pgtable.DeliveryPayloads).
		WHERE(pgtable.DeliveryPayloads.DeliveryID.EQ(pg.String(deliveryID.String())))

	query, args := stmt.Sql()
	row := q.QueryRowContext(ctx, query, args...)
	var payload []byte
	if err := row.Scan(&payload); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, false, nil
		}
		return nil, false, err
	}
	return payload, true, nil
}
diff --git a/mail/internal/adapters/postgres/mailstore/generic_acceptance.go b/mail/internal/adapters/postgres/mailstore/generic_acceptance.go
new file mode 100644
index 0000000..84b8f96
--- /dev/null
+++ b/mail/internal/adapters/postgres/mailstore/generic_acceptance.go
@@ -0,0 +1,87 @@
package mailstore

import (
	"context"
	"database/sql"
	"errors"
	"fmt"

	pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
	"galaxy/mail/internal/domain/common"
	deliverydomain "galaxy/mail/internal/domain/delivery"
	"galaxy/mail/internal/domain/idempotency"
	"galaxy/mail/internal/service/acceptgenericdelivery"
)

// GenericAcceptance returns a handle that satisfies
// acceptgenericdelivery.Store. Generic and auth acceptance share the same
// idempotency / delivery read paths but the write input types differ — the
// adapter avoids a method-name conflict on Store.CreateAcceptance.
func (store *Store) GenericAcceptance() *GenericAcceptanceStore {
	return &GenericAcceptanceStore{store: store}
}

// GenericAcceptanceStore is the acceptgenericdelivery.Store handle returned
// by Store.GenericAcceptance. It defers to the umbrella store for shared
// reads.
type GenericAcceptanceStore struct {
	store *Store
}

// Compile-time interface check.
var _ acceptgenericdelivery.Store = (*GenericAcceptanceStore)(nil)

// CreateAcceptance writes one generic-delivery acceptance write set inside
// one BEGIN … COMMIT transaction. Idempotency races surface as
// acceptgenericdelivery.ErrConflict.
func (handle *GenericAcceptanceStore) CreateAcceptance(ctx context.Context, input acceptgenericdelivery.CreateAcceptanceInput) error {
	if handle == nil || handle.store == nil {
		return errors.New("create generic acceptance: nil store")
	}
	if ctx == nil {
		return errors.New("create generic acceptance: nil context")
	}
	if err := input.Validate(); err != nil {
		return fmt.Errorf("create generic acceptance: %w", err)
	}

	return handle.store.withTx(ctx, "create generic acceptance", func(ctx context.Context, tx *sql.Tx) error {
		first := input.FirstAttempt
		// A unique violation here means another request won the idempotency
		// race; surface it as the domain conflict error.
		if err := insertDelivery(ctx, tx, input.Delivery, input.Idempotency, input.Idempotency.ExpiresAt, &first); err != nil {
			if isUniqueViolation(err) {
				return acceptgenericdelivery.ErrConflict
			}
			return fmt.Errorf("create generic acceptance: insert delivery: %w", err)
		}
		if err := insertAttempt(ctx, tx, input.FirstAttempt); err != nil {
			return fmt.Errorf("create generic acceptance: insert first attempt: %w", err)
		}
		// The payload bundle is optional; a row is written only when the
		// caller supplied one.
		if input.DeliveryPayload != nil {
			payload, err := marshalDeliveryPayload(*input.DeliveryPayload)
			if err != nil {
				return fmt.Errorf("create generic acceptance: %w", err)
			}
			payloadStmt := pgtable.DeliveryPayloads.INSERT(
				pgtable.DeliveryPayloads.DeliveryID,
				pgtable.DeliveryPayloads.Payload,
			).VALUES(
				input.Delivery.DeliveryID.String(),
				payload,
			)
			payloadQuery, payloadArgs := payloadStmt.Sql()
			if _, err := tx.ExecContext(ctx, payloadQuery, payloadArgs...); err != nil {
				return fmt.Errorf("create generic acceptance: insert delivery payload: %w", err)
			}
		}
		return nil
	})
}

// GetIdempotency forwards to the umbrella store.
func (handle *GenericAcceptanceStore) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) {
	return handle.store.GetIdempotency(ctx, source, key)
}

// GetDelivery forwards to the umbrella store.
func (handle *GenericAcceptanceStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) {
	return handle.store.GetDelivery(ctx, deliveryID)
}
diff --git a/mail/internal/adapters/postgres/mailstore/harness_test.go b/mail/internal/adapters/postgres/mailstore/harness_test.go
new file mode 100644
index 0000000..f6416ef
--- /dev/null
+++ b/mail/internal/adapters/postgres/mailstore/harness_test.go
@@ -0,0 +1,202 @@
package mailstore

import (
	"context"
	"database/sql"
	"net/url"
	"os"
	"sync"
	"testing"
	"time"

	"galaxy/mail/internal/adapters/postgres/migrations"
	"galaxy/postgres"

	testcontainers "github.com/testcontainers/testcontainers-go"
	tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres"
	"github.com/testcontainers/testcontainers-go/wait"
)

// Container and role parameters for the package-scoped Postgres instance.
const (
	pkgPostgresImage    = "postgres:16-alpine"
	pkgSuperUser        = "galaxy"
	pkgSuperPassword    = "galaxy"
	pkgSuperDatabase    = "galaxy_mail"
	pkgServiceRole      = "mailservice"
	pkgServicePassword  = "mailservice"
	pkgServiceSchema    = "mail"
	pkgContainerStartup = 90 * time.Second
	pkgOperationTimeout = 10 * time.Second
)

// One container is shared across the whole package; sync.Once guards its
// lazy start and the error is replayed to every later caller.
var (
	pkgContainerOnce sync.Once
	pkgContainerErr  error
	pkgContainerEnv  *postgresEnv
)

// postgresEnv bundles the running container, the service-scoped DSN, and the
// shared connection pool.
type postgresEnv struct {
	container *tcpostgres.PostgresContainer
	dsn       string
	pool      *sql.DB
}

// ensurePostgresEnv starts the shared container on first use and skips the
// calling test when Docker is unavailable.
func ensurePostgresEnv(t testing.TB) *postgresEnv {
	t.Helper()
	pkgContainerOnce.Do(func() {
		pkgContainerEnv, pkgContainerErr = startPostgresEnv()
	})
	if pkgContainerErr != nil {
		t.Skipf("postgres container start failed (Docker unavailable?): %v", pkgContainerErr)
	}
	return pkgContainerEnv
}

// startPostgresEnv boots the container, provisions the service role/schema,
// opens the pool, and runs the embedded migrations. Any failure tears the
// container back down before returning.
func startPostgresEnv() (*postgresEnv, error) {
	ctx := context.Background()
	container, err := tcpostgres.Run(ctx, pkgPostgresImage,
		tcpostgres.WithDatabase(pkgSuperDatabase),
		tcpostgres.WithUsername(pkgSuperUser),
		tcpostgres.WithPassword(pkgSuperPassword),
		testcontainers.WithWaitStrategy(
			// Postgres logs readiness twice (initdb restart); wait for the
			// second occurrence before connecting.
			wait.ForLog("database system is ready to accept connections").
				WithOccurrence(2).
				WithStartupTimeout(pkgContainerStartup),
		),
	)
	if err != nil {
		return nil, err
	}

	baseDSN, err := container.ConnectionString(ctx, "sslmode=disable")
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	if err := provisionRoleAndSchema(ctx, baseDSN); err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	scopedDSN, err := dsnForServiceRole(baseDSN)
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	cfg := postgres.DefaultConfig()
	cfg.PrimaryDSN = scopedDSN
	cfg.OperationTimeout = pkgOperationTimeout
	pool, err := postgres.OpenPrimary(ctx, cfg)
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	if err := postgres.Ping(ctx, pool, pkgOperationTimeout); err != nil {
		_ = pool.Close()
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	if err := postgres.RunMigrations(ctx, pool, migrations.FS(), "."); err != nil {
		_ = pool.Close()
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	return &postgresEnv{
		container: container,
		dsn:       scopedDSN,
		pool:      pool,
	}, nil
}

// provisionRoleAndSchema creates the service login role and the mail schema
// (idempotently) using the superuser connection.
// NOTE(review): the role/schema names are hard-coded in the SQL below and
// must stay in sync with pkgServiceRole / pkgServicePassword /
// pkgServiceSchema above.
func provisionRoleAndSchema(ctx context.Context, baseDSN string) error {
	cfg := postgres.DefaultConfig()
	cfg.PrimaryDSN = baseDSN
	cfg.OperationTimeout = pkgOperationTimeout
	db, err := postgres.OpenPrimary(ctx, cfg)
	if err != nil {
		return err
	}
	defer func() { _ = db.Close() }()

	statements := []string{
		`DO $$ BEGIN
	IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'mailservice') THEN
	CREATE ROLE mailservice LOGIN PASSWORD 'mailservice';
	END IF;
	END $$;`,
		`CREATE SCHEMA IF NOT EXISTS mail AUTHORIZATION mailservice;`,
		`GRANT USAGE ON SCHEMA mail TO mailservice;`,
	}
	for _, statement := range statements {
		if _, err := db.ExecContext(ctx, statement); err != nil {
			return err
		}
	}
	return nil
}

// dsnForServiceRole rewrites baseDSN to authenticate as the service role with
// search_path pinned to the mail schema. Any other query parameters on the
// base DSN are intentionally dropped; only search_path and sslmode survive.
func dsnForServiceRole(baseDSN string) (string, error) {
	parsed, err := url.Parse(baseDSN)
	if err != nil {
		return "", err
	}
	values := url.Values{}
	values.Set("search_path", pkgServiceSchema)
	values.Set("sslmode", "disable")
	scoped := url.URL{
		Scheme:   parsed.Scheme,
		User:     url.UserPassword(pkgServiceRole, pkgServicePassword),
		Host:     parsed.Host,
		Path:     parsed.Path,
		RawQuery: values.Encode(),
	}
	return scoped.String(), nil
}

// newTestStore returns a Store backed by the package-scoped pool. Every
// invocation truncates the mail-owned tables so individual tests start from a
// clean slate while sharing one container start.
func newTestStore(t *testing.T) *Store {
	t.Helper()
	env := ensurePostgresEnv(t)
	truncateAll(t, env.pool)
	store, err := New(Config{DB: env.pool, OperationTimeout: pkgOperationTimeout})
	if err != nil {
		t.Fatalf("new store: %v", err)
	}
	return store
}

// truncateAll empties every mail-owned table in one statement. CASCADE covers
// FK ordering; RESTART IDENTITY resets sequences between tests.
func truncateAll(t *testing.T, db *sql.DB) {
	t.Helper()
	statement := `TRUNCATE TABLE
	malformed_commands,
	dead_letters,
	delivery_payloads,
	attempts,
	delivery_recipients,
	deliveries
	RESTART IDENTITY CASCADE`
	if _, err := db.ExecContext(context.Background(), statement); err != nil {
		t.Fatalf("truncate tables: %v", err)
	}
}

// TestMain runs first when `go test` enters the package. We drive it through
// a TestMain so the container started by the first test is shut down on the
// way out, even when individual tests panic.
+func TestMain(m *testing.M) { + code := m.Run() + if pkgContainerEnv != nil { + if pkgContainerEnv.pool != nil { + _ = pkgContainerEnv.pool.Close() + } + if pkgContainerEnv.container != nil { + _ = testcontainers.TerminateContainer(pkgContainerEnv.container) + } + } + os.Exit(code) +} diff --git a/mail/internal/adapters/postgres/mailstore/helpers.go b/mail/internal/adapters/postgres/mailstore/helpers.go new file mode 100644 index 0000000..0f63b85 --- /dev/null +++ b/mail/internal/adapters/postgres/mailstore/helpers.go @@ -0,0 +1,64 @@ +package mailstore + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + "github.com/jackc/pgx/v5/pgconn" +) + +// pgUniqueViolationCode identifies the SQLSTATE returned by PostgreSQL when +// a UNIQUE constraint is violated by INSERT or UPDATE. +const pgUniqueViolationCode = "23505" + +// isUniqueViolation reports whether err is a PostgreSQL unique-violation, +// regardless of constraint name. +func isUniqueViolation(err error) bool { + var pgErr *pgconn.PgError + if !errors.As(err, &pgErr) { + return false + } + return pgErr.Code == pgUniqueViolationCode +} + +// nullableTime returns t.UTC() when non-nil, otherwise nil for NULL columns. +func nullableTime(t *time.Time) any { + if t == nil { + return nil + } + return t.UTC() +} + +// isNoRows reports whether err is sql.ErrNoRows. +func isNoRows(err error) bool { + return errors.Is(err, sql.ErrNoRows) +} + +// timeFromNullable copies an optional *time.Time read from Postgres into a +// new pointer normalised to UTC. +func timeFromNullable(value *time.Time) *time.Time { + if value == nil { + return nil + } + utc := value.UTC() + return &utc +} + +// withTimeout derives a child context bounded by timeout and prefixes context +// errors with operation. Callers must always invoke the returned cancel. 
+func withTimeout(ctx context.Context, operation string, timeout time.Duration) (context.Context, context.CancelFunc, error) { + if ctx == nil { + return nil, nil, fmt.Errorf("%s: nil context", operation) + } + if err := ctx.Err(); err != nil { + return nil, nil, fmt.Errorf("%s: %w", operation, err) + } + if timeout <= 0 { + return nil, nil, fmt.Errorf("%s: operation timeout must be positive", operation) + } + bounded, cancel := context.WithTimeout(ctx, timeout) + return bounded, cancel, nil +} diff --git a/mail/internal/adapters/postgres/mailstore/malformed_command.go b/mail/internal/adapters/postgres/mailstore/malformed_command.go new file mode 100644 index 0000000..056f1dd --- /dev/null +++ b/mail/internal/adapters/postgres/mailstore/malformed_command.go @@ -0,0 +1,148 @@ +package mailstore + +import ( + "context" + "errors" + "fmt" + "time" + + pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table" + "galaxy/mail/internal/domain/malformedcommand" + + pg "github.com/go-jet/jet/v2/postgres" +) + +// Record stores entry idempotently by stream entry id. The helper satisfies +// worker.MalformedCommandRecorder. 
+func (store *Store) Record(ctx context.Context, entry malformedcommand.Entry) error { + if store == nil { + return errors.New("record malformed command: nil store") + } + if ctx == nil { + return errors.New("record malformed command: nil context") + } + if err := entry.Validate(); err != nil { + return fmt.Errorf("record malformed command: %w", err) + } + + rawFields, err := marshalRawFields(entry.RawFields) + if err != nil { + return fmt.Errorf("record malformed command: %w", err) + } + + operationCtx, cancel, err := store.operationContext(ctx, "record malformed command") + if err != nil { + return err + } + defer cancel() + + stmt := pgtable.MalformedCommands.INSERT( + pgtable.MalformedCommands.StreamEntryID, + pgtable.MalformedCommands.DeliveryID, + pgtable.MalformedCommands.Source, + pgtable.MalformedCommands.IdempotencyKey, + pgtable.MalformedCommands.FailureCode, + pgtable.MalformedCommands.FailureMessage, + pgtable.MalformedCommands.RawFields, + pgtable.MalformedCommands.RecordedAt, + ).VALUES( + entry.StreamEntryID, + entry.DeliveryID, + entry.Source, + entry.IdempotencyKey, + string(entry.FailureCode), + entry.FailureMessage, + rawFields, + entry.RecordedAt.UTC(), + ).ON_CONFLICT(pgtable.MalformedCommands.StreamEntryID).DO_NOTHING() + + query, args := stmt.Sql() + if _, err := store.db.ExecContext(operationCtx, query, args...); err != nil { + return fmt.Errorf("record malformed command: %w", err) + } + return nil +} + +// GetMalformedCommand loads one malformed-command entry by stream entry id. 
+func (store *Store) GetMalformedCommand(ctx context.Context, streamEntryID string) (malformedcommand.Entry, bool, error) { + if store == nil { + return malformedcommand.Entry{}, false, errors.New("get malformed command: nil store") + } + if ctx == nil { + return malformedcommand.Entry{}, false, errors.New("get malformed command: nil context") + } + operationCtx, cancel, err := store.operationContext(ctx, "get malformed command") + if err != nil { + return malformedcommand.Entry{}, false, err + } + defer cancel() + + stmt := pg.SELECT( + pgtable.MalformedCommands.DeliveryID, + pgtable.MalformedCommands.Source, + pgtable.MalformedCommands.IdempotencyKey, + pgtable.MalformedCommands.FailureCode, + pgtable.MalformedCommands.FailureMessage, + pgtable.MalformedCommands.RawFields, + pgtable.MalformedCommands.RecordedAt, + ).FROM(pgtable.MalformedCommands). + WHERE(pgtable.MalformedCommands.StreamEntryID.EQ(pg.String(streamEntryID))) + + query, args := stmt.Sql() + row := store.db.QueryRowContext(operationCtx, query, args...) 
+ var ( + deliveryID string + source string + idempotencyKey string + failureCode string + failureMessage string + rawFields []byte + ) + entry := malformedcommand.Entry{StreamEntryID: streamEntryID} + if err := row.Scan(&deliveryID, &source, &idempotencyKey, &failureCode, &failureMessage, &rawFields, &entry.RecordedAt); err != nil { + if isNoRows(err) { + return malformedcommand.Entry{}, false, nil + } + return malformedcommand.Entry{}, false, fmt.Errorf("get malformed command: %w", err) + } + entry.DeliveryID = deliveryID + entry.Source = source + entry.IdempotencyKey = idempotencyKey + entry.FailureCode = malformedcommand.FailureCode(failureCode) + entry.FailureMessage = failureMessage + entry.RecordedAt = entry.RecordedAt.UTC() + fields, err := unmarshalRawFields(rawFields) + if err != nil { + return malformedcommand.Entry{}, false, fmt.Errorf("get malformed command: %w", err) + } + entry.RawFields = fields + return entry, true, nil +} + +// DeleteMalformedCommandsOlderThan removes malformed-command rows whose +// recorded_at predates cutoff. The helper satisfies the SQLRetentionStore +// contract used by the periodic retention worker. +func (store *Store) DeleteMalformedCommandsOlderThan(ctx context.Context, cutoff time.Time) (int64, error) { + if store == nil { + return 0, errors.New("delete malformed commands: nil store") + } + operationCtx, cancel, err := store.operationContext(ctx, "delete malformed commands") + if err != nil { + return 0, err + } + defer cancel() + + stmt := pgtable.MalformedCommands.DELETE(). + WHERE(pgtable.MalformedCommands.RecordedAt.LT(pg.TimestampzT(cutoff.UTC()))) + + query, args := stmt.Sql() + result, err := store.db.ExecContext(operationCtx, query, args...) 
+ if err != nil { + return 0, fmt.Errorf("delete malformed commands: %w", err) + } + rows, err := result.RowsAffected() + if err != nil { + return 0, fmt.Errorf("delete malformed commands: rows affected: %w", err) + } + return rows, nil +} diff --git a/mail/internal/adapters/postgres/mailstore/operator.go b/mail/internal/adapters/postgres/mailstore/operator.go new file mode 100644 index 0000000..a51b26e --- /dev/null +++ b/mail/internal/adapters/postgres/mailstore/operator.go @@ -0,0 +1,306 @@ +package mailstore + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table" + "galaxy/mail/internal/domain/attempt" + "galaxy/mail/internal/domain/common" + deliverydomain "galaxy/mail/internal/domain/delivery" + "galaxy/mail/internal/domain/idempotency" + "galaxy/mail/internal/service/acceptgenericdelivery" + "galaxy/mail/internal/service/listdeliveries" + "galaxy/mail/internal/service/resenddelivery" + + pg "github.com/go-jet/jet/v2/postgres" +) + +// resendIdempotencyExpiry stores the synthetic idempotency_expires_at value +// applied to resend deliveries. Resend rows do not carry a caller-supplied +// idempotency reservation; the fingerprint is stored as the empty string and +// the loadIdempotencyByScope helper treats those rows as non-idempotent — +// the expiry is therefore irrelevant in practice but must satisfy the +// `NOT NULL > created_at` invariant used by the deliveries column. +const resendIdempotencyExpiry = 100 * 365 * 24 * time.Hour + +// maxIdempotencyExpiry is the fallback expiry duration used when no caller- +// supplied idempotency.Record reservation accompanies the write. +var maxIdempotencyExpiry = resendIdempotencyExpiry + +// GetIdempotency loads the idempotency reservation for one (source, key) +// scope. It is shared by the auth-acceptance and generic-acceptance flows. 
+func (store *Store) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) { + if store == nil { + return idempotency.Record{}, false, errors.New("get idempotency: nil store") + } + operationCtx, cancel, err := store.operationContext(ctx, "get idempotency") + if err != nil { + return idempotency.Record{}, false, err + } + defer cancel() + + record, ok, err := loadIdempotencyByScope(operationCtx, store.db, source, key) + if err != nil { + return idempotency.Record{}, false, fmt.Errorf("get idempotency: %w", err) + } + return record, ok, nil +} + +// GetDeadLetter loads the dead_letters row for deliveryID when one exists. +func (store *Store) GetDeadLetter(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.DeadLetterEntry, bool, error) { + if store == nil { + return deliverydomain.DeadLetterEntry{}, false, errors.New("get dead-letter: nil store") + } + operationCtx, cancel, err := store.operationContext(ctx, "get dead-letter") + if err != nil { + return deliverydomain.DeadLetterEntry{}, false, err + } + defer cancel() + + entry, ok, err := loadDeadLetter(operationCtx, store.db, deliveryID) + if err != nil { + return deliverydomain.DeadLetterEntry{}, false, fmt.Errorf("get dead-letter: %w", err) + } + return entry, ok, nil +} + +// GetDeliveryPayload returns the raw attachment payload bundle for deliveryID +// when one exists. 
+func (store *Store) GetDeliveryPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) { + if store == nil { + return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get delivery payload: nil store") + } + operationCtx, cancel, err := store.operationContext(ctx, "get delivery payload") + if err != nil { + return acceptgenericdelivery.DeliveryPayload{}, false, err + } + defer cancel() + + encoded, ok, err := loadDeliveryPayload(operationCtx, store.db, deliveryID) + if err != nil { + return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get delivery payload: %w", err) + } + if !ok { + return acceptgenericdelivery.DeliveryPayload{}, false, nil + } + payload, err := unmarshalDeliveryPayload(deliveryID, encoded) + if err != nil { + return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get delivery payload: %w", err) + } + return payload, true, nil +} + +// ListAttempts loads exactly expectedCount attempts in attempt_no ASC order +// for deliveryID. A gap in the stored sequence surfaces as an error so +// operator reads fail closed on durable-state corruption. 
+func (store *Store) ListAttempts(ctx context.Context, deliveryID common.DeliveryID, expectedCount int) ([]attempt.Attempt, error) { + if store == nil { + return nil, errors.New("list attempts: nil store") + } + if expectedCount < 0 { + return nil, errors.New("list attempts: negative expected count") + } + if expectedCount == 0 { + return []attempt.Attempt{}, nil + } + if err := deliveryID.Validate(); err != nil { + return nil, fmt.Errorf("list attempts: %w", err) + } + operationCtx, cancel, err := store.operationContext(ctx, "list attempts") + if err != nil { + return nil, err + } + defer cancel() + + out, err := loadAttempts(operationCtx, store.db, deliveryID, expectedCount) + if err != nil { + return nil, fmt.Errorf("list attempts: %w", err) + } + return out, nil +} + +// List returns one filtered ordered page of delivery records keyed by +// (created_at DESC, delivery_id DESC). Filters compose into SQL WHERE +// clauses — every supported filter is index-friendly. +func (store *Store) List(ctx context.Context, input listdeliveries.Input) (listdeliveries.Result, error) { + if store == nil { + return listdeliveries.Result{}, errors.New("list deliveries: nil store") + } + if err := input.Validate(); err != nil { + return listdeliveries.Result{}, fmt.Errorf("list deliveries: %w", err) + } + limit := input.Limit + if limit <= 0 { + limit = listdeliveries.DefaultLimit + } + + operationCtx, cancel, err := store.operationContext(ctx, "list deliveries") + if err != nil { + return listdeliveries.Result{}, err + } + defer cancel() + + if input.Cursor != nil { + cursorStmt := pg.SELECT(pgtable.Deliveries.CreatedAt). + FROM(pgtable.Deliveries). + WHERE(pgtable.Deliveries.DeliveryID.EQ(pg.String(input.Cursor.DeliveryID.String()))) + cursorQuery, cursorArgs := cursorStmt.Sql() + row := store.db.QueryRowContext(operationCtx, cursorQuery, cursorArgs...) 
+ var createdAt sql.NullTime + if err := row.Scan(&createdAt); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return listdeliveries.Result{}, listdeliveries.ErrInvalidCursor + } + return listdeliveries.Result{}, fmt.Errorf("list deliveries: validate cursor: %w", err) + } + if !createdAt.Valid || !createdAt.Time.UTC().Equal(input.Cursor.CreatedAt.UTC()) { + return listdeliveries.Result{}, listdeliveries.ErrInvalidCursor + } + } + + conditions := make([]pg.BoolExpression, 0, 8) + + if input.Cursor != nil { + cursorCreatedAt := pg.TimestampzT(input.Cursor.CreatedAt.UTC()) + cursorID := pg.String(input.Cursor.DeliveryID.String()) + // (created_at, delivery_id) < (cursorCreatedAt, cursorID) expressed as + // the equivalent OR/AND expansion since jet has no row-comparison + // builder. + conditions = append(conditions, pg.OR( + pgtable.Deliveries.CreatedAt.LT(cursorCreatedAt), + pg.AND( + pgtable.Deliveries.CreatedAt.EQ(cursorCreatedAt), + pgtable.Deliveries.DeliveryID.LT(cursorID), + ), + )) + } + if input.Filters.Status != "" { + conditions = append(conditions, pgtable.Deliveries.Status.EQ(pg.String(string(input.Filters.Status)))) + } + if input.Filters.Source != "" { + conditions = append(conditions, pgtable.Deliveries.Source.EQ(pg.String(string(input.Filters.Source)))) + } + if !input.Filters.TemplateID.IsZero() { + conditions = append(conditions, pgtable.Deliveries.TemplateID.EQ(pg.String(input.Filters.TemplateID.String()))) + } + if !input.Filters.IdempotencyKey.IsZero() { + conditions = append(conditions, pgtable.Deliveries.IdempotencyKey.EQ(pg.String(input.Filters.IdempotencyKey.String()))) + } + if input.Filters.FromCreatedAt != nil { + conditions = append(conditions, pgtable.Deliveries.CreatedAt.GT_EQ(pg.TimestampzT(input.Filters.FromCreatedAt.UTC()))) + } + if input.Filters.ToCreatedAt != nil { + conditions = append(conditions, pgtable.Deliveries.CreatedAt.LT_EQ(pg.TimestampzT(input.Filters.ToCreatedAt.UTC()))) + } + if !input.Filters.Recipient.IsZero() { 
+ recipientSub := pg.SELECT(pgtable.DeliveryRecipients.DeliveryID). + FROM(pgtable.DeliveryRecipients). + WHERE(pg.AND( + pgtable.DeliveryRecipients.Kind.NOT_EQ(pg.String(recipientKindReplyTo)), + pg.LOWER(pgtable.DeliveryRecipients.Email).EQ(pg.LOWER(pg.String(input.Filters.Recipient.String()))), + )) + conditions = append(conditions, pgtable.Deliveries.DeliveryID.IN(recipientSub)) + } + + stmt := pg.SELECT(deliverySelectColumns). + FROM(pgtable.Deliveries) + + if len(conditions) > 0 { + stmt = stmt.WHERE(pg.AND(conditions...)) + } + stmt = stmt. + ORDER_BY(pgtable.Deliveries.CreatedAt.DESC(), pgtable.Deliveries.DeliveryID.DESC()). + LIMIT(int64(limit + 1)) + + query, args := stmt.Sql() + rows, err := store.db.QueryContext(operationCtx, query, args...) + if err != nil { + return listdeliveries.Result{}, fmt.Errorf("list deliveries: %w", err) + } + defer rows.Close() + + items := make([]deliverydomain.Delivery, 0, limit+1) + for rows.Next() { + record, _, err := scanDelivery(rows) + if err != nil { + return listdeliveries.Result{}, fmt.Errorf("list deliveries: scan: %w", err) + } + envelope, err := loadEnvelope(operationCtx, store.db, record.DeliveryID) + if err != nil { + return listdeliveries.Result{}, fmt.Errorf("list deliveries: load envelope: %w", err) + } + record.Envelope = envelope + items = append(items, record) + } + if err := rows.Err(); err != nil { + return listdeliveries.Result{}, fmt.Errorf("list deliveries: %w", err) + } + + result := listdeliveries.Result{} + if len(items) > limit { + next := listdeliveries.Cursor{ + CreatedAt: items[limit-1].CreatedAt.UTC(), + DeliveryID: items[limit-1].DeliveryID, + } + result.NextCursor = &next + items = items[:limit] + } + result.Items = items + return result, nil +} + +// CreateResend writes the cloned delivery, its first attempt, and the +// optional cloned payload bundle inside one transaction. 
Resend deliveries
+// share the (source, idempotency_key) UNIQUE constraint, so a duplicate
+// clone surfaces as a wrapped unique-violation error — but the resend service
+// generates fresh idempotency keys, so a conflict here always indicates a
+// caller bug rather than user-replay.
+func (store *Store) CreateResend(ctx context.Context, input resenddelivery.CreateResendInput) error {
+ if store == nil {
+ return errors.New("create resend: nil store")
+ }
+ if ctx == nil {
+ return errors.New("create resend: nil context")
+ }
+ if err := input.Validate(); err != nil {
+ return fmt.Errorf("create resend: %w", err)
+ }
+
+ return store.withTx(ctx, "create resend", func(ctx context.Context, tx *sql.Tx) error {
+ // Use the delivery's own CreatedAt as a deterministic finite expiry —
+ // the resend has no caller-supplied idempotency.Record reservation.
+ fallbackExpiresAt := input.Delivery.CreatedAt.Add(maxIdempotencyExpiry)
+ first := input.FirstAttempt
+ if err := insertDelivery(ctx, tx, input.Delivery, idempotency.Record{}, fallbackExpiresAt, &first); err != nil {
+ if isUniqueViolation(err) {
+ return fmt.Errorf("create resend: %w", err)
+ }
+ return fmt.Errorf("create resend: insert delivery: %w", err)
+ }
+ if err := insertAttempt(ctx, tx, input.FirstAttempt); err != nil {
+ return fmt.Errorf("create resend: insert first attempt: %w", err)
+ }
+ if input.DeliveryPayload != nil {
+ payload, err := marshalDeliveryPayload(*input.DeliveryPayload)
+ if err != nil {
+ return fmt.Errorf("create resend: %w", err)
+ }
+ payloadStmt := pgtable.DeliveryPayloads.INSERT(
+ pgtable.DeliveryPayloads.DeliveryID,
+ pgtable.DeliveryPayloads.Payload,
+ ).VALUES(
+ input.Delivery.DeliveryID.String(),
+ payload,
+ )
+ payloadQuery, payloadArgs := payloadStmt.Sql()
+ if _, err := tx.ExecContext(ctx, payloadQuery, payloadArgs...); err != nil {
+ return fmt.Errorf("create resend: insert delivery payload: %w", err)
+ }
+ }
+ return nil
+ })
+}
diff --git a/mail/internal/adapters/postgres/mailstore/render.go b/mail/internal/adapters/postgres/mailstore/render.go
new file mode 100644
index 0000000..8e1292b
--- /dev/null
+++ b/mail/internal/adapters/postgres/mailstore/render.go
@@ -0,0 +1,101 @@
+package mailstore
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+
+ pgtable "galaxy/mail/internal/adapters/postgres/jet/mail/table"
+ "galaxy/mail/internal/service/renderdelivery"
+
+ pg "github.com/go-jet/jet/v2/postgres"
+)
+
+// RenderDelivery returns a handle that satisfies renderdelivery.Store.
+func (store *Store) RenderDelivery() *RenderDeliveryStore {
+ return &RenderDeliveryStore{store: store}
+}
+
+// RenderDeliveryStore is the renderdelivery.Store handle returned by
+// Store.RenderDelivery.
+type RenderDeliveryStore struct {
+ store *Store
+}
+
+var _ renderdelivery.Store = (*RenderDeliveryStore)(nil)
+
+// MarkRendered persists the rendered subject, bodies, and locale_fallback
+// flag for a queued template-mode delivery and transitions its status to
+// rendered. The active attempt remains scheduled with its existing
+// scheduled_for so the scheduler picks the row up via next_attempt_at.
+func (handle *RenderDeliveryStore) MarkRendered(ctx context.Context, input renderdelivery.MarkRenderedInput) error {
+ if handle == nil || handle.store == nil {
+ return errors.New("mark rendered: nil store")
+ }
+ if ctx == nil {
+ return errors.New("mark rendered: nil context")
+ }
+ if err := input.Validate(); err != nil {
+ return fmt.Errorf("mark rendered: %w", err)
+ }
+
+ return handle.store.withTx(ctx, "mark rendered", func(ctx context.Context, tx *sql.Tx) error {
+ // Lock the active attempt for the duration of the update so a
+ // concurrent attempt-claim blocks on this row instead of racing it.
+ lockStmt := pg.SELECT(pgtable.Attempts.ScheduledFor).
+ FROM(pgtable.Attempts).
+ WHERE(pg.AND( + pgtable.Attempts.DeliveryID.EQ(pg.String(input.Delivery.DeliveryID.String())), + pgtable.Attempts.AttemptNo.EQ(pg.Int(int64(input.Delivery.AttemptCount))), + )). + FOR(pg.UPDATE()) + + lockQuery, lockArgs := lockStmt.Sql() + row := tx.QueryRowContext(ctx, lockQuery, lockArgs...) + var ignored any + if err := row.Scan(&ignored); err != nil { + return fmt.Errorf("mark rendered: lock active attempt: %w", err) + } + if err := lockDelivery(ctx, tx, input.Delivery.DeliveryID); err != nil { + return fmt.Errorf("mark rendered: %w", err) + } + + activeAttempt, err := loadActiveAttempt(ctx, tx, input.Delivery.DeliveryID, input.Delivery.AttemptCount) + if err != nil { + return fmt.Errorf("mark rendered: load active attempt: %w", err) + } + if err := updateDelivery(ctx, tx, input.Delivery, &activeAttempt); err != nil { + return fmt.Errorf("mark rendered: update delivery: %w", err) + } + return nil + }) +} + +// MarkRenderFailed persists one classified terminal render failure. The +// active attempt becomes terminal (`render_failed`) and the delivery becomes +// `failed`. 
+func (handle *RenderDeliveryStore) MarkRenderFailed(ctx context.Context, input renderdelivery.MarkRenderFailedInput) error { + if handle == nil || handle.store == nil { + return errors.New("mark render failed: nil store") + } + if ctx == nil { + return errors.New("mark render failed: nil context") + } + if err := input.Validate(); err != nil { + return fmt.Errorf("mark render failed: %w", err) + } + + return handle.store.withTx(ctx, "mark render failed", func(ctx context.Context, tx *sql.Tx) error { + if err := lockDelivery(ctx, tx, input.Delivery.DeliveryID); err != nil { + return fmt.Errorf("mark render failed: %w", err) + } + if err := updateAttempt(ctx, tx, input.Attempt); err != nil { + return fmt.Errorf("mark render failed: update attempt: %w", err) + } + if err := updateDelivery(ctx, tx, input.Delivery, nil); err != nil { + return fmt.Errorf("mark render failed: update delivery: %w", err) + } + return nil + }) +} diff --git a/mail/internal/adapters/postgres/mailstore/store.go b/mail/internal/adapters/postgres/mailstore/store.go new file mode 100644 index 0000000..11ed7c7 --- /dev/null +++ b/mail/internal/adapters/postgres/mailstore/store.go @@ -0,0 +1,119 @@ +// Package mailstore implements the PostgreSQL-backed source-of-truth +// persistence used by Mail Service. +// +// The package owns the on-disk shape of the `mail` schema (defined in +// `galaxy/mail/internal/adapters/postgres/migrations`) and translates the +// schema-agnostic Store interfaces declared by each `internal/service/*` use +// case into concrete `database/sql` operations driven by the pgx driver. +// Atomic composite operations (acceptance, render, attempt commit, resend) +// execute inside explicit `BEGIN … COMMIT` transactions; the attempt +// scheduler's claim path uses `SELECT … FOR UPDATE SKIP LOCKED` to coordinate +// across multiple worker processes. +// +// Stage 4 of `PG_PLAN.md` migrates Mail Service away from Redis-backed +// durable state. 
The inbound `mail:delivery_commands` Redis Stream and its +// consumer offset remain on Redis; the store is no longer aware of them. +package mailstore + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" +) + +// Config configures one PostgreSQL-backed mail store instance. The store does +// not own the underlying *sql.DB lifecycle: the caller (typically the service +// runtime) opens, instruments, migrates, and closes the pool. The store only +// borrows the pool and bounds individual round trips with OperationTimeout. +type Config struct { + // DB stores the connection pool the store uses for every query. + DB *sql.DB + + // OperationTimeout bounds one round trip. The store creates a derived + // context for each operation so callers cannot starve the pool with an + // unbounded ctx. Multi-statement transactions inherit this bound for the + // whole BEGIN … COMMIT span. + OperationTimeout time.Duration +} + +// Store persists Mail Service durable state in PostgreSQL and exposes the +// per-use-case Store interfaces required by acceptance, render, execution, +// operator listing, and the attempt scheduler. +type Store struct { + db *sql.DB + operationTimeout time.Duration +} + +// New constructs one PostgreSQL-backed mail store from cfg. +func New(cfg Config) (*Store, error) { + if cfg.DB == nil { + return nil, errors.New("new postgres mail store: db must not be nil") + } + if cfg.OperationTimeout <= 0 { + return nil, errors.New("new postgres mail store: operation timeout must be positive") + } + return &Store{ + db: cfg.DB, + operationTimeout: cfg.OperationTimeout, + }, nil +} + +// Close is a no-op for the PostgreSQL-backed store: the connection pool is +// owned by the caller (the runtime) and closed once the runtime shuts down. +// The accessor remains so the runtime wiring can treat the store like the +// previous Redis-backed implementation. 
+func (store *Store) Close() error { + return nil +} + +// Ping verifies that the configured PostgreSQL backend is reachable. It runs +// `db.PingContext` under the configured operation timeout. +func (store *Store) Ping(ctx context.Context) error { + operationCtx, cancel, err := withTimeout(ctx, "ping postgres mail store", store.operationTimeout) + if err != nil { + return err + } + defer cancel() + + if err := store.db.PingContext(operationCtx); err != nil { + return fmt.Errorf("ping postgres mail store: %w", err) + } + return nil +} + +// withTx runs fn inside a BEGIN … COMMIT transaction bounded by the store's +// operation timeout. It rolls back on any error or panic and returns whatever +// fn returned. The transaction uses the default isolation level (`READ +// COMMITTED`); per-row locking is achieved through `SELECT … FOR UPDATE` +// issued inside fn. +func (store *Store) withTx(ctx context.Context, operation string, fn func(ctx context.Context, tx *sql.Tx) error) error { + operationCtx, cancel, err := withTimeout(ctx, operation, store.operationTimeout) + if err != nil { + return err + } + defer cancel() + + tx, err := store.db.BeginTx(operationCtx, nil) + if err != nil { + return fmt.Errorf("%s: begin: %w", operation, err) + } + + if err := fn(operationCtx, tx); err != nil { + _ = tx.Rollback() + return err + } + + if err := tx.Commit(); err != nil { + return fmt.Errorf("%s: commit: %w", operation, err) + } + return nil +} + +// operationContext bounds one read or write that does not need a transaction +// envelope (single statement). It mirrors store.withTx for non-transactional +// callers. 
+func (store *Store) operationContext(ctx context.Context, operation string) (context.Context, context.CancelFunc, error) { + return withTimeout(ctx, operation, store.operationTimeout) +} diff --git a/mail/internal/adapters/postgres/mailstore/store_test.go b/mail/internal/adapters/postgres/mailstore/store_test.go new file mode 100644 index 0000000..d1f2a47 --- /dev/null +++ b/mail/internal/adapters/postgres/mailstore/store_test.go @@ -0,0 +1,586 @@ +package mailstore + +import ( + "context" + "errors" + "reflect" + "testing" + "time" + + "galaxy/mail/internal/domain/attempt" + "galaxy/mail/internal/domain/common" + deliverydomain "galaxy/mail/internal/domain/delivery" + "galaxy/mail/internal/domain/idempotency" + "galaxy/mail/internal/domain/malformedcommand" + "galaxy/mail/internal/service/acceptauthdelivery" + "galaxy/mail/internal/service/acceptgenericdelivery" + "galaxy/mail/internal/service/executeattempt" + "galaxy/mail/internal/service/listdeliveries" + "galaxy/mail/internal/service/renderdelivery" + "galaxy/mail/internal/service/resenddelivery" +) + +const ( + fixtureDeliveryID common.DeliveryID = "delivery-001" + fixtureKey common.IdempotencyKey = "key-001" + fixtureFingerprint = "sha256:abcdef" + fixtureRecipient common.Email = "user@example.com" +) + +func fixtureNow() time.Time { + return time.Date(2026, time.April, 26, 12, 0, 0, 0, time.UTC) +} + +func fixtureAuthDelivery(id common.DeliveryID, key common.IdempotencyKey, status deliverydomain.Status) deliverydomain.Delivery { + now := fixtureNow() + record := deliverydomain.Delivery{ + DeliveryID: id, + Source: deliverydomain.SourceAuthSession, + PayloadMode: deliverydomain.PayloadModeRendered, + Envelope: deliverydomain.Envelope{To: []common.Email{fixtureRecipient}}, + Content: deliverydomain.Content{Subject: "Login code", TextBody: "Your code is 123456"}, + IdempotencyKey: key, + Status: status, + AttemptCount: 1, + CreatedAt: now, + UpdatedAt: now, + } + if status == deliverydomain.StatusSuppressed { 
+ record.AttemptCount = 0 + record.SuppressedAt = &now + } + return record +} + +func fixtureGenericDelivery(id common.DeliveryID, key common.IdempotencyKey) deliverydomain.Delivery { + now := fixtureNow() + return deliverydomain.Delivery{ + DeliveryID: id, + Source: deliverydomain.SourceNotification, + PayloadMode: deliverydomain.PayloadModeTemplate, + TemplateID: common.TemplateID("generic-news"), + Locale: common.Locale("en"), + TemplateVariables: map[string]any{"name": "Alice"}, + Envelope: deliverydomain.Envelope{To: []common.Email{fixtureRecipient}, ReplyTo: []common.Email{"reply@example.com"}}, + Attachments: []common.AttachmentMetadata{{Filename: "f.txt", ContentType: "text/plain", SizeBytes: 5}}, + IdempotencyKey: key, + Status: deliverydomain.StatusQueued, + AttemptCount: 1, + CreatedAt: now, + UpdatedAt: now, + } +} + +func fixtureFirstAttempt(id common.DeliveryID, attemptNo int) attempt.Attempt { + now := fixtureNow().Add(time.Minute) + return attempt.Attempt{ + DeliveryID: id, + AttemptNo: attemptNo, + Status: attempt.StatusScheduled, + ScheduledFor: now, + } +} + +func fixtureIdempotency(source deliverydomain.Source, id common.DeliveryID, key common.IdempotencyKey) idempotency.Record { + now := fixtureNow() + return idempotency.Record{ + Source: source, + IdempotencyKey: key, + DeliveryID: id, + RequestFingerprint: fixtureFingerprint, + CreatedAt: now, + ExpiresAt: now.Add(7 * 24 * time.Hour), + } +} + +func TestPing(t *testing.T) { + store := newTestStore(t) + if err := store.Ping(context.Background()); err != nil { + t.Fatalf("ping: %v", err) + } +} + +func TestAuthAcceptanceCreate_GetIdempotency_GetDelivery(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued) + first := fixtureFirstAttempt(delivery.DeliveryID, 1) + idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey) + + if err := 
store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{ + Delivery: delivery, + FirstAttempt: &first, + Idempotency: idem, + }); err != nil { + t.Fatalf("create acceptance: %v", err) + } + + got, ok, err := store.GetIdempotency(ctx, delivery.Source, delivery.IdempotencyKey) + if err != nil { + t.Fatalf("get idempotency: %v", err) + } + if !ok { + t.Fatal("idempotency not found") + } + if got.DeliveryID != delivery.DeliveryID || got.RequestFingerprint != fixtureFingerprint { + t.Fatalf("idempotency mismatch: %+v", got) + } + + loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID) + if err != nil { + t.Fatalf("get delivery: %v", err) + } + if !ok { + t.Fatal("delivery not found") + } + if loaded.DeliveryID != delivery.DeliveryID || loaded.Status != deliverydomain.StatusQueued { + t.Fatalf("delivery mismatch: %+v", loaded) + } + if !reflect.DeepEqual(loaded.Envelope.To, []common.Email{fixtureRecipient}) { + t.Fatalf("envelope.to mismatch: %+v", loaded.Envelope) + } +} + +func TestAuthAcceptanceConflict(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued) + first := fixtureFirstAttempt(delivery.DeliveryID, 1) + idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey) + + if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{ + Delivery: delivery, + FirstAttempt: &first, + Idempotency: idem, + }); err != nil { + t.Fatalf("first create: %v", err) + } + + dup := delivery + dup.DeliveryID = "delivery-002" + dupAttempt := fixtureFirstAttempt(dup.DeliveryID, 1) + dupIdem := idem + dupIdem.DeliveryID = dup.DeliveryID + + err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{ + Delivery: dup, + FirstAttempt: &dupAttempt, + Idempotency: dupIdem, + }) + if !errors.Is(err, acceptauthdelivery.ErrConflict) { + t.Fatalf("expected acceptauthdelivery.ErrConflict, got 
%v", err) + } +} + +func TestGenericAcceptanceCreate_GetDeliveryPayload(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + delivery := fixtureGenericDelivery(fixtureDeliveryID, fixtureKey) + first := fixtureFirstAttempt(delivery.DeliveryID, 1) + idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey) + payload := &acceptgenericdelivery.DeliveryPayload{ + DeliveryID: delivery.DeliveryID, + Attachments: []acceptgenericdelivery.AttachmentPayload{{ + Filename: "f.txt", + ContentType: "text/plain", + ContentBase64: "aGVsbG8=", // "hello" + SizeBytes: 5, + }}, + } + + handle := store.GenericAcceptance() + if err := handle.CreateAcceptance(ctx, acceptgenericdelivery.CreateAcceptanceInput{ + Delivery: delivery, + FirstAttempt: first, + DeliveryPayload: payload, + Idempotency: idem, + }); err != nil { + t.Fatalf("create generic acceptance: %v", err) + } + + got, ok, err := store.GetDeliveryPayload(ctx, delivery.DeliveryID) + if err != nil { + t.Fatalf("get delivery payload: %v", err) + } + if !ok { + t.Fatal("payload not found") + } + if got.DeliveryID != delivery.DeliveryID || len(got.Attachments) != 1 { + t.Fatalf("payload mismatch: %+v", got) + } + if got.Attachments[0].ContentBase64 != "aGVsbG8=" { + t.Fatalf("payload base64 mismatch: %+v", got.Attachments[0]) + } +} + +func TestSchedulerClaimAndCommit(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued) + first := fixtureFirstAttempt(delivery.DeliveryID, 1) + idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey) + if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{ + Delivery: delivery, + FirstAttempt: &first, + Idempotency: idem, + }); err != nil { + t.Fatalf("create acceptance: %v", err) + } + + scheduler := store.AttemptExecution() + now := first.ScheduledFor.Add(time.Second) + 
ids, err := scheduler.NextDueDeliveryIDs(ctx, now, 10) + if err != nil { + t.Fatalf("next due: %v", err) + } + if len(ids) != 1 || ids[0] != delivery.DeliveryID { + t.Fatalf("next due ids: %+v", ids) + } + + claimed, ok, err := scheduler.ClaimDueAttempt(ctx, delivery.DeliveryID, now) + if err != nil { + t.Fatalf("claim due: %v", err) + } + if !ok { + t.Fatal("claim due: not found") + } + if claimed.Delivery.Status != deliverydomain.StatusSending { + t.Fatalf("expected sending, got %q", claimed.Delivery.Status) + } + if claimed.Attempt.Status != attempt.StatusInProgress { + t.Fatalf("expected in_progress, got %q", claimed.Attempt.Status) + } + + // After claim, the row should not be picked up again. + again, err := scheduler.NextDueDeliveryIDs(ctx, now.Add(time.Second), 10) + if err != nil { + t.Fatalf("next due (after claim): %v", err) + } + if len(again) != 0 { + t.Fatalf("expected zero due deliveries after claim, got %+v", again) + } + + completed := claimed.Attempt + finishedAt := now.Add(time.Second) + completed.Status = attempt.StatusProviderAccepted + completed.FinishedAt = &finishedAt + completed.ProviderClassification = "accepted" + completed.ProviderSummary = "ok" + + finalDelivery := claimed.Delivery + finalDelivery.Status = deliverydomain.StatusSent + finalDelivery.LastAttemptStatus = attempt.StatusProviderAccepted + finalDelivery.SentAt = &finishedAt + finalDelivery.UpdatedAt = finishedAt + finalDelivery.ProviderSummary = "ok" + + if err := scheduler.Commit(ctx, executeattempt.CommitStateInput{ + Delivery: finalDelivery, + Attempt: completed, + }); err != nil { + t.Fatalf("commit attempt: %v", err) + } + + loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID) + if err != nil || !ok { + t.Fatalf("get delivery after commit: ok=%v err=%v", ok, err) + } + if loaded.Status != deliverydomain.StatusSent { + t.Fatalf("expected sent, got %q", loaded.Status) + } +} + +func TestRenderMarkRendered(t *testing.T) { + store := newTestStore(t) + ctx := 
context.Background() + + delivery := fixtureGenericDelivery(fixtureDeliveryID, fixtureKey) + first := fixtureFirstAttempt(delivery.DeliveryID, 1) + idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey) + if err := store.GenericAcceptance().CreateAcceptance(ctx, acceptgenericdelivery.CreateAcceptanceInput{ + Delivery: delivery, + FirstAttempt: first, + Idempotency: idem, + }); err != nil { + t.Fatalf("create acceptance: %v", err) + } + + rendered := delivery + rendered.Status = deliverydomain.StatusRendered + rendered.Content = deliverydomain.Content{Subject: "Hello Alice", TextBody: "Hi"} + rendered.UpdatedAt = fixtureNow().Add(time.Second) + + if err := store.RenderDelivery().MarkRendered(ctx, renderdelivery.MarkRenderedInput{Delivery: rendered}); err != nil { + t.Fatalf("mark rendered: %v", err) + } + + loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID) + if err != nil || !ok { + t.Fatalf("get delivery: ok=%v err=%v", ok, err) + } + if loaded.Status != deliverydomain.StatusRendered { + t.Fatalf("expected rendered, got %q", loaded.Status) + } + if loaded.Content.Subject != "Hello Alice" { + t.Fatalf("subject mismatch: %q", loaded.Content.Subject) + } +} + +func TestListDeliveriesPaging(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + for i := range 3 { + key := common.IdempotencyKey([]byte{'k', '0' + byte(i)}) + id := common.DeliveryID([]byte{'d', '0' + byte(i)}) + delivery := fixtureAuthDelivery(id, key, deliverydomain.StatusQueued) + // Stagger created_at so listing order is deterministic. 
+ delivery.CreatedAt = fixtureNow().Add(time.Duration(i) * time.Second) + delivery.UpdatedAt = delivery.CreatedAt + first := fixtureFirstAttempt(id, 1) + first.ScheduledFor = delivery.CreatedAt.Add(time.Minute) + idem := fixtureIdempotency(delivery.Source, id, key) + idem.CreatedAt = delivery.CreatedAt + idem.ExpiresAt = delivery.CreatedAt.Add(7 * 24 * time.Hour) + if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{ + Delivery: delivery, + FirstAttempt: &first, + Idempotency: idem, + }); err != nil { + t.Fatalf("create %d: %v", i, err) + } + } + + page1, err := store.List(ctx, listdeliveries.Input{Limit: 2}) + if err != nil { + t.Fatalf("list page 1: %v", err) + } + if len(page1.Items) != 2 || page1.NextCursor == nil { + t.Fatalf("page 1 unexpected: items=%d cursor=%v", len(page1.Items), page1.NextCursor) + } + if page1.Items[0].DeliveryID != "d2" || page1.Items[1].DeliveryID != "d1" { + t.Fatalf("page 1 ordering: %+v", []common.DeliveryID{page1.Items[0].DeliveryID, page1.Items[1].DeliveryID}) + } + + page2, err := store.List(ctx, listdeliveries.Input{Limit: 2, Cursor: page1.NextCursor}) + if err != nil { + t.Fatalf("list page 2: %v", err) + } + if len(page2.Items) != 1 || page2.NextCursor != nil { + t.Fatalf("page 2 unexpected: items=%d cursor=%v", len(page2.Items), page2.NextCursor) + } + if page2.Items[0].DeliveryID != "d0" { + t.Fatalf("page 2 expected d0, got %s", page2.Items[0].DeliveryID) + } +} + +func TestListAttemptsAndDeadLetter(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + delivery := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued) + first := fixtureFirstAttempt(delivery.DeliveryID, 1) + idem := fixtureIdempotency(delivery.Source, delivery.DeliveryID, delivery.IdempotencyKey) + if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{ + Delivery: delivery, + FirstAttempt: &first, + Idempotency: idem, + }); err != nil { + t.Fatalf("create 
acceptance: %v", err) + } + + // Claim and commit a transport_failed → next attempt scheduled (delivery + // stays queued); then claim attempt 2 and commit dead-letter. + scheduler := store.AttemptExecution() + now := first.ScheduledFor.Add(time.Second) + claimed1, ok, err := scheduler.ClaimDueAttempt(ctx, delivery.DeliveryID, now) + if err != nil || !ok { + t.Fatalf("claim attempt 1: ok=%v err=%v", ok, err) + } + + finishedAt1 := now.Add(time.Second) + terminal1 := claimed1.Attempt + terminal1.Status = attempt.StatusTransportFailed + terminal1.FinishedAt = &finishedAt1 + terminal1.ProviderClassification = "transport_failed" + + nextAttempt := attempt.Attempt{ + DeliveryID: delivery.DeliveryID, + AttemptNo: 2, + Status: attempt.StatusScheduled, + ScheduledFor: finishedAt1.Add(5 * time.Minute), + } + + delivery2 := claimed1.Delivery + delivery2.Status = deliverydomain.StatusQueued + delivery2.LastAttemptStatus = attempt.StatusTransportFailed + delivery2.AttemptCount = 2 + delivery2.UpdatedAt = finishedAt1 + + if err := scheduler.Commit(ctx, executeattempt.CommitStateInput{ + Delivery: delivery2, + Attempt: terminal1, + NextAttempt: &nextAttempt, + }); err != nil { + t.Fatalf("commit attempt 1: %v", err) + } + + // Claim attempt 2. 
+ now2 := nextAttempt.ScheduledFor.Add(time.Second) + claimed2, ok, err := scheduler.ClaimDueAttempt(ctx, delivery.DeliveryID, now2) + if err != nil || !ok { + t.Fatalf("claim attempt 2: ok=%v err=%v", ok, err) + } + + finishedAt2 := now2.Add(time.Second) + terminal2 := claimed2.Attempt + terminal2.Status = attempt.StatusTransportFailed + terminal2.FinishedAt = &finishedAt2 + terminal2.ProviderClassification = "retry_exhausted" + + dlEntry := &deliverydomain.DeadLetterEntry{ + DeliveryID: delivery.DeliveryID, + FinalAttemptNo: 2, + FailureClassification: "retry_exhausted", + CreatedAt: finishedAt2, + } + + delivery3 := claimed2.Delivery + delivery3.Status = deliverydomain.StatusDeadLetter + delivery3.LastAttemptStatus = attempt.StatusTransportFailed + delivery3.DeadLetteredAt = &finishedAt2 + delivery3.UpdatedAt = finishedAt2 + + if err := scheduler.Commit(ctx, executeattempt.CommitStateInput{ + Delivery: delivery3, + Attempt: terminal2, + DeadLetter: dlEntry, + }); err != nil { + t.Fatalf("commit attempt 2: %v", err) + } + + loaded, ok, err := store.GetDelivery(ctx, delivery.DeliveryID) + if err != nil || !ok { + t.Fatalf("get delivery: ok=%v err=%v", ok, err) + } + if loaded.Status != deliverydomain.StatusDeadLetter { + t.Fatalf("expected dead_letter, got %q", loaded.Status) + } + + dl, ok, err := store.GetDeadLetter(ctx, delivery.DeliveryID) + if err != nil || !ok { + t.Fatalf("get dead-letter: ok=%v err=%v", ok, err) + } + if dl.FailureClassification != "retry_exhausted" { + t.Fatalf("dead-letter mismatch: %+v", dl) + } + + attempts, err := store.ListAttempts(ctx, delivery.DeliveryID, loaded.AttemptCount) + if err != nil { + t.Fatalf("list attempts: %v", err) + } + if len(attempts) != 2 { + t.Fatalf("expected 2 attempts, got %d", len(attempts)) + } + if attempts[0].AttemptNo != 1 || attempts[1].AttemptNo != 2 { + t.Fatalf("attempt sequence: %+v", attempts) + } +} + +func TestMalformedCommandRecord(t *testing.T) { + store := newTestStore(t) + ctx := 
context.Background() + + entry := malformedcommand.Entry{ + StreamEntryID: "1234-0", + DeliveryID: "delivery-x", + Source: "notification", + IdempotencyKey: "k", + FailureCode: malformedcommand.FailureCodeInvalidPayload, + FailureMessage: "missing required field", + RawFields: map[string]any{"raw": "value"}, + RecordedAt: fixtureNow(), + } + if err := store.Record(ctx, entry); err != nil { + t.Fatalf("record malformed: %v", err) + } + // Idempotent re-record: same entry should not error. + if err := store.Record(ctx, entry); err != nil { + t.Fatalf("re-record malformed: %v", err) + } + + got, ok, err := store.GetMalformedCommand(ctx, entry.StreamEntryID) + if err != nil || !ok { + t.Fatalf("get malformed: ok=%v err=%v", ok, err) + } + if got.FailureCode != malformedcommand.FailureCodeInvalidPayload { + t.Fatalf("failure code mismatch: %q", got.FailureCode) + } +} + +func TestResendCreate(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + parent := fixtureAuthDelivery(fixtureDeliveryID, fixtureKey, deliverydomain.StatusQueued) + parentAttempt := fixtureFirstAttempt(parent.DeliveryID, 1) + parentIdem := fixtureIdempotency(parent.Source, parent.DeliveryID, parent.IdempotencyKey) + if err := store.CreateAcceptance(ctx, acceptauthdelivery.CreateAcceptanceInput{ + Delivery: parent, + FirstAttempt: &parentAttempt, + Idempotency: parentIdem, + }); err != nil { + t.Fatalf("create parent: %v", err) + } + + cloneID := common.DeliveryID("clone-001") + cloneIdempKey := common.IdempotencyKey("resend-clone-001") + now := fixtureNow().Add(time.Hour) + clone := deliverydomain.Delivery{ + DeliveryID: cloneID, + ResendParentDeliveryID: parent.DeliveryID, + Source: deliverydomain.SourceOperatorResend, + PayloadMode: deliverydomain.PayloadModeRendered, + Envelope: parent.Envelope, + Content: parent.Content, + IdempotencyKey: cloneIdempKey, + Status: deliverydomain.StatusQueued, + AttemptCount: 1, + CreatedAt: now, + UpdatedAt: now, + } + cloneAttempt := 
attempt.Attempt{ + DeliveryID: cloneID, + AttemptNo: 1, + Status: attempt.StatusScheduled, + ScheduledFor: now.Add(time.Minute), + } + + if err := store.CreateResend(ctx, resenddelivery.CreateResendInput{ + Delivery: clone, + FirstAttempt: cloneAttempt, + }); err != nil { + t.Fatalf("create resend: %v", err) + } + + loaded, ok, err := store.GetDelivery(ctx, cloneID) + if err != nil || !ok { + t.Fatalf("get clone: ok=%v err=%v", ok, err) + } + if loaded.ResendParentDeliveryID != parent.DeliveryID { + t.Fatalf("expected resend parent %q, got %q", parent.DeliveryID, loaded.ResendParentDeliveryID) + } + + // Resend deliveries do not surface as idempotency hits. + _, ok, err = store.GetIdempotency(ctx, deliverydomain.SourceOperatorResend, cloneIdempKey) + if err != nil { + t.Fatalf("get idempotency for resend: %v", err) + } + if ok { + t.Fatal("resend delivery should not surface as idempotency hit") + } +} diff --git a/mail/internal/adapters/postgres/migrations/00001_init.sql b/mail/internal/adapters/postgres/migrations/00001_init.sql new file mode 100644 index 0000000..a8ffc95 --- /dev/null +++ b/mail/internal/adapters/postgres/migrations/00001_init.sql @@ -0,0 +1,134 @@ +-- +goose Up +-- deliveries holds one durable record per accepted logical mail delivery. +-- The (source, idempotency_key) UNIQUE constraint replaces the previous Redis +-- idempotency keyspace: the durable row IS the idempotency reservation. +-- next_attempt_at is populated for deliveries whose active attempt is due in +-- the future and drives the attempt scheduler's `FOR UPDATE SKIP LOCKED` pull. 
+CREATE TABLE deliveries ( + delivery_id text PRIMARY KEY, + resend_parent_delivery_id text NOT NULL DEFAULT '', + source text NOT NULL, + status text NOT NULL, + payload_mode text NOT NULL, + template_id text NOT NULL DEFAULT '', + locale text NOT NULL DEFAULT '', + locale_fallback_used boolean NOT NULL DEFAULT false, + template_variables jsonb, + attachments jsonb, + subject text NOT NULL DEFAULT '', + text_body text NOT NULL DEFAULT '', + html_body text NOT NULL DEFAULT '', + idempotency_key text NOT NULL, + request_fingerprint text NOT NULL, + idempotency_expires_at timestamptz NOT NULL, + attempt_count integer NOT NULL DEFAULT 0, + last_attempt_status text NOT NULL DEFAULT '', + provider_summary text NOT NULL DEFAULT '', + next_attempt_at timestamptz, + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, + sent_at timestamptz, + suppressed_at timestamptz, + failed_at timestamptz, + dead_lettered_at timestamptz, + CONSTRAINT deliveries_idempotency_unique UNIQUE (source, idempotency_key) +); + +-- Drives the scheduler's due-attempt pull. The partial predicate keeps the +-- index narrow: rows in terminal status (sent/suppressed/failed/dead_letter) +-- never appear here. +CREATE INDEX deliveries_due_idx + ON deliveries (next_attempt_at) + WHERE next_attempt_at IS NOT NULL; + +-- Drives the recovery pass (deliveries currently held by an in-progress +-- attempt whose worker may have crashed). +CREATE INDEX deliveries_sending_idx + ON deliveries (status) + WHERE status = 'sending'; + +-- Newest-first listing index used by the operator delivery list surface. +CREATE INDEX deliveries_listing_idx + ON deliveries (created_at DESC, delivery_id DESC); + +-- Coarse status / source / template filters used by the operator listing. 
+CREATE INDEX deliveries_status_idx ON deliveries (status); +CREATE INDEX deliveries_source_idx ON deliveries (source); +CREATE INDEX deliveries_template_id_idx ON deliveries (template_id) WHERE template_id <> ''; + +-- delivery_recipients normalises the SMTP envelope so future recipient- +-- filtered listing slots in without touching the deliveries row layout. +-- 'reply_to' addresses are stored for round-trip fidelity but excluded from +-- the email index per the prior keyspace rule. +CREATE TABLE delivery_recipients ( + delivery_id text NOT NULL REFERENCES deliveries(delivery_id) ON DELETE CASCADE, + kind text NOT NULL, + position integer NOT NULL, + email text NOT NULL, + PRIMARY KEY (delivery_id, kind, position), + CONSTRAINT delivery_recipients_kind_check + CHECK (kind IN ('to', 'cc', 'bcc', 'reply_to')) +); + +CREATE INDEX delivery_recipients_email_idx + ON delivery_recipients (email) + WHERE kind <> 'reply_to'; + +-- attempts stores the immutable execution history of one delivery. attempt_no +-- is monotonically increasing per delivery, starting at 1. +CREATE TABLE attempts ( + delivery_id text NOT NULL REFERENCES deliveries(delivery_id) ON DELETE CASCADE, + attempt_no integer NOT NULL, + status text NOT NULL, + scheduled_for timestamptz NOT NULL, + started_at timestamptz, + finished_at timestamptz, + provider_classification text NOT NULL DEFAULT '', + provider_summary text NOT NULL DEFAULT '', + PRIMARY KEY (delivery_id, attempt_no) +); + +-- dead_letters holds the operator-visible record for one delivery that +-- exhausted automated handling. 
+CREATE TABLE dead_letters ( + delivery_id text PRIMARY KEY REFERENCES deliveries(delivery_id) ON DELETE CASCADE, + final_attempt_no integer NOT NULL, + failure_classification text NOT NULL, + provider_summary text NOT NULL DEFAULT '', + recovery_hint text NOT NULL DEFAULT '', + created_at timestamptz NOT NULL +); + +-- delivery_payloads stores the raw generic-delivery attachment bundle +-- referenced by the delivery row. The payload column carries the +-- acceptgenericdelivery.DeliveryPayload JSON shape; raw attachment bytes +-- remain inside that JSON value as base64 strings. +CREATE TABLE delivery_payloads ( + delivery_id text PRIMARY KEY REFERENCES deliveries(delivery_id) ON DELETE CASCADE, + payload jsonb NOT NULL +); + +-- malformed_commands stores operator-visible records for stream commands the +-- intake validator could not accept. +CREATE TABLE malformed_commands ( + stream_entry_id text PRIMARY KEY, + delivery_id text NOT NULL DEFAULT '', + source text NOT NULL DEFAULT '', + idempotency_key text NOT NULL DEFAULT '', + failure_code text NOT NULL, + failure_message text NOT NULL, + raw_fields jsonb NOT NULL, + recorded_at timestamptz NOT NULL +); + +-- Newest-first listing index used by the operator malformed-command list. 
+CREATE INDEX malformed_commands_listing_idx + ON malformed_commands (recorded_at DESC, stream_entry_id DESC); + +-- +goose Down +DROP TABLE IF EXISTS malformed_commands; +DROP TABLE IF EXISTS delivery_payloads; +DROP TABLE IF EXISTS dead_letters; +DROP TABLE IF EXISTS attempts; +DROP TABLE IF EXISTS delivery_recipients; +DROP TABLE IF EXISTS deliveries; diff --git a/mail/internal/adapters/postgres/migrations/migrations.go b/mail/internal/adapters/postgres/migrations/migrations.go new file mode 100644 index 0000000..37e5b51 --- /dev/null +++ b/mail/internal/adapters/postgres/migrations/migrations.go @@ -0,0 +1,19 @@ +// Package migrations exposes the embedded goose migration files used by Mail +// Service to provision its `mail` schema in PostgreSQL. +// +// The embedded filesystem is consumed by `pkg/postgres.RunMigrations` during +// mail-service startup and by `cmd/jetgen` when regenerating the +// `internal/adapters/postgres/jet/` code against a transient PostgreSQL +// instance. +package migrations + +import "embed" + +//go:embed *.sql +var fs embed.FS + +// FS returns the embedded filesystem containing every numbered goose +// migration shipped with Mail Service. +func FS() embed.FS { + return fs +} diff --git a/mail/internal/adapters/redisstate/atomic_writer.go b/mail/internal/adapters/redisstate/atomic_writer.go deleted file mode 100644 index b74930b..0000000 --- a/mail/internal/adapters/redisstate/atomic_writer.go +++ /dev/null @@ -1,501 +0,0 @@ -package redisstate - -import ( - "context" - "errors" - "fmt" - "time" - - "galaxy/mail/internal/domain/attempt" - deliverydomain "galaxy/mail/internal/domain/delivery" - "galaxy/mail/internal/domain/idempotency" - "galaxy/mail/internal/service/acceptgenericdelivery" - - "github.com/redis/go-redis/v9" -) - -// AtomicWriter performs the minimal multi-key Redis mutations that later Mail -// Service acceptance flows will need. 
-type AtomicWriter struct { - client *redis.Client - keyspace Keyspace -} - -// CreateAcceptanceInput describes the frozen write set required to durably -// accept one delivery into Redis-backed state. -type CreateAcceptanceInput struct { - // Delivery stores the accepted delivery record. - Delivery deliverydomain.Delivery - - // FirstAttempt stores the optional first scheduled attempt record. - FirstAttempt *attempt.Attempt - - // DeliveryPayload stores the optional raw attachment payload bundle. - DeliveryPayload *acceptgenericdelivery.DeliveryPayload - - // Idempotency stores the optional idempotency reservation to create - // together with the delivery. Resend clone creation can omit it. - Idempotency *idempotency.Record -} - -// MarkRenderedInput describes the durable mutation applied after successful -// template materialization. -type MarkRenderedInput struct { - // Delivery stores the rendered delivery record. - Delivery deliverydomain.Delivery -} - -// Validate reports whether input contains one rendered template delivery. -func (input MarkRenderedInput) Validate() error { - if err := input.Delivery.Validate(); err != nil { - return fmt.Errorf("delivery: %w", err) - } - if input.Delivery.PayloadMode != deliverydomain.PayloadModeTemplate { - return fmt.Errorf("delivery payload mode must be %q", deliverydomain.PayloadModeTemplate) - } - if input.Delivery.Status != deliverydomain.StatusRendered { - return fmt.Errorf("delivery status must be %q", deliverydomain.StatusRendered) - } - - return nil -} - -// MarkRenderFailedInput describes the durable mutation applied after one -// classified render failure. -type MarkRenderFailedInput struct { - // Delivery stores the failed delivery record. - Delivery deliverydomain.Delivery - - // Attempt stores the terminal render-failed attempt. - Attempt attempt.Attempt -} - -// Validate reports whether input contains one failed delivery and its -// terminal render-failed attempt. 
-func (input MarkRenderFailedInput) Validate() error { - if err := input.Delivery.Validate(); err != nil { - return fmt.Errorf("delivery: %w", err) - } - if err := input.Attempt.Validate(); err != nil { - return fmt.Errorf("attempt: %w", err) - } - if input.Delivery.PayloadMode != deliverydomain.PayloadModeTemplate { - return fmt.Errorf("delivery payload mode must be %q", deliverydomain.PayloadModeTemplate) - } - if input.Delivery.Status != deliverydomain.StatusFailed { - return fmt.Errorf("delivery status must be %q", deliverydomain.StatusFailed) - } - if input.Attempt.Status != attempt.StatusRenderFailed { - return fmt.Errorf("attempt status must be %q", attempt.StatusRenderFailed) - } - if input.Attempt.DeliveryID != input.Delivery.DeliveryID { - return errors.New("attempt delivery id must match delivery id") - } - if input.Delivery.LastAttemptStatus != attempt.StatusRenderFailed { - return fmt.Errorf("delivery last attempt status must be %q", attempt.StatusRenderFailed) - } - - return nil -} - -// Validate reports whether CreateAcceptanceInput is internally consistent. 
-func (input CreateAcceptanceInput) Validate() error { - if err := input.Delivery.Validate(); err != nil { - return fmt.Errorf("delivery: %w", err) - } - - switch { - case input.FirstAttempt == nil: - if input.Delivery.Status != deliverydomain.StatusSuppressed { - return errors.New("first attempt must not be nil unless delivery status is suppressed") - } - case input.Delivery.Status == deliverydomain.StatusSuppressed: - return errors.New("suppressed delivery must not create first attempt") - default: - if err := input.FirstAttempt.Validate(); err != nil { - return fmt.Errorf("first attempt: %w", err) - } - if input.FirstAttempt.DeliveryID != input.Delivery.DeliveryID { - return errors.New("first attempt delivery id must match delivery id") - } - if input.FirstAttempt.Status != attempt.StatusScheduled { - return fmt.Errorf("first attempt status must be %q", attempt.StatusScheduled) - } - } - - if input.DeliveryPayload != nil { - if err := input.DeliveryPayload.Validate(); err != nil { - return fmt.Errorf("delivery payload: %w", err) - } - if input.DeliveryPayload.DeliveryID != input.Delivery.DeliveryID { - return errors.New("delivery payload delivery id must match delivery id") - } - } - - if input.Idempotency == nil { - return nil - } - - if err := input.Idempotency.Validate(); err != nil { - return fmt.Errorf("idempotency: %w", err) - } - if input.Idempotency.DeliveryID != input.Delivery.DeliveryID { - return errors.New("idempotency delivery id must match delivery id") - } - if input.Idempotency.Source != input.Delivery.Source { - return errors.New("idempotency source must match delivery source") - } - if input.Idempotency.IdempotencyKey != input.Delivery.IdempotencyKey { - return errors.New("idempotency key must match delivery idempotency key") - } - if input.Idempotency.ExpiresAt.Sub(input.Idempotency.CreatedAt) != IdempotencyTTL { - return fmt.Errorf("idempotency retention must equal %s", IdempotencyTTL) - } - - return nil -} - -// NewAtomicWriter constructs a 
low-level Redis mutation helper. -func NewAtomicWriter(client *redis.Client) (*AtomicWriter, error) { - if client == nil { - return nil, errors.New("new redis atomic writer: nil client") - } - - return &AtomicWriter{ - client: client, - keyspace: Keyspace{}, - }, nil -} - -// CreateAcceptance stores one delivery, the optional first scheduled attempt, -// the optional first schedule entry, the delivery-level secondary indexes, and -// an optional idempotency record in one optimistic Redis transaction. -func (writer *AtomicWriter) CreateAcceptance(ctx context.Context, input CreateAcceptanceInput) error { - if writer == nil || writer.client == nil { - return errors.New("create acceptance in redis: nil writer") - } - if ctx == nil { - return errors.New("create acceptance in redis: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("create acceptance in redis: %w", err) - } - - deliveryPayload, err := MarshalDelivery(input.Delivery) - if err != nil { - return fmt.Errorf("create acceptance in redis: %w", err) - } - var ( - attemptKey string - attemptPayload []byte - deliveryPayloadKey string - deliveryPayloadBytes []byte - scheduleScore float64 - idempotencyKey string - idempotencyPayload []byte - idempotencyTTL time.Duration - ) - if input.FirstAttempt != nil { - attemptPayload, err = MarshalAttempt(*input.FirstAttempt) - if err != nil { - return fmt.Errorf("create acceptance in redis: %w", err) - } - attemptKey = writer.keyspace.Attempt(input.FirstAttempt.DeliveryID, input.FirstAttempt.AttemptNo) - scheduleScore = ScheduledForScore(input.FirstAttempt.ScheduledFor) - } - if input.DeliveryPayload != nil { - deliveryPayloadBytes, err = MarshalDeliveryPayload(*input.DeliveryPayload) - if err != nil { - return fmt.Errorf("create acceptance in redis: %w", err) - } - deliveryPayloadKey = writer.keyspace.DeliveryPayload(input.DeliveryPayload.DeliveryID) - } - if input.Idempotency != nil { - idempotencyPayload, err = 
MarshalIdempotency(*input.Idempotency) - if err != nil { - return fmt.Errorf("create acceptance in redis: %w", err) - } - idempotencyTTL, err = ttlUntil(input.Idempotency.ExpiresAt) - if err != nil { - return fmt.Errorf("create acceptance in redis: %w", err) - } - idempotencyKey = writer.keyspace.Idempotency(input.Idempotency.Source, input.Idempotency.IdempotencyKey) - } - - deliveryKey := writer.keyspace.Delivery(input.Delivery.DeliveryID) - watchKeys := []string{deliveryKey} - if attemptKey != "" { - watchKeys = append(watchKeys, attemptKey) - } - if deliveryPayloadKey != "" { - watchKeys = append(watchKeys, deliveryPayloadKey) - } - if idempotencyKey != "" { - watchKeys = append(watchKeys, idempotencyKey) - } - - indexKeys := writer.keyspace.DeliveryIndexKeys(input.Delivery) - createdAtScore := CreatedAtScore(input.Delivery.CreatedAt) - deliveryMember := input.Delivery.DeliveryID.String() - - watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error { - for _, key := range watchKeys { - if err := ensureKeyAbsent(ctx, tx, key); err != nil { - return fmt.Errorf("create acceptance in redis: %w", err) - } - } - - _, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, deliveryKey, deliveryPayload, DeliveryTTL) - if attemptKey != "" { - pipe.Set(ctx, attemptKey, attemptPayload, AttemptTTL) - } - if deliveryPayloadKey != "" { - pipe.Set(ctx, deliveryPayloadKey, deliveryPayloadBytes, DeliveryTTL) - } - if idempotencyKey != "" { - pipe.Set(ctx, idempotencyKey, idempotencyPayload, idempotencyTTL) - } - if attemptKey != "" { - pipe.ZAdd(ctx, writer.keyspace.AttemptSchedule(), redis.Z{ - Score: scheduleScore, - Member: deliveryMember, - }) - } - for _, indexKey := range indexKeys { - pipe.ZAdd(ctx, indexKey, redis.Z{ - Score: createdAtScore, - Member: deliveryMember, - }) - } - - return nil - }) - if err != nil { - return fmt.Errorf("create acceptance in redis: %w", err) - } - - return nil - }, watchKeys...) 
- - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("create acceptance in redis: %w", ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// MarkRendered stores the successful materialization result for one queued -// template delivery and updates the delivery-status secondary index -// atomically. -func (writer *AtomicWriter) MarkRendered(ctx context.Context, input MarkRenderedInput) error { - if writer == nil || writer.client == nil { - return errors.New("mark rendered in redis: nil writer") - } - if ctx == nil { - return errors.New("mark rendered in redis: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("mark rendered in redis: %w", err) - } - - deliveryKey := writer.keyspace.Delivery(input.Delivery.DeliveryID) - deliveryPayload, err := MarshalDelivery(input.Delivery) - if err != nil { - return fmt.Errorf("mark rendered in redis: %w", err) - } - - watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error { - currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey) - if err != nil { - return fmt.Errorf("mark rendered in redis: %w", err) - } - if currentDelivery.Status != deliverydomain.StatusQueued { - return fmt.Errorf("mark rendered in redis: %w", ErrConflict) - } - - deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL) - if err != nil { - return fmt.Errorf("mark rendered in redis: %w", err) - } - - createdAtScore := CreatedAtScore(currentDelivery.CreatedAt) - deliveryMember := input.Delivery.DeliveryID.String() - - _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL) - pipe.ZRem(ctx, writer.keyspace.StatusIndex(currentDelivery.Status), deliveryMember) - pipe.ZAdd(ctx, writer.keyspace.StatusIndex(input.Delivery.Status), redis.Z{ - Score: createdAtScore, - Member: deliveryMember, - }) - return nil - }) - if err != nil { - return fmt.Errorf("mark rendered in redis: 
%w", err) - } - - return nil - }, deliveryKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("mark rendered in redis: %w", ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// MarkRenderFailed stores one terminal render-failed attempt together with -// the owning failed delivery and updates the delivery-status secondary index -// atomically. -func (writer *AtomicWriter) MarkRenderFailed(ctx context.Context, input MarkRenderFailedInput) error { - if writer == nil || writer.client == nil { - return errors.New("mark render failed in redis: nil writer") - } - if ctx == nil { - return errors.New("mark render failed in redis: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("mark render failed in redis: %w", err) - } - - deliveryKey := writer.keyspace.Delivery(input.Delivery.DeliveryID) - attemptKey := writer.keyspace.Attempt(input.Attempt.DeliveryID, input.Attempt.AttemptNo) - - deliveryPayload, err := MarshalDelivery(input.Delivery) - if err != nil { - return fmt.Errorf("mark render failed in redis: %w", err) - } - attemptPayload, err := MarshalAttempt(input.Attempt) - if err != nil { - return fmt.Errorf("mark render failed in redis: %w", err) - } - - watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error { - currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey) - if err != nil { - return fmt.Errorf("mark render failed in redis: %w", err) - } - currentAttempt, err := loadAttemptFromTx(ctx, tx, attemptKey) - if err != nil { - return fmt.Errorf("mark render failed in redis: %w", err) - } - if currentDelivery.Status != deliverydomain.StatusQueued { - return fmt.Errorf("mark render failed in redis: %w", ErrConflict) - } - if currentAttempt.Status != attempt.StatusScheduled { - return fmt.Errorf("mark render failed in redis: %w", ErrConflict) - } - - deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL) - if err != nil { - return 
fmt.Errorf("mark render failed in redis: %w", err) - } - attemptTTL, err := ttlForExistingKey(ctx, tx, attemptKey, AttemptTTL) - if err != nil { - return fmt.Errorf("mark render failed in redis: %w", err) - } - - createdAtScore := CreatedAtScore(currentDelivery.CreatedAt) - deliveryMember := input.Delivery.DeliveryID.String() - - _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL) - pipe.Set(ctx, attemptKey, attemptPayload, attemptTTL) - pipe.ZRem(ctx, writer.keyspace.StatusIndex(currentDelivery.Status), deliveryMember) - pipe.ZAdd(ctx, writer.keyspace.StatusIndex(input.Delivery.Status), redis.Z{ - Score: createdAtScore, - Member: deliveryMember, - }) - pipe.ZRem(ctx, writer.keyspace.AttemptSchedule(), deliveryMember) - return nil - }) - if err != nil { - return fmt.Errorf("mark render failed in redis: %w", err) - } - - return nil - }, deliveryKey, attemptKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("mark render failed in redis: %w", ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -func ensureKeyAbsent(ctx context.Context, tx *redis.Tx, key string) error { - exists, err := tx.Exists(ctx, key).Result() - if err != nil { - return err - } - if exists > 0 { - return ErrConflict - } - - return nil -} - -func loadDeliveryFromTx(ctx context.Context, tx *redis.Tx, key string) (deliverydomain.Delivery, error) { - payload, err := tx.Get(ctx, key).Bytes() - switch { - case errors.Is(err, redis.Nil): - return deliverydomain.Delivery{}, ErrConflict - case err != nil: - return deliverydomain.Delivery{}, err - } - - record, err := UnmarshalDelivery(payload) - if err != nil { - return deliverydomain.Delivery{}, err - } - - return record, nil -} - -func loadAttemptFromTx(ctx context.Context, tx *redis.Tx, key string) (attempt.Attempt, error) { - payload, err := tx.Get(ctx, key).Bytes() - switch { - case errors.Is(err, redis.Nil): - 
return attempt.Attempt{}, ErrConflict - case err != nil: - return attempt.Attempt{}, err - } - - record, err := UnmarshalAttempt(payload) - if err != nil { - return attempt.Attempt{}, err - } - - return record, nil -} - -func ttlForExistingKey(ctx context.Context, tx *redis.Tx, key string, fallback time.Duration) (time.Duration, error) { - ttl, err := tx.PTTL(ctx, key).Result() - if err != nil { - return 0, err - } - if ttl <= 0 { - return fallback, nil - } - - return ttl, nil -} - -func ttlUntil(expiresAt time.Time) (time.Duration, error) { - ttl := time.Until(expiresAt) - if ttl <= 0 { - return 0, errors.New("idempotency expires at must be in the future") - } - - return ttl, nil -} diff --git a/mail/internal/adapters/redisstate/atomic_writer_test.go b/mail/internal/adapters/redisstate/atomic_writer_test.go deleted file mode 100644 index f790e07..0000000 --- a/mail/internal/adapters/redisstate/atomic_writer_test.go +++ /dev/null @@ -1,429 +0,0 @@ -package redisstate - -import ( - "context" - "errors" - "sync" - "testing" - "time" - - "galaxy/mail/internal/domain/attempt" - "galaxy/mail/internal/domain/common" - deliverydomain "galaxy/mail/internal/domain/delivery" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/require" -) - -func TestAtomicWriterCreateAcceptanceStoresStateWithoutIdempotencyRecord(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - writer, err := NewAtomicWriter(client) - require.NoError(t, err) - - record := validDelivery(t) - record.Source = deliverydomain.SourceNotification - record.ResendParentDeliveryID = "" - record.Status = deliverydomain.StatusQueued - record.SentAt = nil - record.LocaleFallbackUsed = false - record.UpdatedAt = record.CreatedAt.Add(time.Minute) - require.NoError(t, record.Validate()) - - firstAttempt := validScheduledAttempt(t, 
record.DeliveryID) - input := CreateAcceptanceInput{ - Delivery: record, - FirstAttempt: ptr(firstAttempt), - DeliveryPayload: ptr(validDeliveryPayload(t, record.DeliveryID)), - } - - require.NoError(t, writer.CreateAcceptance(context.Background(), input)) - - storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes() - require.NoError(t, err) - decodedDelivery, err := UnmarshalDelivery(storedDelivery) - require.NoError(t, err) - require.Equal(t, record, decodedDelivery) - - storedAttempt, err := client.Get(context.Background(), Keyspace{}.Attempt(record.DeliveryID, firstAttempt.AttemptNo)).Bytes() - require.NoError(t, err) - decodedAttempt, err := UnmarshalAttempt(storedAttempt) - require.NoError(t, err) - require.Equal(t, firstAttempt, decodedAttempt) - - storedDeliveryPayload, err := client.Get(context.Background(), Keyspace{}.DeliveryPayload(record.DeliveryID)).Bytes() - require.NoError(t, err) - decodedDeliveryPayload, err := UnmarshalDeliveryPayload(storedDeliveryPayload) - require.NoError(t, err) - require.Equal(t, *input.DeliveryPayload, decodedDeliveryPayload) - - scheduledDeliveries, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result() - require.NoError(t, err) - require.Equal(t, []string{record.DeliveryID.String()}, scheduledDeliveries) - - recipientMembers, err := client.ZRange(context.Background(), Keyspace{}.RecipientIndex(record.Envelope.To[0]), 0, -1).Result() - require.NoError(t, err) - require.Equal(t, []string{record.DeliveryID.String()}, recipientMembers) - - idempotencyMembers, err := client.ZRange(context.Background(), Keyspace{}.IdempotencyIndex(record.Source, record.IdempotencyKey), 0, -1).Result() - require.NoError(t, err) - require.Equal(t, []string{record.DeliveryID.String()}, idempotencyMembers) -} - -func TestAtomicWriterCreateAcceptanceDetectsDuplicateIdempotencyRace(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := 
redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - writer, err := NewAtomicWriter(client) - require.NoError(t, err) - - record := validDelivery(t) - record.Source = deliverydomain.SourceNotification - record.ResendParentDeliveryID = "" - record.Status = deliverydomain.StatusQueued - record.SentAt = nil - record.LocaleFallbackUsed = false - record.UpdatedAt = record.CreatedAt.Add(time.Minute) - require.NoError(t, record.Validate()) - - input := CreateAcceptanceInput{ - Delivery: record, - FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)), - DeliveryPayload: ptr(validDeliveryPayload(t, record.DeliveryID)), - Idempotency: ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)), - } - - const contenders = 8 - - var ( - wg sync.WaitGroup - successes int - conflicts int - mu sync.Mutex - ) - - for range contenders { - wg.Add(1) - go func() { - defer wg.Done() - - err := writer.CreateAcceptance(context.Background(), input) - - mu.Lock() - defer mu.Unlock() - switch { - case err == nil: - successes++ - case errors.Is(err, ErrConflict): - conflicts++ - default: - t.Errorf("unexpected error: %v", err) - } - }() - } - wg.Wait() - - require.Equal(t, 1, successes) - require.Equal(t, contenders-1, conflicts) - - require.True(t, server.Exists(Keyspace{}.Delivery(record.DeliveryID))) - require.NotNil(t, input.FirstAttempt) - require.True(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, input.FirstAttempt.AttemptNo))) - require.True(t, server.Exists(Keyspace{}.DeliveryPayload(record.DeliveryID))) - require.True(t, server.Exists(Keyspace{}.Idempotency(record.Source, record.IdempotencyKey))) - - scheduleCard, err := client.ZCard(context.Background(), Keyspace{}.AttemptSchedule()).Result() - require.NoError(t, err) - require.EqualValues(t, 1, scheduleCard) - - createdAtCard, err := client.ZCard(context.Background(), Keyspace{}.CreatedAtIndex()).Result() - 
require.NoError(t, err) - require.EqualValues(t, 1, createdAtCard) - - idempotencyCard, err := client.ZCard(context.Background(), Keyspace{}.IdempotencyIndex(record.Source, record.IdempotencyKey)).Result() - require.NoError(t, err) - require.EqualValues(t, 1, idempotencyCard) -} - -func TestCreateAcceptanceInputValidateRejectsMismatchedDeliveryPayload(t *testing.T) { - t.Parallel() - - record := validDelivery(t) - record.Source = deliverydomain.SourceNotification - record.ResendParentDeliveryID = "" - record.Status = deliverydomain.StatusQueued - record.SentAt = nil - record.LocaleFallbackUsed = false - record.UpdatedAt = record.CreatedAt.Add(time.Minute) - require.NoError(t, record.Validate()) - - payload := validDeliveryPayload(t, common.DeliveryID("delivery-other")) - input := CreateAcceptanceInput{ - Delivery: record, - FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)), - DeliveryPayload: &payload, - Idempotency: ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)), - } - - err := input.Validate() - require.Error(t, err) - require.ErrorContains(t, err, "delivery payload delivery id must match delivery id") -} - -func TestCreateAcceptanceInputValidateRejectsMismatchedIdempotency(t *testing.T) { - t.Parallel() - - record := validDelivery(t) - record.Source = deliverydomain.SourceNotification - record.ResendParentDeliveryID = "" - record.Status = deliverydomain.StatusQueued - record.SentAt = nil - record.LocaleFallbackUsed = false - record.UpdatedAt = record.CreatedAt.Add(time.Minute) - require.NoError(t, record.Validate()) - - input := CreateAcceptanceInput{ - Delivery: record, - FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)), - Idempotency: ptr(validIdempotencyRecord(t, deliverydomain.SourceAuthSession, record.DeliveryID, record.IdempotencyKey)), - } - - err := input.Validate() - require.Error(t, err) - require.ErrorContains(t, err, "idempotency source must match delivery source") -} - -func 
TestCreateAcceptanceInputValidateRejectsUnexpectedIdempotencyRetention(t *testing.T) { - t.Parallel() - - record := validDelivery(t) - record.Source = deliverydomain.SourceNotification - record.ResendParentDeliveryID = "" - record.Status = deliverydomain.StatusQueued - record.SentAt = nil - record.LocaleFallbackUsed = false - record.UpdatedAt = record.CreatedAt.Add(time.Minute) - require.NoError(t, record.Validate()) - - idempotencyRecord := validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey) - idempotencyRecord.ExpiresAt = idempotencyRecord.CreatedAt.Add(time.Hour) - - input := CreateAcceptanceInput{ - Delivery: record, - FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)), - Idempotency: ptr(idempotencyRecord), - } - - err := input.Validate() - require.Error(t, err) - require.ErrorContains(t, err, "idempotency retention must equal") -} - -func TestAtomicWriterCreateAcceptanceStoresSuppressedStateWithoutAttempt(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - writer, err := NewAtomicWriter(client) - require.NoError(t, err) - - record := validDelivery(t) - record.Source = deliverydomain.SourceAuthSession - record.ResendParentDeliveryID = "" - record.Status = deliverydomain.StatusSuppressed - record.AttemptCount = 0 - record.LastAttemptStatus = "" - record.ProviderSummary = "" - record.LocaleFallbackUsed = false - record.UpdatedAt = record.CreatedAt.Add(time.Minute) - record.SentAt = nil - record.SuppressedAt = ptr(record.UpdatedAt) - require.NoError(t, record.Validate()) - - input := CreateAcceptanceInput{ - Delivery: record, - Idempotency: ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)), - } - - require.NoError(t, writer.CreateAcceptance(context.Background(), input)) - - storedDelivery, err := client.Get(context.Background(), 
Keyspace{}.Delivery(record.DeliveryID)).Bytes() - require.NoError(t, err) - decodedDelivery, err := UnmarshalDelivery(storedDelivery) - require.NoError(t, err) - require.Equal(t, record, decodedDelivery) - - require.False(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, 1))) - - scheduleCard, err := client.ZCard(context.Background(), Keyspace{}.AttemptSchedule()).Result() - require.NoError(t, err) - require.Zero(t, scheduleCard) -} - -func TestAtomicWriterMarkRenderedUpdatesDeliveryAndStatusIndex(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - writer, err := NewAtomicWriter(client) - require.NoError(t, err) - - record := validQueuedTemplateDelivery(t) - firstAttempt := validScheduledAttempt(t, record.DeliveryID) - createInput := CreateAcceptanceInput{ - Delivery: record, - FirstAttempt: ptr(firstAttempt), - Idempotency: ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)), - } - require.NoError(t, writer.CreateAcceptance(context.Background(), createInput)) - - rendered := record - rendered.Status = deliverydomain.StatusRendered - rendered.Content = deliverydomain.Content{ - Subject: "Turn 54", - TextBody: "Hello Pilot", - HTMLBody: "

Hello Pilot

", - } - rendered.LocaleFallbackUsed = true - rendered.UpdatedAt = rendered.CreatedAt.Add(time.Minute) - require.NoError(t, rendered.Validate()) - - require.NoError(t, writer.MarkRendered(context.Background(), MarkRenderedInput{ - Delivery: rendered, - })) - - storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes() - require.NoError(t, err) - decodedDelivery, err := UnmarshalDelivery(storedDelivery) - require.NoError(t, err) - require.Equal(t, rendered, decodedDelivery) - - queuedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusQueued), 0, -1).Result() - require.NoError(t, err) - require.Empty(t, queuedMembers) - - renderedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusRendered), 0, -1).Result() - require.NoError(t, err) - require.Equal(t, []string{record.DeliveryID.String()}, renderedMembers) -} - -func TestAtomicWriterMarkRenderFailedUpdatesDeliveryAttemptAndStatusIndex(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - writer, err := NewAtomicWriter(client) - require.NoError(t, err) - - record := validQueuedTemplateDelivery(t) - firstAttempt := validScheduledAttempt(t, record.DeliveryID) - createInput := CreateAcceptanceInput{ - Delivery: record, - FirstAttempt: ptr(firstAttempt), - Idempotency: ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)), - } - require.NoError(t, writer.CreateAcceptance(context.Background(), createInput)) - - failed := record - failed.Status = deliverydomain.StatusFailed - failed.LastAttemptStatus = attempt.StatusRenderFailed - failed.ProviderSummary = "missing required variables: player.name" - failed.UpdatedAt = failed.CreatedAt.Add(time.Minute) - failed.FailedAt = ptr(failed.UpdatedAt) - require.NoError(t, 
failed.Validate()) - - renderFailedAttempt := validRenderFailedAttempt(t, record.DeliveryID) - - require.NoError(t, writer.MarkRenderFailed(context.Background(), MarkRenderFailedInput{ - Delivery: failed, - Attempt: renderFailedAttempt, - })) - - storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes() - require.NoError(t, err) - decodedDelivery, err := UnmarshalDelivery(storedDelivery) - require.NoError(t, err) - require.Equal(t, failed, decodedDelivery) - - storedAttempt, err := client.Get(context.Background(), Keyspace{}.Attempt(record.DeliveryID, 1)).Bytes() - require.NoError(t, err) - decodedAttempt, err := UnmarshalAttempt(storedAttempt) - require.NoError(t, err) - require.Equal(t, renderFailedAttempt, decodedAttempt) - - queuedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusQueued), 0, -1).Result() - require.NoError(t, err) - require.Empty(t, queuedMembers) - - failedMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusFailed), 0, -1).Result() - require.NoError(t, err) - require.Equal(t, []string{record.DeliveryID.String()}, failedMembers) - - scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result() - require.NoError(t, err) - require.Empty(t, scheduledMembers) -} - -func TestAtomicWriterMarkRenderedRejectsUnexpectedCurrentState(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - writer, err := NewAtomicWriter(client) - require.NoError(t, err) - - record := validQueuedTemplateDelivery(t) - firstAttempt := validScheduledAttempt(t, record.DeliveryID) - require.NoError(t, writer.CreateAcceptance(context.Background(), CreateAcceptanceInput{ - Delivery: record, - FirstAttempt: ptr(firstAttempt), - Idempotency: ptr(validIdempotencyRecord(t, 
record.Source, record.DeliveryID, record.IdempotencyKey)), - })) - - failed := record - failed.Status = deliverydomain.StatusFailed - failed.LastAttemptStatus = attempt.StatusRenderFailed - failed.ProviderSummary = "missing required variables: player.name" - failed.UpdatedAt = failed.CreatedAt.Add(time.Minute) - failed.FailedAt = ptr(failed.UpdatedAt) - require.NoError(t, failed.Validate()) - require.NoError(t, writer.MarkRenderFailed(context.Background(), MarkRenderFailedInput{ - Delivery: failed, - Attempt: validRenderFailedAttempt(t, record.DeliveryID), - })) - - rendered := record - rendered.Status = deliverydomain.StatusRendered - rendered.Content = deliverydomain.Content{ - Subject: "Turn 54", - TextBody: "Hello Pilot", - } - rendered.UpdatedAt = rendered.CreatedAt.Add(2 * time.Minute) - require.NoError(t, rendered.Validate()) - - err = writer.MarkRendered(context.Background(), MarkRenderedInput{Delivery: rendered}) - require.Error(t, err) - require.ErrorIs(t, err, ErrConflict) -} - -func ptr[T any](value T) *T { - return &value -} - -var _ = attempt.Attempt{} diff --git a/mail/internal/adapters/redisstate/attempt_execution_store.go b/mail/internal/adapters/redisstate/attempt_execution_store.go deleted file mode 100644 index baccdad..0000000 --- a/mail/internal/adapters/redisstate/attempt_execution_store.go +++ /dev/null @@ -1,502 +0,0 @@ -package redisstate - -import ( - "context" - "errors" - "fmt" - "time" - - "galaxy/mail/internal/domain/attempt" - "galaxy/mail/internal/domain/common" - deliverydomain "galaxy/mail/internal/domain/delivery" - "galaxy/mail/internal/service/acceptgenericdelivery" - "galaxy/mail/internal/service/executeattempt" - "galaxy/mail/internal/telemetry" - - "github.com/redis/go-redis/v9" -) - -var errNotClaimable = errors.New("attempt is not claimable") - -// AttemptExecutionStore provides the Redis-backed durable storage used by the -// attempt scheduler and attempt execution service. 
-type AttemptExecutionStore struct { - client *redis.Client - keys Keyspace -} - -// NewAttemptExecutionStore constructs one Redis-backed attempt execution -// store. -func NewAttemptExecutionStore(client *redis.Client) (*AttemptExecutionStore, error) { - if client == nil { - return nil, errors.New("new attempt execution store: nil redis client") - } - - return &AttemptExecutionStore{ - client: client, - keys: Keyspace{}, - }, nil -} - -// NextDueDeliveryIDs returns up to limit due delivery identifiers ordered by -// the attempt schedule score. -func (store *AttemptExecutionStore) NextDueDeliveryIDs(ctx context.Context, now time.Time, limit int64) ([]common.DeliveryID, error) { - if store == nil || store.client == nil { - return nil, errors.New("next due delivery ids: nil store") - } - if ctx == nil { - return nil, errors.New("next due delivery ids: nil context") - } - if limit <= 0 { - return nil, errors.New("next due delivery ids: non-positive limit") - } - - values, err := store.client.ZRangeByScore(ctx, store.keys.AttemptSchedule(), &redis.ZRangeBy{ - Min: "-inf", - Max: fmt.Sprintf("%d", now.UTC().UnixMilli()), - Count: limit, - }).Result() - if err != nil { - return nil, fmt.Errorf("next due delivery ids: %w", err) - } - - ids := make([]common.DeliveryID, len(values)) - for index, value := range values { - ids[index] = common.DeliveryID(value) - } - - return ids, nil -} - -// ReadAttemptScheduleSnapshot returns the current depth of the durable attempt -// schedule together with its oldest scheduled timestamp when one exists. 
-func (store *AttemptExecutionStore) ReadAttemptScheduleSnapshot(ctx context.Context) (telemetry.AttemptScheduleSnapshot, error) { - if store == nil || store.client == nil { - return telemetry.AttemptScheduleSnapshot{}, errors.New("read attempt schedule snapshot: nil store") - } - if ctx == nil { - return telemetry.AttemptScheduleSnapshot{}, errors.New("read attempt schedule snapshot: nil context") - } - - depth, err := store.client.ZCard(ctx, store.keys.AttemptSchedule()).Result() - if err != nil { - return telemetry.AttemptScheduleSnapshot{}, fmt.Errorf("read attempt schedule snapshot: depth: %w", err) - } - - snapshot := telemetry.AttemptScheduleSnapshot{ - Depth: depth, - } - if depth == 0 { - return snapshot, nil - } - - values, err := store.client.ZRangeWithScores(ctx, store.keys.AttemptSchedule(), 0, 0).Result() - if err != nil { - return telemetry.AttemptScheduleSnapshot{}, fmt.Errorf("read attempt schedule snapshot: oldest scheduled entry: %w", err) - } - if len(values) == 0 { - return snapshot, nil - } - - oldestScheduledFor := time.UnixMilli(int64(values[0].Score)).UTC() - snapshot.OldestScheduledFor = &oldestScheduledFor - return snapshot, nil -} - -// SendingDeliveryIDs returns every delivery id currently indexed as -// `mail_delivery.status=sending`. 
-func (store *AttemptExecutionStore) SendingDeliveryIDs(ctx context.Context) ([]common.DeliveryID, error) { - if store == nil || store.client == nil { - return nil, errors.New("sending delivery ids: nil store") - } - if ctx == nil { - return nil, errors.New("sending delivery ids: nil context") - } - - values, err := store.client.ZRange(ctx, store.keys.StatusIndex(deliverydomain.StatusSending), 0, -1).Result() - if err != nil { - return nil, fmt.Errorf("sending delivery ids: %w", err) - } - - ids := make([]common.DeliveryID, len(values)) - for index, value := range values { - ids[index] = common.DeliveryID(value) - } - - return ids, nil -} - -// RemoveScheduledDelivery removes deliveryID from the attempt schedule set. -func (store *AttemptExecutionStore) RemoveScheduledDelivery(ctx context.Context, deliveryID common.DeliveryID) error { - if store == nil || store.client == nil { - return errors.New("remove scheduled delivery: nil store") - } - if ctx == nil { - return errors.New("remove scheduled delivery: nil context") - } - if err := deliveryID.Validate(); err != nil { - return fmt.Errorf("remove scheduled delivery: %w", err) - } - - if err := store.client.ZRem(ctx, store.keys.AttemptSchedule(), deliveryID.String()).Err(); err != nil { - return fmt.Errorf("remove scheduled delivery: %w", err) - } - - return nil -} - -// LoadWorkItem loads the current delivery and its latest attempt when both are -// present. 
-func (store *AttemptExecutionStore) LoadWorkItem(ctx context.Context, deliveryID common.DeliveryID) (executeattempt.WorkItem, bool, error) { - if store == nil || store.client == nil { - return executeattempt.WorkItem{}, false, errors.New("load attempt work item: nil store") - } - if ctx == nil { - return executeattempt.WorkItem{}, false, errors.New("load attempt work item: nil context") - } - if err := deliveryID.Validate(); err != nil { - return executeattempt.WorkItem{}, false, fmt.Errorf("load attempt work item: %w", err) - } - - deliveryRecord, found, err := store.loadDelivery(ctx, deliveryID) - if err != nil || !found { - return executeattempt.WorkItem{}, found, err - } - if deliveryRecord.AttemptCount < 1 { - return executeattempt.WorkItem{}, false, nil - } - - attemptRecord, found, err := store.loadAttempt(ctx, deliveryID, deliveryRecord.AttemptCount) - if err != nil || !found { - return executeattempt.WorkItem{}, found, err - } - - return executeattempt.WorkItem{ - Delivery: deliveryRecord, - Attempt: attemptRecord, - }, true, nil -} - -// LoadPayload loads one stored raw attachment payload bundle. 
-func (store *AttemptExecutionStore) LoadPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) { - if store == nil || store.client == nil { - return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("load attempt payload: nil store") - } - if ctx == nil { - return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("load attempt payload: nil context") - } - if err := deliveryID.Validate(); err != nil { - return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("load attempt payload: %w", err) - } - - payload, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return acceptgenericdelivery.DeliveryPayload{}, false, nil - case err != nil: - return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("load attempt payload: %w", err) - } - - record, err := UnmarshalDeliveryPayload(payload) - if err != nil { - return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("load attempt payload: %w", err) - } - - return record, true, nil -} - -// ClaimDueAttempt transitions one due scheduled attempt into `in_progress` -// ownership and returns the claimed work item. 
-func (store *AttemptExecutionStore) ClaimDueAttempt(ctx context.Context, deliveryID common.DeliveryID, now time.Time) (executeattempt.WorkItem, bool, error) { - if store == nil || store.client == nil { - return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil store") - } - if ctx == nil { - return executeattempt.WorkItem{}, false, errors.New("claim due attempt: nil context") - } - if err := deliveryID.Validate(); err != nil { - return executeattempt.WorkItem{}, false, fmt.Errorf("claim due attempt: %w", err) - } - - claimedAt := now.UTC().Truncate(time.Millisecond) - if claimedAt.IsZero() { - return executeattempt.WorkItem{}, false, errors.New("claim due attempt: zero claim time") - } - - deliveryKey := store.keys.Delivery(deliveryID) - - var claimed executeattempt.WorkItem - - watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error { - deliveryRecord, err := loadDeliveryFromTx(ctx, tx, deliveryKey) - switch { - case errors.Is(err, ErrConflict): - return errNotClaimable - case err != nil: - return fmt.Errorf("claim due attempt: %w", err) - } - if deliveryRecord.AttemptCount < 1 { - return errNotClaimable - } - - attemptKey := store.keys.Attempt(deliveryID, deliveryRecord.AttemptCount) - attemptRecord, err := loadAttemptFromTx(ctx, tx, attemptKey) - switch { - case errors.Is(err, ErrConflict): - return errNotClaimable - case err != nil: - return fmt.Errorf("claim due attempt: %w", err) - } - - score, err := tx.ZScore(ctx, store.keys.AttemptSchedule(), deliveryID.String()).Result() - switch { - case errors.Is(err, redis.Nil): - return errNotClaimable - case err != nil: - return fmt.Errorf("claim due attempt: read attempt schedule: %w", err) - } - - switch deliveryRecord.Status { - case deliverydomain.StatusQueued, deliverydomain.StatusRendered: - default: - return errNotClaimable - } - if attemptRecord.Status != attempt.StatusScheduled { - return errNotClaimable - } - if score > ScheduledForScore(claimedAt) || 
attemptRecord.ScheduledFor.After(claimedAt) { - return errNotClaimable - } - - claimedDelivery := deliveryRecord - claimedDelivery.Status = deliverydomain.StatusSending - claimedDelivery.UpdatedAt = claimedAt - if err := claimedDelivery.Validate(); err != nil { - return fmt.Errorf("claim due attempt: build claimed delivery: %w", err) - } - - claimedAttempt := attemptRecord - claimedAttempt.Status = attempt.StatusInProgress - claimedAttempt.StartedAt = ptrTime(claimedAt) - if err := claimedAttempt.Validate(); err != nil { - return fmt.Errorf("claim due attempt: build claimed attempt: %w", err) - } - - deliveryPayload, err := MarshalDelivery(claimedDelivery) - if err != nil { - return fmt.Errorf("claim due attempt: %w", err) - } - attemptPayload, err := MarshalAttempt(claimedAttempt) - if err != nil { - return fmt.Errorf("claim due attempt: %w", err) - } - - deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL) - if err != nil { - return fmt.Errorf("claim due attempt: delivery ttl: %w", err) - } - attemptTTL, err := ttlForExistingKey(ctx, tx, attemptKey, AttemptTTL) - if err != nil { - return fmt.Errorf("claim due attempt: attempt ttl: %w", err) - } - - createdAtScore := CreatedAtScore(deliveryRecord.CreatedAt) - - _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL) - pipe.Set(ctx, attemptKey, attemptPayload, attemptTTL) - pipe.ZRem(ctx, store.keys.StatusIndex(deliveryRecord.Status), deliveryID.String()) - pipe.ZAdd(ctx, store.keys.StatusIndex(deliverydomain.StatusSending), redis.Z{ - Score: createdAtScore, - Member: deliveryID.String(), - }) - pipe.ZRem(ctx, store.keys.AttemptSchedule(), deliveryID.String()) - return nil - }) - if err != nil { - return fmt.Errorf("claim due attempt: %w", err) - } - - claimed = executeattempt.WorkItem{ - Delivery: claimedDelivery, - Attempt: claimedAttempt, - } - return nil - }, deliveryKey) - - switch { - case errors.Is(watchErr, 
errNotClaimable), errors.Is(watchErr, redis.TxFailedErr): - return executeattempt.WorkItem{}, false, nil - case watchErr != nil: - return executeattempt.WorkItem{}, false, watchErr - default: - return claimed, true, nil - } -} - -// Commit atomically stores one complete attempt execution outcome. -func (store *AttemptExecutionStore) Commit(ctx context.Context, input executeattempt.CommitStateInput) error { - if store == nil || store.client == nil { - return errors.New("commit attempt outcome: nil store") - } - if ctx == nil { - return errors.New("commit attempt outcome: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("commit attempt outcome: %w", err) - } - - deliveryKey := store.keys.Delivery(input.Delivery.DeliveryID) - currentAttemptKey := store.keys.Attempt(input.Attempt.DeliveryID, input.Attempt.AttemptNo) - - deliveryPayload, err := MarshalDelivery(input.Delivery) - if err != nil { - return fmt.Errorf("commit attempt outcome: %w", err) - } - attemptPayload, err := MarshalAttempt(input.Attempt) - if err != nil { - return fmt.Errorf("commit attempt outcome: %w", err) - } - - var ( - nextAttemptKey string - nextAttemptPayload []byte - nextAttemptScore float64 - deadLetterKey string - deadLetterPayload []byte - ) - if input.NextAttempt != nil { - nextAttemptKey = store.keys.Attempt(input.NextAttempt.DeliveryID, input.NextAttempt.AttemptNo) - nextAttemptPayload, err = MarshalAttempt(*input.NextAttempt) - if err != nil { - return fmt.Errorf("commit attempt outcome: %w", err) - } - nextAttemptScore = ScheduledForScore(input.NextAttempt.ScheduledFor) - } - if input.DeadLetter != nil { - deadLetterKey = store.keys.DeadLetter(input.DeadLetter.DeliveryID) - deadLetterPayload, err = MarshalDeadLetter(*input.DeadLetter) - if err != nil { - return fmt.Errorf("commit attempt outcome: %w", err) - } - } - - watchKeys := []string{deliveryKey, currentAttemptKey} - if nextAttemptKey != "" { - watchKeys = append(watchKeys, nextAttemptKey) - } - if 
deadLetterKey != "" { - watchKeys = append(watchKeys, deadLetterKey) - } - - watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error { - currentDelivery, err := loadDeliveryFromTx(ctx, tx, deliveryKey) - if err != nil { - return fmt.Errorf("commit attempt outcome: %w", err) - } - currentAttempt, err := loadAttemptFromTx(ctx, tx, currentAttemptKey) - if err != nil { - return fmt.Errorf("commit attempt outcome: %w", err) - } - if currentDelivery.Status != deliverydomain.StatusSending { - return fmt.Errorf("commit attempt outcome: %w", ErrConflict) - } - if currentAttempt.Status != attempt.StatusInProgress { - return fmt.Errorf("commit attempt outcome: %w", ErrConflict) - } - if nextAttemptKey != "" { - if err := ensureKeyAbsent(ctx, tx, nextAttemptKey); err != nil { - return fmt.Errorf("commit attempt outcome: %w", err) - } - } - if deadLetterKey != "" { - if err := ensureKeyAbsent(ctx, tx, deadLetterKey); err != nil { - return fmt.Errorf("commit attempt outcome: %w", err) - } - } - - deliveryTTL, err := ttlForExistingKey(ctx, tx, deliveryKey, DeliveryTTL) - if err != nil { - return fmt.Errorf("commit attempt outcome: delivery ttl: %w", err) - } - attemptTTL, err := ttlForExistingKey(ctx, tx, currentAttemptKey, AttemptTTL) - if err != nil { - return fmt.Errorf("commit attempt outcome: attempt ttl: %w", err) - } - createdAtScore := CreatedAtScore(currentDelivery.CreatedAt) - - _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, deliveryKey, deliveryPayload, deliveryTTL) - pipe.Set(ctx, currentAttemptKey, attemptPayload, attemptTTL) - pipe.ZRem(ctx, store.keys.StatusIndex(currentDelivery.Status), input.Delivery.DeliveryID.String()) - pipe.ZAdd(ctx, store.keys.StatusIndex(input.Delivery.Status), redis.Z{ - Score: createdAtScore, - Member: input.Delivery.DeliveryID.String(), - }) - pipe.ZRem(ctx, store.keys.AttemptSchedule(), input.Delivery.DeliveryID.String()) - if nextAttemptKey != "" { - pipe.Set(ctx, nextAttemptKey, 
nextAttemptPayload, AttemptTTL) - pipe.ZAdd(ctx, store.keys.AttemptSchedule(), redis.Z{ - Score: nextAttemptScore, - Member: input.Delivery.DeliveryID.String(), - }) - } - if deadLetterKey != "" { - pipe.Set(ctx, deadLetterKey, deadLetterPayload, DeadLetterTTL) - } - return nil - }) - if err != nil { - return fmt.Errorf("commit attempt outcome: %w", err) - } - - return nil - }, watchKeys...) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("commit attempt outcome: %w", ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -func (store *AttemptExecutionStore) loadDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) { - payload, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return deliverydomain.Delivery{}, false, nil - case err != nil: - return deliverydomain.Delivery{}, false, fmt.Errorf("load attempt delivery: %w", err) - } - - record, err := UnmarshalDelivery(payload) - if err != nil { - return deliverydomain.Delivery{}, false, fmt.Errorf("load attempt delivery: %w", err) - } - - return record, true, nil -} - -func (store *AttemptExecutionStore) loadAttempt(ctx context.Context, deliveryID common.DeliveryID, attemptNo int) (attempt.Attempt, bool, error) { - payload, err := store.client.Get(ctx, store.keys.Attempt(deliveryID, attemptNo)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return attempt.Attempt{}, false, nil - case err != nil: - return attempt.Attempt{}, false, fmt.Errorf("load attempt record: %w", err) - } - - record, err := UnmarshalAttempt(payload) - if err != nil { - return attempt.Attempt{}, false, fmt.Errorf("load attempt record: %w", err) - } - - return record, true, nil -} - -func ptrTime(value time.Time) *time.Time { - return &value -} diff --git a/mail/internal/adapters/redisstate/attempt_execution_store_test.go 
b/mail/internal/adapters/redisstate/attempt_execution_store_test.go deleted file mode 100644 index 702c21b..0000000 --- a/mail/internal/adapters/redisstate/attempt_execution_store_test.go +++ /dev/null @@ -1,301 +0,0 @@ -package redisstate - -import ( - "context" - "sync" - "testing" - "time" - - "galaxy/mail/internal/domain/attempt" - "galaxy/mail/internal/domain/common" - deliverydomain "galaxy/mail/internal/domain/delivery" - "galaxy/mail/internal/service/executeattempt" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/require" -) - -func TestAttemptExecutionStoreClaimDueAttemptTransitionsState(t *testing.T) { - t.Parallel() - - server, client, store := newAttemptExecutionFixture(t) - record := queuedRenderedDelivery(t, common.DeliveryID("delivery-claim")) - createAcceptedDelivery(t, store, record) - - claimed, found, err := store.ClaimDueAttempt(context.Background(), record.DeliveryID, record.CreatedAt.Add(time.Minute)) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, deliverydomain.StatusSending, claimed.Delivery.Status) - require.Equal(t, attempt.StatusInProgress, claimed.Attempt.Status) - require.NotNil(t, claimed.Attempt.StartedAt) - - require.False(t, server.Exists(Keyspace{}.AttemptSchedule())) - - storedDelivery, err := client.Get(context.Background(), Keyspace{}.Delivery(record.DeliveryID)).Bytes() - require.NoError(t, err) - decodedDelivery, err := UnmarshalDelivery(storedDelivery) - require.NoError(t, err) - require.Equal(t, claimed.Delivery, decodedDelivery) - - sendingMembers, err := client.ZRange(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusSending), 0, -1).Result() - require.NoError(t, err) - require.Equal(t, []string{record.DeliveryID.String()}, sendingMembers) -} - -func TestAttemptExecutionStoreClaimDueAttemptAllowsOnlyOneOwner(t *testing.T) { - t.Parallel() - - _, _, store := newAttemptExecutionFixture(t) - record := queuedRenderedDelivery(t, 
common.DeliveryID("delivery-race")) - createAcceptedDelivery(t, store, record) - - const contenders = 8 - - var ( - waitGroup sync.WaitGroup - mu sync.Mutex - successes int - ) - - for range contenders { - waitGroup.Add(1) - go func() { - defer waitGroup.Done() - - _, found, err := store.ClaimDueAttempt(context.Background(), record.DeliveryID, record.CreatedAt.Add(time.Minute)) - require.NoError(t, err) - - mu.Lock() - defer mu.Unlock() - if found { - successes++ - } - }() - } - waitGroup.Wait() - - require.Equal(t, 1, successes) -} - -func TestAttemptExecutionStoreCommitSchedulesRetry(t *testing.T) { - t.Parallel() - - _, client, store := newAttemptExecutionFixture(t) - workItem := inProgressWorkItem(t, common.DeliveryID("delivery-retry"), 1) - seedWorkItemState(t, client, workItem) - - finishedAt := workItem.Attempt.StartedAt.Add(30 * time.Second) - currentAttempt := workItem.Attempt - currentAttempt.Status = attempt.StatusTransportFailed - currentAttempt.FinishedAt = ptrTimeAttemptStore(finishedAt) - currentAttempt.ProviderClassification = "transient_failure" - currentAttempt.ProviderSummary = "provider=smtp result=transient_failure phase=data smtp_code=451" - require.NoError(t, currentAttempt.Validate()) - - nextAttempt := attempt.Attempt{ - DeliveryID: workItem.Delivery.DeliveryID, - AttemptNo: 2, - ScheduledFor: finishedAt.Add(time.Minute), - Status: attempt.StatusScheduled, - } - require.NoError(t, nextAttempt.Validate()) - - deliveryRecord := workItem.Delivery - deliveryRecord.Status = deliverydomain.StatusQueued - deliveryRecord.AttemptCount = nextAttempt.AttemptNo - deliveryRecord.LastAttemptStatus = currentAttempt.Status - deliveryRecord.ProviderSummary = currentAttempt.ProviderSummary - deliveryRecord.UpdatedAt = finishedAt - require.NoError(t, deliveryRecord.Validate()) - - input := executeattempt.CommitStateInput{ - Delivery: deliveryRecord, - Attempt: currentAttempt, - NextAttempt: &nextAttempt, - } - require.NoError(t, input.Validate()) - 
require.NoError(t, store.Commit(context.Background(), input)) - - reloaded, found, err := store.LoadWorkItem(context.Background(), workItem.Delivery.DeliveryID) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, deliveryRecord, reloaded.Delivery) - require.Equal(t, nextAttempt, reloaded.Attempt) - - firstAttemptPayload, err := client.Get(context.Background(), Keyspace{}.Attempt(workItem.Delivery.DeliveryID, 1)).Bytes() - require.NoError(t, err) - firstAttemptRecord, err := UnmarshalAttempt(firstAttemptPayload) - require.NoError(t, err) - require.Equal(t, currentAttempt, firstAttemptRecord) - - scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result() - require.NoError(t, err) - require.Equal(t, []string{workItem.Delivery.DeliveryID.String()}, scheduledMembers) -} - -func TestAttemptExecutionStoreCommitCreatesDeadLetter(t *testing.T) { - t.Parallel() - - _, client, store := newAttemptExecutionFixture(t) - workItem := inProgressWorkItem(t, common.DeliveryID("delivery-dead-letter"), 4) - seedWorkItemState(t, client, workItem) - - finishedAt := workItem.Attempt.StartedAt.Add(30 * time.Second) - currentAttempt := workItem.Attempt - currentAttempt.Status = attempt.StatusTimedOut - currentAttempt.FinishedAt = ptrTimeAttemptStore(finishedAt) - currentAttempt.ProviderClassification = "deadline_exceeded" - currentAttempt.ProviderSummary = "attempt claim TTL expired" - require.NoError(t, currentAttempt.Validate()) - - deliveryRecord := workItem.Delivery - deliveryRecord.Status = deliverydomain.StatusDeadLetter - deliveryRecord.LastAttemptStatus = currentAttempt.Status - deliveryRecord.ProviderSummary = currentAttempt.ProviderSummary - deliveryRecord.UpdatedAt = finishedAt - deliveryRecord.DeadLetteredAt = ptrTimeAttemptStore(finishedAt) - require.NoError(t, deliveryRecord.Validate()) - - deadLetter := &deliverydomain.DeadLetterEntry{ - DeliveryID: deliveryRecord.DeliveryID, - FinalAttemptNo: 
currentAttempt.AttemptNo, - FailureClassification: "retry_exhausted", - ProviderSummary: currentAttempt.ProviderSummary, - CreatedAt: finishedAt, - RecoveryHint: "check SMTP connectivity", - } - require.NoError(t, deadLetter.ValidateFor(deliveryRecord)) - - input := executeattempt.CommitStateInput{ - Delivery: deliveryRecord, - Attempt: currentAttempt, - DeadLetter: deadLetter, - } - require.NoError(t, input.Validate()) - require.NoError(t, store.Commit(context.Background(), input)) - - storedDelivery, found, err := store.LoadWorkItem(context.Background(), workItem.Delivery.DeliveryID) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, deliveryRecord, storedDelivery.Delivery) - require.Equal(t, currentAttempt, storedDelivery.Attempt) - - deadLetterPayload, err := client.Get(context.Background(), Keyspace{}.DeadLetter(workItem.Delivery.DeliveryID)).Bytes() - require.NoError(t, err) - decodedDeadLetter, err := UnmarshalDeadLetter(deadLetterPayload) - require.NoError(t, err) - require.Equal(t, *deadLetter, decodedDeadLetter) -} - -func newAttemptExecutionFixture(t *testing.T) (*miniredis.Miniredis, *redis.Client, *AttemptExecutionStore) { - t.Helper() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - store, err := NewAttemptExecutionStore(client) - require.NoError(t, err) - - return server, client, store -} - -func createAcceptedDelivery(t *testing.T, store *AttemptExecutionStore, record deliverydomain.Delivery) { - t.Helper() - - client := store.client - writer, err := NewAtomicWriter(client) - require.NoError(t, err) - - firstAttempt := attempt.Attempt{ - DeliveryID: record.DeliveryID, - AttemptNo: 1, - ScheduledFor: record.CreatedAt, - Status: attempt.StatusScheduled, - } - require.NoError(t, firstAttempt.Validate()) - - require.NoError(t, writer.CreateAcceptance(context.Background(), CreateAcceptanceInput{ - Delivery: record, - 
FirstAttempt: &firstAttempt, - })) -} - -func queuedRenderedDelivery(t *testing.T, deliveryID common.DeliveryID) deliverydomain.Delivery { - t.Helper() - - record := validDelivery(t) - record.DeliveryID = deliveryID - record.ResendParentDeliveryID = "" - record.Source = deliverydomain.SourceNotification - record.PayloadMode = deliverydomain.PayloadModeRendered - record.TemplateID = "" - record.Locale = "" - record.TemplateVariables = nil - record.LocaleFallbackUsed = false - record.Attachments = nil - record.Status = deliverydomain.StatusQueued - record.AttemptCount = 1 - record.LastAttemptStatus = "" - record.ProviderSummary = "" - record.CreatedAt = time.Unix(1_775_121_700, 0).UTC() - record.UpdatedAt = record.CreatedAt - record.SentAt = nil - record.SuppressedAt = nil - record.FailedAt = nil - record.DeadLetteredAt = nil - record.IdempotencyKey = common.IdempotencyKey("notification:" + deliveryID.String()) - require.NoError(t, record.Validate()) - - return record -} - -func inProgressWorkItem(t *testing.T, deliveryID common.DeliveryID, attemptNo int) executeattempt.WorkItem { - t.Helper() - - deliveryRecord := queuedRenderedDelivery(t, deliveryID) - deliveryRecord.Status = deliverydomain.StatusSending - deliveryRecord.AttemptCount = attemptNo - deliveryRecord.UpdatedAt = deliveryRecord.CreatedAt.Add(time.Duration(attemptNo) * time.Minute) - require.NoError(t, deliveryRecord.Validate()) - - scheduledFor := deliveryRecord.CreatedAt.Add(time.Duration(attemptNo-1) * time.Minute) - startedAt := scheduledFor.Add(5 * time.Second) - attemptRecord := attempt.Attempt{ - DeliveryID: deliveryID, - AttemptNo: attemptNo, - ScheduledFor: scheduledFor, - StartedAt: &startedAt, - Status: attempt.StatusInProgress, - } - require.NoError(t, attemptRecord.Validate()) - - return executeattempt.WorkItem{ - Delivery: deliveryRecord, - Attempt: attemptRecord, - } -} - -func seedWorkItemState(t *testing.T, client *redis.Client, item executeattempt.WorkItem) { - t.Helper() - - 
deliveryPayload, err := MarshalDelivery(item.Delivery) - require.NoError(t, err) - attemptPayload, err := MarshalAttempt(item.Attempt) - require.NoError(t, err) - - err = client.Set(context.Background(), Keyspace{}.Delivery(item.Delivery.DeliveryID), deliveryPayload, DeliveryTTL).Err() - require.NoError(t, err) - err = client.Set(context.Background(), Keyspace{}.Attempt(item.Attempt.DeliveryID, item.Attempt.AttemptNo), attemptPayload, AttemptTTL).Err() - require.NoError(t, err) - err = client.ZAdd(context.Background(), Keyspace{}.StatusIndex(deliverydomain.StatusSending), redis.Z{ - Score: CreatedAtScore(item.Delivery.CreatedAt), - Member: item.Delivery.DeliveryID.String(), - }).Err() - require.NoError(t, err) -} - -func ptrTimeAttemptStore(value time.Time) *time.Time { - return &value -} diff --git a/mail/internal/adapters/redisstate/auth_acceptance_store.go b/mail/internal/adapters/redisstate/auth_acceptance_store.go deleted file mode 100644 index 19fa6ab..0000000 --- a/mail/internal/adapters/redisstate/auth_acceptance_store.go +++ /dev/null @@ -1,117 +0,0 @@ -package redisstate - -import ( - "context" - "errors" - "fmt" - - "galaxy/mail/internal/domain/common" - deliverydomain "galaxy/mail/internal/domain/delivery" - "galaxy/mail/internal/domain/idempotency" - "galaxy/mail/internal/service/acceptauthdelivery" - - "github.com/redis/go-redis/v9" -) - -// AcceptanceStore provides the Redis-backed durable storage used by the -// auth-delivery acceptance use case. -type AcceptanceStore struct { - client *redis.Client - writer *AtomicWriter - keys Keyspace -} - -// NewAcceptanceStore constructs one Redis-backed auth acceptance store. 
-func NewAcceptanceStore(client *redis.Client) (*AcceptanceStore, error) { - if client == nil { - return nil, errors.New("new auth acceptance store: nil redis client") - } - - writer, err := NewAtomicWriter(client) - if err != nil { - return nil, fmt.Errorf("new auth acceptance store: %w", err) - } - - return &AcceptanceStore{ - client: client, - writer: writer, - keys: Keyspace{}, - }, nil -} - -// CreateAcceptance stores one auth-delivery acceptance write set in Redis. -func (store *AcceptanceStore) CreateAcceptance(ctx context.Context, input acceptauthdelivery.CreateAcceptanceInput) error { - if store == nil || store.client == nil || store.writer == nil { - return errors.New("create auth acceptance: nil store") - } - if ctx == nil { - return errors.New("create auth acceptance: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("create auth acceptance: %w", err) - } - - err := store.writer.CreateAcceptance(ctx, CreateAcceptanceInput{ - Delivery: input.Delivery, - FirstAttempt: input.FirstAttempt, - Idempotency: &input.Idempotency, - }) - if errors.Is(err, ErrConflict) { - return fmt.Errorf("create auth acceptance: %w", acceptauthdelivery.ErrConflict) - } - if err != nil { - return fmt.Errorf("create auth acceptance: %w", err) - } - - return nil -} - -// GetIdempotency loads one accepted idempotency scope from Redis. 
-func (store *AcceptanceStore) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) { - if store == nil || store.client == nil { - return idempotency.Record{}, false, errors.New("get auth acceptance idempotency: nil store") - } - if ctx == nil { - return idempotency.Record{}, false, errors.New("get auth acceptance idempotency: nil context") - } - - payload, err := store.client.Get(ctx, store.keys.Idempotency(source, key)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return idempotency.Record{}, false, nil - case err != nil: - return idempotency.Record{}, false, fmt.Errorf("get auth acceptance idempotency: %w", err) - } - - record, err := UnmarshalIdempotency(payload) - if err != nil { - return idempotency.Record{}, false, fmt.Errorf("get auth acceptance idempotency: %w", err) - } - - return record, true, nil -} - -// GetDelivery loads one accepted delivery from Redis. -func (store *AcceptanceStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) { - if store == nil || store.client == nil { - return deliverydomain.Delivery{}, false, errors.New("get auth acceptance delivery: nil store") - } - if ctx == nil { - return deliverydomain.Delivery{}, false, errors.New("get auth acceptance delivery: nil context") - } - - payload, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return deliverydomain.Delivery{}, false, nil - case err != nil: - return deliverydomain.Delivery{}, false, fmt.Errorf("get auth acceptance delivery: %w", err) - } - - record, err := UnmarshalDelivery(payload) - if err != nil { - return deliverydomain.Delivery{}, false, fmt.Errorf("get auth acceptance delivery: %w", err) - } - - return record, true, nil -} diff --git a/mail/internal/adapters/redisstate/auth_acceptance_store_test.go b/mail/internal/adapters/redisstate/auth_acceptance_store_test.go 
deleted file mode 100644 index 97ad342..0000000 --- a/mail/internal/adapters/redisstate/auth_acceptance_store_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package redisstate - -import ( - "context" - "testing" - "time" - - "galaxy/mail/internal/domain/common" - deliverydomain "galaxy/mail/internal/domain/delivery" - "galaxy/mail/internal/domain/idempotency" - "galaxy/mail/internal/service/acceptauthdelivery" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/require" -) - -func TestAcceptanceStoreCreateAndReadQueuedDelivery(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - store, err := NewAcceptanceStore(client) - require.NoError(t, err) - - record := validDelivery(t) - record.Source = deliverydomain.SourceAuthSession - record.ResendParentDeliveryID = "" - record.Status = deliverydomain.StatusQueued - record.AttemptCount = 1 - record.LastAttemptStatus = "" - record.ProviderSummary = "" - record.LocaleFallbackUsed = false - record.UpdatedAt = record.CreatedAt - record.SentAt = nil - require.NoError(t, record.Validate()) - - input := acceptauthdelivery.CreateAcceptanceInput{ - Delivery: record, - FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)), - Idempotency: validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey), - } - - require.NoError(t, store.CreateAcceptance(context.Background(), input)) - - storedDelivery, found, err := store.GetDelivery(context.Background(), record.DeliveryID) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, record, storedDelivery) - - storedIdempotency, found, err := store.GetIdempotency(context.Background(), record.Source, record.IdempotencyKey) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, input.Idempotency, storedIdempotency) -} - -func 
TestAcceptanceStoreCreateAndReadSuppressedDelivery(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - store, err := NewAcceptanceStore(client) - require.NoError(t, err) - - record := validDelivery(t) - record.Source = deliverydomain.SourceAuthSession - record.ResendParentDeliveryID = "" - record.Status = deliverydomain.StatusSuppressed - record.AttemptCount = 0 - record.LastAttemptStatus = "" - record.ProviderSummary = "" - record.LocaleFallbackUsed = false - record.UpdatedAt = record.CreatedAt.Add(time.Minute) - record.SentAt = nil - record.SuppressedAt = ptr(record.UpdatedAt) - require.NoError(t, record.Validate()) - - input := acceptauthdelivery.CreateAcceptanceInput{ - Delivery: record, - Idempotency: validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey), - } - - require.NoError(t, store.CreateAcceptance(context.Background(), input)) - - storedDelivery, found, err := store.GetDelivery(context.Background(), record.DeliveryID) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, record, storedDelivery) - - attemptExists := server.Exists(Keyspace{}.Attempt(record.DeliveryID, 1)) - require.False(t, attemptExists) -} - -func TestAcceptanceStoreReturnsNotFound(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - store, err := NewAcceptanceStore(client) - require.NoError(t, err) - - deliveryRecord, found, err := store.GetDelivery(context.Background(), common.DeliveryID("missing")) - require.NoError(t, err) - require.False(t, found) - require.Equal(t, deliverydomain.Delivery{}, deliveryRecord) - - idempotencyRecord, found, err := store.GetIdempotency(context.Background(), deliverydomain.SourceAuthSession, common.IdempotencyKey("missing")) - 
require.NoError(t, err) - require.False(t, found) - require.Equal(t, idempotency.Record{}, idempotencyRecord) -} diff --git a/mail/internal/adapters/redisstate/codecs.go b/mail/internal/adapters/redisstate/codecs.go deleted file mode 100644 index cffb329..0000000 --- a/mail/internal/adapters/redisstate/codecs.go +++ /dev/null @@ -1,697 +0,0 @@ -package redisstate - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "strings" - "time" - - "galaxy/mail/internal/domain/attempt" - "galaxy/mail/internal/domain/common" - deliverydomain "galaxy/mail/internal/domain/delivery" - "galaxy/mail/internal/domain/idempotency" - "galaxy/mail/internal/domain/malformedcommand" - "galaxy/mail/internal/service/acceptgenericdelivery" -) - -type deliveryRecord struct { - DeliveryID string `json:"delivery_id"` - ResendParentDeliveryID string `json:"resend_parent_delivery_id,omitempty"` - Source deliverydomain.Source `json:"source"` - PayloadMode deliverydomain.PayloadMode `json:"payload_mode"` - TemplateID string `json:"template_id,omitempty"` - TemplateVariables *map[string]any `json:"template_variables,omitempty"` - To []string `json:"to"` - Cc []string `json:"cc"` - Bcc []string `json:"bcc"` - ReplyTo []string `json:"reply_to"` - Subject string `json:"subject,omitempty"` - TextBody string `json:"text_body,omitempty"` - HTMLBody string `json:"html_body,omitempty"` - Attachments []attachmentRecord `json:"attachments"` - Locale string `json:"locale,omitempty"` - LocaleFallbackUsed bool `json:"locale_fallback_used"` - IdempotencyKey string `json:"idempotency_key"` - Status deliverydomain.Status `json:"status"` - AttemptCount int `json:"attempt_count"` - LastAttemptStatus attempt.Status `json:"last_attempt_status,omitempty"` - ProviderSummary string `json:"provider_summary,omitempty"` - CreatedAtMS int64 `json:"created_at_ms"` - UpdatedAtMS int64 `json:"updated_at_ms"` - SentAtMS *int64 `json:"sent_at_ms,omitempty"` - SuppressedAtMS *int64 `json:"suppressed_at_ms,omitempty"` - 
FailedAtMS *int64 `json:"failed_at_ms,omitempty"` - DeadLetteredAtMS *int64 `json:"dead_lettered_at_ms,omitempty"` -} - -type attemptRecord struct { - DeliveryID string `json:"delivery_id"` - AttemptNo int `json:"attempt_no"` - ScheduledForMS int64 `json:"scheduled_for_ms"` - StartedAtMS *int64 `json:"started_at_ms,omitempty"` - FinishedAtMS *int64 `json:"finished_at_ms,omitempty"` - Status attempt.Status `json:"status"` - ProviderClassification string `json:"provider_classification,omitempty"` - ProviderSummary string `json:"provider_summary,omitempty"` -} - -type idempotencyRecord struct { - Source deliverydomain.Source `json:"source"` - IdempotencyKey string `json:"idempotency_key"` - DeliveryID string `json:"delivery_id"` - RequestFingerprint string `json:"request_fingerprint"` - CreatedAtMS int64 `json:"created_at_ms"` - ExpiresAtMS int64 `json:"expires_at_ms"` -} - -type deadLetterRecord struct { - DeliveryID string `json:"delivery_id"` - FinalAttemptNo int `json:"final_attempt_no"` - FailureClassification string `json:"failure_classification"` - ProviderSummary string `json:"provider_summary,omitempty"` - CreatedAtMS int64 `json:"created_at_ms"` - RecoveryHint string `json:"recovery_hint,omitempty"` -} - -type deliveryPayloadRecord struct { - DeliveryID string `json:"delivery_id"` - Attachments []deliveryPayloadAttachmentRecord `json:"attachments"` -} - -type deliveryPayloadAttachmentRecord struct { - Filename string `json:"filename"` - ContentType string `json:"content_type"` - ContentBase64 string `json:"content_base64"` - SizeBytes int64 `json:"size_bytes"` -} - -type malformedCommandRecord struct { - StreamEntryID string `json:"stream_entry_id"` - DeliveryID string `json:"delivery_id,omitempty"` - Source string `json:"source,omitempty"` - IdempotencyKey string `json:"idempotency_key,omitempty"` - FailureCode malformedcommand.FailureCode `json:"failure_code"` - FailureMessage string `json:"failure_message"` - RawFieldsJSON map[string]any 
`json:"raw_fields_json"` - RecordedAtMS int64 `json:"recorded_at_ms"` -} - -type streamOffsetRecord struct { - Stream string `json:"stream"` - LastProcessedEntryID string `json:"last_processed_entry_id"` - UpdatedAtMS int64 `json:"updated_at_ms"` -} - -// StreamOffset stores the persisted progress of one plain-XREAD consumer. -type StreamOffset struct { - // Stream stores the Redis Stream name. - Stream string - - // LastProcessedEntryID stores the last durably processed entry id. - LastProcessedEntryID string - - // UpdatedAt stores when the offset was updated. - UpdatedAt time.Time -} - -// Validate reports whether offset contains a complete persisted progress -// record. -func (offset StreamOffset) Validate() error { - if strings.TrimSpace(offset.Stream) == "" { - return fmt.Errorf("stream offset stream must not be empty") - } - if strings.TrimSpace(offset.LastProcessedEntryID) == "" { - return fmt.Errorf("stream offset last processed entry id must not be empty") - } - if err := common.ValidateTimestamp("stream offset updated at", offset.UpdatedAt); err != nil { - return err - } - - return nil -} - -type attachmentRecord struct { - Filename string `json:"filename"` - ContentType string `json:"content_type"` - SizeBytes int64 `json:"size_bytes"` -} - -// MarshalDelivery encodes record into the strict Redis JSON shape used for -// mail_delivery records. 
-func MarshalDelivery(record deliverydomain.Delivery) ([]byte, error) { - if err := record.Validate(); err != nil { - return nil, fmt.Errorf("marshal redis delivery record: %w", err) - } - - stored := deliveryRecord{ - DeliveryID: record.DeliveryID.String(), - ResendParentDeliveryID: record.ResendParentDeliveryID.String(), - Source: record.Source, - PayloadMode: record.PayloadMode, - TemplateID: record.TemplateID.String(), - TemplateVariables: optionalJSONObject(record.TemplateVariables), - To: cloneEmailStrings(record.Envelope.To), - Cc: cloneEmailStrings(record.Envelope.Cc), - Bcc: cloneEmailStrings(record.Envelope.Bcc), - ReplyTo: cloneEmailStrings(record.Envelope.ReplyTo), - Subject: record.Content.Subject, - TextBody: record.Content.TextBody, - HTMLBody: record.Content.HTMLBody, - Attachments: cloneAttachments(record.Attachments), - Locale: record.Locale.String(), - LocaleFallbackUsed: record.LocaleFallbackUsed, - IdempotencyKey: record.IdempotencyKey.String(), - Status: record.Status, - AttemptCount: record.AttemptCount, - LastAttemptStatus: record.LastAttemptStatus, - ProviderSummary: record.ProviderSummary, - CreatedAtMS: record.CreatedAt.UTC().UnixMilli(), - UpdatedAtMS: record.UpdatedAt.UTC().UnixMilli(), - SentAtMS: optionalUnixMilli(record.SentAt), - SuppressedAtMS: optionalUnixMilli(record.SuppressedAt), - FailedAtMS: optionalUnixMilli(record.FailedAt), - DeadLetteredAtMS: optionalUnixMilli(record.DeadLetteredAt), - } - - payload, err := json.Marshal(stored) - if err != nil { - return nil, fmt.Errorf("marshal redis delivery record: %w", err) - } - - return payload, nil -} - -// UnmarshalDelivery decodes payload from the strict Redis JSON shape used for -// mail_delivery records. 
-func UnmarshalDelivery(payload []byte) (deliverydomain.Delivery, error) { - var stored deliveryRecord - if err := decodeStrictJSON("decode redis delivery record", payload, &stored); err != nil { - return deliverydomain.Delivery{}, err - } - - record := deliverydomain.Delivery{ - DeliveryID: common.DeliveryID(stored.DeliveryID), - ResendParentDeliveryID: common.DeliveryID(stored.ResendParentDeliveryID), - Source: stored.Source, - PayloadMode: stored.PayloadMode, - TemplateID: common.TemplateID(stored.TemplateID), - TemplateVariables: cloneJSONObjectPtr(stored.TemplateVariables), - Envelope: deliverydomain.Envelope{ - To: cloneEmails(stored.To), - Cc: cloneEmails(stored.Cc), - Bcc: cloneEmails(stored.Bcc), - ReplyTo: cloneEmails(stored.ReplyTo), - }, - Content: deliverydomain.Content{ - Subject: stored.Subject, - TextBody: stored.TextBody, - HTMLBody: stored.HTMLBody, - }, - Attachments: inflateAttachments(stored.Attachments), - Locale: common.Locale(stored.Locale), - LocaleFallbackUsed: stored.LocaleFallbackUsed, - IdempotencyKey: common.IdempotencyKey(stored.IdempotencyKey), - Status: stored.Status, - AttemptCount: stored.AttemptCount, - LastAttemptStatus: stored.LastAttemptStatus, - ProviderSummary: stored.ProviderSummary, - CreatedAt: time.UnixMilli(stored.CreatedAtMS).UTC(), - UpdatedAt: time.UnixMilli(stored.UpdatedAtMS).UTC(), - SentAt: inflateOptionalTime(stored.SentAtMS), - SuppressedAt: inflateOptionalTime(stored.SuppressedAtMS), - FailedAt: inflateOptionalTime(stored.FailedAtMS), - DeadLetteredAt: inflateOptionalTime(stored.DeadLetteredAtMS), - } - if err := record.Validate(); err != nil { - return deliverydomain.Delivery{}, fmt.Errorf("decode redis delivery record: %w", err) - } - - return record, nil -} - -// MarshalAttempt encodes record into the strict Redis JSON shape used for -// mail_attempt records. 
-func MarshalAttempt(record attempt.Attempt) ([]byte, error) { - if err := record.Validate(); err != nil { - return nil, fmt.Errorf("marshal redis attempt record: %w", err) - } - - stored := attemptRecord{ - DeliveryID: record.DeliveryID.String(), - AttemptNo: record.AttemptNo, - ScheduledForMS: record.ScheduledFor.UTC().UnixMilli(), - StartedAtMS: optionalUnixMilli(record.StartedAt), - FinishedAtMS: optionalUnixMilli(record.FinishedAt), - Status: record.Status, - ProviderClassification: record.ProviderClassification, - ProviderSummary: record.ProviderSummary, - } - - payload, err := json.Marshal(stored) - if err != nil { - return nil, fmt.Errorf("marshal redis attempt record: %w", err) - } - - return payload, nil -} - -// UnmarshalAttempt decodes payload from the strict Redis JSON shape used for -// mail_attempt records. -func UnmarshalAttempt(payload []byte) (attempt.Attempt, error) { - var stored attemptRecord - if err := decodeStrictJSON("decode redis attempt record", payload, &stored); err != nil { - return attempt.Attempt{}, err - } - - record := attempt.Attempt{ - DeliveryID: common.DeliveryID(stored.DeliveryID), - AttemptNo: stored.AttemptNo, - ScheduledFor: time.UnixMilli(stored.ScheduledForMS).UTC(), - StartedAt: inflateOptionalTime(stored.StartedAtMS), - FinishedAt: inflateOptionalTime(stored.FinishedAtMS), - Status: stored.Status, - ProviderClassification: stored.ProviderClassification, - ProviderSummary: stored.ProviderSummary, - } - if err := record.Validate(); err != nil { - return attempt.Attempt{}, fmt.Errorf("decode redis attempt record: %w", err) - } - - return record, nil -} - -// MarshalIdempotency encodes record into the strict Redis JSON shape used for -// mail_idempotency_record values. 
-func MarshalIdempotency(record idempotency.Record) ([]byte, error) { - if err := record.Validate(); err != nil { - return nil, fmt.Errorf("marshal redis idempotency record: %w", err) - } - - stored := idempotencyRecord{ - Source: record.Source, - IdempotencyKey: record.IdempotencyKey.String(), - DeliveryID: record.DeliveryID.String(), - RequestFingerprint: record.RequestFingerprint, - CreatedAtMS: record.CreatedAt.UTC().UnixMilli(), - ExpiresAtMS: record.ExpiresAt.UTC().UnixMilli(), - } - - payload, err := json.Marshal(stored) - if err != nil { - return nil, fmt.Errorf("marshal redis idempotency record: %w", err) - } - - return payload, nil -} - -// UnmarshalIdempotency decodes payload from the strict Redis JSON shape used -// for mail_idempotency_record values. -func UnmarshalIdempotency(payload []byte) (idempotency.Record, error) { - var stored idempotencyRecord - if err := decodeStrictJSON("decode redis idempotency record", payload, &stored); err != nil { - return idempotency.Record{}, err - } - - record := idempotency.Record{ - Source: stored.Source, - IdempotencyKey: common.IdempotencyKey(stored.IdempotencyKey), - DeliveryID: common.DeliveryID(stored.DeliveryID), - RequestFingerprint: stored.RequestFingerprint, - CreatedAt: time.UnixMilli(stored.CreatedAtMS).UTC(), - ExpiresAt: time.UnixMilli(stored.ExpiresAtMS).UTC(), - } - if err := record.Validate(); err != nil { - return idempotency.Record{}, fmt.Errorf("decode redis idempotency record: %w", err) - } - - return record, nil -} - -// MarshalDeadLetter encodes entry into the strict Redis JSON shape used for -// mail_dead_letter_entry values. 
-func MarshalDeadLetter(entry deliverydomain.DeadLetterEntry) ([]byte, error) { - if err := entry.Validate(); err != nil { - return nil, fmt.Errorf("marshal redis dead-letter record: %w", err) - } - - stored := deadLetterRecord{ - DeliveryID: entry.DeliveryID.String(), - FinalAttemptNo: entry.FinalAttemptNo, - FailureClassification: entry.FailureClassification, - ProviderSummary: entry.ProviderSummary, - CreatedAtMS: entry.CreatedAt.UTC().UnixMilli(), - RecoveryHint: entry.RecoveryHint, - } - - payload, err := json.Marshal(stored) - if err != nil { - return nil, fmt.Errorf("marshal redis dead-letter record: %w", err) - } - - return payload, nil -} - -// UnmarshalDeadLetter decodes payload from the strict Redis JSON shape used -// for mail_dead_letter_entry values. -func UnmarshalDeadLetter(payload []byte) (deliverydomain.DeadLetterEntry, error) { - var stored deadLetterRecord - if err := decodeStrictJSON("decode redis dead-letter record", payload, &stored); err != nil { - return deliverydomain.DeadLetterEntry{}, err - } - - entry := deliverydomain.DeadLetterEntry{ - DeliveryID: common.DeliveryID(stored.DeliveryID), - FinalAttemptNo: stored.FinalAttemptNo, - FailureClassification: stored.FailureClassification, - ProviderSummary: stored.ProviderSummary, - CreatedAt: time.UnixMilli(stored.CreatedAtMS).UTC(), - RecoveryHint: stored.RecoveryHint, - } - if err := entry.Validate(); err != nil { - return deliverydomain.DeadLetterEntry{}, fmt.Errorf("decode redis dead-letter record: %w", err) - } - - return entry, nil -} - -// MarshalDeliveryPayload encodes payload into the strict Redis JSON shape used -// for raw generic-delivery attachment bundles. 
-func MarshalDeliveryPayload(payload acceptgenericdelivery.DeliveryPayload) ([]byte, error) { - if err := payload.Validate(); err != nil { - return nil, fmt.Errorf("marshal redis delivery payload record: %w", err) - } - - stored := deliveryPayloadRecord{ - DeliveryID: payload.DeliveryID.String(), - Attachments: cloneDeliveryPayloadAttachments(payload.Attachments), - } - - encoded, err := json.Marshal(stored) - if err != nil { - return nil, fmt.Errorf("marshal redis delivery payload record: %w", err) - } - - return encoded, nil -} - -// UnmarshalDeliveryPayload decodes payload from the strict Redis JSON shape -// used for raw generic-delivery attachment bundles. -func UnmarshalDeliveryPayload(payload []byte) (acceptgenericdelivery.DeliveryPayload, error) { - var stored deliveryPayloadRecord - if err := decodeStrictJSON("decode redis delivery payload record", payload, &stored); err != nil { - return acceptgenericdelivery.DeliveryPayload{}, err - } - - record := acceptgenericdelivery.DeliveryPayload{ - DeliveryID: common.DeliveryID(stored.DeliveryID), - Attachments: inflateDeliveryPayloadAttachments(stored.Attachments), - } - if err := record.Validate(); err != nil { - return acceptgenericdelivery.DeliveryPayload{}, fmt.Errorf("decode redis delivery payload record: %w", err) - } - - return record, nil -} - -// MarshalMalformedCommand encodes entry into the strict Redis JSON shape used -// for operator-visible malformed async command records. 
-func MarshalMalformedCommand(entry malformedcommand.Entry) ([]byte, error) { - if err := entry.Validate(); err != nil { - return nil, fmt.Errorf("marshal redis malformed command record: %w", err) - } - - stored := malformedCommandRecord{ - StreamEntryID: entry.StreamEntryID, - DeliveryID: entry.DeliveryID, - Source: entry.Source, - IdempotencyKey: entry.IdempotencyKey, - FailureCode: entry.FailureCode, - FailureMessage: entry.FailureMessage, - RawFieldsJSON: cloneJSONObject(entry.RawFields), - RecordedAtMS: entry.RecordedAt.UTC().UnixMilli(), - } - - encoded, err := json.Marshal(stored) - if err != nil { - return nil, fmt.Errorf("marshal redis malformed command record: %w", err) - } - - return encoded, nil -} - -// UnmarshalMalformedCommand decodes payload from the strict Redis JSON shape -// used for operator-visible malformed async command records. -func UnmarshalMalformedCommand(payload []byte) (malformedcommand.Entry, error) { - var stored malformedCommandRecord - if err := decodeStrictJSON("decode redis malformed command record", payload, &stored); err != nil { - return malformedcommand.Entry{}, err - } - - entry := malformedcommand.Entry{ - StreamEntryID: stored.StreamEntryID, - DeliveryID: stored.DeliveryID, - Source: stored.Source, - IdempotencyKey: stored.IdempotencyKey, - FailureCode: stored.FailureCode, - FailureMessage: stored.FailureMessage, - RawFields: cloneJSONObject(stored.RawFieldsJSON), - RecordedAt: time.UnixMilli(stored.RecordedAtMS).UTC(), - } - if err := entry.Validate(); err != nil { - return malformedcommand.Entry{}, fmt.Errorf("decode redis malformed command record: %w", err) - } - - return entry, nil -} - -// MarshalStreamOffset encodes offset into the strict Redis JSON shape used for -// persisted consumer progress. 
-func MarshalStreamOffset(offset StreamOffset) ([]byte, error) { - if err := offset.Validate(); err != nil { - return nil, fmt.Errorf("marshal redis stream offset record: %w", err) - } - - stored := streamOffsetRecord{ - Stream: offset.Stream, - LastProcessedEntryID: offset.LastProcessedEntryID, - UpdatedAtMS: offset.UpdatedAt.UTC().UnixMilli(), - } - - encoded, err := json.Marshal(stored) - if err != nil { - return nil, fmt.Errorf("marshal redis stream offset record: %w", err) - } - - return encoded, nil -} - -// UnmarshalStreamOffset decodes payload from the strict Redis JSON shape used -// for persisted consumer progress. -func UnmarshalStreamOffset(payload []byte) (StreamOffset, error) { - var stored streamOffsetRecord - if err := decodeStrictJSON("decode redis stream offset record", payload, &stored); err != nil { - return StreamOffset{}, err - } - - offset := StreamOffset{ - Stream: stored.Stream, - LastProcessedEntryID: stored.LastProcessedEntryID, - UpdatedAt: time.UnixMilli(stored.UpdatedAtMS).UTC(), - } - if err := offset.Validate(); err != nil { - return StreamOffset{}, fmt.Errorf("decode redis stream offset record: %w", err) - } - - return offset, nil -} - -func decodeStrictJSON(operation string, payload []byte, target any) error { - decoder := json.NewDecoder(bytes.NewReader(payload)) - decoder.DisallowUnknownFields() - - if err := decoder.Decode(target); err != nil { - return fmt.Errorf("%s: %w", operation, err) - } - if err := decoder.Decode(&struct{}{}); err != io.EOF { - if err == nil { - return fmt.Errorf("%s: unexpected trailing JSON input", operation) - } - return fmt.Errorf("%s: %w", operation, err) - } - - return nil -} - -func cloneEmailStrings(values []common.Email) []string { - if values == nil { - return nil - } - - cloned := make([]string, len(values)) - for index, value := range values { - cloned[index] = value.String() - } - - return cloned -} - -func cloneEmails(values []string) []common.Email { - if values == nil { - return nil - } - 
- cloned := make([]common.Email, len(values)) - for index, value := range values { - cloned[index] = common.Email(value) - } - - return cloned -} - -func cloneAttachments(values []common.AttachmentMetadata) []attachmentRecord { - if values == nil { - return nil - } - - cloned := make([]attachmentRecord, len(values)) - for index, value := range values { - cloned[index] = attachmentRecord{ - Filename: value.Filename, - ContentType: value.ContentType, - SizeBytes: value.SizeBytes, - } - } - - return cloned -} - -func inflateAttachments(values []attachmentRecord) []common.AttachmentMetadata { - if values == nil { - return nil - } - - cloned := make([]common.AttachmentMetadata, len(values)) - for index, value := range values { - cloned[index] = common.AttachmentMetadata{ - Filename: value.Filename, - ContentType: value.ContentType, - SizeBytes: value.SizeBytes, - } - } - - return cloned -} - -func optionalJSONObject(value map[string]any) *map[string]any { - if value == nil { - return nil - } - - cloned := make(map[string]any, len(value)) - for key, item := range value { - cloned[key] = cloneJSONValue(item) - } - - return &cloned -} - -func cloneJSONObjectPtr(value *map[string]any) map[string]any { - if value == nil { - return nil - } - - cloned := make(map[string]any, len(*value)) - for key, item := range *value { - cloned[key] = cloneJSONValue(item) - } - - return cloned -} - -func cloneJSONObject(value map[string]any) map[string]any { - if value == nil { - return nil - } - - cloned := make(map[string]any, len(value)) - for key, item := range value { - cloned[key] = cloneJSONValue(item) - } - - return cloned -} - -func cloneJSONValue(value any) any { - switch typed := value.(type) { - case map[string]any: - cloned := make(map[string]any, len(typed)) - for key, item := range typed { - cloned[key] = cloneJSONValue(item) - } - return cloned - case []any: - cloned := make([]any, len(typed)) - for index, item := range typed { - cloned[index] = cloneJSONValue(item) - } - 
return cloned - default: - return typed - } -} - -func cloneDeliveryPayloadAttachments(values []acceptgenericdelivery.AttachmentPayload) []deliveryPayloadAttachmentRecord { - if values == nil { - return nil - } - - cloned := make([]deliveryPayloadAttachmentRecord, len(values)) - for index, value := range values { - cloned[index] = deliveryPayloadAttachmentRecord{ - Filename: value.Filename, - ContentType: value.ContentType, - ContentBase64: value.ContentBase64, - SizeBytes: value.SizeBytes, - } - } - - return cloned -} - -func inflateDeliveryPayloadAttachments(values []deliveryPayloadAttachmentRecord) []acceptgenericdelivery.AttachmentPayload { - if values == nil { - return nil - } - - cloned := make([]acceptgenericdelivery.AttachmentPayload, len(values)) - for index, value := range values { - cloned[index] = acceptgenericdelivery.AttachmentPayload{ - Filename: value.Filename, - ContentType: value.ContentType, - ContentBase64: value.ContentBase64, - SizeBytes: value.SizeBytes, - } - } - - return cloned -} - -func optionalUnixMilli(value *time.Time) *int64 { - if value == nil { - return nil - } - - milliseconds := value.UTC().UnixMilli() - return &milliseconds -} - -func inflateOptionalTime(value *int64) *time.Time { - if value == nil { - return nil - } - - converted := time.UnixMilli(*value).UTC() - return &converted -} diff --git a/mail/internal/adapters/redisstate/codecs_test.go b/mail/internal/adapters/redisstate/codecs_test.go deleted file mode 100644 index dcd91bc..0000000 --- a/mail/internal/adapters/redisstate/codecs_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package redisstate - -import ( - "bytes" - "testing" - - "galaxy/mail/internal/domain/attempt" - deliverydomain "galaxy/mail/internal/domain/delivery" - "galaxy/mail/internal/domain/idempotency" - - "github.com/stretchr/testify/require" -) - -func TestDeliveryCodecRoundTrip(t *testing.T) { - t.Parallel() - - record := validDelivery(t) - - payload, err := MarshalDelivery(record) - require.NoError(t, err) - 
- decoded, err := UnmarshalDelivery(payload) - require.NoError(t, err) - require.Equal(t, record, decoded) -} - -func TestAttemptCodecRoundTrip(t *testing.T) { - t.Parallel() - - record := validTerminalAttempt(t, validDelivery(t).DeliveryID) - - payload, err := MarshalAttempt(record) - require.NoError(t, err) - - decoded, err := UnmarshalAttempt(payload) - require.NoError(t, err) - require.Equal(t, record, decoded) -} - -func TestIdempotencyCodecRoundTrip(t *testing.T) { - t.Parallel() - - deliveryRecord := validDelivery(t) - record := validIdempotencyRecord(t, deliveryRecord.Source, deliveryRecord.DeliveryID, deliveryRecord.IdempotencyKey) - - payload, err := MarshalIdempotency(record) - require.NoError(t, err) - - decoded, err := UnmarshalIdempotency(payload) - require.NoError(t, err) - require.Equal(t, record, decoded) -} - -func TestDeadLetterCodecRoundTrip(t *testing.T) { - t.Parallel() - - record := validDeadLetterEntry(t, validDelivery(t).DeliveryID) - - payload, err := MarshalDeadLetter(record) - require.NoError(t, err) - - decoded, err := UnmarshalDeadLetter(payload) - require.NoError(t, err) - require.Equal(t, record, decoded) -} - -func TestDeliveryCodecRejectsUnknownField(t *testing.T) { - t.Parallel() - - payload, err := MarshalDelivery(validDelivery(t)) - require.NoError(t, err) - - payload = append(payload[:len(payload)-1], []byte(`,"extra":true}`)...) 
- - _, err = UnmarshalDelivery(payload) - require.Error(t, err) - require.ErrorContains(t, err, "unknown field") -} - -func TestAttemptCodecRejectsWrongType(t *testing.T) { - t.Parallel() - - payload, err := MarshalAttempt(validTerminalAttempt(t, validDelivery(t).DeliveryID)) - require.NoError(t, err) - - payload = bytes.Replace(payload, []byte(`"attempt_no":2`), []byte(`"attempt_no":"2"`), 1) - - _, err = UnmarshalAttempt(payload) - require.Error(t, err) - require.ErrorContains(t, err, "cannot unmarshal") -} - -func TestIdempotencyCodecRejectsTrailingJSON(t *testing.T) { - t.Parallel() - - deliveryRecord := validDelivery(t) - payload, err := MarshalIdempotency(validIdempotencyRecord(t, deliveryRecord.Source, deliveryRecord.DeliveryID, deliveryRecord.IdempotencyKey)) - require.NoError(t, err) - - payload = append(payload, []byte(` {}`)...) - - _, err = UnmarshalIdempotency(payload) - require.Error(t, err) - require.ErrorContains(t, err, "unexpected trailing JSON input") -} - -func TestDeadLetterCodecRejectsUnknownField(t *testing.T) { - t.Parallel() - - payload, err := MarshalDeadLetter(validDeadLetterEntry(t, validDelivery(t).DeliveryID)) - require.NoError(t, err) - - payload = append(payload[:len(payload)-1], []byte(`,"unexpected":"value"}`)...) - - _, err = UnmarshalDeadLetter(payload) - require.Error(t, err) - require.ErrorContains(t, err, "unknown field") -} - -var ( - _ = attempt.Attempt{} - _ = deliverydomain.DeadLetterEntry{} - _ = idempotency.Record{} -) diff --git a/mail/internal/adapters/redisstate/errors.go b/mail/internal/adapters/redisstate/errors.go deleted file mode 100644 index ebefcf3..0000000 --- a/mail/internal/adapters/redisstate/errors.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package redisstate defines the frozen Redis keyspace, strict JSON records, -// and low-level mutation helpers used by future Mail Service Redis adapters. 
-package redisstate - -import "errors" - -var ( - // ErrConflict reports that a Redis mutation could not be applied because - // one of the watched or newly created keys already existed or changed - // concurrently. - ErrConflict = errors.New("redis state conflict") -) diff --git a/mail/internal/adapters/redisstate/fixtures_test.go b/mail/internal/adapters/redisstate/fixtures_test.go deleted file mode 100644 index 26ed85b..0000000 --- a/mail/internal/adapters/redisstate/fixtures_test.go +++ /dev/null @@ -1,201 +0,0 @@ -package redisstate - -import ( - "encoding/base64" - "time" - - "galaxy/mail/internal/domain/attempt" - "galaxy/mail/internal/domain/common" - deliverydomain "galaxy/mail/internal/domain/delivery" - "galaxy/mail/internal/domain/idempotency" - "galaxy/mail/internal/domain/malformedcommand" - "galaxy/mail/internal/service/acceptgenericdelivery" - - "github.com/stretchr/testify/require" -) - -func validDelivery(t require.TestingT) deliverydomain.Delivery { - locale, err := common.ParseLocale("fr-fr") - require.NoError(t, err) - - createdAt := time.Unix(1_775_121_700, 0).UTC() - updatedAt := createdAt.Add(2 * time.Minute) - sentAt := updatedAt.Add(15 * time.Second) - - record := deliverydomain.Delivery{ - DeliveryID: common.DeliveryID("delivery-123"), - ResendParentDeliveryID: common.DeliveryID("delivery-parent-001"), - Source: deliverydomain.SourceOperatorResend, - PayloadMode: deliverydomain.PayloadModeTemplate, - TemplateID: common.TemplateID("auth.login_code"), - Envelope: deliverydomain.Envelope{ - To: []common.Email{common.Email("pilot@example.com")}, - Cc: []common.Email{common.Email("copilot@example.com")}, - Bcc: []common.Email{common.Email("ops@example.com")}, - ReplyTo: []common.Email{common.Email("noreply@example.com")}, - }, - Content: deliverydomain.Content{ - Subject: "Your login code", - TextBody: "Code: 123456", - HTMLBody: "

Code: 123456

", - }, - Attachments: []common.AttachmentMetadata{ - {Filename: "instructions.txt", ContentType: "text/plain; charset=utf-8", SizeBytes: 128}, - }, - Locale: locale, - TemplateVariables: map[string]any{ - "code": "123456", - }, - LocaleFallbackUsed: true, - IdempotencyKey: common.IdempotencyKey("operator:resend:delivery-123"), - Status: deliverydomain.StatusSent, - AttemptCount: 2, - LastAttemptStatus: attempt.StatusProviderAccepted, - ProviderSummary: "queued by provider", - CreatedAt: createdAt, - UpdatedAt: updatedAt, - SentAt: &sentAt, - } - require.NoError(t, record.Validate()) - - return record -} - -func validScheduledAttempt(t require.TestingT, deliveryID common.DeliveryID) attempt.Attempt { - scheduledFor := time.Unix(1_775_121_820, 0).UTC() - - record := attempt.Attempt{ - DeliveryID: deliveryID, - AttemptNo: 1, - ScheduledFor: scheduledFor, - Status: attempt.StatusScheduled, - } - require.NoError(t, record.Validate()) - - return record -} - -func validQueuedTemplateDelivery(t require.TestingT) deliverydomain.Delivery { - record := validDelivery(t) - record.DeliveryID = common.DeliveryID("delivery-queued") - record.ResendParentDeliveryID = "" - record.Source = deliverydomain.SourceNotification - record.Status = deliverydomain.StatusQueued - record.AttemptCount = 1 - record.LastAttemptStatus = "" - record.ProviderSummary = "" - record.LocaleFallbackUsed = false - record.Content = deliverydomain.Content{} - record.CreatedAt = time.Unix(1_775_121_700, 0).UTC() - record.UpdatedAt = record.CreatedAt - record.SentAt = nil - record.SuppressedAt = nil - record.FailedAt = nil - record.DeadLetteredAt = nil - record.IdempotencyKey = common.IdempotencyKey("notification:delivery-queued") - require.NoError(t, record.Validate()) - - return record -} - -func validTerminalAttempt(t require.TestingT, deliveryID common.DeliveryID) attempt.Attempt { - scheduledFor := time.Unix(1_775_121_820, 0).UTC() - startedAt := scheduledFor.Add(5 * time.Second) - finishedAt := 
startedAt.Add(2 * time.Second) - - record := attempt.Attempt{ - DeliveryID: deliveryID, - AttemptNo: 2, - ScheduledFor: scheduledFor, - StartedAt: &startedAt, - FinishedAt: &finishedAt, - Status: attempt.StatusProviderAccepted, - ProviderClassification: "accepted", - ProviderSummary: "queued by provider", - } - require.NoError(t, record.Validate()) - - return record -} - -func validRenderFailedAttempt(t require.TestingT, deliveryID common.DeliveryID) attempt.Attempt { - record := validScheduledAttempt(t, deliveryID) - startedAt := record.ScheduledFor.Add(time.Second) - finishedAt := startedAt - record.StartedAt = &startedAt - record.FinishedAt = &finishedAt - record.Status = attempt.StatusRenderFailed - record.ProviderClassification = "missing_required_variable" - record.ProviderSummary = "missing required variables: player.name" - require.NoError(t, record.Validate()) - - return record -} - -func validIdempotencyRecord(t require.TestingT, source deliverydomain.Source, deliveryID common.DeliveryID, key common.IdempotencyKey) idempotency.Record { - createdAt := time.Now().UTC().Truncate(time.Millisecond).Add(-time.Minute) - - record := idempotency.Record{ - Source: source, - IdempotencyKey: key, - DeliveryID: deliveryID, - RequestFingerprint: "sha256:abcdef123456", - CreatedAt: createdAt, - ExpiresAt: createdAt.Add(IdempotencyTTL), - } - require.NoError(t, record.Validate()) - - return record -} - -func validDeadLetterEntry(t require.TestingT, deliveryID common.DeliveryID) deliverydomain.DeadLetterEntry { - entry := deliverydomain.DeadLetterEntry{ - DeliveryID: deliveryID, - FinalAttemptNo: 3, - FailureClassification: "retry_exhausted", - ProviderSummary: "smtp timeout", - CreatedAt: time.Unix(1_775_122_000, 0).UTC(), - RecoveryHint: "check SMTP connectivity", - } - require.NoError(t, entry.Validate()) - - return entry -} - -func validDeliveryPayload(t require.TestingT, deliveryID common.DeliveryID) acceptgenericdelivery.DeliveryPayload { - payload := 
acceptgenericdelivery.DeliveryPayload{ - DeliveryID: deliveryID, - Attachments: []acceptgenericdelivery.AttachmentPayload{ - { - Filename: "instructions.txt", - ContentType: "text/plain; charset=utf-8", - ContentBase64: base64.StdEncoding.EncodeToString([]byte("read me")), - SizeBytes: int64(len([]byte("read me"))), - }, - }, - } - require.NoError(t, payload.Validate()) - - return payload -} - -func validMalformedCommandEntry(t require.TestingT) malformedcommand.Entry { - entry := malformedcommand.Entry{ - StreamEntryID: "1775121700000-0", - DeliveryID: "mail-123", - Source: "notification", - IdempotencyKey: "notification:mail-123", - FailureCode: malformedcommand.FailureCodeInvalidPayload, - FailureMessage: "payload_json.subject is required", - RawFields: map[string]any{ - "delivery_id": "mail-123", - "source": "notification", - "payload_mode": "rendered", - "idempotency_key": "notification:mail-123", - }, - RecordedAt: time.Unix(1_775_121_700, 0).UTC(), - } - require.NoError(t, entry.Validate()) - - return entry -} diff --git a/mail/internal/adapters/redisstate/generic_acceptance_store.go b/mail/internal/adapters/redisstate/generic_acceptance_store.go deleted file mode 100644 index 8c9ab7b..0000000 --- a/mail/internal/adapters/redisstate/generic_acceptance_store.go +++ /dev/null @@ -1,148 +0,0 @@ -package redisstate - -import ( - "context" - "errors" - "fmt" - - "galaxy/mail/internal/domain/common" - deliverydomain "galaxy/mail/internal/domain/delivery" - "galaxy/mail/internal/domain/idempotency" - "galaxy/mail/internal/service/acceptgenericdelivery" - - "github.com/redis/go-redis/v9" -) - -// GenericAcceptanceStore provides the Redis-backed durable storage used by the -// generic-delivery acceptance use case. -type GenericAcceptanceStore struct { - client *redis.Client - writer *AtomicWriter - keys Keyspace -} - -// NewGenericAcceptanceStore constructs one Redis-backed generic acceptance -// store. 
-func NewGenericAcceptanceStore(client *redis.Client) (*GenericAcceptanceStore, error) { - if client == nil { - return nil, errors.New("new generic acceptance store: nil redis client") - } - - writer, err := NewAtomicWriter(client) - if err != nil { - return nil, fmt.Errorf("new generic acceptance store: %w", err) - } - - return &GenericAcceptanceStore{ - client: client, - writer: writer, - keys: Keyspace{}, - }, nil -} - -// CreateAcceptance stores one generic-delivery acceptance write set in Redis. -func (store *GenericAcceptanceStore) CreateAcceptance(ctx context.Context, input acceptgenericdelivery.CreateAcceptanceInput) error { - if store == nil || store.client == nil || store.writer == nil { - return errors.New("create generic acceptance: nil store") - } - if ctx == nil { - return errors.New("create generic acceptance: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("create generic acceptance: %w", err) - } - - writerInput := CreateAcceptanceInput{ - Delivery: input.Delivery, - FirstAttempt: &input.FirstAttempt, - Idempotency: &input.Idempotency, - } - if input.DeliveryPayload != nil { - writerInput.DeliveryPayload = input.DeliveryPayload - } - - err := store.writer.CreateAcceptance(ctx, writerInput) - if errors.Is(err, ErrConflict) { - return fmt.Errorf("create generic acceptance: %w", acceptgenericdelivery.ErrConflict) - } - if err != nil { - return fmt.Errorf("create generic acceptance: %w", err) - } - - return nil -} - -// GetIdempotency loads one accepted idempotency scope from Redis. 
-func (store *GenericAcceptanceStore) GetIdempotency(ctx context.Context, source deliverydomain.Source, key common.IdempotencyKey) (idempotency.Record, bool, error) { - if store == nil || store.client == nil { - return idempotency.Record{}, false, errors.New("get generic acceptance idempotency: nil store") - } - if ctx == nil { - return idempotency.Record{}, false, errors.New("get generic acceptance idempotency: nil context") - } - - payload, err := store.client.Get(ctx, store.keys.Idempotency(source, key)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return idempotency.Record{}, false, nil - case err != nil: - return idempotency.Record{}, false, fmt.Errorf("get generic acceptance idempotency: %w", err) - } - - record, err := UnmarshalIdempotency(payload) - if err != nil { - return idempotency.Record{}, false, fmt.Errorf("get generic acceptance idempotency: %w", err) - } - - return record, true, nil -} - -// GetDelivery loads one accepted delivery by its identifier. -func (store *GenericAcceptanceStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) { - if store == nil || store.client == nil { - return deliverydomain.Delivery{}, false, errors.New("get generic acceptance delivery: nil store") - } - if ctx == nil { - return deliverydomain.Delivery{}, false, errors.New("get generic acceptance delivery: nil context") - } - - payload, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return deliverydomain.Delivery{}, false, nil - case err != nil: - return deliverydomain.Delivery{}, false, fmt.Errorf("get generic acceptance delivery: %w", err) - } - - record, err := UnmarshalDelivery(payload) - if err != nil { - return deliverydomain.Delivery{}, false, fmt.Errorf("get generic acceptance delivery: %w", err) - } - - return record, true, nil -} - -// GetDeliveryPayload loads one raw accepted attachment bundle by delivery id. 
-func (store *GenericAcceptanceStore) GetDeliveryPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) { - if store == nil || store.client == nil { - return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get generic acceptance delivery payload: nil store") - } - if ctx == nil { - return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get generic acceptance delivery payload: nil context") - } - - payload, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return acceptgenericdelivery.DeliveryPayload{}, false, nil - case err != nil: - return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get generic acceptance delivery payload: %w", err) - } - - record, err := UnmarshalDeliveryPayload(payload) - if err != nil { - return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get generic acceptance delivery payload: %w", err) - } - - return record, true, nil -} diff --git a/mail/internal/adapters/redisstate/generic_acceptance_store_test.go b/mail/internal/adapters/redisstate/generic_acceptance_store_test.go deleted file mode 100644 index 2d63d39..0000000 --- a/mail/internal/adapters/redisstate/generic_acceptance_store_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package redisstate - -import ( - "context" - "testing" - "time" - - "galaxy/mail/internal/domain/common" - deliverydomain "galaxy/mail/internal/domain/delivery" - "galaxy/mail/internal/service/acceptgenericdelivery" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/require" -) - -func TestGenericAcceptanceStoreCreateAndReadRenderedDelivery(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - store, err := NewGenericAcceptanceStore(client) - 
require.NoError(t, err) - - record := validDelivery(t) - record.Source = deliverydomain.SourceNotification - record.ResendParentDeliveryID = "" - record.PayloadMode = deliverydomain.PayloadModeRendered - record.TemplateID = "" - record.TemplateVariables = nil - record.Locale = "" - record.LocaleFallbackUsed = false - record.Status = deliverydomain.StatusQueued - record.AttemptCount = 1 - record.LastAttemptStatus = "" - record.ProviderSummary = "" - record.SentAt = nil - record.UpdatedAt = record.CreatedAt - require.NoError(t, record.Validate()) - - input := acceptgenericdelivery.CreateAcceptanceInput{ - Delivery: record, - FirstAttempt: validScheduledAttempt(t, record.DeliveryID), - DeliveryPayload: ptr(validDeliveryPayload(t, record.DeliveryID)), - Idempotency: validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey), - } - - require.NoError(t, store.CreateAcceptance(context.Background(), input)) - - storedDelivery, found, err := store.GetDelivery(context.Background(), record.DeliveryID) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, record, storedDelivery) - - storedPayload, found, err := store.GetDeliveryPayload(context.Background(), record.DeliveryID) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, *input.DeliveryPayload, storedPayload) -} - -func TestGenericAcceptanceStoreReturnsMissingPayload(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - store, err := NewGenericAcceptanceStore(client) - require.NoError(t, err) - - payload, found, err := store.GetDeliveryPayload(context.Background(), common.DeliveryID("missing")) - require.NoError(t, err) - require.False(t, found) - require.Equal(t, acceptgenericdelivery.DeliveryPayload{}, payload) -} - -func TestMalformedCommandStoreRecordIsIdempotent(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) 
- client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - store, err := NewMalformedCommandStore(client) - require.NoError(t, err) - - entry := validMalformedCommandEntry(t) - - require.NoError(t, store.Record(context.Background(), entry)) - require.NoError(t, store.Record(context.Background(), entry)) - - storedEntry, found, err := store.Get(context.Background(), entry.StreamEntryID) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, entry, storedEntry) - - indexCard, err := client.ZCard(context.Background(), Keyspace{}.MalformedCommandCreatedAtIndex()).Result() - require.NoError(t, err) - require.EqualValues(t, 1, indexCard) -} - -func TestMalformedCommandStoreAppliesRetention(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - store, err := NewMalformedCommandStore(client) - require.NoError(t, err) - - entry := validMalformedCommandEntry(t) - require.NoError(t, store.Record(context.Background(), entry)) - - ttl := server.TTL(Keyspace{}.MalformedCommand(entry.StreamEntryID)) - require.InDelta(t, DeadLetterTTL.Seconds(), ttl.Seconds(), 1) -} - -func TestStreamOffsetStoreSaveAndLoad(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - store, err := NewStreamOffsetStore(client) - require.NoError(t, err) - - require.NoError(t, store.Save(context.Background(), "mail:delivery_commands", "1775121700000-0")) - - entryID, found, err := store.Load(context.Background(), "mail:delivery_commands") - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "1775121700000-0", entryID) - - payload, err := client.Get(context.Background(), Keyspace{}.StreamOffset("mail:delivery_commands")).Bytes() - 
require.NoError(t, err) - offset, err := UnmarshalStreamOffset(payload) - require.NoError(t, err) - require.Equal(t, "mail:delivery_commands", offset.Stream) - require.Equal(t, "1775121700000-0", offset.LastProcessedEntryID) - require.WithinDuration(t, time.Now().UTC(), offset.UpdatedAt, time.Second) -} diff --git a/mail/internal/adapters/redisstate/index_cleaner.go b/mail/internal/adapters/redisstate/index_cleaner.go deleted file mode 100644 index ce03e4f..0000000 --- a/mail/internal/adapters/redisstate/index_cleaner.go +++ /dev/null @@ -1,118 +0,0 @@ -package redisstate - -import ( - "context" - "errors" - "fmt" - "strings" - - "galaxy/mail/internal/domain/common" - - "github.com/redis/go-redis/v9" -) - -// CleanupReport describes the work done by IndexCleaner. -type CleanupReport struct { - // ScannedIndexes stores how many secondary index keys were inspected. - ScannedIndexes int - - // ScannedMembers stores how many index members were examined. - ScannedMembers int - - // RemovedMembers stores how many stale members were removed. - RemovedMembers int -} - -// IndexCleaner removes stale delivery references from the Mail Service -// secondary indexes after primary delivery keys expire by TTL. -type IndexCleaner struct { - client *redis.Client - keyspace Keyspace -} - -// NewIndexCleaner constructs one delivery-index cleanup helper. -func NewIndexCleaner(client *redis.Client) (*IndexCleaner, error) { - if client == nil { - return nil, errors.New("new redis index cleaner: nil client") - } - - return &IndexCleaner{ - client: client, - keyspace: Keyspace{}, - }, nil -} - -// CleanDeliveryIndexes scans every `mail:idx:*` key and removes members that -// no longer have a primary delivery record. 
-func (cleaner *IndexCleaner) CleanDeliveryIndexes(ctx context.Context) (CleanupReport, error) { - if cleaner == nil || cleaner.client == nil { - return CleanupReport{}, errors.New("clean delivery indexes in redis: nil cleaner") - } - if ctx == nil { - return CleanupReport{}, errors.New("clean delivery indexes in redis: nil context") - } - - var ( - report CleanupReport - cursor uint64 - ) - - for { - keys, nextCursor, err := cleaner.client.Scan(ctx, cursor, cleaner.keyspace.SecondaryIndexPattern(), 0).Result() - if err != nil { - return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: %w", err) - } - - for _, key := range keys { - if key == cleaner.keyspace.MalformedCommandCreatedAtIndex() { - continue - } - - report.ScannedIndexes++ - - members, err := cleaner.client.ZRange(ctx, key, 0, -1).Result() - if err != nil { - return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: read index %q: %w", key, err) - } - - report.ScannedMembers += len(members) - for _, member := range members { - remove, err := cleaner.shouldRemoveMember(ctx, member) - if err != nil { - return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: inspect index %q member %q: %w", key, member, err) - } - if !remove { - continue - } - - if err := cleaner.client.ZRem(ctx, key, member).Err(); err != nil { - return CleanupReport{}, fmt.Errorf("clean delivery indexes in redis: remove index %q member %q: %w", key, member, err) - } - report.RemovedMembers++ - } - } - - if nextCursor == 0 { - return report, nil - } - cursor = nextCursor - } -} - -func (cleaner *IndexCleaner) shouldRemoveMember(ctx context.Context, member string) (bool, error) { - if strings.TrimSpace(member) == "" { - return true, nil - } - - deliveryID := common.DeliveryID(member) - if err := deliveryID.Validate(); err != nil { - return true, nil - } - - exists, err := cleaner.client.Exists(ctx, cleaner.keyspace.Delivery(deliveryID)).Result() - if err != nil { - return false, err - } - - return 
exists == 0, nil -} diff --git a/mail/internal/adapters/redisstate/index_cleaner_test.go b/mail/internal/adapters/redisstate/index_cleaner_test.go deleted file mode 100644 index 35edd2f..0000000 --- a/mail/internal/adapters/redisstate/index_cleaner_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package redisstate - -import ( - "context" - "testing" - "time" - - "galaxy/mail/internal/domain/attempt" - deliverydomain "galaxy/mail/internal/domain/delivery" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/require" -) - -func TestIndexCleanerRemovesStaleMembersAfterDeliveryExpiry(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - writer, err := NewAtomicWriter(client) - require.NoError(t, err) - cleaner, err := NewIndexCleaner(client) - require.NoError(t, err) - - record := validDelivery(t) - record.Source = deliverydomain.SourceNotification - record.ResendParentDeliveryID = "" - record.Status = deliverydomain.StatusQueued - record.SentAt = nil - record.LocaleFallbackUsed = false - record.UpdatedAt = record.CreatedAt.Add(time.Minute) - require.NoError(t, record.Validate()) - - input := CreateAcceptanceInput{ - Delivery: record, - FirstAttempt: ptr(validScheduledAttempt(t, record.DeliveryID)), - Idempotency: ptr(validIdempotencyRecord(t, record.Source, record.DeliveryID, record.IdempotencyKey)), - } - require.NoError(t, writer.CreateAcceptance(context.Background(), input)) - - deadLetterEntry := validDeadLetterEntry(t, record.DeliveryID) - deadLetterPayload, err := MarshalDeadLetter(deadLetterEntry) - require.NoError(t, err) - require.NoError(t, client.Set(context.Background(), Keyspace{}.DeadLetter(record.DeliveryID), deadLetterPayload, DeadLetterTTL).Err()) - - server.FastForward(DeliveryTTL + time.Second) - - require.False(t, server.Exists(Keyspace{}.Delivery(record.DeliveryID))) - 
require.True(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, input.FirstAttempt.AttemptNo))) - require.True(t, server.Exists(Keyspace{}.DeadLetter(record.DeliveryID))) - - report, err := cleaner.CleanDeliveryIndexes(context.Background()) - require.NoError(t, err) - require.Positive(t, report.ScannedIndexes) - require.Positive(t, report.ScannedMembers) - require.Positive(t, report.RemovedMembers) - - assertZCard := func(key string, want int64) { - t.Helper() - - got, err := client.ZCard(context.Background(), key).Result() - require.NoError(t, err) - require.Equal(t, want, got) - } - - assertZCard(Keyspace{}.CreatedAtIndex(), 0) - assertZCard(Keyspace{}.SourceIndex(record.Source), 0) - assertZCard(Keyspace{}.StatusIndex(record.Status), 0) - assertZCard(Keyspace{}.RecipientIndex(record.Envelope.To[0]), 0) - assertZCard(Keyspace{}.RecipientIndex(record.Envelope.Cc[0]), 0) - assertZCard(Keyspace{}.RecipientIndex(record.Envelope.Bcc[0]), 0) - assertZCard(Keyspace{}.TemplateIndex(record.TemplateID), 0) - assertZCard(Keyspace{}.IdempotencyIndex(record.Source, record.IdempotencyKey), 0) - - require.True(t, server.Exists(Keyspace{}.Attempt(record.DeliveryID, input.FirstAttempt.AttemptNo))) - require.True(t, server.Exists(Keyspace{}.DeadLetter(record.DeliveryID))) - scheduleCard, err := client.ZCard(context.Background(), Keyspace{}.AttemptSchedule()).Result() - require.NoError(t, err) - require.EqualValues(t, 1, scheduleCard) -} - -func TestIndexCleanerSkipsMalformedCommandIndex(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - cleaner, err := NewIndexCleaner(client) - require.NoError(t, err) - - entry := validMalformedCommandEntry(t) - require.NoError(t, client.ZAdd(context.Background(), Keyspace{}.MalformedCommandCreatedAtIndex(), redis.Z{ - Score: float64(entry.RecordedAt.UTC().UnixMilli()), - Member: entry.StreamEntryID, - 
}).Err()) - - report, err := cleaner.CleanDeliveryIndexes(context.Background()) - require.NoError(t, err) - require.Zero(t, report.ScannedIndexes) - require.Zero(t, report.ScannedMembers) - require.Zero(t, report.RemovedMembers) - - indexMembers, err := client.ZRange(context.Background(), Keyspace{}.MalformedCommandCreatedAtIndex(), 0, -1).Result() - require.NoError(t, err) - require.Equal(t, []string{entry.StreamEntryID}, indexMembers) -} - -var _ = attempt.Attempt{} diff --git a/mail/internal/adapters/redisstate/keyspace.go b/mail/internal/adapters/redisstate/keyspace.go index 2ea57b6..50fda4d 100644 --- a/mail/internal/adapters/redisstate/keyspace.go +++ b/mail/internal/adapters/redisstate/keyspace.go @@ -1,68 +1,20 @@ +// Package redisstate hosts the small surface of Redis state that survived the +// PG_PLAN.md §4 migration: the inbound `mail:delivery_commands` stream and +// the persisted offset of its consumer. Every other durable record (auth and +// generic acceptance, attempt execution, malformed commands, dead letters, +// operator listing) now lives in PostgreSQL via `mailstore`. package redisstate -import ( - "encoding/base64" - "sort" - "strconv" - "time" - - "galaxy/mail/internal/domain/common" - deliverydomain "galaxy/mail/internal/domain/delivery" -) +import "encoding/base64" const defaultPrefix = "mail:" -const ( - // IdempotencyTTL is the frozen Redis retention for idempotency records. - IdempotencyTTL = 7 * 24 * time.Hour - - // DeliveryTTL is the frozen Redis retention for accepted delivery records. - DeliveryTTL = 30 * 24 * time.Hour - - // AttemptTTL is the frozen Redis retention for attempt records. - AttemptTTL = 90 * 24 * time.Hour - - // DeadLetterTTL is the frozen Redis retention for dead-letter entries. - DeadLetterTTL = 90 * 24 * time.Hour -) - -// Keyspace builds the frozen Mail Service Redis keys. 
All dynamic key -// segments are encoded with base64url so raw key structure does not depend on -// user-provided or caller-provided characters. +// Keyspace builds the small surviving Mail Service Redis keyspace. Dynamic +// segments (the stream key embedded in the offset key) are encoded with +// base64url so raw key structure does not depend on caller-provided +// characters. type Keyspace struct{} -// Delivery returns the primary Redis key for one mail_delivery record. -func (Keyspace) Delivery(deliveryID common.DeliveryID) string { - return defaultPrefix + "deliveries:" + encodeKeyComponent(deliveryID.String()) -} - -// Attempt returns the primary Redis key for one mail_attempt record. -func (Keyspace) Attempt(deliveryID common.DeliveryID, attemptNo int) string { - return defaultPrefix + "attempts:" + encodeKeyComponent(deliveryID.String()) + ":" + encodeKeyComponent(strconv.Itoa(attemptNo)) -} - -// Idempotency returns the primary Redis key for one mail_idempotency_record. -func (Keyspace) Idempotency(source deliverydomain.Source, key common.IdempotencyKey) string { - return defaultPrefix + "idempotency:" + encodeKeyComponent(string(source)) + ":" + encodeKeyComponent(key.String()) -} - -// DeadLetter returns the primary Redis key for one mail_dead_letter_entry. -func (Keyspace) DeadLetter(deliveryID common.DeliveryID) string { - return defaultPrefix + "dead_letters:" + encodeKeyComponent(deliveryID.String()) -} - -// DeliveryPayload returns the primary Redis key for one raw generic-delivery -// payload bundle. -func (Keyspace) DeliveryPayload(deliveryID common.DeliveryID) string { - return defaultPrefix + "delivery_payloads:" + encodeKeyComponent(deliveryID.String()) -} - -// MalformedCommand returns the primary Redis key for one operator-visible -// malformed async command record. 
-func (Keyspace) MalformedCommand(streamEntryID string) string { - return defaultPrefix + "malformed_commands:" + encodeKeyComponent(streamEntryID) -} - // StreamOffset returns the primary Redis key for one persisted stream-consumer // offset. func (Keyspace) StreamOffset(stream string) string { @@ -74,99 +26,6 @@ func (Keyspace) DeliveryCommands() string { return defaultPrefix + "delivery_commands" } -// AttemptSchedule returns the frozen attempt schedule sorted-set key. -func (Keyspace) AttemptSchedule() string { - return defaultPrefix + "attempt_schedule" -} - -// RecipientIndex returns the secondary index key for one effective recipient. -func (Keyspace) RecipientIndex(email common.Email) string { - return defaultPrefix + "idx:recipient:" + encodeKeyComponent(email.String()) -} - -// StatusIndex returns the secondary index key for one delivery status. -func (Keyspace) StatusIndex(status deliverydomain.Status) string { - return defaultPrefix + "idx:status:" + encodeKeyComponent(string(status)) -} - -// SourceIndex returns the secondary index key for one delivery source. -func (Keyspace) SourceIndex(source deliverydomain.Source) string { - return defaultPrefix + "idx:source:" + encodeKeyComponent(string(source)) -} - -// TemplateIndex returns the secondary index key for one template id. -func (Keyspace) TemplateIndex(templateID common.TemplateID) string { - return defaultPrefix + "idx:template:" + encodeKeyComponent(templateID.String()) -} - -// IdempotencyIndex returns the secondary lookup key for one `(source, -// idempotency_key)` scope. -func (Keyspace) IdempotencyIndex(source deliverydomain.Source, key common.IdempotencyKey) string { - return defaultPrefix + "idx:idempotency:" + encodeKeyComponent(string(source)) + ":" + encodeKeyComponent(key.String()) -} - -// CreatedAtIndex returns the newest-first delivery ordering index key. 
-func (Keyspace) CreatedAtIndex() string { - return defaultPrefix + "idx:created_at" -} - -// MalformedCommandCreatedAtIndex returns the newest-first malformed-command -// ordering index key. -func (Keyspace) MalformedCommandCreatedAtIndex() string { - return defaultPrefix + "idx:malformed_command:created_at" -} - -// SecondaryIndexPattern returns the key-scan pattern that matches every -// delivery-level secondary index owned by Mail Service. -func (Keyspace) SecondaryIndexPattern() string { - return defaultPrefix + "idx:*" -} - -// DeliveryIndexKeys returns the full set of secondary index keys that must -// reference record at creation time. Recipient indexing covers `to`, `cc`, and -// `bcc`, but intentionally excludes `reply_to`. -func (keyspace Keyspace) DeliveryIndexKeys(record deliverydomain.Delivery) []string { - keys := []string{ - keyspace.StatusIndex(record.Status), - keyspace.SourceIndex(record.Source), - keyspace.IdempotencyIndex(record.Source, record.IdempotencyKey), - keyspace.CreatedAtIndex(), - } - if !record.TemplateID.IsZero() { - keys = append(keys, keyspace.TemplateIndex(record.TemplateID)) - } - - seen := make(map[string]struct{}, len(keys)+len(record.Envelope.To)+len(record.Envelope.Cc)+len(record.Envelope.Bcc)) - for _, key := range keys { - seen[key] = struct{}{} - } - for _, group := range [][]common.Email{record.Envelope.To, record.Envelope.Cc, record.Envelope.Bcc} { - for _, email := range group { - seen[keyspace.RecipientIndex(email)] = struct{}{} - } - } - - keys = keys[:0] - for key := range seen { - keys = append(keys, key) - } - sort.Strings(keys) - - return keys -} - -// CreatedAtScore returns the frozen sorted-set score representation for -// delivery creation timestamps. -func CreatedAtScore(createdAt time.Time) float64 { - return float64(createdAt.UTC().UnixMilli()) -} - -// ScheduledForScore returns the frozen sorted-set score representation for -// attempt schedule timestamps. 
-func ScheduledForScore(scheduledFor time.Time) float64 { - return float64(scheduledFor.UTC().UnixMilli()) -} - func encodeKeyComponent(value string) string { return base64.RawURLEncoding.EncodeToString([]byte(value)) } diff --git a/mail/internal/adapters/redisstate/keyspace_test.go b/mail/internal/adapters/redisstate/keyspace_test.go index 61ee078..acc6809 100644 --- a/mail/internal/adapters/redisstate/keyspace_test.go +++ b/mail/internal/adapters/redisstate/keyspace_test.go @@ -4,9 +4,8 @@ import ( "testing" "time" - "galaxy/mail/internal/domain/common" - deliverydomain "galaxy/mail/internal/domain/delivery" - + "github.com/alicebob/miniredis/v2" + "github.com/redis/go-redis/v9" "github.com/stretchr/testify/require" ) @@ -15,54 +14,42 @@ func TestKeyspaceBuildsStableKeys(t *testing.T) { keyspace := Keyspace{} - require.Equal(t, "mail:deliveries:ZGVsaXZlcnktMTIz", keyspace.Delivery(common.DeliveryID("delivery-123"))) - require.Equal(t, "mail:attempts:ZGVsaXZlcnktMTIz:MQ", keyspace.Attempt(common.DeliveryID("delivery-123"), 1)) - require.Equal(t, "mail:idempotency:bm90aWZpY2F0aW9u:bm90aWZpY2F0aW9uOm1haWwtMTIz", keyspace.Idempotency(deliverydomain.SourceNotification, common.IdempotencyKey("notification:mail-123"))) - require.Equal(t, "mail:dead_letters:ZGVsaXZlcnktMTIz", keyspace.DeadLetter(common.DeliveryID("delivery-123"))) require.Equal(t, "mail:delivery_commands", keyspace.DeliveryCommands()) - require.Equal(t, "mail:attempt_schedule", keyspace.AttemptSchedule()) - require.Equal(t, "mail:idx:recipient:cGlsb3RAZXhhbXBsZS5jb20", keyspace.RecipientIndex(common.Email("pilot@example.com"))) - require.Equal(t, "mail:idx:status:c2VudA", keyspace.StatusIndex(deliverydomain.StatusSent)) - require.Equal(t, "mail:idx:source:bm90aWZpY2F0aW9u", keyspace.SourceIndex(deliverydomain.SourceNotification)) - require.Equal(t, "mail:idx:template:YXV0aC5sb2dpbl9jb2Rl", keyspace.TemplateIndex(common.TemplateID("auth.login_code"))) - require.Equal(t, 
"mail:idx:idempotency:bm90aWZpY2F0aW9u:bm90aWZpY2F0aW9uOm1haWwtMTIz", keyspace.IdempotencyIndex(deliverydomain.SourceNotification, common.IdempotencyKey("notification:mail-123"))) - require.Equal(t, "mail:idx:created_at", keyspace.CreatedAtIndex()) - require.Equal(t, "mail:idx:*", keyspace.SecondaryIndexPattern()) + require.Equal(t, "mail:stream_offsets:bWFpbDpkZWxpdmVyeV9jb21tYW5kcw", keyspace.StreamOffset("mail:delivery_commands")) } -func TestDeliveryIndexKeysDedupeRecipientsAndIgnoreReplyTo(t *testing.T) { +func TestStreamOffsetStoreRoundTrip(t *testing.T) { t.Parallel() - record := validDelivery(t) - record.Source = deliverydomain.SourceNotification - record.ResendParentDeliveryID = "" - record.Status = deliverydomain.StatusQueued - record.SentAt = nil - record.LocaleFallbackUsed = false - record.UpdatedAt = record.CreatedAt.Add(time.Minute) - record.Envelope.Cc = []common.Email{common.Email("pilot@example.com")} - record.Envelope.ReplyTo = []common.Email{common.Email("reply@example.com")} - require.NoError(t, record.Validate()) + server := miniredis.RunT(t) + client := redis.NewClient(&redis.Options{Addr: server.Addr()}) + t.Cleanup(func() { _ = client.Close() }) - require.Equal(t, []string{ - "mail:idx:created_at", - "mail:idx:idempotency:bm90aWZpY2F0aW9u:b3BlcmF0b3I6cmVzZW5kOmRlbGl2ZXJ5LTEyMw", - "mail:idx:recipient:b3BzQGV4YW1wbGUuY29t", - "mail:idx:recipient:cGlsb3RAZXhhbXBsZS5jb20", - "mail:idx:source:bm90aWZpY2F0aW9u", - "mail:idx:status:cXVldWVk", - "mail:idx:template:YXV0aC5sb2dpbl9jb2Rl", - }, Keyspace{}.DeliveryIndexKeys(record)) + store, err := NewStreamOffsetStore(client) + require.NoError(t, err) + + stream := "mail:delivery_commands" + require.NoError(t, store.Save(t.Context(), stream, "1234-5")) + + got, ok, err := store.Load(t.Context(), stream) + require.NoError(t, err) + require.True(t, ok) + require.Equal(t, "1234-5", got) } -func TestScoresAndRetentionConstants(t *testing.T) { +func TestUnmarshalStreamOffsetRequiresUpdatedAt(t *testing.T) 
{ t.Parallel() - value := time.Unix(1_775_240_000, 123_000_000).UTC() - require.Equal(t, float64(value.UnixMilli()), CreatedAtScore(value)) - require.Equal(t, float64(value.UnixMilli()), ScheduledForScore(value)) - require.Equal(t, 7*24*time.Hour, IdempotencyTTL) - require.Equal(t, 30*24*time.Hour, DeliveryTTL) - require.Equal(t, 90*24*time.Hour, AttemptTTL) - require.Equal(t, 90*24*time.Hour, DeadLetterTTL) + payload, err := MarshalStreamOffset(StreamOffset{ + Stream: "mail:delivery_commands", + LastProcessedEntryID: "1-0", + UpdatedAt: time.Now().UTC(), + }) + require.NoError(t, err) + got, err := UnmarshalStreamOffset(payload) + require.NoError(t, err) + require.Equal(t, "1-0", got.LastProcessedEntryID) + + _, err = UnmarshalStreamOffset([]byte(`{"stream":"x","last_processed_entry_id":"1"}`)) + require.Error(t, err) } diff --git a/mail/internal/adapters/redisstate/malformed_command_store.go b/mail/internal/adapters/redisstate/malformed_command_store.go deleted file mode 100644 index ac5c9a0..0000000 --- a/mail/internal/adapters/redisstate/malformed_command_store.go +++ /dev/null @@ -1,111 +0,0 @@ -package redisstate - -import ( - "context" - "errors" - "fmt" - - "galaxy/mail/internal/domain/malformedcommand" - - "github.com/redis/go-redis/v9" -) - -// MalformedCommandStore provides the Redis-backed storage used for -// operator-visible malformed async command records. -type MalformedCommandStore struct { - client *redis.Client - keys Keyspace -} - -// NewMalformedCommandStore constructs one Redis-backed malformed-command -// store. -func NewMalformedCommandStore(client *redis.Client) (*MalformedCommandStore, error) { - if client == nil { - return nil, errors.New("new malformed command store: nil redis client") - } - - return &MalformedCommandStore{ - client: client, - keys: Keyspace{}, - }, nil -} - -// Record stores entry idempotently by stream entry id. 
-func (store *MalformedCommandStore) Record(ctx context.Context, entry malformedcommand.Entry) error { - if store == nil || store.client == nil { - return errors.New("record malformed command: nil store") - } - if ctx == nil { - return errors.New("record malformed command: nil context") - } - if err := entry.Validate(); err != nil { - return fmt.Errorf("record malformed command: %w", err) - } - - payload, err := MarshalMalformedCommand(entry) - if err != nil { - return fmt.Errorf("record malformed command: %w", err) - } - - key := store.keys.MalformedCommand(entry.StreamEntryID) - indexKey := store.keys.MalformedCommandCreatedAtIndex() - score := float64(entry.RecordedAt.UTC().UnixMilli()) - - watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error { - exists, err := tx.Exists(ctx, key).Result() - if err != nil { - return fmt.Errorf("record malformed command: %w", err) - } - if exists > 0 { - return nil - } - - _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, key, payload, DeadLetterTTL) - pipe.ZAdd(ctx, indexKey, redis.Z{ - Score: score, - Member: entry.StreamEntryID, - }) - - return nil - }) - if err != nil { - return fmt.Errorf("record malformed command: %w", err) - } - - return nil - }, key) - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return nil - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// Get loads one malformed-command entry by stream entry id. 
-func (store *MalformedCommandStore) Get(ctx context.Context, streamEntryID string) (malformedcommand.Entry, bool, error) { - if store == nil || store.client == nil { - return malformedcommand.Entry{}, false, errors.New("get malformed command: nil store") - } - if ctx == nil { - return malformedcommand.Entry{}, false, errors.New("get malformed command: nil context") - } - - payload, err := store.client.Get(ctx, store.keys.MalformedCommand(streamEntryID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return malformedcommand.Entry{}, false, nil - case err != nil: - return malformedcommand.Entry{}, false, fmt.Errorf("get malformed command: %w", err) - } - - entry, err := UnmarshalMalformedCommand(payload) - if err != nil { - return malformedcommand.Entry{}, false, fmt.Errorf("get malformed command: %w", err) - } - - return entry, true, nil -} diff --git a/mail/internal/adapters/redisstate/offset_codec.go b/mail/internal/adapters/redisstate/offset_codec.go new file mode 100644 index 0000000..c1f3f72 --- /dev/null +++ b/mail/internal/adapters/redisstate/offset_codec.go @@ -0,0 +1,40 @@ +package redisstate + +import ( + "encoding/json" + "fmt" + "time" +) + +// StreamOffset stores the persisted progress of one plain-XREAD consumer. +type StreamOffset struct { + // Stream stores the Redis Stream key the offset belongs to. + Stream string `json:"stream"` + + // LastProcessedEntryID stores the most recently processed Stream entry id. + LastProcessedEntryID string `json:"last_processed_entry_id"` + + // UpdatedAt stores when the offset was last persisted. + UpdatedAt time.Time `json:"updated_at"` +} + +// MarshalStreamOffset returns the JSON encoding of the persisted offset. +func MarshalStreamOffset(offset StreamOffset) ([]byte, error) { + payload, err := json.Marshal(offset) + if err != nil { + return nil, fmt.Errorf("marshal stream offset: %w", err) + } + return payload, nil +} + +// UnmarshalStreamOffset parses one persisted offset payload. 
+func UnmarshalStreamOffset(payload []byte) (StreamOffset, error) { + var offset StreamOffset + if err := json.Unmarshal(payload, &offset); err != nil { + return StreamOffset{}, fmt.Errorf("unmarshal stream offset: %w", err) + } + if offset.UpdatedAt.IsZero() { + return StreamOffset{}, fmt.Errorf("unmarshal stream offset: updated_at must not be zero") + } + return offset, nil +} diff --git a/mail/internal/adapters/redisstate/operator_store.go b/mail/internal/adapters/redisstate/operator_store.go deleted file mode 100644 index 0c2d510..0000000 --- a/mail/internal/adapters/redisstate/operator_store.go +++ /dev/null @@ -1,532 +0,0 @@ -package redisstate - -import ( - "context" - "errors" - "fmt" - "slices" - "time" - - "galaxy/mail/internal/domain/attempt" - "galaxy/mail/internal/domain/common" - deliverydomain "galaxy/mail/internal/domain/delivery" - "galaxy/mail/internal/service/acceptgenericdelivery" - "galaxy/mail/internal/service/listattempts" - "galaxy/mail/internal/service/listdeliveries" - "galaxy/mail/internal/service/resenddelivery" - - "github.com/redis/go-redis/v9" -) - -// OperatorStore provides the Redis-backed durable storage used by the -// operator read and resend workflows. -type OperatorStore struct { - client *redis.Client - writer *AtomicWriter - keys Keyspace -} - -// NewOperatorStore constructs one Redis-backed operator store. -func NewOperatorStore(client *redis.Client) (*OperatorStore, error) { - if client == nil { - return nil, errors.New("new operator store: nil redis client") - } - - writer, err := NewAtomicWriter(client) - if err != nil { - return nil, fmt.Errorf("new operator store: %w", err) - } - - return &OperatorStore{ - client: client, - writer: writer, - keys: Keyspace{}, - }, nil -} - -// GetDelivery loads one accepted delivery by its identifier. 
-func (store *OperatorStore) GetDelivery(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.Delivery, bool, error) { - if store == nil || store.client == nil { - return deliverydomain.Delivery{}, false, errors.New("get operator delivery: nil store") - } - if ctx == nil { - return deliverydomain.Delivery{}, false, errors.New("get operator delivery: nil context") - } - if err := deliveryID.Validate(); err != nil { - return deliverydomain.Delivery{}, false, fmt.Errorf("get operator delivery: %w", err) - } - - payload, err := store.client.Get(ctx, store.keys.Delivery(deliveryID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return deliverydomain.Delivery{}, false, nil - case err != nil: - return deliverydomain.Delivery{}, false, fmt.Errorf("get operator delivery: %w", err) - } - - record, err := UnmarshalDelivery(payload) - if err != nil { - return deliverydomain.Delivery{}, false, fmt.Errorf("get operator delivery: %w", err) - } - - return record, true, nil -} - -// GetDeadLetter loads the dead-letter entry associated with deliveryID when -// one exists. 
-func (store *OperatorStore) GetDeadLetter(ctx context.Context, deliveryID common.DeliveryID) (deliverydomain.DeadLetterEntry, bool, error) { - if store == nil || store.client == nil { - return deliverydomain.DeadLetterEntry{}, false, errors.New("get operator dead-letter entry: nil store") - } - if ctx == nil { - return deliverydomain.DeadLetterEntry{}, false, errors.New("get operator dead-letter entry: nil context") - } - if err := deliveryID.Validate(); err != nil { - return deliverydomain.DeadLetterEntry{}, false, fmt.Errorf("get operator dead-letter entry: %w", err) - } - - payload, err := store.client.Get(ctx, store.keys.DeadLetter(deliveryID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return deliverydomain.DeadLetterEntry{}, false, nil - case err != nil: - return deliverydomain.DeadLetterEntry{}, false, fmt.Errorf("get operator dead-letter entry: %w", err) - } - - entry, err := UnmarshalDeadLetter(payload) - if err != nil { - return deliverydomain.DeadLetterEntry{}, false, fmt.Errorf("get operator dead-letter entry: %w", err) - } - - return entry, true, nil -} - -// GetDeliveryPayload loads one raw accepted attachment bundle by delivery id. 
-func (store *OperatorStore) GetDeliveryPayload(ctx context.Context, deliveryID common.DeliveryID) (acceptgenericdelivery.DeliveryPayload, bool, error) { - if store == nil || store.client == nil { - return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get operator delivery payload: nil store") - } - if ctx == nil { - return acceptgenericdelivery.DeliveryPayload{}, false, errors.New("get operator delivery payload: nil context") - } - if err := deliveryID.Validate(); err != nil { - return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get operator delivery payload: %w", err) - } - - payload, err := store.client.Get(ctx, store.keys.DeliveryPayload(deliveryID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return acceptgenericdelivery.DeliveryPayload{}, false, nil - case err != nil: - return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get operator delivery payload: %w", err) - } - - record, err := UnmarshalDeliveryPayload(payload) - if err != nil { - return acceptgenericdelivery.DeliveryPayload{}, false, fmt.Errorf("get operator delivery payload: %w", err) - } - - return record, true, nil -} - -// ListAttempts loads exactly expectedCount attempts in ascending attempt -// number order. Missing attempts are treated as durable-state corruption. 
-func (store *OperatorStore) ListAttempts(ctx context.Context, deliveryID common.DeliveryID, expectedCount int) ([]attempt.Attempt, error) { - if store == nil || store.client == nil { - return nil, errors.New("list operator attempts: nil store") - } - if ctx == nil { - return nil, errors.New("list operator attempts: nil context") - } - if err := deliveryID.Validate(); err != nil { - return nil, fmt.Errorf("list operator attempts: %w", err) - } - if expectedCount < 0 { - return nil, errors.New("list operator attempts: negative expected count") - } - if expectedCount == 0 { - return []attempt.Attempt{}, nil - } - - result := make([]attempt.Attempt, 0, expectedCount) - for attemptNo := 1; attemptNo <= expectedCount; attemptNo++ { - payload, err := store.client.Get(ctx, store.keys.Attempt(deliveryID, attemptNo)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return nil, fmt.Errorf("list operator attempts: missing attempt %d for delivery %q", attemptNo, deliveryID) - case err != nil: - return nil, fmt.Errorf("list operator attempts: %w", err) - } - - record, err := UnmarshalAttempt(payload) - if err != nil { - return nil, fmt.Errorf("list operator attempts: %w", err) - } - result = append(result, record) - } - - return result, nil -} - -// List loads one filtered ordered page of delivery records. 
-func (store *OperatorStore) List(ctx context.Context, input listdeliveries.Input) (listdeliveries.Result, error) { - if store == nil || store.client == nil { - return listdeliveries.Result{}, errors.New("list operator deliveries: nil store") - } - if ctx == nil { - return listdeliveries.Result{}, errors.New("list operator deliveries: nil context") - } - if err := input.Validate(); err != nil { - return listdeliveries.Result{}, fmt.Errorf("list operator deliveries: %w", err) - } - - selection := chooseListIndex(store.keys, input.Filters) - if selection.mergeIDempotency { - return store.listMergedIdempotency(ctx, input, selection.keys) - } - - return store.listSingleIndex(ctx, input, selection.keys[0]) -} - -// CreateResend atomically creates the cloned delivery, its first attempt, and -// the optional cloned raw payload bundle. -func (store *OperatorStore) CreateResend(ctx context.Context, input resenddelivery.CreateResendInput) error { - if store == nil || store.client == nil || store.writer == nil { - return errors.New("create operator resend: nil store") - } - if ctx == nil { - return errors.New("create operator resend: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("create operator resend: %w", err) - } - - writerInput := CreateAcceptanceInput{ - Delivery: input.Delivery, - FirstAttempt: &input.FirstAttempt, - } - if input.DeliveryPayload != nil { - writerInput.DeliveryPayload = input.DeliveryPayload - } - - if err := store.writer.CreateAcceptance(ctx, writerInput); err != nil { - return fmt.Errorf("create operator resend: %w", err) - } - - return nil -} - -type listSelection struct { - keys []string - mergeIDempotency bool -} - -func chooseListIndex(keyspace Keyspace, filters listdeliveries.Filters) listSelection { - switch { - case filters.IdempotencyKey != "" && filters.Source != "": - return listSelection{ - keys: []string{keyspace.IdempotencyIndex(filters.Source, filters.IdempotencyKey)}, - } - case filters.IdempotencyKey 
!= "": - return listSelection{ - keys: []string{ - keyspace.IdempotencyIndex(deliverydomain.SourceAuthSession, filters.IdempotencyKey), - keyspace.IdempotencyIndex(deliverydomain.SourceNotification, filters.IdempotencyKey), - keyspace.IdempotencyIndex(deliverydomain.SourceOperatorResend, filters.IdempotencyKey), - }, - mergeIDempotency: true, - } - case filters.Recipient != "": - return listSelection{keys: []string{keyspace.RecipientIndex(filters.Recipient)}} - case filters.TemplateID != "": - return listSelection{keys: []string{keyspace.TemplateIndex(filters.TemplateID)}} - case filters.Status != "": - return listSelection{keys: []string{keyspace.StatusIndex(filters.Status)}} - case filters.Source != "": - return listSelection{keys: []string{keyspace.SourceIndex(filters.Source)}} - default: - return listSelection{keys: []string{keyspace.CreatedAtIndex()}} - } -} - -func (store *OperatorStore) listSingleIndex(ctx context.Context, input listdeliveries.Input, indexKey string) (listdeliveries.Result, error) { - startIndex := int64(0) - if input.Cursor != nil { - cursorIndex, err := cursorStartIndex(ctx, store.client, indexKey, *input.Cursor) - if err != nil { - return listdeliveries.Result{}, err - } - startIndex = cursorIndex - } - - items, nextCursor, err := store.collectFromIndex(ctx, indexKey, startIndex, input.Limit, input.Filters) - if err != nil { - return listdeliveries.Result{}, err - } - - return listdeliveries.Result{ - Items: items, - NextCursor: nextCursor, - }, nil -} - -func (store *OperatorStore) listMergedIdempotency(ctx context.Context, input listdeliveries.Input, indexKeys []string) (listdeliveries.Result, error) { - iterators := make([]*redisIndexIterator, 0, len(indexKeys)) - for _, key := range indexKeys { - iterators = append(iterators, &redisIndexIterator{ - client: store.client, - indexKey: key, - batchSize: listBatchSize(input.Limit), - cursor: input.Cursor, - }) - } - - heads := make([]indexedRef, 0, len(iterators)) - for index, iterator := 
range iterators { - ref, err := iterator.Next(ctx) - if err != nil { - return listdeliveries.Result{}, err - } - if ref != nil { - heads = append(heads, indexedRef{streamIndex: index, ref: *ref}) - } - } - - items := make([]deliverydomain.Delivery, 0, input.Limit+1) - for len(heads) > 0 && len(items) <= input.Limit { - bestIndex := 0 - for index := 1; index < len(heads); index++ { - if compareDeliveryOrder(heads[index].ref, heads[bestIndex].ref) < 0 { - bestIndex = index - } - } - - selected := heads[bestIndex] - heads = slices.Delete(heads, bestIndex, bestIndex+1) - - record, found, err := store.GetDelivery(ctx, selected.ref.DeliveryID) - if err != nil { - return listdeliveries.Result{}, err - } - if found && input.Filters.Matches(record) { - items = append(items, record) - } - - nextRef, err := iterators[selected.streamIndex].Next(ctx) - if err != nil { - return listdeliveries.Result{}, err - } - if nextRef != nil { - heads = append(heads, indexedRef{streamIndex: selected.streamIndex, ref: *nextRef}) - } - } - - result := listdeliveries.Result{} - if len(items) > input.Limit { - next := cursorFromDelivery(items[input.Limit-1]) - result.NextCursor = &next - items = items[:input.Limit] - } - result.Items = items - - return result, nil -} - -func (store *OperatorStore) collectFromIndex( - ctx context.Context, - indexKey string, - startIndex int64, - limit int, - filters listdeliveries.Filters, -) ([]deliverydomain.Delivery, *listdeliveries.Cursor, error) { - items := make([]deliverydomain.Delivery, 0, limit+1) - batchSize := listBatchSize(limit) - - for len(items) <= limit { - batch, err := store.client.ZRevRangeWithScores(ctx, indexKey, startIndex, startIndex+int64(batchSize)-1).Result() - if err != nil { - return nil, nil, fmt.Errorf("list operator deliveries: %w", err) - } - if len(batch) == 0 { - break - } - - startIndex += int64(len(batch)) - for _, member := range batch { - deliveryID, err := memberDeliveryID(member.Member) - if err != nil { - return nil, nil, 
fmt.Errorf("list operator deliveries: %w", err) - } - - record, found, err := store.GetDelivery(ctx, deliveryID) - if err != nil { - return nil, nil, err - } - if !found || !filters.Matches(record) { - continue - } - - items = append(items, record) - if len(items) > limit { - break - } - } - } - - var nextCursor *listdeliveries.Cursor - if len(items) > limit { - next := cursorFromDelivery(items[limit-1]) - nextCursor = &next - items = items[:limit] - } - - return items, nextCursor, nil -} - -type indexedRef struct { - streamIndex int - ref deliveryRef -} - -type deliveryRef struct { - CreatedAt time.Time - DeliveryID common.DeliveryID -} - -type redisIndexIterator struct { - client *redis.Client - indexKey string - batchSize int - offset int64 - cursor *listdeliveries.Cursor - batch []redis.Z - position int -} - -func (iterator *redisIndexIterator) Next(ctx context.Context) (*deliveryRef, error) { - for { - if iterator.position >= len(iterator.batch) { - batch, err := iterator.client.ZRevRangeWithScores( - ctx, - iterator.indexKey, - iterator.offset, - iterator.offset+int64(iterator.batchSize)-1, - ).Result() - if err != nil { - return nil, fmt.Errorf("list operator deliveries: %w", err) - } - if len(batch) == 0 { - return nil, nil - } - - iterator.batch = batch - iterator.position = 0 - iterator.offset += int64(len(batch)) - } - - ref, err := deliveryRefFromSortedSet(iterator.batch[iterator.position]) - iterator.position++ - if err != nil { - return nil, fmt.Errorf("list operator deliveries: %w", err) - } - if iterator.cursor != nil && !isAfterCursor(ref, *iterator.cursor) { - continue - } - - return &ref, nil - } -} - -func cursorStartIndex(ctx context.Context, client *redis.Client, indexKey string, cursor listdeliveries.Cursor) (int64, error) { - score, err := client.ZScore(ctx, indexKey, cursor.DeliveryID.String()).Result() - switch { - case errors.Is(err, redis.Nil): - return 0, listdeliveries.ErrInvalidCursor - case err != nil: - return 0, fmt.Errorf("list 
operator deliveries: %w", err) - } - if !time.UnixMilli(int64(score)).UTC().Equal(cursor.CreatedAt.UTC()) { - return 0, listdeliveries.ErrInvalidCursor - } - - rank, err := client.ZRevRank(ctx, indexKey, cursor.DeliveryID.String()).Result() - switch { - case errors.Is(err, redis.Nil): - return 0, listdeliveries.ErrInvalidCursor - case err != nil: - return 0, fmt.Errorf("list operator deliveries: %w", err) - default: - return rank + 1, nil - } -} - -func compareDeliveryOrder(left deliveryRef, right deliveryRef) int { - switch { - case left.CreatedAt.After(right.CreatedAt): - return -1 - case left.CreatedAt.Before(right.CreatedAt): - return 1 - case left.DeliveryID.String() > right.DeliveryID.String(): - return -1 - case left.DeliveryID.String() < right.DeliveryID.String(): - return 1 - default: - return 0 - } -} - -func isAfterCursor(ref deliveryRef, cursor listdeliveries.Cursor) bool { - return compareDeliveryOrder(ref, deliveryRef{ - CreatedAt: cursor.CreatedAt.UTC(), - DeliveryID: cursor.DeliveryID, - }) > 0 -} - -func cursorFromDelivery(record deliverydomain.Delivery) listdeliveries.Cursor { - return listdeliveries.Cursor{ - CreatedAt: record.CreatedAt.UTC(), - DeliveryID: record.DeliveryID, - } -} - -func deliveryRefFromSortedSet(member redis.Z) (deliveryRef, error) { - deliveryID, err := memberDeliveryID(member.Member) - if err != nil { - return deliveryRef{}, err - } - - return deliveryRef{ - CreatedAt: time.UnixMilli(int64(member.Score)).UTC(), - DeliveryID: deliveryID, - }, nil -} - -func memberDeliveryID(member any) (common.DeliveryID, error) { - value, ok := member.(string) - if !ok { - return "", fmt.Errorf("unexpected delivery index member type %T", member) - } - - deliveryID := common.DeliveryID(value) - if err := deliveryID.Validate(); err != nil { - return "", fmt.Errorf("delivery index member delivery id: %w", err) - } - - return deliveryID, nil -} - -func listBatchSize(limit int) int { - size := limit * 4 - if size < limit+1 { - size = limit + 1 - 
} - if size < 100 { - size = 100 - } - - return size -} - -var _ listdeliveries.Store = (*OperatorStore)(nil) -var _ listattempts.Store = (*OperatorStore)(nil) -var _ resenddelivery.Store = (*OperatorStore)(nil) diff --git a/mail/internal/adapters/redisstate/operator_store_test.go b/mail/internal/adapters/redisstate/operator_store_test.go deleted file mode 100644 index 1ebd888..0000000 --- a/mail/internal/adapters/redisstate/operator_store_test.go +++ /dev/null @@ -1,346 +0,0 @@ -package redisstate - -import ( - "context" - "testing" - "time" - - "galaxy/mail/internal/domain/attempt" - "galaxy/mail/internal/domain/common" - deliverydomain "galaxy/mail/internal/domain/delivery" - "galaxy/mail/internal/service/listdeliveries" - "galaxy/mail/internal/service/resenddelivery" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/require" -) - -func TestOperatorStoreListFilters(t *testing.T) { - t.Parallel() - - type testCase struct { - name string - filters listdeliveries.Filters - wantIDs []common.DeliveryID - } - - cases := []testCase{ - { - name: "recipient", - filters: listdeliveries.Filters{Recipient: common.Email("recipient-filter@example.com")}, - wantIDs: []common.DeliveryID{"delivery-recipient"}, - }, - { - name: "status", - filters: listdeliveries.Filters{Status: deliverydomain.StatusSuppressed}, - wantIDs: []common.DeliveryID{"delivery-status"}, - }, - { - name: "source", - filters: listdeliveries.Filters{Source: deliverydomain.SourceOperatorResend}, - wantIDs: []common.DeliveryID{"delivery-source"}, - }, - { - name: "template", - filters: listdeliveries.Filters{TemplateID: common.TemplateID("template.filter")}, - wantIDs: []common.DeliveryID{"delivery-template"}, - }, - { - name: "idempotency", - filters: listdeliveries.Filters{IdempotencyKey: common.IdempotencyKey("idempotency-filter")}, - wantIDs: []common.DeliveryID{"delivery-idempotency"}, - }, - } - - for _, tt := range cases { - tt := tt - - 
t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - store, client := newOperatorStoreForTest(t) - seedOperatorFilterDataset(t, client) - - result, err := store.List(context.Background(), listdeliveries.Input{ - Limit: 10, - Filters: tt.filters, - }) - require.NoError(t, err) - require.Equal(t, tt.wantIDs, deliveryIDs(result.Items)) - require.Nil(t, result.NextCursor) - }) - } -} - -func TestOperatorStoreListCursorPaginationUsesCreatedAtDescDeliveryIDDesc(t *testing.T) { - t.Parallel() - - store, client := newOperatorStoreForTest(t) - - createdAt := time.Unix(1_775_122_500, 0).UTC() - seedDeliveryRecord(t, client, buildStoredDelivery("delivery-a", createdAt, deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-a"), deliverydomain.StatusSent)) - seedDeliveryRecord(t, client, buildStoredDelivery("delivery-c", createdAt, deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-c"), deliverydomain.StatusSent)) - seedDeliveryRecord(t, client, buildStoredDelivery("delivery-b", createdAt, deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-b"), deliverydomain.StatusSent)) - - firstPage, err := store.List(context.Background(), listdeliveries.Input{Limit: 2}) - require.NoError(t, err) - require.Equal(t, []common.DeliveryID{"delivery-c", "delivery-b"}, deliveryIDs(firstPage.Items)) - require.NotNil(t, firstPage.NextCursor) - - secondPage, err := store.List(context.Background(), listdeliveries.Input{ - Limit: 2, - Cursor: firstPage.NextCursor, - }) - require.NoError(t, err) - require.Equal(t, []common.DeliveryID{"delivery-a"}, deliveryIDs(secondPage.Items)) - require.Nil(t, secondPage.NextCursor) -} - -func TestOperatorStoreListMergesIdempotencyAcrossSources(t *testing.T) { - t.Parallel() - - store, client := newOperatorStoreForTest(t) - sharedKey := common.IdempotencyKey("shared-idempotency") - seedDeliveryRecord(t, client, buildStoredDelivery("delivery-auth", time.Unix(1_775_122_100, 0).UTC(), 
deliverydomain.SourceAuthSession, sharedKey, deliverydomain.StatusSuppressed)) - seedDeliveryRecord(t, client, buildStoredDelivery("delivery-notification", time.Unix(1_775_122_200, 0).UTC(), deliverydomain.SourceNotification, sharedKey, deliverydomain.StatusSent)) - seedDeliveryRecord(t, client, buildStoredDelivery("delivery-resend", time.Unix(1_775_122_300, 0).UTC(), deliverydomain.SourceOperatorResend, sharedKey, deliverydomain.StatusSent)) - - result, err := store.List(context.Background(), listdeliveries.Input{ - Limit: 10, - Filters: listdeliveries.Filters{ - IdempotencyKey: sharedKey, - }, - }) - require.NoError(t, err) - require.Equal(t, []common.DeliveryID{"delivery-resend", "delivery-notification", "delivery-auth"}, deliveryIDs(result.Items)) -} - -func TestOperatorStoreGetDeadLetter(t *testing.T) { - t.Parallel() - - store, client := newOperatorStoreForTest(t) - record := buildStoredDelivery("delivery-dead-letter", time.Unix(1_775_122_400, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-dead-letter"), deliverydomain.StatusDeadLetter) - seedDeliveryRecord(t, client, record) - - entry := validDeadLetterEntry(t, record.DeliveryID) - payload, err := MarshalDeadLetter(entry) - require.NoError(t, err) - require.NoError(t, client.Set(context.Background(), Keyspace{}.DeadLetter(record.DeliveryID), payload, DeadLetterTTL).Err()) - - got, found, err := store.GetDeadLetter(context.Background(), record.DeliveryID) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, entry, got) -} - -func TestOperatorStoreListAttempts(t *testing.T) { - t.Parallel() - - store, client := newOperatorStoreForTest(t) - record := buildStoredDelivery("delivery-attempts", time.Unix(1_775_122_410, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-attempts"), deliverydomain.StatusFailed) - record.AttemptCount = 2 - failedAt := record.UpdatedAt - record.FailedAt = &failedAt - require.NoError(t, 
record.Validate()) - seedDeliveryRecord(t, client, record) - - firstAttempt := validTerminalAttempt(t, record.DeliveryID) - firstAttempt.AttemptNo = 1 - secondAttempt := validTerminalAttempt(t, record.DeliveryID) - secondAttempt.AttemptNo = 2 - secondAttempt.Status = attempt.StatusProviderRejected - payload, err := MarshalAttempt(firstAttempt) - require.NoError(t, err) - require.NoError(t, client.Set(context.Background(), Keyspace{}.Attempt(record.DeliveryID, 1), payload, AttemptTTL).Err()) - payload, err = MarshalAttempt(secondAttempt) - require.NoError(t, err) - require.NoError(t, client.Set(context.Background(), Keyspace{}.Attempt(record.DeliveryID, 2), payload, AttemptTTL).Err()) - - got, err := store.ListAttempts(context.Background(), record.DeliveryID, 2) - require.NoError(t, err) - require.Equal(t, []attempt.Attempt{firstAttempt, secondAttempt}, got) -} - -func TestOperatorStoreCreateResendAtomicallyCreatesCloneState(t *testing.T) { - t.Parallel() - - store, client := newOperatorStoreForTest(t) - - createdAt := time.Unix(1_775_122_600, 0).UTC() - clone := buildStoredDelivery("delivery-clone", createdAt, deliverydomain.SourceOperatorResend, common.IdempotencyKey("operator:resend:delivery-parent"), deliverydomain.StatusQueued) - clone.ResendParentDeliveryID = common.DeliveryID("delivery-parent") - clone.AttemptCount = 1 - require.NoError(t, clone.Validate()) - - firstAttempt := validScheduledAttempt(t, clone.DeliveryID) - firstAttempt.AttemptNo = 1 - firstAttempt.ScheduledFor = createdAt - require.NoError(t, firstAttempt.Validate()) - - deliveryPayload := validDeliveryPayload(t, clone.DeliveryID) - input := resenddelivery.CreateResendInput{ - Delivery: clone, - FirstAttempt: firstAttempt, - DeliveryPayload: &deliveryPayload, - } - - require.NoError(t, store.CreateResend(context.Background(), input)) - - storedDelivery, found, err := store.GetDelivery(context.Background(), clone.DeliveryID) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, 
clone, storedDelivery) - - storedPayload, found, err := store.GetDeliveryPayload(context.Background(), clone.DeliveryID) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, deliveryPayload, storedPayload) - - attemptPayload, err := client.Get(context.Background(), Keyspace{}.Attempt(clone.DeliveryID, 1)).Bytes() - require.NoError(t, err) - decodedAttempt, err := UnmarshalAttempt(attemptPayload) - require.NoError(t, err) - require.Equal(t, firstAttempt, decodedAttempt) - - scheduledMembers, err := client.ZRange(context.Background(), Keyspace{}.AttemptSchedule(), 0, -1).Result() - require.NoError(t, err) - require.Equal(t, []string{clone.DeliveryID.String()}, scheduledMembers) - - indexMembers, err := client.ZRange(context.Background(), Keyspace{}.IdempotencyIndex(clone.Source, clone.IdempotencyKey), 0, -1).Result() - require.NoError(t, err) - require.Equal(t, []string{clone.DeliveryID.String()}, indexMembers) - - _, err = client.Get(context.Background(), Keyspace{}.Idempotency(clone.Source, clone.IdempotencyKey)).Bytes() - require.ErrorIs(t, err, redis.Nil) -} - -func newOperatorStoreForTest(t *testing.T) (*OperatorStore, *redis.Client) { - t.Helper() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - store, err := NewOperatorStore(client) - require.NoError(t, err) - - return store, client -} - -func seedOperatorFilterDataset(t *testing.T, client *redis.Client) { - t.Helper() - - seedDeliveryRecord(t, client, func() deliverydomain.Delivery { - record := buildStoredDelivery("delivery-recipient", time.Unix(1_775_122_001, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-recipient"), deliverydomain.StatusSent) - record.Envelope.To = []common.Email{common.Email("recipient-filter@example.com")} - require.NoError(t, record.Validate()) - return record - }()) - - seedDeliveryRecord(t, client, func() 
deliverydomain.Delivery { - record := buildStoredDelivery("delivery-status", time.Unix(1_775_122_002, 0).UTC(), deliverydomain.SourceAuthSession, common.IdempotencyKey("authsession:delivery-status"), deliverydomain.StatusSuppressed) - record.SentAt = nil - suppressedAt := record.UpdatedAt - record.SuppressedAt = &suppressedAt - require.NoError(t, record.Validate()) - return record - }()) - - seedDeliveryRecord(t, client, buildStoredDelivery("delivery-source", time.Unix(1_775_122_003, 0).UTC(), deliverydomain.SourceOperatorResend, common.IdempotencyKey("operator:resend:delivery-source"), deliverydomain.StatusSent)) - - seedDeliveryRecord(t, client, func() deliverydomain.Delivery { - record := buildStoredDelivery("delivery-template", time.Unix(1_775_122_004, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("notification:delivery-template"), deliverydomain.StatusSent) - record.TemplateID = common.TemplateID("template.filter") - record.PayloadMode = deliverydomain.PayloadModeTemplate - record.Locale = common.Locale("en") - record.TemplateVariables = map[string]any{"name": "Pilot"} - require.NoError(t, record.Validate()) - return record - }()) - - seedDeliveryRecord(t, client, buildStoredDelivery("delivery-idempotency", time.Unix(1_775_122_005, 0).UTC(), deliverydomain.SourceNotification, common.IdempotencyKey("idempotency-filter"), deliverydomain.StatusSent)) -} - -func seedDeliveryRecord(t *testing.T, client *redis.Client, record deliverydomain.Delivery) { - t.Helper() - - keyspace := Keyspace{} - payload, err := MarshalDelivery(record) - require.NoError(t, err) - require.NoError(t, client.Set(context.Background(), keyspace.Delivery(record.DeliveryID), payload, DeliveryTTL).Err()) - - score := CreatedAtScore(record.CreatedAt) - for _, indexKey := range keyspace.DeliveryIndexKeys(record) { - require.NoError(t, client.ZAdd(context.Background(), indexKey, redis.Z{ - Score: score, - Member: record.DeliveryID.String(), - }).Err()) - } -} - -func 
buildStoredDelivery( - deliveryID string, - createdAt time.Time, - source deliverydomain.Source, - idempotencyKey common.IdempotencyKey, - status deliverydomain.Status, -) deliverydomain.Delivery { - updatedAt := createdAt.Add(time.Minute) - record := deliverydomain.Delivery{ - DeliveryID: common.DeliveryID(deliveryID), - Source: source, - PayloadMode: deliverydomain.PayloadModeRendered, - Envelope: deliverydomain.Envelope{ - To: []common.Email{common.Email("pilot@example.com")}, - }, - Content: deliverydomain.Content{ - Subject: "Test subject", - TextBody: "Test body", - }, - IdempotencyKey: idempotencyKey, - Status: status, - CreatedAt: createdAt, - UpdatedAt: updatedAt, - } - - switch status { - case deliverydomain.StatusSent: - record.AttemptCount = 1 - record.LastAttemptStatus = attempt.StatusProviderAccepted - sentAt := updatedAt - record.SentAt = &sentAt - case deliverydomain.StatusSuppressed: - suppressedAt := updatedAt - record.SuppressedAt = &suppressedAt - case deliverydomain.StatusFailed: - record.AttemptCount = 1 - record.LastAttemptStatus = attempt.StatusProviderRejected - failedAt := updatedAt - record.FailedAt = &failedAt - case deliverydomain.StatusDeadLetter: - record.AttemptCount = 1 - record.LastAttemptStatus = attempt.StatusTimedOut - deadLetteredAt := updatedAt - record.DeadLetteredAt = &deadLetteredAt - default: - record.AttemptCount = 1 - } - if source == deliverydomain.SourceOperatorResend { - record.ResendParentDeliveryID = common.DeliveryID("parent-" + deliveryID) - } - if err := record.Validate(); err != nil { - panic(err) - } - - return record -} - -func deliveryIDs(records []deliverydomain.Delivery) []common.DeliveryID { - result := make([]common.DeliveryID, len(records)) - for index, record := range records { - result[index] = record.DeliveryID - } - - return result -} diff --git a/mail/internal/adapters/redisstate/render_store.go b/mail/internal/adapters/redisstate/render_store.go deleted file mode 100644 index ff33e70..0000000 --- 
a/mail/internal/adapters/redisstate/render_store.go +++ /dev/null @@ -1,74 +0,0 @@ -package redisstate - -import ( - "context" - "errors" - "fmt" - - "galaxy/mail/internal/service/renderdelivery" - - "github.com/redis/go-redis/v9" -) - -// RenderStore provides the Redis-backed durable storage used by the -// render-delivery use case. -type RenderStore struct { - writer *AtomicWriter -} - -// NewRenderStore constructs one Redis-backed render-delivery store. -func NewRenderStore(client *redis.Client) (*RenderStore, error) { - if client == nil { - return nil, errors.New("new render store: nil redis client") - } - - writer, err := NewAtomicWriter(client) - if err != nil { - return nil, fmt.Errorf("new render store: %w", err) - } - - return &RenderStore{writer: writer}, nil -} - -// MarkRendered stores one successfully materialized template delivery. -func (store *RenderStore) MarkRendered(ctx context.Context, input renderdelivery.MarkRenderedInput) error { - if store == nil || store.writer == nil { - return errors.New("mark rendered in render store: nil store") - } - if ctx == nil { - return errors.New("mark rendered in render store: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("mark rendered in render store: %w", err) - } - - if err := store.writer.MarkRendered(ctx, MarkRenderedInput{ - Delivery: input.Delivery, - }); err != nil { - return fmt.Errorf("mark rendered in render store: %w", err) - } - - return nil -} - -// MarkRenderFailed stores one classified terminal render failure. 
-func (store *RenderStore) MarkRenderFailed(ctx context.Context, input renderdelivery.MarkRenderFailedInput) error { - if store == nil || store.writer == nil { - return errors.New("mark render failed in render store: nil store") - } - if ctx == nil { - return errors.New("mark render failed in render store: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("mark render failed in render store: %w", err) - } - - if err := store.writer.MarkRenderFailed(ctx, MarkRenderFailedInput{ - Delivery: input.Delivery, - Attempt: input.Attempt, - }); err != nil { - return fmt.Errorf("mark render failed in render store: %w", err) - } - - return nil -} diff --git a/mail/internal/app/app.go b/mail/internal/app/app.go index 97e3012..d09cfb8 100644 --- a/mail/internal/app/app.go +++ b/mail/internal/app/app.go @@ -114,6 +114,11 @@ func classifyComponentResult(parentCtx context.Context, result componentResult) return fmt.Errorf("run mail app: component %d exited without error before shutdown", result.index) case errors.Is(result.err, context.Canceled) && parentCtx.Err() != nil: return nil + case errors.Is(result.err, context.DeadlineExceeded) && parentCtx.Err() != nil: + // In-flight provider sends bound by their own short timeout race with + // the parent context cancel; either outcome is benign here because the + // claim will be recovered by the next runtime instance. 
+ return nil default: return fmt.Errorf("run mail app: component %d: %w", result.index, result.err) } diff --git a/mail/internal/app/bootstrap.go b/mail/internal/app/bootstrap.go index 9b2db42..22f08a9 100644 --- a/mail/internal/app/bootstrap.go +++ b/mail/internal/app/bootstrap.go @@ -11,22 +11,13 @@ import ( "galaxy/mail/internal/config" "galaxy/mail/internal/ports" "galaxy/mail/internal/telemetry" + "galaxy/redisconn" - "github.com/redis/go-redis/extra/redisotel/v9" "github.com/redis/go-redis/v9" ) func newRedisClient(cfg config.RedisConfig) *redis.Client { - return redis.NewClient(&redis.Options{ - Addr: cfg.Addr, - Username: cfg.Username, - Password: cfg.Password, - DB: cfg.DB, - TLSConfig: cfg.TLSConfig(), - DialTimeout: cfg.OperationTimeout, - ReadTimeout: cfg.OperationTimeout, - WriteTimeout: cfg.OperationTimeout, - }) + return redisconn.NewMasterClient(cfg.Conn) } func instrumentRedisClient(client *redis.Client, telemetryRuntime *telemetry.Runtime) error { @@ -37,20 +28,12 @@ func instrumentRedisClient(client *redis.Client, telemetryRuntime *telemetry.Run return nil } - if err := redisotel.InstrumentTracing( - client, - redisotel.WithTracerProvider(telemetryRuntime.TracerProvider()), - redisotel.WithDBStatement(false), + if err := redisconn.Instrument(client, + redisconn.WithTracerProvider(telemetryRuntime.TracerProvider()), + redisconn.WithMeterProvider(telemetryRuntime.MeterProvider()), ); err != nil { - return fmt.Errorf("instrument redis client tracing: %w", err) + return fmt.Errorf("instrument redis client: %w", err) } - if err := redisotel.InstrumentMetrics( - client, - redisotel.WithMeterProvider(telemetryRuntime.MeterProvider()), - ); err != nil { - return fmt.Errorf("instrument redis client metrics: %w", err) - } - return nil } @@ -58,14 +41,9 @@ func pingRedis(ctx context.Context, cfg config.RedisConfig, client *redis.Client if client == nil { return fmt.Errorf("ping redis: nil client") } - - pingCtx, cancel := context.WithTimeout(ctx, 
cfg.OperationTimeout) - defer cancel() - - if err := client.Ping(pingCtx).Err(); err != nil { + if err := redisconn.Ping(ctx, client, cfg.Conn.OperationTimeout); err != nil { return fmt.Errorf("ping redis: %w", err) } - return nil } diff --git a/mail/internal/app/runtime.go b/mail/internal/app/runtime.go index 1c75b23..189ca7a 100644 --- a/mail/internal/app/runtime.go +++ b/mail/internal/app/runtime.go @@ -8,10 +8,13 @@ import ( "time" "galaxy/mail/internal/adapters/id" + "galaxy/mail/internal/adapters/postgres/mailstore" + "galaxy/mail/internal/adapters/postgres/migrations" "galaxy/mail/internal/adapters/redisstate" templatedir "galaxy/mail/internal/adapters/templates" "galaxy/mail/internal/api/internalhttp" "galaxy/mail/internal/config" + "galaxy/mail/internal/ports" "galaxy/mail/internal/service/acceptauthdelivery" "galaxy/mail/internal/service/acceptgenericdelivery" "galaxy/mail/internal/service/executeattempt" @@ -22,7 +25,7 @@ import ( "galaxy/mail/internal/service/resenddelivery" "galaxy/mail/internal/telemetry" "galaxy/mail/internal/worker" - "galaxy/mail/internal/ports" + "galaxy/postgres" "github.com/redis/go-redis/v9" ) @@ -47,11 +50,11 @@ type runtimeClock interface { type runtimeProviderFactory func(config.SMTPConfig, *slog.Logger) (ports.Provider, error) type runtimeDependencies struct { - clock runtimeClock - providerFactory runtimeProviderFactory - schedulerPoll time.Duration - schedulerRecovery time.Duration - schedulerGrace time.Duration + clock runtimeClock + providerFactory runtimeProviderFactory + schedulerPoll time.Duration + schedulerRecovery time.Duration + schedulerGrace time.Duration } func (deps runtimeDependencies) withDefaults() runtimeDependencies { @@ -112,17 +115,58 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep return telemetryRuntime.Shutdown(shutdownCtx) }) + // Open one shared Redis master client. 
The command consumer, the stream + // offset store, and the malformed-command recorder all borrow it. redisClient := newRedisClient(cfg.Redis) if err := instrumentRedisClient(redisClient, telemetryRuntime); err != nil { return cleanupOnError(fmt.Errorf("new mail runtime: %w", err)) } runtime.cleanupFns = append(runtime.cleanupFns, func() error { - return redisClient.Close() + if err := redisClient.Close(); err != nil && !errors.Is(err, redis.ErrClosed) { + return err + } + return nil }) if err := pingRedis(ctx, cfg.Redis, redisClient); err != nil { return cleanupOnError(fmt.Errorf("new mail runtime: %w", err)) } + // Open the PostgreSQL pool, attach instrumentation, ping it, run embedded + // migrations strictly before any HTTP listener opens. A failure at any of + // these steps is fatal. + pgPool, err := postgres.OpenPrimary(ctx, cfg.Postgres.Conn, + postgres.WithTracerProvider(telemetryRuntime.TracerProvider()), + postgres.WithMeterProvider(telemetryRuntime.MeterProvider()), + ) + if err != nil { + return cleanupOnError(fmt.Errorf("new mail runtime: open postgres primary: %w", err)) + } + runtime.cleanupFns = append(runtime.cleanupFns, pgPool.Close) + unregisterDBStats, err := postgres.InstrumentDBStats(pgPool, + postgres.WithMeterProvider(telemetryRuntime.MeterProvider()), + ) + if err != nil { + return cleanupOnError(fmt.Errorf("new mail runtime: instrument postgres db stats: %w", err)) + } + runtime.cleanupFns = append(runtime.cleanupFns, unregisterDBStats) + if err := postgres.Ping(ctx, pgPool, cfg.Postgres.Conn.OperationTimeout); err != nil { + return cleanupOnError(fmt.Errorf("new mail runtime: %w", err)) + } + if err := postgres.RunMigrations(ctx, pgPool, migrations.FS(), "."); err != nil { + return cleanupOnError(fmt.Errorf("new mail runtime: run postgres migrations: %w", err)) + } + + store, err := mailstore.New(mailstore.Config{ + DB: pgPool, + OperationTimeout: cfg.Postgres.Conn.OperationTimeout, + }) + if err != nil { + return 
cleanupOnError(fmt.Errorf("new mail runtime: postgres mail store: %w", err)) + } + if err := store.Ping(ctx); err != nil { + return cleanupOnError(fmt.Errorf("new mail runtime: ping postgres mail store: %w", err)) + } + templateCatalog, err := newTemplateCatalog(cfg.Templates) if err != nil { return cleanupOnError(fmt.Errorf("new mail runtime: %w", err)) @@ -135,47 +179,35 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep } runtime.cleanupFns = append(runtime.cleanupFns, provider.Close) - acceptanceStore, err := redisstate.NewAcceptanceStore(redisClient) - if err != nil { - return cleanupOnError(fmt.Errorf("new mail runtime: auth acceptance store: %w", err)) - } authAcceptanceService, err := acceptauthdelivery.New(acceptauthdelivery.Config{ - Store: acceptanceStore, + Store: store, DeliveryIDGenerator: id.Generator{}, Clock: deps.clock, Telemetry: telemetryRuntime, TracerProvider: telemetryRuntime.TracerProvider(), Logger: logger, - IdempotencyTTL: redisstate.IdempotencyTTL, + IdempotencyTTL: cfg.IdempotencyTTL, SuppressOutbound: cfg.SMTP.Mode == config.SMTPModeStub, }) if err != nil { return cleanupOnError(fmt.Errorf("new mail runtime: auth acceptance service: %w", err)) } - genericAcceptanceStore, err := redisstate.NewGenericAcceptanceStore(redisClient) - if err != nil { - return cleanupOnError(fmt.Errorf("new mail runtime: generic acceptance store: %w", err)) - } genericAcceptanceService, err := acceptgenericdelivery.New(acceptgenericdelivery.Config{ - Store: genericAcceptanceStore, + Store: store.GenericAcceptance(), Clock: deps.clock, Telemetry: telemetryRuntime, TracerProvider: telemetryRuntime.TracerProvider(), Logger: logger, - IdempotencyTTL: redisstate.IdempotencyTTL, + IdempotencyTTL: cfg.IdempotencyTTL, }) if err != nil { return cleanupOnError(fmt.Errorf("new mail runtime: generic acceptance service: %w", err)) } - renderStore, err := redisstate.NewRenderStore(redisClient) - if err != nil { - return 
cleanupOnError(fmt.Errorf("new mail runtime: render store: %w", err)) - } renderDeliveryService, err := renderdelivery.New(renderdelivery.Config{ Catalog: templateCatalog, - Store: renderStore, + Store: store.RenderDelivery(), Clock: deps.clock, Telemetry: telemetryRuntime, TracerProvider: telemetryRuntime.TracerProvider(), @@ -186,27 +218,18 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep } runtime.renderDeliveryService = renderDeliveryService - malformedCommandStore, err := redisstate.NewMalformedCommandStore(redisClient) - if err != nil { - return cleanupOnError(fmt.Errorf("new mail runtime: malformed command store: %w", err)) - } streamOffsetStore, err := redisstate.NewStreamOffsetStore(redisClient) if err != nil { return cleanupOnError(fmt.Errorf("new mail runtime: stream offset store: %w", err)) } - attemptExecutionStore, err := redisstate.NewAttemptExecutionStore(redisClient) - if err != nil { - return cleanupOnError(fmt.Errorf("new mail runtime: attempt execution store: %w", err)) - } + + attemptExecutionStore := store.AttemptExecution() telemetryRuntime.SetAttemptScheduleSnapshotReader(attemptExecutionStore) - operatorStore, err := redisstate.NewOperatorStore(redisClient) - if err != nil { - return cleanupOnError(fmt.Errorf("new mail runtime: operator store: %w", err)) - } + attemptExecutionService, err := executeattempt.New(executeattempt.Config{ Renderer: renderDeliveryService, Provider: provider, - PayloadLoader: attemptExecutionStore, + PayloadLoader: store, Store: attemptExecutionStore, Clock: deps.clock, Telemetry: telemetryRuntime, @@ -217,26 +240,27 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep if err != nil { return cleanupOnError(fmt.Errorf("new mail runtime: attempt execution service: %w", err)) } + listDeliveriesService, err := listdeliveries.New(listdeliveries.Config{ - Store: operatorStore, + Store: store, }) if err != nil { return cleanupOnError(fmt.Errorf("new mail 
runtime: list deliveries service: %w", err)) } getDeliveryService, err := getdelivery.New(getdelivery.Config{ - Store: operatorStore, + Store: store, }) if err != nil { return cleanupOnError(fmt.Errorf("new mail runtime: get delivery service: %w", err)) } listAttemptsService, err := listattempts.New(listattempts.Config{ - Store: operatorStore, + Store: store, }) if err != nil { return cleanupOnError(fmt.Errorf("new mail runtime: list attempts service: %w", err)) } resendDeliveryService, err := resenddelivery.New(resenddelivery.Config{ - Store: operatorStore, + Store: store, DeliveryIDGenerator: id.Generator{}, Clock: deps.clock, Telemetry: telemetryRuntime, @@ -247,21 +271,6 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep return cleanupOnError(fmt.Errorf("new mail runtime: resend delivery service: %w", err)) } - commandConsumerRedisClient := newRedisClient(cfg.Redis) - if err := instrumentRedisClient(commandConsumerRedisClient, telemetryRuntime); err != nil { - return cleanupOnError(fmt.Errorf("new mail runtime: %w", err)) - } - runtime.cleanupFns = append(runtime.cleanupFns, func() error { - err := commandConsumerRedisClient.Close() - if errors.Is(err, redis.ErrClosed) { - return nil - } - return err - }) - if err := pingRedis(ctx, cfg.Redis, commandConsumerRedisClient); err != nil { - return cleanupOnError(fmt.Errorf("new mail runtime: %w", err)) - } - httpServer, err := internalhttp.NewServer(internalhttp.Config{ Addr: cfg.InternalHTTP.Addr, ReadHeaderTimeout: cfg.InternalHTTP.ReadHeaderTimeout, @@ -282,11 +291,11 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep } commandConsumer, err := worker.NewCommandConsumer(worker.CommandConsumerConfig{ - Client: commandConsumerRedisClient, + Client: redisClient, Stream: cfg.Redis.CommandStream, BlockTimeout: cfg.StreamBlockTimeout, Acceptor: genericAcceptanceService, - MalformedRecorder: malformedCommandStore, + MalformedRecorder: store, OffsetStore: 
streamOffsetStore, Telemetry: telemetryRuntime, Clock: deps.clock, @@ -317,16 +326,18 @@ func newRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger, dep if err != nil { return cleanupOnError(fmt.Errorf("new mail runtime: attempt worker pool: %w", err)) } - indexCleaner, err := redisstate.NewIndexCleaner(redisClient) + retentionWorker, err := worker.NewSQLRetentionWorker(worker.SQLRetentionConfig{ + Store: store, + DeliveryRetention: cfg.Retention.DeliveryRetention, + MalformedCommandRetention: cfg.Retention.MalformedCommandRetention, + CleanupInterval: cfg.Retention.CleanupInterval, + Clock: deps.clock, + }, logger) if err != nil { - return cleanupOnError(fmt.Errorf("new mail runtime: cleanup index cleaner: %w", err)) - } - cleanupWorker, err := worker.NewCleanupWorker(indexCleaner, logger) - if err != nil { - return cleanupOnError(fmt.Errorf("new mail runtime: cleanup worker: %w", err)) + return cleanupOnError(fmt.Errorf("new mail runtime: sql retention worker: %w", err)) } - runtime.app = New(cfg, httpServer, commandConsumer, scheduler, attemptWorkers, cleanupWorker) + runtime.app = New(cfg, httpServer, commandConsumer, scheduler, attemptWorkers, retentionWorker) return runtime, nil } diff --git a/mail/internal/app/runtime_pgharness_test.go b/mail/internal/app/runtime_pgharness_test.go new file mode 100644 index 0000000..9ac4cdf --- /dev/null +++ b/mail/internal/app/runtime_pgharness_test.go @@ -0,0 +1,208 @@ +package app + +import ( + "context" + "database/sql" + "net/url" + "os" + "sync" + "testing" + "time" + + "galaxy/mail/internal/adapters/postgres/migrations" + mailconfig "galaxy/mail/internal/config" + "galaxy/postgres" + + testcontainers "github.com/testcontainers/testcontainers-go" + tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres" + "github.com/testcontainers/testcontainers-go/wait" +) + +const ( + pkgPGImage = "postgres:16-alpine" + pkgPGSuperUser = "galaxy" + pkgPGSuperPassword = "galaxy" + pkgPGSuperDatabase 
= "galaxy_mail" + pkgPGServiceRole = "mailservice" + pkgPGServicePassword = "mailservice" + pkgPGServiceSchema = "mail" + pkgPGContainerStartup = 90 * time.Second + pkgPGOperationTimeout = 10 * time.Second +) + +var ( + pkgPGContainerOnce sync.Once + pkgPGContainerErr error + pkgPGContainerEnv *runtimePostgresEnv +) + +type runtimePostgresEnv struct { + container *tcpostgres.PostgresContainer + dsn string + pool *sql.DB +} + +func ensureRuntimePostgresEnv(t testing.TB) *runtimePostgresEnv { + t.Helper() + pkgPGContainerOnce.Do(func() { + pkgPGContainerEnv, pkgPGContainerErr = startRuntimePostgresEnv() + }) + if pkgPGContainerErr != nil { + t.Skipf("postgres container start failed (Docker unavailable?): %v", pkgPGContainerErr) + } + return pkgPGContainerEnv +} + +func startRuntimePostgresEnv() (*runtimePostgresEnv, error) { + ctx := context.Background() + container, err := tcpostgres.Run(ctx, pkgPGImage, + tcpostgres.WithDatabase(pkgPGSuperDatabase), + tcpostgres.WithUsername(pkgPGSuperUser), + tcpostgres.WithPassword(pkgPGSuperPassword), + testcontainers.WithWaitStrategy( + wait.ForLog("database system is ready to accept connections"). + WithOccurrence(2). 
+ WithStartupTimeout(pkgPGContainerStartup), + ), + ) + if err != nil { + return nil, err + } + + baseDSN, err := container.ConnectionString(ctx, "sslmode=disable") + if err != nil { + _ = testcontainers.TerminateContainer(container) + return nil, err + } + + if err := provisionRuntimeRoleAndSchema(ctx, baseDSN); err != nil { + _ = testcontainers.TerminateContainer(container) + return nil, err + } + + scopedDSN, err := dsnForRuntimeServiceRole(baseDSN) + if err != nil { + _ = testcontainers.TerminateContainer(container) + return nil, err + } + + cfg := postgres.DefaultConfig() + cfg.PrimaryDSN = scopedDSN + cfg.OperationTimeout = pkgPGOperationTimeout + pool, err := postgres.OpenPrimary(ctx, cfg) + if err != nil { + _ = testcontainers.TerminateContainer(container) + return nil, err + } + if err := postgres.Ping(ctx, pool, pkgPGOperationTimeout); err != nil { + _ = pool.Close() + _ = testcontainers.TerminateContainer(container) + return nil, err + } + if err := postgres.RunMigrations(ctx, pool, migrations.FS(), "."); err != nil { + _ = pool.Close() + _ = testcontainers.TerminateContainer(container) + return nil, err + } + + return &runtimePostgresEnv{container: container, dsn: scopedDSN, pool: pool}, nil +} + +func provisionRuntimeRoleAndSchema(ctx context.Context, baseDSN string) error { + cfg := postgres.DefaultConfig() + cfg.PrimaryDSN = baseDSN + cfg.OperationTimeout = pkgPGOperationTimeout + db, err := postgres.OpenPrimary(ctx, cfg) + if err != nil { + return err + } + defer func() { _ = db.Close() }() + + statements := []string{ + `DO $$ BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'mailservice') THEN + CREATE ROLE mailservice LOGIN PASSWORD 'mailservice'; + END IF; + END $$;`, + `CREATE SCHEMA IF NOT EXISTS mail AUTHORIZATION mailservice;`, + `GRANT USAGE ON SCHEMA mail TO mailservice;`, + } + for _, statement := range statements { + if _, err := db.ExecContext(ctx, statement); err != nil { + return err + } + } + return nil +} + +func 
dsnForRuntimeServiceRole(baseDSN string) (string, error) { + parsed, err := url.Parse(baseDSN) + if err != nil { + return "", err + } + values := url.Values{} + values.Set("search_path", pkgPGServiceSchema) + values.Set("sslmode", "disable") + scoped := url.URL{ + Scheme: parsed.Scheme, + User: url.UserPassword(pkgPGServiceRole, pkgPGServicePassword), + Host: parsed.Host, + Path: parsed.Path, + RawQuery: values.Encode(), + } + return scoped.String(), nil +} + +// truncateRuntimeMail clears the mail schema between tests sharing the +// container. +func truncateRuntimeMail(t *testing.T) { + t.Helper() + env := ensureRuntimePostgresEnv(t) + if env == nil { + return + } + if _, err := env.pool.ExecContext(context.Background(), + `TRUNCATE TABLE + malformed_commands, + dead_letters, + delivery_payloads, + attempts, + delivery_recipients, + deliveries + RESTART IDENTITY CASCADE`, + ); err != nil { + t.Fatalf("truncate mail tables: %v", err) + } +} + +// runtimeBaseConfig returns a minimum-viable config suitable for runtime +// construction, with Redis and Postgres connection coordinates wired up. The +// caller still has to fill the templates dir, internal HTTP addr, SMTP mode, +// etc. The helper does NOT truncate mail tables — tests that need a clean +// slate should call truncateRuntimeMail explicitly (typically once at test +// start, not on every runtime restart). +func runtimeBaseConfig(t *testing.T, redisAddr string) mailconfig.Config { + t.Helper() + env := ensureRuntimePostgresEnv(t) + + cfg := mailconfig.DefaultConfig() + cfg.Redis.Conn.MasterAddr = redisAddr + cfg.Redis.Conn.Password = "integration" + cfg.Postgres.Conn.PrimaryDSN = env.dsn + cfg.Postgres.Conn.OperationTimeout = pkgPGOperationTimeout + return cfg +} + +// TestMain shuts down the shared container after the test process completes. 
+func TestMain(m *testing.M) { + code := m.Run() + if pkgPGContainerEnv != nil { + if pkgPGContainerEnv.pool != nil { + _ = pkgPGContainerEnv.pool.Close() + } + if pkgPGContainerEnv.container != nil { + _ = testcontainers.TerminateContainer(pkgPGContainerEnv.container) + } + } + os.Exit(code) +} diff --git a/mail/internal/app/runtime_smoke_test.go b/mail/internal/app/runtime_smoke_test.go index 5249be1..4aeb3f8 100644 --- a/mail/internal/app/runtime_smoke_test.go +++ b/mail/internal/app/runtime_smoke_test.go @@ -89,8 +89,8 @@ func TestRealRuntimeCompatibility(t *testing.T) { mailpitHTTPBaseURL, err := mailpitContainer.PortEndpoint(ctx, "8025/tcp", "http") require.NoError(t, err) - cfg := config.DefaultConfig() - cfg.Redis.Addr = redisAddr + truncateRuntimeMail(t) + cfg := runtimeBaseConfig(t, redisAddr) cfg.Templates.Dir = writeRuntimeTemplates(t) cfg.InternalHTTP.Addr = mustFreeAddr(t) cfg.ShutdownTimeout = time.Second diff --git a/mail/internal/app/runtime_stage14_test.go b/mail/internal/app/runtime_stage14_test.go index 8cdac2d..8c4e653 100644 --- a/mail/internal/app/runtime_stage14_test.go +++ b/mail/internal/app/runtime_stage14_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "io" "log/slog" "net/http" @@ -27,7 +28,6 @@ import ( ) func TestRuntimeAuthDeliverySentWithLocaleFallbackAndDuplicateIdempotency(t *testing.T) { - t.Parallel() env := newRuntimeTestEnvironment(t) clock := newRuntimeTestClock(runtimeClockStart()) @@ -85,7 +85,6 @@ func TestRuntimeAuthDeliverySentWithLocaleFallbackAndDuplicateIdempotency(t *tes } func TestRuntimeAuthDeliverySuppressedInStubMode(t *testing.T) { - t.Parallel() env := newRuntimeTestEnvironment(t) clock := newRuntimeTestClock(runtimeClockStart()) @@ -117,7 +116,6 @@ func TestRuntimeAuthDeliverySuppressedInStubMode(t *testing.T) { } func TestRuntimeGenericCommandAndOperatorRoutesSupportResendClone(t *testing.T) { - t.Parallel() env := newRuntimeTestEnvironment(t) clock := 
newRuntimeTestClock(runtimeClockStart()) @@ -162,7 +160,6 @@ func TestRuntimeGenericCommandAndOperatorRoutesSupportResendClone(t *testing.T) } func TestRuntimeRetriesTransientFailureUntilSuccess(t *testing.T) { - t.Parallel() env := newRuntimeTestEnvironment(t) clock := newRuntimeTestClock(runtimeClockStart()) @@ -197,7 +194,6 @@ func TestRuntimeRetriesTransientFailureUntilSuccess(t *testing.T) { } func TestRuntimeMovesDeliveryToDeadLetterAfterRetryExhaustion(t *testing.T) { - t.Parallel() env := newRuntimeTestEnvironment(t) clock := newRuntimeTestClock(runtimeClockStart()) @@ -247,7 +243,6 @@ func TestRuntimeMovesDeliveryToDeadLetterAfterRetryExhaustion(t *testing.T) { } func TestRuntimeRecoversPendingAttemptAfterGracefulShutdown(t *testing.T) { - t.Parallel() env := newRuntimeTestEnvironment(t) clock := newRuntimeTestClock(runtimeClockStart()) @@ -318,6 +313,7 @@ func newRuntimeTestEnvironment(t *testing.T) *runtimeTestEnvironment { t.Cleanup(func() { require.NoError(t, client.Close()) }) + truncateRuntimeMail(t) return &runtimeTestEnvironment{ redisServer: server, @@ -356,8 +352,7 @@ func (env *runtimeTestEnvironment) start(t *testing.T, opts runtimeInstanceOptio opts.smtpTimeout = 20 * time.Millisecond } - cfg := config.DefaultConfig() - cfg.Redis.Addr = env.redisServer.Addr() + cfg := runtimeBaseConfig(t, env.redisServer.Addr()) cfg.Templates.Dir = env.templateDir cfg.InternalHTTP.Addr = mustFreeAddr(t) cfg.ShutdownTimeout = time.Second @@ -497,6 +492,27 @@ func (provider *blockingProvider) Send(ctx context.Context, message ports.Messag } <-ctx.Done() + if errors.Is(ctx.Err(), context.DeadlineExceeded) { + // Mirror the real SMTP provider contract (see + // internal/adapters/smtp/provider.go::classifySendError): a per-attempt + // deadline expiration becomes a transient failure result tagged with + // `deadline_exceeded`, not a propagated context error. 
Returning ctx.Err() + // instead would surface as a fatal worker error and break the recovery + // scenario this test is exercising. + summary, err := ports.BuildSafeSummary(ports.SummaryFields{ + Provider: "blocking", + Result: string(ports.ClassificationTransientFailure), + Phase: "send", + }) + if err != nil { + return ports.Result{}, err + } + return ports.Result{ + Classification: ports.ClassificationTransientFailure, + Summary: summary, + Details: map[string]string{"phase": "send", "error": "deadline_exceeded"}, + }, nil + } return ports.Result{}, ctx.Err() } diff --git a/mail/internal/app/runtime_test.go b/mail/internal/app/runtime_test.go index 66bd60a..e91b385 100644 --- a/mail/internal/app/runtime_test.go +++ b/mail/internal/app/runtime_test.go @@ -17,13 +17,11 @@ import ( ) func TestNewRuntimeStartsWithStubMode(t *testing.T) { - t.Parallel() - redisServer := miniredis.RunT(t) templateDir := writeStage6Templates(t) - cfg := config.DefaultConfig() - cfg.Redis.Addr = redisServer.Addr() + truncateRuntimeMail(t) + cfg := runtimeBaseConfig(t, redisServer.Addr()) cfg.Templates.Dir = templateDir cfg.InternalHTTP.Addr = mustFreeAddr(t) @@ -33,28 +31,25 @@ func TestNewRuntimeStartsWithStubMode(t *testing.T) { } func TestNewRuntimeRejectsInvalidRedisConfig(t *testing.T) { - t.Parallel() - + redisServer := miniredis.RunT(t) templateDir := writeStage6Templates(t) - cfg := config.DefaultConfig() - cfg.Redis.Addr = "127.0.0.1" + truncateRuntimeMail(t) + cfg := runtimeBaseConfig(t, redisServer.Addr()) + cfg.Redis.Conn.Password = "" cfg.Templates.Dir = templateDir cfg.InternalHTTP.Addr = mustFreeAddr(t) _, err := NewRuntime(context.Background(), cfg, testLogger()) require.Error(t, err) - require.Contains(t, err.Error(), "redis addr") + require.Contains(t, err.Error(), "redis password") } func TestNewRuntimeRejectsUnavailableRedis(t *testing.T) { - t.Parallel() - templateDir := writeStage6Templates(t) - cfg := config.DefaultConfig() - cfg.Redis.Addr = "127.0.0.1:6399" - 
cfg.Redis.OperationTimeout = 100 * time.Millisecond + cfg := runtimeBaseConfig(t, "127.0.0.1:6399") + cfg.Redis.Conn.OperationTimeout = 100 * time.Millisecond cfg.Templates.Dir = templateDir cfg.InternalHTTP.Addr = mustFreeAddr(t) @@ -64,12 +59,10 @@ func TestNewRuntimeRejectsUnavailableRedis(t *testing.T) { } func TestNewRuntimeRejectsMissingTemplateDirectory(t *testing.T) { - t.Parallel() - redisServer := miniredis.RunT(t) - cfg := config.DefaultConfig() - cfg.Redis.Addr = redisServer.Addr() + truncateRuntimeMail(t) + cfg := runtimeBaseConfig(t, redisServer.Addr()) cfg.Templates.Dir = filepath.Join(t.TempDir(), "missing") cfg.InternalHTTP.Addr = mustFreeAddr(t) @@ -79,15 +72,13 @@ func TestNewRuntimeRejectsMissingTemplateDirectory(t *testing.T) { } func TestNewRuntimeRejectsMissingRequiredTemplateFile(t *testing.T) { - t.Parallel() - redisServer := miniredis.RunT(t) rootDir := t.TempDir() require.NoError(t, os.MkdirAll(filepath.Join(rootDir, "auth.login_code", "en"), 0o755)) require.NoError(t, os.WriteFile(filepath.Join(rootDir, "auth.login_code", "en", "subject.tmpl"), []byte("Subject"), 0o644)) - cfg := config.DefaultConfig() - cfg.Redis.Addr = redisServer.Addr() + truncateRuntimeMail(t) + cfg := runtimeBaseConfig(t, redisServer.Addr()) cfg.Templates.Dir = rootDir cfg.InternalHTTP.Addr = mustFreeAddr(t) @@ -97,8 +88,6 @@ func TestNewRuntimeRejectsMissingRequiredTemplateFile(t *testing.T) { } func TestNewRuntimeRejectsBrokenTemplateCatalog(t *testing.T) { - t.Parallel() - redisServer := miniredis.RunT(t) rootDir := t.TempDir() require.NoError(t, os.MkdirAll(filepath.Join(rootDir, "auth.login_code", "en"), 0o755)) @@ -108,8 +97,8 @@ func TestNewRuntimeRejectsBrokenTemplateCatalog(t *testing.T) { require.NoError(t, os.WriteFile(filepath.Join(rootDir, "game.turn.ready", "en", "subject.tmpl"), []byte("{{if .turn_number}"), 0o644)) require.NoError(t, os.WriteFile(filepath.Join(rootDir, "game.turn.ready", "en", "text.tmpl"), []byte("Turn ready"), 0o644)) - cfg := 
config.DefaultConfig() - cfg.Redis.Addr = redisServer.Addr() + truncateRuntimeMail(t) + cfg := runtimeBaseConfig(t, redisServer.Addr()) cfg.Templates.Dir = rootDir cfg.InternalHTTP.Addr = mustFreeAddr(t) @@ -119,13 +108,11 @@ func TestNewRuntimeRejectsBrokenTemplateCatalog(t *testing.T) { } func TestRuntimeRunStopsOnContextCancellation(t *testing.T) { - t.Parallel() - redisServer := miniredis.RunT(t) templateDir := writeStage6Templates(t) - cfg := config.DefaultConfig() - cfg.Redis.Addr = redisServer.Addr() + truncateRuntimeMail(t) + cfg := runtimeBaseConfig(t, redisServer.Addr()) cfg.Templates.Dir = templateDir cfg.InternalHTTP.Addr = mustFreeAddr(t) cfg.ShutdownTimeout = time.Second @@ -182,3 +169,5 @@ func mustFreeAddr(t *testing.T) string { return listener.Addr().String() } + +var _ = config.SMTPModeStub // keep config import even when no test uses it directly diff --git a/mail/internal/config/config.go b/mail/internal/config/config.go index 640bef5..ff17ee2 100644 --- a/mail/internal/config/config.go +++ b/mail/internal/config/config.go @@ -3,15 +3,18 @@ package config import ( - "crypto/tls" "fmt" "strings" "time" "galaxy/mail/internal/telemetry" + "galaxy/postgres" + "galaxy/redisconn" ) const ( + envPrefix = "MAIL" + shutdownTimeoutEnvVar = "MAIL_SHUTDOWN_TIMEOUT" logLevelEnvVar = "MAIL_LOG_LEVEL" @@ -20,15 +23,7 @@ const ( internalHTTPReadTimeoutEnvVar = "MAIL_INTERNAL_HTTP_READ_TIMEOUT" internalHTTPIdleTimeoutEnvVar = "MAIL_INTERNAL_HTTP_IDLE_TIMEOUT" - redisAddrEnvVar = "MAIL_REDIS_ADDR" - redisUsernameEnvVar = "MAIL_REDIS_USERNAME" - redisPasswordEnvVar = "MAIL_REDIS_PASSWORD" - redisDBEnvVar = "MAIL_REDIS_DB" - redisTLSEnabledEnvVar = "MAIL_REDIS_TLS_ENABLED" - redisOperationTimeoutEnvVar = "MAIL_REDIS_OPERATION_TIMEOUT" - redisCommandStreamEnvVar = "MAIL_REDIS_COMMAND_STREAM" - redisAttemptScheduleEnvVar = "MAIL_REDIS_ATTEMPT_SCHEDULE_KEY" - redisDeadLetterPrefixEnvVar = "MAIL_REDIS_DEAD_LETTER_PREFIX" + redisCommandStreamEnvVar = 
"MAIL_REDIS_COMMAND_STREAM" smtpModeEnvVar = "MAIL_SMTP_MODE" smtpAddrEnvVar = "MAIL_SMTP_ADDR" @@ -45,8 +40,10 @@ const ( streamBlockTimeoutEnvVar = "MAIL_STREAM_BLOCK_TIMEOUT" operatorRequestTimeoutEnvVar = "MAIL_OPERATOR_REQUEST_TIMEOUT" idempotencyTTLEnvVar = "MAIL_IDEMPOTENCY_TTL" - deliveryTTLEnvVar = "MAIL_DELIVERY_TTL" - attemptTTLEnvVar = "MAIL_ATTEMPT_TTL" + + deliveryRetentionEnvVar = "MAIL_DELIVERY_RETENTION" + malformedCommandRetentionEnvVar = "MAIL_MALFORMED_COMMAND_RETENTION" + cleanupIntervalEnvVar = "MAIL_CLEANUP_INTERVAL" otelServiceNameEnvVar = "OTEL_SERVICE_NAME" otelTracesExporterEnvVar = "OTEL_TRACES_EXPORTER" @@ -57,27 +54,24 @@ const ( otelStdoutTracesEnabledEnvVar = "MAIL_OTEL_STDOUT_TRACES_ENABLED" otelStdoutMetricsEnabledEnvVar = "MAIL_OTEL_STDOUT_METRICS_ENABLED" - defaultShutdownTimeout = 5 * time.Second - defaultLogLevel = "info" - defaultInternalHTTPAddr = ":8080" - defaultReadHeaderTimeout = 2 * time.Second - defaultReadTimeout = 10 * time.Second - defaultIdleTimeout = time.Minute - defaultRedisDB = 0 - defaultRedisOperationTimeout = 250 * time.Millisecond - defaultRedisCommandStream = "mail:delivery_commands" - defaultRedisAttemptScheduleKey = "mail:attempt_schedule" - defaultRedisDeadLetterPrefix = "mail:dead_letters:" - defaultSMTPMode = SMTPModeStub - defaultSMTPTimeout = 15 * time.Second - defaultTemplateDir = "templates" - defaultAttemptWorkerCount = 4 - defaultStreamBlockTimeout = 2 * time.Second - defaultOperatorRequestTimeout = 5 * time.Second - defaultIdempotencyTTL = 7 * 24 * time.Hour - defaultDeliveryTTL = 30 * 24 * time.Hour - defaultAttemptTTL = 90 * 24 * time.Hour - defaultOTelServiceName = "galaxy-mail" + defaultShutdownTimeout = 5 * time.Second + defaultLogLevel = "info" + defaultInternalHTTPAddr = ":8080" + defaultReadHeaderTimeout = 2 * time.Second + defaultReadTimeout = 10 * time.Second + defaultIdleTimeout = time.Minute + defaultRedisCommandStream = "mail:delivery_commands" + defaultSMTPMode = SMTPModeStub + 
defaultSMTPTimeout = 15 * time.Second + defaultTemplateDir = "templates" + defaultAttemptWorkerCount = 4 + defaultStreamBlockTimeout = 2 * time.Second + defaultOperatorRequestTimeout = 5 * time.Second + defaultIdempotencyTTL = 7 * 24 * time.Hour + defaultDeliveryRetention = 30 * 24 * time.Hour + defaultMalformedCommandRetention = 90 * 24 * time.Hour + defaultCleanupInterval = time.Hour + defaultOTelServiceName = "galaxy-mail" ) const ( @@ -99,10 +93,15 @@ type Config struct { // InternalHTTP configures the trusted internal HTTP listener. InternalHTTP InternalHTTPConfig - // Redis configures the shared Redis client and Redis-owned keys used by the - // runnable service skeleton. + // Redis configures the shared Redis connection topology and the inbound + // `mail:delivery_commands` Stream key. Durable mail state lives in + // PostgreSQL after Stage 4 of `PG_PLAN.md`. Redis RedisConfig + // Postgres configures the PostgreSQL-backed durable store consumed via + // `pkg/postgres`. + Postgres PostgresConfig + // SMTP configures the runtime mail provider mode and provider-specific // connection details. SMTP SMTPConfig @@ -115,22 +114,20 @@ type Config struct { AttemptWorkerConcurrency int // StreamBlockTimeout stores the maximum Redis Streams blocking read window - // used by the future command consumer. + // used by the command consumer. StreamBlockTimeout time.Duration - // OperatorRequestTimeout stores the future application-layer request budget - // for trusted operator handlers. + // OperatorRequestTimeout stores the application-layer request budget for + // trusted operator handlers. OperatorRequestTimeout time.Duration - // IdempotencyTTL stores the configured retention for idempotency records. + // IdempotencyTTL stores the per-acceptance idempotency window the service + // layer applies to the durable idempotency_expires_at column on + // `deliveries`. IdempotencyTTL time.Duration - // DeliveryTTL stores the configured retention for delivery records. 
- DeliveryTTL time.Duration - - // AttemptTTL stores the configured retention for attempt and dead-letter - // records. - AttemptTTL time.Duration + // Retention stores the periodic SQL retention worker configuration. + Retention RetentionConfig // Telemetry configures the process-wide OpenTelemetry runtime. Telemetry TelemetryConfig @@ -176,66 +173,67 @@ func (cfg InternalHTTPConfig) Validate() error { } } -// RedisConfig configures the shared Redis client used by the runnable process. +// RedisConfig configures the Mail Service Redis connection topology plus the +// inbound `mail:delivery_commands` Stream key. Per-call timeouts live in +// `Conn.OperationTimeout`. type RedisConfig struct { - // Addr stores the Redis network address. - Addr string - - // Username stores the optional Redis ACL username. - Username string - - // Password stores the optional Redis ACL password. - Password string - - // DB stores the Redis logical database index. - DB int - - // TLSEnabled reports whether TLS must be used for Redis connections. - TLSEnabled bool - - // OperationTimeout bounds one Redis round trip including the startup PING. - OperationTimeout time.Duration + // Conn carries the connection topology (master, replicas, password, db, + // per-call timeout). Loaded via redisconn.LoadFromEnv("MAIL"). + Conn redisconn.Config // CommandStream stores the configured Redis Streams key for async command // intake. CommandStream string - - // AttemptScheduleKey stores the configured sorted-set key of scheduled - // attempts. - AttemptScheduleKey string - - // DeadLetterPrefix stores the configured Redis key prefix of dead-letter - // entries. - DeadLetterPrefix string -} - -// TLSConfig returns the conservative TLS configuration used by the Redis -// client when TLSEnabled is true. 
-func (cfg RedisConfig) TLSConfig() *tls.Config { - if !cfg.TLSEnabled { - return nil - } - - return &tls.Config{MinVersion: tls.VersionTLS12} } // Validate reports whether cfg stores a usable Redis configuration. func (cfg RedisConfig) Validate() error { - switch { - case strings.TrimSpace(cfg.Addr) == "": - return fmt.Errorf("redis addr must not be empty") - case !isTCPAddr(cfg.Addr): - return fmt.Errorf("redis addr %q must use host:port form", cfg.Addr) - case cfg.DB < 0: - return fmt.Errorf("redis db must not be negative") - case cfg.OperationTimeout <= 0: - return fmt.Errorf("redis operation timeout must be positive") - case strings.TrimSpace(cfg.CommandStream) == "": + if err := cfg.Conn.Validate(); err != nil { + return err + } + if strings.TrimSpace(cfg.CommandStream) == "" { return fmt.Errorf("redis command stream must not be empty") - case strings.TrimSpace(cfg.AttemptScheduleKey) == "": - return fmt.Errorf("redis attempt schedule key must not be empty") - case strings.TrimSpace(cfg.DeadLetterPrefix) == "": - return fmt.Errorf("redis dead-letter prefix must not be empty") + } + return nil +} + +// PostgresConfig configures the PostgreSQL-backed durable store. +type PostgresConfig struct { + // Conn stores the primary plus replica DSN topology and pool tuning. + // Loaded via postgres.LoadFromEnv("MAIL"). + Conn postgres.Config +} + +// Validate reports whether cfg stores a usable PostgreSQL configuration. +func (cfg PostgresConfig) Validate() error { + return cfg.Conn.Validate() +} + +// RetentionConfig stores the durable retention windows applied by the +// periodic SQL retention worker. +type RetentionConfig struct { + // DeliveryRetention bounds how long deliveries (and their cascaded + // attempts, dead letters, recipients, payloads) survive after creation. + DeliveryRetention time.Duration + + // MalformedCommandRetention bounds how long malformed-command rows + // survive after their original recorded_at. 
+ MalformedCommandRetention time.Duration + + // CleanupInterval stores the wall-clock period between two retention + // passes. + CleanupInterval time.Duration +} + +// Validate reports whether cfg stores a usable retention configuration. +func (cfg RetentionConfig) Validate() error { + switch { + case cfg.DeliveryRetention <= 0: + return fmt.Errorf("%s must be positive", deliveryRetentionEnvVar) + case cfg.MalformedCommandRetention <= 0: + return fmt.Errorf("%s must be positive", malformedCommandRetentionEnvVar) + case cfg.CleanupInterval <= 0: + return fmt.Errorf("%s must be positive", cleanupIntervalEnvVar) default: return nil } @@ -356,11 +354,11 @@ func DefaultConfig() Config { IdleTimeout: defaultIdleTimeout, }, Redis: RedisConfig{ - DB: defaultRedisDB, - OperationTimeout: defaultRedisOperationTimeout, - CommandStream: defaultRedisCommandStream, - AttemptScheduleKey: defaultRedisAttemptScheduleKey, - DeadLetterPrefix: defaultRedisDeadLetterPrefix, + Conn: redisconn.DefaultConfig(), + CommandStream: defaultRedisCommandStream, + }, + Postgres: PostgresConfig{ + Conn: postgres.DefaultConfig(), }, SMTP: SMTPConfig{ Mode: defaultSMTPMode, @@ -373,8 +371,11 @@ func DefaultConfig() Config { StreamBlockTimeout: defaultStreamBlockTimeout, OperatorRequestTimeout: defaultOperatorRequestTimeout, IdempotencyTTL: defaultIdempotencyTTL, - DeliveryTTL: defaultDeliveryTTL, - AttemptTTL: defaultAttemptTTL, + Retention: RetentionConfig{ + DeliveryRetention: defaultDeliveryRetention, + MalformedCommandRetention: defaultMalformedCommandRetention, + CleanupInterval: defaultCleanupInterval, + }, Telemetry: TelemetryConfig{ ServiceName: defaultOTelServiceName, TracesExporter: "none", diff --git a/mail/internal/config/config_test.go b/mail/internal/config/config_test.go index 943359f..609308e 100644 --- a/mail/internal/config/config_test.go +++ b/mail/internal/config/config_test.go @@ -7,8 +7,27 @@ import ( "github.com/stretchr/testify/require" ) +const ( + testRedisMasterAddr = 
"MAIL_REDIS_MASTER_ADDR" + testRedisPassword = "MAIL_REDIS_PASSWORD" + testRedisDB = "MAIL_REDIS_DB" + testRedisOpTimeout = "MAIL_REDIS_OPERATION_TIMEOUT" + testRedisLegacyTLS = "MAIL_REDIS_TLS_ENABLED" + testRedisLegacyUser = "MAIL_REDIS_USERNAME" + testPostgresDSN = "MAIL_POSTGRES_PRIMARY_DSN" + testPostgresOpT = "MAIL_POSTGRES_OPERATION_TIMEOUT" + demoPostgresDSN = "postgres://mailservice:mailservice@localhost:5432/galaxy?search_path=mail&sslmode=disable" +) + +func setMinimalConn(t *testing.T) { + t.Helper() + t.Setenv(testRedisMasterAddr, "127.0.0.1:6379") + t.Setenv(testRedisPassword, "secret") + t.Setenv(testPostgresDSN, demoPostgresDSN) +} + func TestLoadFromEnvUsesDefaults(t *testing.T) { - t.Setenv(redisAddrEnvVar, "127.0.0.1:6379") + setMinimalConn(t) cfg, err := LoadFromEnv() require.NoError(t, err) @@ -17,39 +36,34 @@ func TestLoadFromEnvUsesDefaults(t *testing.T) { require.Equal(t, defaults.ShutdownTimeout, cfg.ShutdownTimeout) require.Equal(t, defaults.Logging, cfg.Logging) require.Equal(t, defaults.InternalHTTP, cfg.InternalHTTP) - require.Equal(t, "127.0.0.1:6379", cfg.Redis.Addr) - require.Equal(t, defaults.Redis.DB, cfg.Redis.DB) - require.Equal(t, defaults.Redis.OperationTimeout, cfg.Redis.OperationTimeout) + require.Equal(t, "127.0.0.1:6379", cfg.Redis.Conn.MasterAddr) + require.Equal(t, "secret", cfg.Redis.Conn.Password) + require.Equal(t, defaults.Redis.Conn.DB, cfg.Redis.Conn.DB) + require.Equal(t, defaults.Redis.Conn.OperationTimeout, cfg.Redis.Conn.OperationTimeout) require.Equal(t, defaults.Redis.CommandStream, cfg.Redis.CommandStream) - require.Equal(t, defaults.Redis.AttemptScheduleKey, cfg.Redis.AttemptScheduleKey) - require.Equal(t, defaults.Redis.DeadLetterPrefix, cfg.Redis.DeadLetterPrefix) + require.Equal(t, demoPostgresDSN, cfg.Postgres.Conn.PrimaryDSN) require.Equal(t, defaults.SMTP, cfg.SMTP) require.Equal(t, defaults.Templates, cfg.Templates) require.Equal(t, defaults.AttemptWorkerConcurrency, cfg.AttemptWorkerConcurrency) 
require.Equal(t, defaults.StreamBlockTimeout, cfg.StreamBlockTimeout) require.Equal(t, defaults.OperatorRequestTimeout, cfg.OperatorRequestTimeout) require.Equal(t, defaults.IdempotencyTTL, cfg.IdempotencyTTL) - require.Equal(t, defaults.DeliveryTTL, cfg.DeliveryTTL) - require.Equal(t, defaults.AttemptTTL, cfg.AttemptTTL) + require.Equal(t, defaults.Retention, cfg.Retention) require.Equal(t, defaults.Telemetry, cfg.Telemetry) } func TestLoadFromEnvAppliesOverrides(t *testing.T) { + setMinimalConn(t) t.Setenv(shutdownTimeoutEnvVar, "9s") t.Setenv(logLevelEnvVar, "debug") t.Setenv(internalHTTPAddrEnvVar, "127.0.0.1:18080") t.Setenv(internalHTTPReadHeaderTimeoutEnvVar, "3s") t.Setenv(internalHTTPReadTimeoutEnvVar, "11s") t.Setenv(internalHTTPIdleTimeoutEnvVar, "61s") - t.Setenv(redisAddrEnvVar, "127.0.0.1:6380") - t.Setenv(redisUsernameEnvVar, "alice") - t.Setenv(redisPasswordEnvVar, "secret") - t.Setenv(redisDBEnvVar, "3") - t.Setenv(redisTLSEnabledEnvVar, "true") - t.Setenv(redisOperationTimeoutEnvVar, "750ms") + t.Setenv(testRedisDB, "3") + t.Setenv(testRedisOpTimeout, "750ms") t.Setenv(redisCommandStreamEnvVar, "mail:test_commands") - t.Setenv(redisAttemptScheduleEnvVar, "mail:test_schedule") - t.Setenv(redisDeadLetterPrefixEnvVar, "mail:test_dead_letters:") + t.Setenv(testPostgresOpT, "1500ms") t.Setenv(smtpModeEnvVar, SMTPModeSMTP) t.Setenv(smtpAddrEnvVar, "127.0.0.1:2525") t.Setenv(smtpUsernameEnvVar, "mailer") @@ -63,8 +77,9 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) { t.Setenv(streamBlockTimeoutEnvVar, "5s") t.Setenv(operatorRequestTimeoutEnvVar, "6s") t.Setenv(idempotencyTTLEnvVar, "48h") - t.Setenv(deliveryTTLEnvVar, "96h") - t.Setenv(attemptTTLEnvVar, "240h") + t.Setenv(deliveryRetentionEnvVar, "96h") + t.Setenv(malformedCommandRetentionEnvVar, "240h") + t.Setenv(cleanupIntervalEnvVar, "30m") t.Setenv(otelServiceNameEnvVar, "custom-mail") t.Setenv(otelTracesExporterEnvVar, "otlp") t.Setenv(otelMetricsExporterEnvVar, "otlp") @@ -83,17 +98,13 @@ 
func TestLoadFromEnvAppliesOverrides(t *testing.T) { ReadTimeout: 11 * time.Second, IdleTimeout: 61 * time.Second, }, cfg.InternalHTTP) - require.Equal(t, RedisConfig{ - Addr: "127.0.0.1:6380", - Username: "alice", - Password: "secret", - DB: 3, - TLSEnabled: true, - OperationTimeout: 750 * time.Millisecond, - CommandStream: "mail:test_commands", - AttemptScheduleKey: "mail:test_schedule", - DeadLetterPrefix: "mail:test_dead_letters:", - }, cfg.Redis) + require.Equal(t, "127.0.0.1:6379", cfg.Redis.Conn.MasterAddr) + require.Equal(t, "secret", cfg.Redis.Conn.Password) + require.Equal(t, 3, cfg.Redis.Conn.DB) + require.Equal(t, 750*time.Millisecond, cfg.Redis.Conn.OperationTimeout) + require.Equal(t, "mail:test_commands", cfg.Redis.CommandStream) + require.Equal(t, demoPostgresDSN, cfg.Postgres.Conn.PrimaryDSN) + require.Equal(t, 1500*time.Millisecond, cfg.Postgres.Conn.OperationTimeout) require.Equal(t, SMTPConfig{ Mode: SMTPModeSMTP, Addr: "127.0.0.1:2525", @@ -109,8 +120,9 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) { require.Equal(t, 5*time.Second, cfg.StreamBlockTimeout) require.Equal(t, 6*time.Second, cfg.OperatorRequestTimeout) require.Equal(t, 48*time.Hour, cfg.IdempotencyTTL) - require.Equal(t, 96*time.Hour, cfg.DeliveryTTL) - require.Equal(t, 240*time.Hour, cfg.AttemptTTL) + require.Equal(t, 96*time.Hour, cfg.Retention.DeliveryRetention) + require.Equal(t, 240*time.Hour, cfg.Retention.MalformedCommandRetention) + require.Equal(t, 30*time.Minute, cfg.Retention.CleanupInterval) require.Equal(t, TelemetryConfig{ ServiceName: "custom-mail", TracesExporter: "otlp", @@ -130,9 +142,8 @@ func TestLoadFromEnvRejectsInvalidValues(t *testing.T) { }{ {name: "invalid duration", envName: shutdownTimeoutEnvVar, envVal: "later"}, {name: "invalid log level", envName: logLevelEnvVar, envVal: "verbose"}, - {name: "invalid redis db", envName: redisDBEnvVar, envVal: "db-three"}, - {name: "invalid redis tls", envName: redisTLSEnabledEnvVar, envVal: "sometimes"}, - 
{name: "invalid redis timeout", envName: redisOperationTimeoutEnvVar, envVal: "never"}, + {name: "invalid redis db", envName: testRedisDB, envVal: "db-three"}, + {name: "invalid redis timeout", envName: testRedisOpTimeout, envVal: "never"}, {name: "invalid smtp mode", envName: smtpModeEnvVar, envVal: "ses"}, {name: "invalid smtp timeout", envName: smtpTimeoutEnvVar, envVal: "fast"}, {name: "invalid smtp insecure skip verify", envName: smtpInsecureSkipVerifyEnvVar, envVal: "sometimes"}, @@ -145,10 +156,9 @@ func TestLoadFromEnvRejectsInvalidValues(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Setenv(redisAddrEnvVar, "127.0.0.1:6379") + setMinimalConn(t) t.Setenv(tt.envName, tt.envVal) if tt.envName == smtpTimeoutEnvVar { t.Setenv(smtpModeEnvVar, SMTPModeSMTP) @@ -162,25 +172,45 @@ func TestLoadFromEnvRejectsInvalidValues(t *testing.T) { } } -func TestLoadFromEnvRejectsMissingRequiredRedisAddr(t *testing.T) { +func TestLoadFromEnvRejectsMissingRedisMasterAddr(t *testing.T) { + t.Setenv(testRedisPassword, "secret") + t.Setenv(testPostgresDSN, demoPostgresDSN) + _, err := LoadFromEnv() require.Error(t, err) - require.Contains(t, err.Error(), "redis addr") + require.Contains(t, err.Error(), "MAIL_REDIS_MASTER_ADDR") } -func TestLoadFromEnvRejectsInvalidRedisAddr(t *testing.T) { - t.Setenv(redisAddrEnvVar, "127.0.0.1") +func TestLoadFromEnvRejectsMissingPostgresDSN(t *testing.T) { + t.Setenv(testRedisMasterAddr, "127.0.0.1:6379") + t.Setenv(testRedisPassword, "secret") _, err := LoadFromEnv() require.Error(t, err) - require.Contains(t, err.Error(), "redis addr") + require.Contains(t, err.Error(), "MAIL_POSTGRES_PRIMARY_DSN") +} + +func TestLoadFromEnvRejectsLegacyRedisVars(t *testing.T) { + tests := map[string]string{ + "tls": testRedisLegacyTLS, + "username": testRedisLegacyUser, + } + for name, envVar := range tests { + t.Run(name, func(t *testing.T) { + setMinimalConn(t) + t.Setenv(envVar, "anything") + + _, err := 
LoadFromEnv() + require.Error(t, err) + require.Contains(t, err.Error(), envVar) + }) + } } func TestLoadFromEnvRejectsInvalidSMTPConfiguration(t *testing.T) { - t.Setenv(redisAddrEnvVar, "127.0.0.1:6379") - t.Setenv(smtpModeEnvVar, SMTPModeSMTP) - t.Run("missing addr", func(t *testing.T) { + setMinimalConn(t) + t.Setenv(smtpModeEnvVar, SMTPModeSMTP) t.Setenv(smtpFromEmailEnvVar, "noreply@example.com") _, err := LoadFromEnv() @@ -189,6 +219,8 @@ func TestLoadFromEnvRejectsInvalidSMTPConfiguration(t *testing.T) { }) t.Run("missing from email", func(t *testing.T) { + setMinimalConn(t) + t.Setenv(smtpModeEnvVar, SMTPModeSMTP) t.Setenv(smtpAddrEnvVar, "127.0.0.1:2525") _, err := LoadFromEnv() @@ -197,6 +229,8 @@ func TestLoadFromEnvRejectsInvalidSMTPConfiguration(t *testing.T) { }) t.Run("username without password", func(t *testing.T) { + setMinimalConn(t) + t.Setenv(smtpModeEnvVar, SMTPModeSMTP) t.Setenv(smtpAddrEnvVar, "127.0.0.1:2525") t.Setenv(smtpFromEmailEnvVar, "noreply@example.com") t.Setenv(smtpUsernameEnvVar, "mailer") @@ -207,6 +241,8 @@ func TestLoadFromEnvRejectsInvalidSMTPConfiguration(t *testing.T) { }) t.Run("password without username", func(t *testing.T) { + setMinimalConn(t) + t.Setenv(smtpModeEnvVar, SMTPModeSMTP) t.Setenv(smtpAddrEnvVar, "127.0.0.1:2525") t.Setenv(smtpFromEmailEnvVar, "noreply@example.com") t.Setenv(smtpPasswordEnvVar, "secret") @@ -227,21 +263,21 @@ func TestLoadFromEnvRejectsNonPositiveDurationsAndCounts(t *testing.T) { {name: "read header timeout", envName: internalHTTPReadHeaderTimeoutEnvVar, envVal: "0s"}, {name: "read timeout", envName: internalHTTPReadTimeoutEnvVar, envVal: "0s"}, {name: "idle timeout", envName: internalHTTPIdleTimeoutEnvVar, envVal: "0s"}, - {name: "redis operation timeout", envName: redisOperationTimeoutEnvVar, envVal: "0s"}, + {name: "redis operation timeout", envName: testRedisOpTimeout, envVal: "0s"}, {name: "smtp timeout", envName: smtpTimeoutEnvVar, envVal: "0s"}, {name: "attempt worker concurrency", 
envName: attemptWorkerConcurrencyEnvVar, envVal: "0"}, {name: "stream block timeout", envName: streamBlockTimeoutEnvVar, envVal: "0s"}, {name: "operator request timeout", envName: operatorRequestTimeoutEnvVar, envVal: "0s"}, {name: "idempotency ttl", envName: idempotencyTTLEnvVar, envVal: "0s"}, - {name: "delivery ttl", envName: deliveryTTLEnvVar, envVal: "0s"}, - {name: "attempt ttl", envName: attemptTTLEnvVar, envVal: "0s"}, + {name: "delivery retention", envName: deliveryRetentionEnvVar, envVal: "0s"}, + {name: "malformed command retention", envName: malformedCommandRetentionEnvVar, envVal: "0s"}, + {name: "cleanup interval", envName: cleanupIntervalEnvVar, envVal: "0s"}, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Setenv(redisAddrEnvVar, "127.0.0.1:6379") + setMinimalConn(t) t.Setenv(tt.envName, tt.envVal) if tt.envName == smtpTimeoutEnvVar { t.Setenv(smtpModeEnvVar, SMTPModeSMTP) diff --git a/mail/internal/config/env.go b/mail/internal/config/env.go index 7200bcc..fe604f8 100644 --- a/mail/internal/config/env.go +++ b/mail/internal/config/env.go @@ -6,10 +6,17 @@ import ( "strconv" "strings" "time" + + "galaxy/postgres" + "galaxy/redisconn" ) // LoadFromEnv builds Config from environment variables and validates the -// resulting configuration. +// resulting configuration. Connection topology for Redis and PostgreSQL is +// delegated to the shared `pkg/redisconn` and `pkg/postgres` LoadFromEnv +// helpers — the Redis loader hard-fails on the deprecated +// `MAIL_REDIS_TLS_ENABLED` / `MAIL_REDIS_USERNAME` env vars; the Postgres +// loader requires a primary DSN. 
func LoadFromEnv() (Config, error) { cfg := DefaultConfig() @@ -36,24 +43,18 @@ func LoadFromEnv() (Config, error) { return Config{}, err } - cfg.Redis.Addr = stringEnv(redisAddrEnvVar, cfg.Redis.Addr) - cfg.Redis.Username = stringEnv(redisUsernameEnvVar, cfg.Redis.Username) - cfg.Redis.Password = stringEnv(redisPasswordEnvVar, cfg.Redis.Password) - cfg.Redis.DB, err = intEnv(redisDBEnvVar, cfg.Redis.DB) - if err != nil { - return Config{}, err - } - cfg.Redis.TLSEnabled, err = boolEnv(redisTLSEnabledEnvVar, cfg.Redis.TLSEnabled) - if err != nil { - return Config{}, err - } - cfg.Redis.OperationTimeout, err = durationEnv(redisOperationTimeoutEnvVar, cfg.Redis.OperationTimeout) + redisConn, err := redisconn.LoadFromEnv(envPrefix) if err != nil { return Config{}, err } + cfg.Redis.Conn = redisConn cfg.Redis.CommandStream = stringEnv(redisCommandStreamEnvVar, cfg.Redis.CommandStream) - cfg.Redis.AttemptScheduleKey = stringEnv(redisAttemptScheduleEnvVar, cfg.Redis.AttemptScheduleKey) - cfg.Redis.DeadLetterPrefix = stringEnv(redisDeadLetterPrefixEnvVar, cfg.Redis.DeadLetterPrefix) + + pgConn, err := postgres.LoadFromEnv(envPrefix) + if err != nil { + return Config{}, err + } + cfg.Postgres.Conn = pgConn cfg.SMTP.Mode = stringEnv(smtpModeEnvVar, cfg.SMTP.Mode) cfg.SMTP.Addr = stringEnv(smtpAddrEnvVar, cfg.SMTP.Addr) @@ -88,11 +89,15 @@ func LoadFromEnv() (Config, error) { if err != nil { return Config{}, err } - cfg.DeliveryTTL, err = durationEnv(deliveryTTLEnvVar, cfg.DeliveryTTL) + cfg.Retention.DeliveryRetention, err = durationEnv(deliveryRetentionEnvVar, cfg.Retention.DeliveryRetention) if err != nil { return Config{}, err } - cfg.AttemptTTL, err = durationEnv(attemptTTLEnvVar, cfg.AttemptTTL) + cfg.Retention.MalformedCommandRetention, err = durationEnv(malformedCommandRetentionEnvVar, cfg.Retention.MalformedCommandRetention) + if err != nil { + return Config{}, err + } + cfg.Retention.CleanupInterval, err = durationEnv(cleanupIntervalEnvVar, 
cfg.Retention.CleanupInterval) if err != nil { return Config{}, err } diff --git a/mail/internal/config/validation.go b/mail/internal/config/validation.go index a10243b..8565148 100644 --- a/mail/internal/config/validation.go +++ b/mail/internal/config/validation.go @@ -22,10 +22,6 @@ func (cfg Config) Validate() error { return fmt.Errorf("%s must be positive", operatorRequestTimeoutEnvVar) case cfg.IdempotencyTTL <= 0: return fmt.Errorf("%s must be positive", idempotencyTTLEnvVar) - case cfg.DeliveryTTL <= 0: - return fmt.Errorf("%s must be positive", deliveryTTLEnvVar) - case cfg.AttemptTTL <= 0: - return fmt.Errorf("%s must be positive", attemptTTLEnvVar) } if err := cfg.InternalHTTP.Validate(); err != nil { @@ -34,6 +30,12 @@ func (cfg Config) Validate() error { if err := cfg.Redis.Validate(); err != nil { return err } + if err := cfg.Postgres.Validate(); err != nil { + return fmt.Errorf("postgres: %w", err) + } + if err := cfg.Retention.Validate(); err != nil { + return err + } if err := cfg.SMTP.Validate(); err != nil { return err } diff --git a/mail/internal/worker/attempt_worker_test.go b/mail/internal/worker/attempt_worker_test.go deleted file mode 100644 index 34ac609..0000000 --- a/mail/internal/worker/attempt_worker_test.go +++ /dev/null @@ -1,347 +0,0 @@ -package worker - -import ( - "context" - "errors" - "io" - "log/slog" - "sync" - "testing" - "time" - - "galaxy/mail/internal/adapters/redisstate" - "galaxy/mail/internal/adapters/stubprovider" - "galaxy/mail/internal/domain/attempt" - "galaxy/mail/internal/domain/common" - deliverydomain "galaxy/mail/internal/domain/delivery" - "galaxy/mail/internal/ports" - "galaxy/mail/internal/service/executeattempt" - "galaxy/mail/internal/service/renderdelivery" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/require" -) - -func TestAttemptWorkersSendImmediateFirstAttempt(t *testing.T) { - t.Parallel() - - fixture := newAttemptWorkerFixture(t, nil) - 
createAcceptedRenderedDelivery(t, fixture.client, common.DeliveryID("delivery-immediate"), fixture.clock.Now()) - - cancel, wait := fixture.run(t) - defer func() { - cancel() - wait() - }() - - require.Eventually(t, func() bool { - deliveryRecord := loadDeliveryRecord(t, fixture.client, common.DeliveryID("delivery-immediate")) - return deliveryRecord.Status == deliverydomain.StatusSent - }, 5*time.Second, 20*time.Millisecond) - - require.Len(t, fixture.provider.Inputs(), 1) -} - -func TestAttemptWorkersRetryTransientFailuresUntilSuccess(t *testing.T) { - t.Parallel() - - fixture := newAttemptWorkerFixture(t, []stubprovider.ScriptedOutcome{ - { - Classification: ports.ClassificationTransientFailure, - Script: "retry_1", - }, - { - Classification: ports.ClassificationTransientFailure, - Script: "retry_2", - }, - { - Classification: ports.ClassificationAccepted, - Script: "accepted", - }, - }) - createAcceptedRenderedDelivery(t, fixture.client, common.DeliveryID("delivery-retry-success"), fixture.clock.Now()) - - cancel, wait := fixture.run(t) - defer func() { - cancel() - wait() - }() - - require.Eventually(t, func() bool { - deliveryRecord := loadDeliveryRecord(t, fixture.client, common.DeliveryID("delivery-retry-success")) - return deliveryRecord.AttemptCount == 2 && deliveryRecord.Status == deliverydomain.StatusQueued - }, 5*time.Second, 20*time.Millisecond) - - fixture.clock.Advance(time.Minute) - - require.Eventually(t, func() bool { - deliveryRecord := loadDeliveryRecord(t, fixture.client, common.DeliveryID("delivery-retry-success")) - return deliveryRecord.AttemptCount == 3 && deliveryRecord.Status == deliverydomain.StatusQueued - }, 5*time.Second, 20*time.Millisecond) - - fixture.clock.Advance(5 * time.Minute) - - require.Eventually(t, func() bool { - deliveryRecord := loadDeliveryRecord(t, fixture.client, common.DeliveryID("delivery-retry-success")) - return deliveryRecord.Status == deliverydomain.StatusSent - }, 5*time.Second, 20*time.Millisecond) - - 
require.Len(t, fixture.provider.Inputs(), 3) -} - -func TestAttemptWorkersDeadLetterAfterRetryExhaustion(t *testing.T) { - t.Parallel() - - fixture := newAttemptWorkerFixture(t, []stubprovider.ScriptedOutcome{ - {Classification: ports.ClassificationTransientFailure, Script: "retry_1"}, - {Classification: ports.ClassificationTransientFailure, Script: "retry_2"}, - {Classification: ports.ClassificationTransientFailure, Script: "retry_3"}, - {Classification: ports.ClassificationTransientFailure, Script: "retry_4"}, - }) - deliveryID := common.DeliveryID("delivery-dead-letter") - createAcceptedRenderedDelivery(t, fixture.client, deliveryID, fixture.clock.Now()) - - cancel, wait := fixture.run(t) - defer func() { - cancel() - wait() - }() - - require.Eventually(t, func() bool { - return loadDeliveryRecord(t, fixture.client, deliveryID).AttemptCount == 2 - }, 5*time.Second, 20*time.Millisecond) - - fixture.clock.Advance(time.Minute) - require.Eventually(t, func() bool { - return loadDeliveryRecord(t, fixture.client, deliveryID).AttemptCount == 3 - }, 5*time.Second, 20*time.Millisecond) - - fixture.clock.Advance(5 * time.Minute) - require.Eventually(t, func() bool { - return loadDeliveryRecord(t, fixture.client, deliveryID).AttemptCount == 4 - }, 5*time.Second, 20*time.Millisecond) - - fixture.clock.Advance(30 * time.Minute) - require.Eventually(t, func() bool { - return loadDeliveryRecord(t, fixture.client, deliveryID).Status == deliverydomain.StatusDeadLetter - }, 5*time.Second, 20*time.Millisecond) - - deadLetter := loadDeadLetterRecord(t, fixture.client, deliveryID) - require.Equal(t, "retry_exhausted", deadLetter.FailureClassification) - require.Len(t, fixture.provider.Inputs(), 4) -} - -func TestAttemptWorkersRecoverExpiredClaimAfterCrash(t *testing.T) { - t.Parallel() - - fixture := newAttemptWorkerFixture(t, []stubprovider.ScriptedOutcome{ - {Classification: ports.ClassificationAccepted, Script: "accepted"}, - }) - deliveryID := 
common.DeliveryID("delivery-recovered") - createAcceptedRenderedDelivery(t, fixture.client, deliveryID, fixture.clock.Now()) - - claimed, found, err := fixture.store.ClaimDueAttempt(context.Background(), deliveryID, fixture.clock.Now()) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, deliverydomain.StatusSending, claimed.Delivery.Status) - - fixture.clock.Advance(20 * time.Millisecond) - - cancel, wait := fixture.run(t) - defer func() { - cancel() - wait() - }() - - require.Eventually(t, func() bool { - deliveryRecord := loadDeliveryRecord(t, fixture.client, deliveryID) - return deliveryRecord.Status == deliverydomain.StatusQueued && deliveryRecord.AttemptCount == 2 - }, 5*time.Second, 20*time.Millisecond) - - fixture.clock.Advance(time.Minute) - - require.Eventually(t, func() bool { - deliveryRecord := loadDeliveryRecord(t, fixture.client, deliveryID) - return deliveryRecord.Status == deliverydomain.StatusSent - }, 5*time.Second, 20*time.Millisecond) - - require.Len(t, fixture.provider.Inputs(), 1) -} - -type attemptWorkerFixture struct { - client *redis.Client - store *redisstate.AttemptExecutionStore - service *executeattempt.Service - scheduler *Scheduler - pool *AttemptWorkerPool - provider *stubprovider.Provider - clock *schedulerTestClock -} - -func newAttemptWorkerFixture(t *testing.T, scripted []stubprovider.ScriptedOutcome) attemptWorkerFixture { - t.Helper() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - store, err := redisstate.NewAttemptExecutionStore(client) - require.NoError(t, err) - - provider, err := stubprovider.New(scripted...) 
- require.NoError(t, err) - t.Cleanup(func() { require.NoError(t, provider.Close()) }) - - clock := &schedulerTestClock{now: time.Unix(1_775_121_700, 0).UTC()} - workQueue := make(chan executeattempt.WorkItem, 1) - - service, err := executeattempt.New(executeattempt.Config{ - Renderer: noopRenderer{}, - Provider: provider, - PayloadLoader: store, - Store: store, - Clock: clock, - AttemptTimeout: 5 * time.Millisecond, - }) - require.NoError(t, err) - - scheduler, err := NewScheduler(SchedulerConfig{ - Store: store, - Service: service, - WorkQueue: workQueue, - Clock: clock, - AttemptTimeout: 5 * time.Millisecond, - PollInterval: 10 * time.Millisecond, - RecoveryInterval: 10 * time.Millisecond, - RecoveryGrace: 5 * time.Millisecond, - }, testWorkerLogger()) - require.NoError(t, err) - - pool, err := NewAttemptWorkerPool(AttemptWorkerPoolConfig{ - Concurrency: 1, - WorkQueue: workQueue, - Service: service, - }, testWorkerLogger()) - require.NoError(t, err) - - return attemptWorkerFixture{ - client: client, - store: store, - service: service, - scheduler: scheduler, - pool: pool, - provider: provider, - clock: clock, - } -} - -func (fixture attemptWorkerFixture) run(t *testing.T) (context.CancelFunc, func()) { - t.Helper() - - ctx, cancel := context.WithCancel(context.Background()) - schedulerDone := make(chan error, 1) - poolDone := make(chan error, 1) - - go func() { - schedulerDone <- fixture.scheduler.Run(ctx) - }() - go func() { - poolDone <- fixture.pool.Run(ctx) - }() - - wait := func() { - require.ErrorIs(t, <-schedulerDone, context.Canceled) - require.ErrorIs(t, <-poolDone, context.Canceled) - } - - return cancel, wait -} - -type schedulerTestClock struct { - mu sync.Mutex - now time.Time -} - -func (clock *schedulerTestClock) Now() time.Time { - clock.mu.Lock() - defer clock.mu.Unlock() - return clock.now -} - -func (clock *schedulerTestClock) Advance(delta time.Duration) { - clock.mu.Lock() - defer clock.mu.Unlock() - clock.now = clock.now.Add(delta) -} - 
-type noopRenderer struct{} - -func (noopRenderer) Execute(context.Context, renderdelivery.Input) (renderdelivery.Result, error) { - return renderdelivery.Result{}, errors.New("unexpected render invocation") -} - -func createAcceptedRenderedDelivery(t *testing.T, client *redis.Client, deliveryID common.DeliveryID, createdAt time.Time) { - t.Helper() - - writer, err := redisstate.NewAtomicWriter(client) - require.NoError(t, err) - - deliveryRecord := deliverydomain.Delivery{ - DeliveryID: deliveryID, - Source: deliverydomain.SourceNotification, - PayloadMode: deliverydomain.PayloadModeRendered, - Envelope: deliverydomain.Envelope{ - To: []common.Email{common.Email("pilot@example.com")}, - }, - Content: deliverydomain.Content{ - Subject: "Turn ready", - TextBody: "Turn 54 is ready.", - }, - IdempotencyKey: common.IdempotencyKey("notification:" + deliveryID.String()), - Status: deliverydomain.StatusQueued, - AttemptCount: 1, - CreatedAt: createdAt.UTC().Truncate(time.Millisecond), - UpdatedAt: createdAt.UTC().Truncate(time.Millisecond), - } - require.NoError(t, deliveryRecord.Validate()) - - firstAttempt := attempt.Attempt{ - DeliveryID: deliveryID, - AttemptNo: 1, - ScheduledFor: createdAt.UTC().Truncate(time.Millisecond), - Status: attempt.StatusScheduled, - } - require.NoError(t, firstAttempt.Validate()) - - require.NoError(t, writer.CreateAcceptance(context.Background(), redisstate.CreateAcceptanceInput{ - Delivery: deliveryRecord, - FirstAttempt: &firstAttempt, - })) -} - -func loadDeliveryRecord(t *testing.T, client *redis.Client, deliveryID common.DeliveryID) deliverydomain.Delivery { - t.Helper() - - payload, err := client.Get(context.Background(), redisstate.Keyspace{}.Delivery(deliveryID)).Bytes() - require.NoError(t, err) - record, err := redisstate.UnmarshalDelivery(payload) - require.NoError(t, err) - - return record -} - -func loadDeadLetterRecord(t *testing.T, client *redis.Client, deliveryID common.DeliveryID) deliverydomain.DeadLetterEntry { - 
t.Helper() - - payload, err := client.Get(context.Background(), redisstate.Keyspace{}.DeadLetter(deliveryID)).Bytes() - require.NoError(t, err) - record, err := redisstate.UnmarshalDeadLetter(payload) - require.NoError(t, err) - - return record -} - -func testWorkerLogger() *slog.Logger { - return slog.New(slog.NewJSONHandler(io.Discard, nil)) -} diff --git a/mail/internal/worker/cleanup_worker.go b/mail/internal/worker/cleanup_worker.go deleted file mode 100644 index ba77ccf..0000000 --- a/mail/internal/worker/cleanup_worker.go +++ /dev/null @@ -1,73 +0,0 @@ -package worker - -import ( - "context" - "errors" - "log/slog" - "time" - - "galaxy/mail/internal/adapters/redisstate" -) - -const cleanupInterval = time.Hour - -// CleanupWorker stores the idle index cleanup worker used by the Stage 6 -// runtime skeleton. -type CleanupWorker struct { - cleaner *redisstate.IndexCleaner - logger *slog.Logger -} - -// NewCleanupWorker constructs the idle Stage 6 cleanup worker. -func NewCleanupWorker(cleaner *redisstate.IndexCleaner, logger *slog.Logger) (*CleanupWorker, error) { - if cleaner == nil { - return nil, errors.New("new cleanup worker: nil index cleaner") - } - if logger == nil { - logger = slog.Default() - } - - return &CleanupWorker{ - cleaner: cleaner, - logger: logger.With("component", "cleanup_worker"), - }, nil -} - -// Run starts the idle cleanup worker and blocks until ctx is canceled. 
-func (worker *CleanupWorker) Run(ctx context.Context) error { - if ctx == nil { - return errors.New("run cleanup worker: nil context") - } - if err := ctx.Err(); err != nil { - return err - } - if worker == nil || worker.cleaner == nil { - return errors.New("run cleanup worker: nil cleanup worker") - } - - worker.logger.Info("cleanup worker started", "interval", cleanupInterval.String()) - ticker := time.NewTicker(cleanupInterval) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - worker.logger.Info("cleanup worker stopped") - return ctx.Err() - case <-ticker.C: - } - } -} - -// Shutdown stops the cleanup worker within ctx. The Stage 6 skeleton has no -// additional resources to release. -func (worker *CleanupWorker) Shutdown(ctx context.Context) error { - if ctx == nil { - return errors.New("shutdown cleanup worker: nil context") - } - if worker == nil { - return nil - } - - return nil -} diff --git a/mail/internal/worker/command_consumer.go b/mail/internal/worker/command_consumer.go index 5ab5a90..12d37e6 100644 --- a/mail/internal/worker/command_consumer.go +++ b/mail/internal/worker/command_consumer.go @@ -304,9 +304,10 @@ func optionalRawString(values map[string]any, key string) string { return value } -// Shutdown stops the command consumer within ctx. The consumer uses the -// shared process Redis client and therefore has no dedicated resources to -// release here. +// Shutdown stops the command consumer within ctx. The consumer borrows the +// shared process Redis client and forcibly closes it during Shutdown so the +// in-flight blocking XREAD returns immediately; the runtime owns the same +// client and its cleanupFn is tolerant of ErrClosed. 
func (consumer *CommandConsumer) Shutdown(ctx context.Context) error { if ctx == nil { return errors.New("shutdown command consumer: nil context") @@ -318,9 +319,10 @@ func (consumer *CommandConsumer) Shutdown(ctx context.Context) error { var err error consumer.closeOnce.Do(func() { if consumer.client != nil { - err = consumer.client.Close() + if cerr := consumer.client.Close(); cerr != nil && !errors.Is(cerr, redis.ErrClosed) { + err = cerr + } } }) - return err } diff --git a/mail/internal/worker/command_consumer_test.go b/mail/internal/worker/command_consumer_test.go deleted file mode 100644 index 5d8692b..0000000 --- a/mail/internal/worker/command_consumer_test.go +++ /dev/null @@ -1,391 +0,0 @@ -package worker - -import ( - "context" - "errors" - "io" - "log/slog" - "testing" - "time" - - "galaxy/mail/internal/adapters/redisstate" - "galaxy/mail/internal/service/acceptgenericdelivery" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/require" -) - -func TestCommandConsumerAcceptsRenderedCommand(t *testing.T) { - t.Parallel() - - fixture := newCommandConsumerFixture(t) - messageID := addRenderedCommand(t, fixture.client, "mail-123", "notification:mail-123") - - ctx, cancel := context.WithCancel(context.Background()) - done := make(chan error, 1) - go func() { - done <- fixture.consumer.Run(ctx) - }() - - require.Eventually(t, func() bool { - delivery, found, err := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-123") - if err != nil || !found { - return false - } - entryID, found, err := fixture.offsetStore.Load(context.Background(), fixture.stream) - return err == nil && found && entryID == messageID && delivery.DeliveryID == "mail-123" - }, 5*time.Second, 20*time.Millisecond) - - cancel() - require.ErrorIs(t, <-done, context.Canceled) -} - -func TestCommandConsumerAcceptsTemplateCommand(t *testing.T) { - t.Parallel() - - fixture := newCommandConsumerFixture(t) - messageID := 
addTemplateCommand(t, fixture.client, "mail-124", "notification:mail-124") - - ctx, cancel := context.WithCancel(context.Background()) - done := make(chan error, 1) - go func() { - done <- fixture.consumer.Run(ctx) - }() - - require.Eventually(t, func() bool { - delivery, found, err := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-124") - if err != nil || !found { - return false - } - entryID, found, err := fixture.offsetStore.Load(context.Background(), fixture.stream) - return err == nil && found && entryID == messageID && delivery.TemplateID == "game.turn.ready" - }, 5*time.Second, 20*time.Millisecond) - - cancel() - require.ErrorIs(t, <-done, context.Canceled) -} - -func TestCommandConsumerRecordsMalformedCommandAndContinues(t *testing.T) { - t.Parallel() - - fixture := newCommandConsumerFixture(t) - malformedID := addMalformedRenderedCommand(t, fixture.client, "mail-bad", "notification:mail-bad") - validID := addRenderedCommand(t, fixture.client, "mail-125", "notification:mail-125") - - ctx, cancel := context.WithCancel(context.Background()) - done := make(chan error, 1) - go func() { - done <- fixture.consumer.Run(ctx) - }() - - require.Eventually(t, func() bool { - _, deliveryFound, deliveryErr := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-125") - entry, malformedFound, malformedErr := fixture.malformedStore.Get(context.Background(), malformedID) - entryID, offsetFound, offsetErr := fixture.offsetStore.Load(context.Background(), fixture.stream) - return deliveryErr == nil && - malformedErr == nil && - offsetErr == nil && - deliveryFound && - malformedFound && - entry.FailureCode == "invalid_payload" && - offsetFound && - entryID == validID - }, 5*time.Second, 20*time.Millisecond) - - cancel() - require.ErrorIs(t, <-done, context.Canceled) -} - -func TestCommandConsumerRestartsFromSavedOffset(t *testing.T) { - t.Parallel() - - fixture := newCommandConsumerFixture(t) - firstID := addRenderedCommand(t, fixture.client, 
"mail-126", "notification:mail-126") - - firstCtx, firstCancel := context.WithCancel(context.Background()) - firstDone := make(chan error, 1) - go func() { - firstDone <- fixture.consumer.Run(firstCtx) - }() - - require.Eventually(t, func() bool { - entryID, found, err := fixture.offsetStore.Load(context.Background(), fixture.stream) - return err == nil && found && entryID == firstID - }, 5*time.Second, 20*time.Millisecond) - - firstCancel() - require.ErrorIs(t, <-firstDone, context.Canceled) - - secondID := addRenderedCommand(t, fixture.client, "mail-127", "notification:mail-127") - - secondCtx, secondCancel := context.WithCancel(context.Background()) - secondDone := make(chan error, 1) - go func() { - secondDone <- fixture.consumer.Run(secondCtx) - }() - - require.Eventually(t, func() bool { - _, firstFound, firstErr := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-126") - _, secondFound, secondErr := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-127") - entryID, offsetFound, offsetErr := fixture.offsetStore.Load(context.Background(), fixture.stream) - return firstErr == nil && - secondErr == nil && - offsetErr == nil && - firstFound && - secondFound && - offsetFound && - entryID == secondID - }, 5*time.Second, 20*time.Millisecond) - - secondCancel() - require.ErrorIs(t, <-secondDone, context.Canceled) -} - -func TestCommandConsumerDoesNotDuplicateAcceptanceAfterOffsetSaveFailure(t *testing.T) { - t.Parallel() - - fixture := newCommandConsumerFixture(t) - messageID := addRenderedCommand(t, fixture.client, "mail-128", "notification:mail-128") - failingOffsetStore := &scriptedOffsetStore{ - saveErrs: []error{errors.New("offset unavailable")}, - } - consumer := newCommandConsumerForTest(t, fixture.client, fixture.stream, fixture.acceptor, fixture.malformedStore, failingOffsetStore) - - err := consumer.Run(context.Background()) - require.Error(t, err) - require.ErrorContains(t, err, "save stream offset") - - delivery, found, err := 
fixture.acceptanceStore.GetDelivery(context.Background(), "mail-128") - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "mail-128", delivery.DeliveryID.String()) - - indexCard, err := fixture.client.ZCard(context.Background(), redisstate.Keyspace{}.CreatedAtIndex()).Result() - require.NoError(t, err) - require.EqualValues(t, 1, indexCard) - - replayConsumer := newCommandConsumerForTest(t, fixture.client, fixture.stream, fixture.acceptor, fixture.malformedStore, failingOffsetStore) - replayCtx, replayCancel := context.WithCancel(context.Background()) - replayDone := make(chan error, 1) - go func() { - replayDone <- replayConsumer.Run(replayCtx) - }() - - require.Eventually(t, func() bool { - return failingOffsetStore.lastEntryID == messageID - }, 5*time.Second, 20*time.Millisecond) - - replayCancel() - require.ErrorIs(t, <-replayDone, context.Canceled) - - indexCard, err = fixture.client.ZCard(context.Background(), redisstate.Keyspace{}.CreatedAtIndex()).Result() - require.NoError(t, err) - require.EqualValues(t, 1, indexCard) - - scheduleCard, err := fixture.client.ZCard(context.Background(), redisstate.Keyspace{}.AttemptSchedule()).Result() - require.NoError(t, err) - require.EqualValues(t, 1, scheduleCard) -} - -func TestCommandConsumerRecordsIdempotencyConflictAsMalformed(t *testing.T) { - t.Parallel() - - fixture := newCommandConsumerFixture(t) - addRenderedCommand(t, fixture.client, "mail-129", "notification:shared") - conflictID := addRenderedCommandWithSubject(t, fixture.client, "mail-130", "notification:shared", "Different subject") - - ctx, cancel := context.WithCancel(context.Background()) - done := make(chan error, 1) - go func() { - done <- fixture.consumer.Run(ctx) - }() - - require.Eventually(t, func() bool { - _, firstFound, firstErr := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-129") - _, secondFound, secondErr := fixture.acceptanceStore.GetDelivery(context.Background(), "mail-130") - entry, malformedFound, 
malformedErr := fixture.malformedStore.Get(context.Background(), conflictID) - return firstErr == nil && - secondErr == nil && - malformedErr == nil && - firstFound && - !secondFound && - malformedFound && - entry.FailureCode == "idempotency_conflict" - }, 5*time.Second, 20*time.Millisecond) - - cancel() - require.ErrorIs(t, <-done, context.Canceled) -} - -type commandConsumerFixture struct { - client *redis.Client - stream string - consumer *CommandConsumer - acceptor *acceptgenericdelivery.Service - acceptanceStore *redisstate.GenericAcceptanceStore - malformedStore *redisstate.MalformedCommandStore - offsetStore *redisstate.StreamOffsetStore -} - -func newCommandConsumerFixture(t *testing.T) commandConsumerFixture { - t.Helper() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{Addr: server.Addr()}) - t.Cleanup(func() { require.NoError(t, client.Close()) }) - - acceptanceStore, err := redisstate.NewGenericAcceptanceStore(client) - require.NoError(t, err) - now := time.Now().UTC().Truncate(time.Millisecond) - acceptor, err := acceptgenericdelivery.New(acceptgenericdelivery.Config{ - Store: acceptanceStore, - Clock: testClock{now: now}, - IdempotencyTTL: redisstate.IdempotencyTTL, - }) - require.NoError(t, err) - - malformedStore, err := redisstate.NewMalformedCommandStore(client) - require.NoError(t, err) - offsetStore, err := redisstate.NewStreamOffsetStore(client) - require.NoError(t, err) - - stream := redisstate.Keyspace{}.DeliveryCommands() - consumer := newCommandConsumerForTest(t, client, stream, acceptor, malformedStore, offsetStore) - - return commandConsumerFixture{ - client: client, - stream: stream, - consumer: consumer, - acceptor: acceptor, - acceptanceStore: acceptanceStore, - malformedStore: malformedStore, - offsetStore: offsetStore, - } -} - -func newCommandConsumerForTest( - t *testing.T, - client *redis.Client, - stream string, - acceptor AcceptGenericDeliveryUseCase, - malformedRecorder MalformedCommandRecorder, - 
offsetStore StreamOffsetStore, -) *CommandConsumer { - t.Helper() - - consumer, err := NewCommandConsumer(CommandConsumerConfig{ - Client: client, - Stream: stream, - BlockTimeout: 20 * time.Millisecond, - Acceptor: acceptor, - MalformedRecorder: malformedRecorder, - OffsetStore: offsetStore, - Clock: testClock{now: time.Now().UTC().Truncate(time.Millisecond)}, - }, testLogger()) - require.NoError(t, err) - - return consumer -} - -func addRenderedCommand(t *testing.T, client *redis.Client, deliveryID string, idempotencyKey string) string { - t.Helper() - - return addRenderedCommandWithSubject(t, client, deliveryID, idempotencyKey, "Turn ready") -} - -func addRenderedCommandWithSubject(t *testing.T, client *redis.Client, deliveryID string, idempotencyKey string, subject string) string { - t.Helper() - - messageID, err := client.XAdd(context.Background(), &redis.XAddArgs{ - Stream: redisstate.Keyspace{}.DeliveryCommands(), - Values: map[string]any{ - "delivery_id": deliveryID, - "source": "notification", - "payload_mode": "rendered", - "idempotency_key": idempotencyKey, - "requested_at_ms": "1775121700000", - "payload_json": `{"to":["pilot@example.com"],"cc":[],"bcc":[],"reply_to":["noreply@example.com"],"subject":"` + subject + `","text_body":"Turn 54 is ready.","html_body":"

Turn 54 is ready.

","attachments":[]}`, - }, - }).Result() - require.NoError(t, err) - - return messageID -} - -func addTemplateCommand(t *testing.T, client *redis.Client, deliveryID string, idempotencyKey string) string { - t.Helper() - - messageID, err := client.XAdd(context.Background(), &redis.XAddArgs{ - Stream: redisstate.Keyspace{}.DeliveryCommands(), - Values: map[string]any{ - "delivery_id": deliveryID, - "source": "notification", - "payload_mode": "template", - "idempotency_key": idempotencyKey, - "requested_at_ms": "1775121700001", - "payload_json": `{"to":["pilot@example.com"],"cc":[],"bcc":[],"reply_to":[],"template_id":"game.turn.ready","locale":"fr-FR","variables":{"turn_number":54},"attachments":[]}`, - }, - }).Result() - require.NoError(t, err) - - return messageID -} - -func addMalformedRenderedCommand(t *testing.T, client *redis.Client, deliveryID string, idempotencyKey string) string { - t.Helper() - - messageID, err := client.XAdd(context.Background(), &redis.XAddArgs{ - Stream: redisstate.Keyspace{}.DeliveryCommands(), - Values: map[string]any{ - "delivery_id": deliveryID, - "source": "notification", - "payload_mode": "rendered", - "idempotency_key": idempotencyKey, - "requested_at_ms": "1775121700000", - "payload_json": `{"to":["pilot@example.com"],"cc":[],"bcc":[],"reply_to":[],"text_body":"Turn 54 is ready.","attachments":[]}`, - }, - }).Result() - require.NoError(t, err) - - return messageID -} - -type testClock struct { - now time.Time -} - -func (clock testClock) Now() time.Time { - return clock.now -} - -type scriptedOffsetStore struct { - lastEntryID string - found bool - saveErrs []error - saveCalls int -} - -func (store *scriptedOffsetStore) Load(context.Context, string) (string, bool, error) { - if !store.found { - return "", false, nil - } - - return store.lastEntryID, true, nil -} - -func (store *scriptedOffsetStore) Save(_ context.Context, _ string, entryID string) error { - if store.saveCalls < len(store.saveErrs) && 
store.saveErrs[store.saveCalls] != nil { - store.saveCalls++ - return store.saveErrs[store.saveCalls-1] - } - - store.saveCalls++ - store.lastEntryID = entryID - store.found = true - return nil -} - -func testLogger() *slog.Logger { - return slog.New(slog.NewJSONHandler(io.Discard, nil)) -} diff --git a/mail/internal/worker/sqlretention.go b/mail/internal/worker/sqlretention.go new file mode 100644 index 0000000..15fdb9d --- /dev/null +++ b/mail/internal/worker/sqlretention.go @@ -0,0 +1,162 @@ +package worker + +import ( + "context" + "errors" + "fmt" + "log/slog" + "time" +) + +// SQLRetentionStore performs the durable DELETE statements applied by the +// retention worker. Implementations are typically the umbrella PostgreSQL +// mail store; the interface keeps the worker decoupled from the store +// package. +type SQLRetentionStore interface { + // DeleteDeliveriesOlderThan removes deliveries whose created_at predates + // cutoff. Cascading FKs drop attempts, dead_letters, delivery_payloads, + // and delivery_recipients owned by the deleted rows. + DeleteDeliveriesOlderThan(ctx context.Context, cutoff time.Time) (int64, error) + + // DeleteMalformedCommandsOlderThan removes malformed-command rows whose + // recorded_at predates cutoff. + DeleteMalformedCommandsOlderThan(ctx context.Context, cutoff time.Time) (int64, error) +} + +// SQLRetentionConfig stores the dependencies and policy used by +// SQLRetentionWorker. +type SQLRetentionConfig struct { + // Store applies the durable DELETE statements. + Store SQLRetentionStore + + // DeliveryRetention bounds how long deliveries (and their cascaded + // attempts/dead_letters/payloads/recipients) survive after creation. + DeliveryRetention time.Duration + + // MalformedCommandRetention bounds how long malformed-command rows + // survive after recorded_at. + MalformedCommandRetention time.Duration + + // CleanupInterval stores the wall-clock period between two retention + // passes. 
+ CleanupInterval time.Duration + + // Clock provides the wall-clock used to compute cutoff timestamps. + Clock Clock +} + +// SQLRetentionWorker periodically deletes deliveries and malformed-command +// rows whose retention window has expired. The worker replaces the previous +// Redis index_cleaner that maintained secondary index keys; PostgreSQL +// indexes are maintained by the engine, so the worker only needs to enforce +// retention. +type SQLRetentionWorker struct { + store SQLRetentionStore + deliveryRetention time.Duration + malformedCommandRetention time.Duration + cleanupInterval time.Duration + clock Clock + logger *slog.Logger +} + +// NewSQLRetentionWorker constructs the periodic retention worker. +func NewSQLRetentionWorker(cfg SQLRetentionConfig, logger *slog.Logger) (*SQLRetentionWorker, error) { + switch { + case cfg.Store == nil: + return nil, errors.New("new sql retention worker: nil store") + case cfg.DeliveryRetention <= 0: + return nil, errors.New("new sql retention worker: non-positive delivery retention") + case cfg.MalformedCommandRetention <= 0: + return nil, errors.New("new sql retention worker: non-positive malformed command retention") + case cfg.CleanupInterval <= 0: + return nil, errors.New("new sql retention worker: non-positive cleanup interval") + case cfg.Clock == nil: + return nil, errors.New("new sql retention worker: nil clock") + } + if logger == nil { + logger = slog.Default() + } + + return &SQLRetentionWorker{ + store: cfg.Store, + deliveryRetention: cfg.DeliveryRetention, + malformedCommandRetention: cfg.MalformedCommandRetention, + cleanupInterval: cfg.CleanupInterval, + clock: cfg.Clock, + logger: logger.With("component", "sql_retention_worker"), + }, nil +} + +// Run starts the retention loop and blocks until ctx is canceled. 
+func (worker *SQLRetentionWorker) Run(ctx context.Context) error { + if ctx == nil { + return errors.New("run sql retention worker: nil context") + } + if err := ctx.Err(); err != nil { + return err + } + if worker == nil { + return errors.New("run sql retention worker: nil worker") + } + + worker.logger.Info("sql retention worker started", + "delivery_retention", worker.deliveryRetention.String(), + "malformed_command_retention", worker.malformedCommandRetention.String(), + "cleanup_interval", worker.cleanupInterval.String(), + ) + defer worker.logger.Info("sql retention worker stopped") + + // First pass runs immediately so a freshly started service does not wait + // one full interval before evicting stale rows. + worker.runOnce(ctx) + + ticker := time.NewTicker(worker.cleanupInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + worker.runOnce(ctx) + } + } +} + +// Shutdown stops the retention worker within ctx. +func (worker *SQLRetentionWorker) Shutdown(ctx context.Context) error { + if ctx == nil { + return errors.New("shutdown sql retention worker: nil context") + } + return nil +} + +func (worker *SQLRetentionWorker) runOnce(ctx context.Context) { + now := worker.clock.Now().UTC() + + deliveryCutoff := now.Add(-worker.deliveryRetention) + if deleted, err := worker.store.DeleteDeliveriesOlderThan(ctx, deliveryCutoff); err != nil { + worker.logger.Warn("delete expired deliveries failed", + "cutoff", deliveryCutoff, + "error", fmt.Sprintf("%v", err), + ) + } else if deleted > 0 { + worker.logger.Info("expired deliveries deleted", + "cutoff", deliveryCutoff, + "deleted", deleted, + ) + } + + malformedCutoff := now.Add(-worker.malformedCommandRetention) + if deleted, err := worker.store.DeleteMalformedCommandsOlderThan(ctx, malformedCutoff); err != nil { + worker.logger.Warn("delete expired malformed commands failed", + "cutoff", malformedCutoff, + "error", fmt.Sprintf("%v", err), + ) + } else if 
deleted > 0 { + worker.logger.Info("expired malformed commands deleted", + "cutoff", malformedCutoff, + "deleted", deleted, + ) + } +} diff --git a/notification/Makefile b/notification/Makefile new file mode 100644 index 0000000..ecae4be --- /dev/null +++ b/notification/Makefile @@ -0,0 +1,10 @@ +# Makefile for galaxy/notification. +# +# The `jet` target regenerates the go-jet/v2 query-builder code under +# internal/adapters/postgres/jet/ against a transient PostgreSQL container +# brought up by cmd/jetgen. Generated code is committed. + +.PHONY: jet + +jet: + go run ./cmd/jetgen diff --git a/notification/README.md b/notification/README.md index a266167..59c5e1b 100644 --- a/notification/README.md +++ b/notification/README.md @@ -155,7 +155,9 @@ Intentional runtime omissions in v1: Required: -- `NOTIFICATION_REDIS_ADDR` +- `NOTIFICATION_REDIS_MASTER_ADDR` +- `NOTIFICATION_REDIS_PASSWORD` +- `NOTIFICATION_POSTGRES_PRIMARY_DSN` - `NOTIFICATION_USER_SERVICE_BASE_URL` Primary configuration groups: @@ -168,12 +170,18 @@ Primary configuration groups: - `NOTIFICATION_INTERNAL_HTTP_READ_HEADER_TIMEOUT` with default `2s` - `NOTIFICATION_INTERNAL_HTTP_READ_TIMEOUT` with default `10s` - `NOTIFICATION_INTERNAL_HTTP_IDLE_TIMEOUT` with default `1m` -- Redis connectivity: - - `NOTIFICATION_REDIS_USERNAME` - - `NOTIFICATION_REDIS_PASSWORD` +- Redis connectivity (master/replica/password shape; the deprecated + `NOTIFICATION_REDIS_ADDR`, `NOTIFICATION_REDIS_USERNAME`, and + `NOTIFICATION_REDIS_TLS_ENABLED` env vars are rejected at startup): + - `NOTIFICATION_REDIS_REPLICA_ADDRS` (optional, comma-separated) - `NOTIFICATION_REDIS_DB` - - `NOTIFICATION_REDIS_TLS_ENABLED` - `NOTIFICATION_REDIS_OPERATION_TIMEOUT` +- PostgreSQL connectivity: + - `NOTIFICATION_POSTGRES_REPLICA_DSNS` (optional, comma-separated) + - `NOTIFICATION_POSTGRES_OPERATION_TIMEOUT` + - `NOTIFICATION_POSTGRES_MAX_OPEN_CONNS` + - `NOTIFICATION_POSTGRES_MAX_IDLE_CONNS` + - `NOTIFICATION_POSTGRES_CONN_MAX_LIFETIME` - 
stream names: - `NOTIFICATION_INTENTS_STREAM` with default `notification:intents` - `NOTIFICATION_INTENTS_READ_BLOCK_TIMEOUT` with default `2s` @@ -186,9 +194,13 @@ Primary configuration groups: - `NOTIFICATION_ROUTE_BACKOFF_MIN` with default `1s` - `NOTIFICATION_ROUTE_BACKOFF_MAX` with default `5m` - `NOTIFICATION_ROUTE_LEASE_TTL` with default `5s` - - `NOTIFICATION_DEAD_LETTER_TTL` with default `720h` - - `NOTIFICATION_RECORD_TTL` with default `720h` - `NOTIFICATION_IDEMPOTENCY_TTL` with default `168h` +- retention (periodic SQL retention worker; replaces the previous + `NOTIFICATION_DEAD_LETTER_TTL` and `NOTIFICATION_RECORD_TTL` Redis-EXPIRE + knobs): + - `NOTIFICATION_RECORD_RETENTION` with default `720h` + - `NOTIFICATION_MALFORMED_INTENT_RETENTION` with default `2160h` + - `NOTIFICATION_CLEANUP_INTERVAL` with default `1h` - `User Service` enrichment: - `NOTIFICATION_USER_SERVICE_TIMEOUT` with default `1s` - administrator routing: @@ -472,52 +484,90 @@ Materialization rules: The service-local aggregate notification status is derived from routes and is not a separate durable source of truth. -## Redis Logical Model +## Persistence Model + +Durable storage is split between PostgreSQL (table-shaped business state) +and Redis (streams, runtime coordination). The architectural rules live in +[`ARCHITECTURE.md §Persistence Backends`](../ARCHITECTURE.md#persistence-backends); +the per-service decision record is +[`docs/postgres-migration.md`](docs/postgres-migration.md). + +### PostgreSQL durable state + +The service owns the `notification` schema. Migrations are embedded in the +binary (`internal/adapters/postgres/migrations`) and applied at startup via +`pkg/postgres.RunMigrations` strictly before any HTTP listener becomes +ready. Every time-valued column is `timestamptz`, normalised to UTC by the +adapter on bind and scan. 
+ +| Table | Frozen columns | +| --- | --- | +| `records` | `notification_id`, `notification_type`, `producer`, `audience_kind`, `recipient_user_ids` (jsonb), `payload_json`, `idempotency_key`, `request_fingerprint`, `request_id`, `trace_id`, `occurred_at`, `accepted_at`, `updated_at`, `idempotency_expires_at`; `UNIQUE (producer, idempotency_key)` | +| `routes` | `notification_id`, `route_id`, `channel`, `recipient_ref`, `status`, `attempt_count`, `max_attempts`, `next_attempt_at`, `resolved_email`, `resolved_locale`, `last_error_classification`, `last_error_message`, `last_error_at`, `created_at`, `updated_at`, `published_at`, `dead_lettered_at`, `skipped_at`; PRIMARY KEY `(notification_id, route_id)` | +| `dead_letters` | `notification_id`, `route_id`, `channel`, `recipient_ref`, `final_attempt_count`, `max_attempts`, `failure_classification`, `failure_message`, `recovery_hint`, `created_at`; PRIMARY KEY `(notification_id, route_id)` cascading from `routes` | +| `malformed_intents` | `stream_entry_id`, `notification_type`, `producer`, `idempotency_key`, `failure_code`, `failure_message`, `raw_fields` (jsonb), `recorded_at` | Storage rules: -- durable records are stored as strict JSON blobs -- timestamps are stored in Unix milliseconds -- dynamic Redis key segments are base64url-encoded -- `notification:route_schedule` is one shared sorted set for both `push` and - `email` +- the durable `records` row IS the idempotency reservation; the + `(producer, idempotency_key)` UNIQUE constraint surfaces conflicts as + `acceptintent.ErrConflict` +- `next_attempt_at` is non-NULL only while the route is a scheduling + candidate (`status=pending|failed`); the partial index `routes_due_idx` + drives the publishers' `ListDueRoutes` scan +- `payload_json` stores the canonical normalized JSON string used for + idempotency fingerprinting; `recipient_user_ids` is JSONB and omitted + for `audience_kind=admin_email` +- terminal transitions clear `next_attempt_at` and stamp the 
appropriate + terminal column (`published_at` / `dead_lettered_at` / `skipped_at`) +- record-level retention deletes cascade to `routes` and `dead_letters` + via `ON DELETE CASCADE` + +### Redis runtime-coordination state | Logical artifact | Redis key | | --- | --- | -| `notification_record` | `notification:records:` | -| `notification_route` | `notification:routes::` | | temporary route lease | `notification:route_leases::` | -| `notification_idempotency_record` | `notification:idempotency::` | -| `notification_dead_letter_entry` | `notification:dead_letters::` | -| malformed intent record | `notification:malformed_intents:` | | stream offset record | `notification:stream_offsets:` | | ingress stream | `notification:intents` | -| route schedule sorted set | `notification:route_schedule` | -| Record | Frozen fields | -| --- | --- | -| `notification_record` | `notification_id`, `notification_type`, `producer`, `audience_kind`, normalized `recipient_user_ids`, normalized `payload_json`, `idempotency_key`, `request_fingerprint`, optional `request_id`, optional `trace_id`, `occurred_at_ms`, `accepted_at_ms`, `updated_at_ms` | -| `notification_route` | `notification_id`, `route_id`, `channel`, `recipient_ref`, `status`, `attempt_count`, `max_attempts`, `next_attempt_at_ms`, optional `resolved_email`, optional `resolved_locale`, optional `last_error_classification`, optional `last_error_message`, optional `last_error_at_ms`, `created_at_ms`, `updated_at_ms`, optional `published_at_ms`, optional `dead_lettered_at_ms`, optional `skipped_at_ms` | -| `notification_idempotency_record` | `producer`, `idempotency_key`, `notification_id`, `request_fingerprint`, `created_at_ms`, `expires_at_ms` | -| `notification_dead_letter_entry` | `notification_id`, `route_id`, `channel`, `recipient_ref`, `final_attempt_count`, `max_attempts`, `failure_classification`, `failure_message`, `created_at_ms`, optional `recovery_hint` | -| malformed intent record | `stream_entry_id`, optional 
`notification_type`, optional `producer`, optional `idempotency_key`, `failure_code`, `failure_message`, `raw_fields_json`, `recorded_at_ms` | -| stream offset record | `stream`, `last_processed_entry_id`, `updated_at_ms` | +Storage rules: -`notification_record.recipient_user_ids` stores a normalized array of unique -`user_id` values and is omitted for `audience_kind=admin_email`. -`notification_record.payload_json` stores the canonical normalized JSON string -used for idempotency fingerprinting. -Temporary route lease keys store one opaque worker token and use -`NOTIFICATION_ROUTE_LEASE_TTL`; they are service-local coordination state -rather than durable records. -`notification:route_schedule` stores one member per scheduled route where score -= `next_attempt_at_ms` and member = full Redis route key with encoded dynamic -segments. -Newly accepted publishable routes enter the schedule immediately with -`status=pending` and `next_attempt_at_ms = accepted_at_ms`. -`failed` routes remain scheduled for retry. -`published`, `dead_letter`, and `skipped` are absent from the schedule. -Only the current lease holder may finalize one due publication attempt. 
+- dynamic Redis key segments are base64url-encoded +- temporary route lease keys store one opaque worker token and use + `NOTIFICATION_ROUTE_LEASE_TTL`; they are service-local coordination + state rather than durable records, retained on Redis as a per-replica + exclusivity hint atop the SQL claim +- stream offset records persist plain-XREAD consumer progress for + `notification:intents` and never expire +- the outbound streams `gateway:client-events` and `mail:delivery_commands` + remain Redis Streams owned by Gateway and Mail Service respectively; + Notification Service emits one entry through `XADD` before committing + the route's PostgreSQL state transition + +### Publisher claim and lease coordination + +`Push` and `Email` publishers share the same scheduling pattern: + +- `routes_due_idx` (the partial index on `next_attempt_at`) replaces the + former `notification:route_schedule` ZSET; the SQL query + `SELECT notification_id, route_id FROM routes WHERE next_attempt_at IS + NOT NULL AND next_attempt_at <= now() ORDER BY next_attempt_at ASC LIMIT + N` returns the next due batch +- `push` publishers filter for `route_id` prefix `push:`; `email` + publishers filter for prefix `email:` so the two workers do not contend +- `push` and `email` replicas coordinate through + `notification:route_leases::` with + `NOTIFICATION_ROUTE_LEASE_TTL` +- only the current lease holder finalises one due publication attempt; + the durable transition is a `Complete*` SQL transaction with optimistic + concurrency on `routes.updated_at` so a stale lease cannot overwrite a + fresher row state +- newly accepted publishable routes enter the partial index immediately + with `status=pending` and `next_attempt_at = accepted_at` +- `failed` routes remain in the partial index for retry +- `published`, `dead_letter`, and `skipped` clear `next_attempt_at` and + drop out of the index ## Retry And Dead-Letter Policy @@ -550,12 +600,15 @@ Rules: Retention rules: -- `notification_record` and 
`notification_route` use - `NOTIFICATION_RECORD_TTL` -- `notification_idempotency_record` uses `NOTIFICATION_IDEMPOTENCY_TTL` -- `notification_dead_letter_entry` and malformed intent records use - `NOTIFICATION_DEAD_LETTER_TTL` -- stream offset records do not use TTL +- `records` and their cascaded `routes` / `dead_letters` use + `NOTIFICATION_RECORD_RETENTION` (deleted by the periodic SQL retention + worker after the configured window; cascade clears dependent rows) +- the per-record idempotency window (`records.idempotency_expires_at`) + uses `NOTIFICATION_IDEMPOTENCY_TTL` +- `malformed_intents` use `NOTIFICATION_MALFORMED_INTENT_RETENTION` + (independent retention pass) +- the retention worker runs once per `NOTIFICATION_CLEANUP_INTERVAL` +- stream offset records do not expire ## Observability diff --git a/notification/cmd/jetgen/main.go b/notification/cmd/jetgen/main.go new file mode 100644 index 0000000..ccc71e3 --- /dev/null +++ b/notification/cmd/jetgen/main.go @@ -0,0 +1,236 @@ +// Command jetgen regenerates the go-jet/v2 query-builder code under +// galaxy/notification/internal/adapters/postgres/jet/ against a transient +// PostgreSQL instance. +// +// The program is intended to be invoked as `go run ./cmd/jetgen` (or via the +// `make jet` Makefile target) from within `galaxy/notification`. It is not +// part of the runtime binary. +// +// Steps: +// +// 1. start a postgres:16-alpine container via testcontainers-go +// 2. open it through pkg/postgres as the superuser +// 3. CREATE ROLE notificationservice and CREATE SCHEMA "notification" +// AUTHORIZATION notificationservice +// 4. open a second pool as notificationservice with search_path=notification +// and apply the embedded goose migrations +// 5. 
run jet's PostgreSQL generator against schema=notification, writing into +// ../internal/adapters/postgres/jet +package main + +import ( + "context" + "errors" + "fmt" + "log" + "net/url" + "os" + "path/filepath" + "runtime" + "time" + + "galaxy/notification/internal/adapters/postgres/migrations" + "galaxy/postgres" + + jetpostgres "github.com/go-jet/jet/v2/generator/postgres" + testcontainers "github.com/testcontainers/testcontainers-go" + tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres" + "github.com/testcontainers/testcontainers-go/wait" +) + +const ( + postgresImage = "postgres:16-alpine" + superuserName = "galaxy" + superuserPassword = "galaxy" + superuserDatabase = "galaxy_notification" + serviceRole = "notificationservice" + servicePassword = "notificationservice" + serviceSchema = "notification" + containerStartup = 90 * time.Second + defaultOpTimeout = 10 * time.Second + jetOutputDirSuffix = "internal/adapters/postgres/jet" +) + +func main() { + if err := run(context.Background()); err != nil { + log.Fatalf("jetgen: %v", err) + } +} + +func run(ctx context.Context) error { + outputDir, err := jetOutputDir() + if err != nil { + return err + } + + container, err := tcpostgres.Run(ctx, postgresImage, + tcpostgres.WithDatabase(superuserDatabase), + tcpostgres.WithUsername(superuserName), + tcpostgres.WithPassword(superuserPassword), + testcontainers.WithWaitStrategy( + wait.ForLog("database system is ready to accept connections"). + WithOccurrence(2). 
+ WithStartupTimeout(containerStartup), + ), + ) + if err != nil { + return fmt.Errorf("start postgres container: %w", err) + } + defer func() { + if termErr := testcontainers.TerminateContainer(container); termErr != nil { + log.Printf("jetgen: terminate container: %v", termErr) + } + }() + + baseDSN, err := container.ConnectionString(ctx, "sslmode=disable") + if err != nil { + return fmt.Errorf("resolve container dsn: %w", err) + } + + if err := provisionRoleAndSchema(ctx, baseDSN); err != nil { + return err + } + + scopedDSN, err := dsnForServiceRole(baseDSN) + if err != nil { + return err + } + if err := applyMigrations(ctx, scopedDSN); err != nil { + return err + } + + if err := os.RemoveAll(outputDir); err != nil { + return fmt.Errorf("remove existing jet output %q: %w", outputDir, err) + } + if err := os.MkdirAll(filepath.Dir(outputDir), 0o755); err != nil { + return fmt.Errorf("ensure jet output parent: %w", err) + } + + jetCfg := postgres.DefaultConfig() + jetCfg.PrimaryDSN = scopedDSN + jetCfg.OperationTimeout = defaultOpTimeout + jetDB, err := postgres.OpenPrimary(ctx, jetCfg) + if err != nil { + return fmt.Errorf("open scoped pool for jet generation: %w", err) + } + defer func() { _ = jetDB.Close() }() + + if err := jetpostgres.GenerateDB(jetDB, serviceSchema, outputDir); err != nil { + return fmt.Errorf("jet generate: %w", err) + } + + log.Printf("jetgen: generated jet code into %s (schema=%s)", outputDir, serviceSchema) + return nil +} + +func provisionRoleAndSchema(ctx context.Context, baseDSN string) error { + cfg := postgres.DefaultConfig() + cfg.PrimaryDSN = baseDSN + cfg.OperationTimeout = defaultOpTimeout + db, err := postgres.OpenPrimary(ctx, cfg) + if err != nil { + return fmt.Errorf("open admin pool: %w", err) + } + defer func() { _ = db.Close() }() + + statements := []string{ + fmt.Sprintf(`DO $$ BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = %s) THEN + CREATE ROLE %s LOGIN PASSWORD %s; + END IF; + END $$;`, 
sqlLiteral(serviceRole), sqlIdentifier(serviceRole), sqlLiteral(servicePassword)), + fmt.Sprintf(`CREATE SCHEMA IF NOT EXISTS %s AUTHORIZATION %s;`, + sqlIdentifier(serviceSchema), sqlIdentifier(serviceRole)), + fmt.Sprintf(`GRANT USAGE ON SCHEMA %s TO %s;`, + sqlIdentifier(serviceSchema), sqlIdentifier(serviceRole)), + } + for _, statement := range statements { + if _, err := db.ExecContext(ctx, statement); err != nil { + return fmt.Errorf("provision %q/%q: %w", serviceSchema, serviceRole, err) + } + } + return nil +} + +func dsnForServiceRole(baseDSN string) (string, error) { + parsed, err := url.Parse(baseDSN) + if err != nil { + return "", fmt.Errorf("parse base dsn: %w", err) + } + values := url.Values{} + values.Set("search_path", serviceSchema) + values.Set("sslmode", "disable") + scoped := url.URL{ + Scheme: parsed.Scheme, + User: url.UserPassword(serviceRole, servicePassword), + Host: parsed.Host, + Path: parsed.Path, + RawQuery: values.Encode(), + } + return scoped.String(), nil +} + +func applyMigrations(ctx context.Context, dsn string) error { + cfg := postgres.DefaultConfig() + cfg.PrimaryDSN = dsn + cfg.OperationTimeout = defaultOpTimeout + db, err := postgres.OpenPrimary(ctx, cfg) + if err != nil { + return fmt.Errorf("open scoped pool: %w", err) + } + defer func() { _ = db.Close() }() + + if err := postgres.Ping(ctx, db, defaultOpTimeout); err != nil { + return err + } + if err := postgres.RunMigrations(ctx, db, migrations.FS(), "."); err != nil { + return fmt.Errorf("run migrations: %w", err) + } + return nil +} + +// jetOutputDir returns the absolute path that jet should write into. We rely +// on the runtime caller info to anchor it to galaxy/notification regardless +// of the invoking working directory. 
+func jetOutputDir() (string, error) { + _, file, _, ok := runtime.Caller(0) + if !ok { + return "", errors.New("resolve runtime caller for jet output path") + } + dir := filepath.Dir(file) + // dir = .../galaxy/notification/cmd/jetgen + moduleRoot := filepath.Clean(filepath.Join(dir, "..", "..")) + return filepath.Join(moduleRoot, jetOutputDirSuffix), nil +} + +func sqlIdentifier(name string) string { + return `"` + escapeDoubleQuotes(name) + `"` +} + +func sqlLiteral(value string) string { + return "'" + escapeSingleQuotes(value) + "'" +} + +func escapeDoubleQuotes(value string) string { + out := make([]byte, 0, len(value)) + for index := 0; index < len(value); index++ { + if value[index] == '"' { + out = append(out, '"', '"') + continue + } + out = append(out, value[index]) + } + return string(out) +} + +func escapeSingleQuotes(value string) string { + out := make([]byte, 0, len(value)) + for index := 0; index < len(value); index++ { + if value[index] == '\'' { + out = append(out, '\'', '\'') + continue + } + out = append(out, value[index]) + } + return string(out) +} diff --git a/notification/contract_asyncapi_test.go b/notification/contract_asyncapi_test.go index 819002a..e80b4ab 100644 --- a/notification/contract_asyncapi_test.go +++ b/notification/contract_asyncapi_test.go @@ -611,6 +611,7 @@ func TestGatewayREADMEFreezesExactPushVocabulary(t *testing.T) { "- `lobby.application.submitted`", "- `lobby.membership.approved`", "- `lobby.membership.rejected`", + "- `lobby.membership.blocked`", "- `lobby.invite.created`", "- `lobby.invite.redeemed`", "- `lobby.race_name.registration_eligible`", diff --git a/notification/docs/examples.md b/notification/docs/examples.md index 84d86ee..28de7bd 100644 --- a/notification/docs/examples.md +++ b/notification/docs/examples.md @@ -8,7 +8,9 @@ placeholders unless explicitly stated otherwise. 
Minimal local runtime: ```dotenv -NOTIFICATION_REDIS_ADDR=127.0.0.1:6379 +NOTIFICATION_REDIS_MASTER_ADDR=127.0.0.1:6379 +NOTIFICATION_REDIS_PASSWORD=integration +NOTIFICATION_POSTGRES_PRIMARY_DSN=postgres://notificationservice:notificationservice@127.0.0.1:5432/galaxy?search_path=notification&sslmode=disable NOTIFICATION_INTERNAL_HTTP_ADDR=:8092 NOTIFICATION_USER_SERVICE_BASE_URL=http://127.0.0.1:8091 diff --git a/notification/docs/postgres-migration.md b/notification/docs/postgres-migration.md new file mode 100644 index 0000000..a10d923 --- /dev/null +++ b/notification/docs/postgres-migration.md @@ -0,0 +1,265 @@ +# PostgreSQL Migration + +PG_PLAN.md §5 migrated `galaxy/notification` from a Redis-only durable store +to the steady-state split codified in `ARCHITECTURE.md §Persistence +Backends`: PostgreSQL is the source of truth for table-shaped notification +state, and Redis keeps only the inbound `notification:intents` stream, the +two outbound streams (`gateway:client-events`, `mail:delivery_commands`), +the persisted consumer offset, and the short-lived per-route exclusivity +lease. + +This document records the schema decisions and the non-obvious agreements +behind them. Use it together with the migration script +(`internal/adapters/postgres/migrations/00001_init.sql`) and the runtime +wiring (`internal/app/runtime.go`). + +## Outcomes + +- Schema `notification` (provisioned externally) holds the durable state: + `records`, `routes`, `dead_letters`, `malformed_intents`. +- The runtime opens one PostgreSQL pool via `pkg/postgres.OpenPrimary`, + applies embedded goose migrations strictly before any HTTP listener + becomes ready, and exits non-zero when migration or ping fails. +- The runtime opens one shared `*redis.Client` via + `pkg/redisconn.NewMasterClient` and passes it to the intent consumer, the + publishers (outbound XADDs), the route lease store, and the persisted + stream offset store. 
+- The Redis adapter package (`internal/adapters/redisstate/`) is reduced to + the surviving `LeaseStore`, `StreamOffsetStore`, and a slim `Keyspace` + exposing only `RouteLease(notificationID, routeID)`, + `StreamOffset(stream)`, and `Intents()`. The Lua-backed atomic writer, + the route-state mutation scripts, the records/routes/idempotency/dead- + letters/malformed-intents keyspace, and the per-record TTL constants are + gone. +- Configuration drops `NOTIFICATION_REDIS_USERNAME` / + `NOTIFICATION_REDIS_TLS_ENABLED` / `NOTIFICATION_REDIS_ADDR` and + introduces `NOTIFICATION_REDIS_MASTER_ADDR` / + `NOTIFICATION_REDIS_REPLICA_ADDRS` plus `NOTIFICATION_POSTGRES_*`. The + retention knobs `NOTIFICATION_RECORD_TTL` / + `NOTIFICATION_DEAD_LETTER_TTL` are renamed to + `NOTIFICATION_RECORD_RETENTION` / + `NOTIFICATION_MALFORMED_INTENT_RETENTION`, and a new + `NOTIFICATION_CLEANUP_INTERVAL` drives the periodic SQL retention + worker. + +## Decisions + +### 1. One schema, externally-provisioned role + +**Decision.** The `notification` schema and the matching +`notificationservice` role are created outside the migration sequence (in +tests, by +`integration/internal/harness/postgres_container.go::EnsureRoleAndSchema`; +in production, by an ops init script not in scope for this stage). The +embedded migration `00001_init.sql` only contains DDL for tables and +indexes and assumes it runs as the schema owner with +`search_path=notification`. + +**Why.** Mixing role creation, schema creation, and table DDL into one +script forces every consumer of the migration to run as a superuser. The +schema-per-service architectural rule +(`ARCHITECTURE.md §Persistence Backends`) lines up neatly with the +operational split: ops provisions roles and schemas, the service applies +schema-scoped migrations. + +### 2. 
Idempotency record IS the records row + +**Decision.** The `records` table carries `producer`, `idempotency_key`, +`request_fingerprint`, and `idempotency_expires_at` columns and a +`UNIQUE (producer, idempotency_key)` constraint. Acceptance flows insert +the row directly; a duplicate request races on the UNIQUE constraint and +surfaces as `acceptintent.ErrConflict`. There is no separate idempotency +table. + +**Why.** PG_PLAN.md §3 fixed this rule for every PG-backed service. With +the reservation living on the durable record, recovery is a single fact — +the row either exists or it does not — so no Redis-loss window can make a +duplicate sneak through. The `records.accepted_at` value doubles as the +`IdempotencyRecord.CreatedAt` returned to the service layer. + +### 3. `recipient_user_ids` as JSONB + +**Decision.** `records.recipient_user_ids` stores the normalized recipient +user-id list as a JSONB column. The codec round-trips a nil slice as `[]` +to keep the column NOT NULL while letting the read path return a nil slice +when the audience is not user-targeted. + +**Why.** The list is opaque to queries (we never element-filter on it). +JSONB lines up with the "everything outside primary fields is JSON" +pattern Mail Stage 4 already established; PostgreSQL will accept a future +GIN index on `recipient_user_ids jsonb_path_ops` if a recipient-filtered +operator UI ever lands. `text[]` would have forced a `pgtype.Array[string]` +boundary type and a different scan path with no functional benefit today. + +### 4. Timestamps are uniformly `timestamptz` and always UTC at the boundary + +**Decision.** Every time-valued column on every Stage 5 table uses +PostgreSQL's `timestamptz`. 
The domain model continues to use `time.Time`; +the adapter normalises every `time.Time` parameter to UTC at the binding +site (`record.X.UTC()` or the `nullableTime` helper that wraps a possibly +zero-valued `time.Time`), and re-wraps every scanned `time.Time` with +`.UTC()` (directly or via `timeFromNullable` for nullable columns) before +it leaves the adapter. The architecture-wide form of this rule lives in +`ARCHITECTURE.md §Persistence Backends → Timestamp handling`. + +**Why.** PG_PLAN.md §5 originally specified `_ms` epoch-millisecond +columns. User Service Stage 3 and Mail Service Stage 4 already use +`timestamptz` for every table and the runtime contract tests expect +Go-level `time.Time` semantics throughout. Keeping the same shape across +services reduces adapter-layer complexity and avoids two parallel encoding +paths in the notificationstore. The deviation from the literal plan is +intentional and is documented here. The defensive `.UTC()` rule on both +sides eliminates the class of bug where the pgx driver returns scanned +values in `time.Local`, which silently breaks equality tests, JSON +formatting, and comparison against pointer fields. + +### 5. Scheduler claim is non-locking; transitions use optimistic concurrency on `updated_at` + +**Decision.** `ListDueRoutes(ctx, now, limit)` is a non-locking +`SELECT notification_id, route_id FROM routes WHERE next_attempt_at IS +NOT NULL AND next_attempt_at <= $1 ORDER BY next_attempt_at ASC LIMIT $2`. +The publisher then takes a Redis lease (`route_leases:*`), reads the +route, emits the outbound stream entry, and calls one of +`CompleteRoutePublished` / `CompleteRouteFailed` / +`CompleteRouteDeadLetter`. Each `Complete*` transaction issues +`UPDATE routes SET ... 
WHERE notification_id = $a AND route_id = $b AND +updated_at = $expectedUpdatedAt`; a zero `RowsAffected` count surfaces as +`routestate.ErrConflict`, which the publisher treats as a no-op (some other +replica progressed the row since the worker loaded it). + +**Why.** A `FOR UPDATE` held across the publisher's whole publish window +would serialise concurrent publishers and block the outbound stream emit. +Per-row optimistic concurrency on `updated_at` keeps the lock duration +inside the SQL transaction itself; the lease bounds duplicates atop that. +The explicit `next_attempt_at` column (set to `NULL` for terminal states) +keeps the partial index `routes_due_idx` narrow and avoids the "schedule +out of sync with row" failure mode of the previous Redis ZSET + +JSON-payload pair. + +### 6. Outbound XADD precedes SQL completion (at-least-once across the dual-system boundary) + +**Decision.** The publisher emits the outbound stream entry through +`*redis.Client.XAdd` *before* the route's SQL state transition is +committed. If the XADD succeeds and the SQL update later fails, the next +replica retries — same notification gets a second outbound entry; the +consumer side (Gateway, Mail) deduplicates on the entry id. If the XADD +fails, `recordFailure` records a publication failure with classification +`gateway_stream_publish_failed` or `mail_stream_publish_failed` and +schedules a retry. + +**Why.** PG_PLAN.md §5 explicitly endorses this ordering by saying the +lease is "atop the SQL claim" rather than replacing it. The lease bounds +duplicate emission to one replica per route per lease window; the +consumer-side dedupe handles the rare cross-window case. A transactional +outbox would solve the duplicate but is out of Stage 5 scope; revisit if +duplicate-traffic ever becomes an operational concern. + +### 7. 
Lease stays on Redis as a hint + +**Decision.** The lease key `notification:route_leases:<notificationID>:<routeID>` +keeps its existing SETNX/Lua-release semantics, lifted into a dedicated +`redisstate.LeaseStore`. The composite +`internal/adapters/postgres/routepublisher.Store` wires the SQL state +store and the Redis lease store behind the existing publisher-worker +interfaces (`PushRouteStateStore`, `EmailRouteStateStore`). + +**Why.** PG_PLAN.md §5 retains the lease as a "short-lived, per-process +exclusivity hint atop the SQL claim". Without the lease, two replicas +selecting overlapping due batches would each XADD before either commits +the SQL transition — duplicating outbound traffic during contention. The +lease bounds emission rate to one-per-route-per-lease-TTL even when scans +overlap. Keeping the abstraction inside `LeaseStore` (separate from the +SQL store) keeps the architectural split visible. + +### 8. Periodic SQL retention replaces Redis EXPIRE + +**Decision.** A new `worker.SQLRetentionWorker` runs the two DELETE +statements driven by config: + +- `DELETE FROM records WHERE accepted_at < now() - $record_retention` + cascades to `routes` and `dead_letters` via `ON DELETE CASCADE`. +- `DELETE FROM malformed_intents WHERE recorded_at < now() - + $malformed_intent_retention` is a standalone retention pass. + +Three new env vars (`NOTIFICATION_RECORD_RETENTION`, +`NOTIFICATION_MALFORMED_INTENT_RETENTION`, +`NOTIFICATION_CLEANUP_INTERVAL`) drive the worker. +`NOTIFICATION_IDEMPOTENCY_TTL` survives unchanged: the service layer +materialises it on each row as `idempotency_expires_at`. + +**Why.** PostgreSQL maintains its own indexes; the previous per-key Redis +EXPIRE TTL semantics translate to a periodic batch DELETE. The two-knob +shape mirrors Mail Stage 4 (`MAIL_DELIVERY_RETENTION` + +`MAIL_MALFORMED_COMMAND_RETENTION`). 
The legacy +`NOTIFICATION_RECORD_TTL` / `NOTIFICATION_DEAD_LETTER_TTL` env vars are +intentionally retired without a backward-compat shim — keeping the names +would mislead operators reading the runbook because the eviction +mechanism genuinely changed. + +### 9. Shared Redis client with consumer-driven shutdown + +**Decision.** `internal/app/runtime.go` constructs one +`redisconn.NewMasterClient(cfg.Redis.Conn)` (via the thin +`redisadapter.NewClient` wrapper) and passes it to the intent consumer, +the lease store, the stream offset store, and both publishers (for their +outbound XADDs). The runtime cleanup tolerates `redis.ErrClosed` so a +double-close from any consumer is benign. + +**Why.** Each subsequent PG_PLAN stage (Lobby) ships a similar pattern; +sharing one client is the shape we want all stages to converge on. A +dedicated client per consumer is the artefact the Redis-only architecture +needed; it multiplies TCP connections, ping points, +and OpenTelemetry instrumentation hooks for no functional benefit. + +### 10. Query layer is `go-jet/jet/v2` + +**Decision.** All `notificationstore` packages build SQL through the +jet builder API (`pgtable.<Table>.INSERT/SELECT/UPDATE/DELETE` plus +the `pg.AND/OR/SET/MIN/COUNT/...` DSL). `cmd/jetgen` (invoked via +`make jet`) brings up a transient PostgreSQL container, applies the +embedded migrations, and runs +`github.com/go-jet/jet/v2/generator/postgres.GenerateDB` against the +provisioned schema; the generated table/model code lives under +`internal/adapters/postgres/jet/notification/{model,table}/*.go` and +is committed to the repo, so build consumers do not need Docker. +Statements are run through the `database/sql` API +(`stmt.Sql() → db/tx.Exec/Query/QueryRow`); manual `rowScanner` +helpers preserve the codecs.go boundary translations and domain-type +mapping. + +**Why.** Aligns with `PG_PLAN.md` §Library stack ("Query layer: +`github.com/go-jet/jet/v2` (PostgreSQL dialect). Generated code lives +under each service `internal/adapters/postgres/jet/`, regenerated via +a `make jet` target and committed to the repo"). Constructs the jet +builder does not cover natively (`MIN(timestamptz)` aggregates, +optimistic-concurrency `WHERE updated_at = $expected`, JSONB params) +are expressed through the per-DSL helpers (`pg.MIN(...)`, +`pg.TimestampzT(...)`, direct `[]byte`/string params for JSONB +columns). + +## Cross-References + +- `PG_PLAN.md §5` (Stage 5 — Notification Service migration). +- `ARCHITECTURE.md §Persistence Backends`. +- `internal/adapters/postgres/migrations/00001_init.sql` and + `internal/adapters/postgres/migrations/migrations.go`. +- `internal/adapters/postgres/notificationstore/{store,records,routes, + acceptance,scheduler,dead_letters,malformed_intents,retention,codecs, + helpers}.go` plus the testcontainers-backed unit suite under + `notificationstore/{harness,store}_test.go`. +- `internal/adapters/postgres/jet/notification/{model,table}/*.go` + (committed generated code) plus `cmd/jetgen/main.go` and the + `make jet` Makefile target that regenerate it. 
+- `internal/adapters/postgres/routepublisher/store.go` (composite + PG state + Redis lease behind the publisher contracts). +- `internal/service/routestate/types.go` (storage-agnostic value types). +- `internal/config/{config,env}.go` (`PostgresConfig` plus the + `redisconn.Config`-shaped `RedisConfig` envelope). +- `internal/app/runtime.go` (shared Redis client + PG pool open + migration + + notificationstore wiring + retention worker startup). +- `internal/worker/sqlretention.go` (periodic SQL retention worker). +- `internal/adapters/redisstate/{keyspace,codecs,errors,lease_store, + stream_offset_store}.go` (surviving slim Redis surface). +- `integration/internal/harness/notificationservice.go` + (per-suite Postgres container + `notification`/`notificationservice` + provisioning). diff --git a/notification/docs/runbook.md b/notification/docs/runbook.md index 2b91355..4d92c01 100644 --- a/notification/docs/runbook.md +++ b/notification/docs/runbook.md @@ -7,10 +7,16 @@ This runbook covers startup, steady-state verification, shutdown, and common Before starting the process, confirm: -- `NOTIFICATION_REDIS_ADDR` points to the Redis deployment that stores - notification records, routes, idempotency reservations, malformed intents, - dead letters, stream offsets, and route schedules -- Redis ACL, DB, TLS, and timeout settings match the target environment +- `NOTIFICATION_REDIS_MASTER_ADDR` points to the Redis master deployment + that hosts the inbound `notification:intents` stream, the persisted + consumer offset, the outbound `gateway:client-events` and + `mail:delivery_commands` streams, and the temporary `route_leases:*` keys +- `NOTIFICATION_REDIS_PASSWORD` matches the connection password + (mandatory; the deprecated `NOTIFICATION_REDIS_USERNAME` / + `NOTIFICATION_REDIS_TLS_ENABLED` env vars are rejected at startup) +- `NOTIFICATION_POSTGRES_PRIMARY_DSN` points to the PostgreSQL primary + hosting the `notification` schema; the role must own + `records`, `routes`, 
`dead_letters`, and `malformed_intents` - `NOTIFICATION_USER_SERVICE_BASE_URL` points to the trusted internal `User Service` - `NOTIFICATION_GATEWAY_CLIENT_EVENTS_STREAM` matches the stream consumed by @@ -19,11 +25,18 @@ Before starting the process, confirm: `Mail Service` - administrator email variables are populated for notification types that should notify administrators +- retention knobs (`NOTIFICATION_RECORD_RETENTION`, + `NOTIFICATION_MALFORMED_INTENT_RETENTION`, + `NOTIFICATION_CLEANUP_INTERVAL`) are sized for the expected operator + history window - OpenTelemetry exporter settings point at the intended collector when traces or metrics are expected outside the process -At startup the process performs a bounded Redis `PING`. Startup fails fast if -configuration validation or Redis connectivity fails. +At startup the process performs a bounded Redis `PING`, opens the +PostgreSQL pool, runs the embedded goose migrations, and only then starts +the internal HTTP probe. Startup fails fast if configuration validation, +Redis connectivity, PostgreSQL connectivity, or migration application +fails. Known startup caveats: diff --git a/notification/docs/runtime.md b/notification/docs/runtime.md index 489c949..ca831cb 100644 --- a/notification/docs/runtime.md +++ b/notification/docs/runtime.md @@ -129,7 +129,9 @@ Startup fails fast on invalid configuration or unavailable Redis. 
Required: -- `NOTIFICATION_REDIS_ADDR` +- `NOTIFICATION_REDIS_MASTER_ADDR` +- `NOTIFICATION_REDIS_PASSWORD` +- `NOTIFICATION_POSTGRES_PRIMARY_DSN` - `NOTIFICATION_USER_SERVICE_BASE_URL` Core process config: @@ -144,12 +146,12 @@ Internal HTTP config: - `NOTIFICATION_INTERNAL_HTTP_READ_TIMEOUT` with default `10s` - `NOTIFICATION_INTERNAL_HTTP_IDLE_TIMEOUT` with default `1m` -Redis connectivity: +Redis connectivity (master/replica/password shape; the deprecated +`NOTIFICATION_REDIS_ADDR`, `NOTIFICATION_REDIS_USERNAME`, and +`NOTIFICATION_REDIS_TLS_ENABLED` env vars are rejected at startup): -- `NOTIFICATION_REDIS_USERNAME` -- `NOTIFICATION_REDIS_PASSWORD` +- `NOTIFICATION_REDIS_REPLICA_ADDRS` (optional, comma-separated) - `NOTIFICATION_REDIS_DB` -- `NOTIFICATION_REDIS_TLS_ENABLED` - `NOTIFICATION_REDIS_OPERATION_TIMEOUT` - `NOTIFICATION_INTENTS_STREAM` - `NOTIFICATION_INTENTS_READ_BLOCK_TIMEOUT` @@ -157,6 +159,14 @@ Redis connectivity: - `NOTIFICATION_GATEWAY_CLIENT_EVENTS_STREAM_MAX_LEN` - `NOTIFICATION_MAIL_DELIVERY_COMMANDS_STREAM` +PostgreSQL connectivity: + +- `NOTIFICATION_POSTGRES_REPLICA_DSNS` (optional, comma-separated) +- `NOTIFICATION_POSTGRES_OPERATION_TIMEOUT` +- `NOTIFICATION_POSTGRES_MAX_OPEN_CONNS` +- `NOTIFICATION_POSTGRES_MAX_IDLE_CONNS` +- `NOTIFICATION_POSTGRES_CONN_MAX_LIFETIME` + Retry and retention: - `NOTIFICATION_PUSH_RETRY_MAX_ATTEMPTS` @@ -164,9 +174,12 @@ Retry and retention: - `NOTIFICATION_ROUTE_BACKOFF_MIN` - `NOTIFICATION_ROUTE_BACKOFF_MAX` - `NOTIFICATION_ROUTE_LEASE_TTL` -- `NOTIFICATION_DEAD_LETTER_TTL` -- `NOTIFICATION_RECORD_TTL` - `NOTIFICATION_IDEMPOTENCY_TTL` +- `NOTIFICATION_RECORD_RETENTION` (replaces the legacy + `NOTIFICATION_RECORD_TTL`; cascades to `routes` and `dead_letters`) +- `NOTIFICATION_MALFORMED_INTENT_RETENTION` (replaces the legacy + `NOTIFICATION_DEAD_LETTER_TTL`) +- `NOTIFICATION_CLEANUP_INTERVAL` (period of the SQL retention worker) User enrichment: diff --git a/notification/go.mod b/notification/go.mod index 
b5201be..a7a8acf 100644 --- a/notification/go.mod +++ b/notification/go.mod @@ -4,12 +4,17 @@ go 1.26.1 require ( galaxy/notificationintent v0.0.0 + galaxy/postgres v0.0.0-00010101000000-000000000000 + galaxy/redisconn v0.0.0-00010101000000-000000000000 galaxy/transcoder v0.0.0 github.com/alicebob/miniredis/v2 v2.37.0 + github.com/go-jet/jet/v2 v2.14.1 + github.com/jackc/pgx/v5 v5.9.2 github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 github.com/redis/go-redis/v9 v9.18.0 github.com/stretchr/testify v1.11.1 github.com/testcontainers/testcontainers-go v0.42.0 + github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0 github.com/testcontainers/testcontainers-go/modules/redis v0.42.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 go.opentelemetry.io/otel v1.43.0 @@ -88,3 +93,7 @@ require ( google.golang.org/grpc v1.80.0 // indirect google.golang.org/protobuf v1.36.11 // indirect ) + +replace galaxy/postgres => ../pkg/postgres + +replace galaxy/redisconn => ../pkg/redisconn diff --git a/notification/internal/adapters/postgres/jet/notification/model/dead_letters.go b/notification/internal/adapters/postgres/jet/notification/model/dead_letters.go new file mode 100644 index 0000000..ecacce8 --- /dev/null +++ b/notification/internal/adapters/postgres/jet/notification/model/dead_letters.go @@ -0,0 +1,25 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type DeadLetters struct { + NotificationID string `sql:"primary_key"` + RouteID string `sql:"primary_key"` + Channel string + RecipientRef string + FinalAttemptCount int32 + MaxAttempts int32 + FailureClassification string + FailureMessage string + RecoveryHint string + CreatedAt time.Time +} diff --git a/notification/internal/adapters/postgres/jet/notification/model/goose_db_version.go b/notification/internal/adapters/postgres/jet/notification/model/goose_db_version.go new file mode 100644 index 0000000..c7f68e8 --- /dev/null +++ b/notification/internal/adapters/postgres/jet/notification/model/goose_db_version.go @@ -0,0 +1,19 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type GooseDbVersion struct { + ID int32 `sql:"primary_key"` + VersionID int64 + IsApplied bool + Tstamp time.Time +} diff --git a/notification/internal/adapters/postgres/jet/notification/model/malformed_intents.go b/notification/internal/adapters/postgres/jet/notification/model/malformed_intents.go new file mode 100644 index 0000000..3ae6bc6 --- /dev/null +++ b/notification/internal/adapters/postgres/jet/notification/model/malformed_intents.go @@ -0,0 +1,23 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type MalformedIntents struct { + StreamEntryID string `sql:"primary_key"` + NotificationType string + Producer string + IdempotencyKey string + FailureCode string + FailureMessage string + RawFields string + RecordedAt time.Time +} diff --git a/notification/internal/adapters/postgres/jet/notification/model/records.go b/notification/internal/adapters/postgres/jet/notification/model/records.go new file mode 100644 index 0000000..64bf214 --- /dev/null +++ b/notification/internal/adapters/postgres/jet/notification/model/records.go @@ -0,0 +1,29 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type Records struct { + NotificationID string `sql:"primary_key"` + NotificationType string + Producer string + AudienceKind string + RecipientUserIds string + PayloadJSON string + IdempotencyKey string + RequestFingerprint string + RequestID string + TraceID string + OccurredAt time.Time + AcceptedAt time.Time + UpdatedAt time.Time + IdempotencyExpiresAt time.Time +} diff --git a/notification/internal/adapters/postgres/jet/notification/model/routes.go b/notification/internal/adapters/postgres/jet/notification/model/routes.go new file mode 100644 index 0000000..c747acc --- /dev/null +++ b/notification/internal/adapters/postgres/jet/notification/model/routes.go @@ -0,0 +1,33 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type Routes struct { + NotificationID string `sql:"primary_key"` + RouteID string `sql:"primary_key"` + Channel string + RecipientRef string + Status string + AttemptCount int32 + MaxAttempts int32 + NextAttemptAt *time.Time + ResolvedEmail string + ResolvedLocale string + LastErrorClassification string + LastErrorMessage string + LastErrorAt *time.Time + CreatedAt time.Time + UpdatedAt time.Time + PublishedAt *time.Time + DeadLetteredAt *time.Time + SkippedAt *time.Time +} diff --git a/notification/internal/adapters/postgres/jet/notification/table/dead_letters.go b/notification/internal/adapters/postgres/jet/notification/table/dead_letters.go new file mode 100644 index 0000000..ad06dc0 --- /dev/null +++ b/notification/internal/adapters/postgres/jet/notification/table/dead_letters.go @@ -0,0 +1,105 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var DeadLetters = newDeadLettersTable("notification", "dead_letters", "") + +type deadLettersTable struct { + postgres.Table + + // Columns + NotificationID postgres.ColumnString + RouteID postgres.ColumnString + Channel postgres.ColumnString + RecipientRef postgres.ColumnString + FinalAttemptCount postgres.ColumnInteger + MaxAttempts postgres.ColumnInteger + FailureClassification postgres.ColumnString + FailureMessage postgres.ColumnString + RecoveryHint postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type DeadLettersTable struct { + deadLettersTable + + EXCLUDED deadLettersTable +} + +// AS creates new DeadLettersTable with assigned alias +func (a DeadLettersTable) AS(alias string) *DeadLettersTable { + return newDeadLettersTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new DeadLettersTable with assigned schema name +func (a DeadLettersTable) FromSchema(schemaName string) *DeadLettersTable { + return newDeadLettersTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new DeadLettersTable with assigned table prefix +func (a DeadLettersTable) WithPrefix(prefix string) *DeadLettersTable { + return newDeadLettersTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new DeadLettersTable with assigned table suffix +func (a DeadLettersTable) WithSuffix(suffix string) *DeadLettersTable { + return newDeadLettersTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newDeadLettersTable(schemaName, tableName, alias string) *DeadLettersTable { + return &DeadLettersTable{ + deadLettersTable: newDeadLettersTableImpl(schemaName, tableName, alias), + EXCLUDED: 
newDeadLettersTableImpl("", "excluded", ""), + } +} + +func newDeadLettersTableImpl(schemaName, tableName, alias string) deadLettersTable { + var ( + NotificationIDColumn = postgres.StringColumn("notification_id") + RouteIDColumn = postgres.StringColumn("route_id") + ChannelColumn = postgres.StringColumn("channel") + RecipientRefColumn = postgres.StringColumn("recipient_ref") + FinalAttemptCountColumn = postgres.IntegerColumn("final_attempt_count") + MaxAttemptsColumn = postgres.IntegerColumn("max_attempts") + FailureClassificationColumn = postgres.StringColumn("failure_classification") + FailureMessageColumn = postgres.StringColumn("failure_message") + RecoveryHintColumn = postgres.StringColumn("recovery_hint") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + allColumns = postgres.ColumnList{NotificationIDColumn, RouteIDColumn, ChannelColumn, RecipientRefColumn, FinalAttemptCountColumn, MaxAttemptsColumn, FailureClassificationColumn, FailureMessageColumn, RecoveryHintColumn, CreatedAtColumn} + mutableColumns = postgres.ColumnList{ChannelColumn, RecipientRefColumn, FinalAttemptCountColumn, MaxAttemptsColumn, FailureClassificationColumn, FailureMessageColumn, RecoveryHintColumn, CreatedAtColumn} + defaultColumns = postgres.ColumnList{RecoveryHintColumn} + ) + + return deadLettersTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + NotificationID: NotificationIDColumn, + RouteID: RouteIDColumn, + Channel: ChannelColumn, + RecipientRef: RecipientRefColumn, + FinalAttemptCount: FinalAttemptCountColumn, + MaxAttempts: MaxAttemptsColumn, + FailureClassification: FailureClassificationColumn, + FailureMessage: FailureMessageColumn, + RecoveryHint: RecoveryHintColumn, + CreatedAt: CreatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/notification/internal/adapters/postgres/jet/notification/table/goose_db_version.go 
b/notification/internal/adapters/postgres/jet/notification/table/goose_db_version.go new file mode 100644 index 0000000..bf3af24 --- /dev/null +++ b/notification/internal/adapters/postgres/jet/notification/table/goose_db_version.go @@ -0,0 +1,87 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var GooseDbVersion = newGooseDbVersionTable("notification", "goose_db_version", "") + +type gooseDbVersionTable struct { + postgres.Table + + // Columns + ID postgres.ColumnInteger + VersionID postgres.ColumnInteger + IsApplied postgres.ColumnBool + Tstamp postgres.ColumnTimestamp + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type GooseDbVersionTable struct { + gooseDbVersionTable + + EXCLUDED gooseDbVersionTable +} + +// AS creates new GooseDbVersionTable with assigned alias +func (a GooseDbVersionTable) AS(alias string) *GooseDbVersionTable { + return newGooseDbVersionTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new GooseDbVersionTable with assigned schema name +func (a GooseDbVersionTable) FromSchema(schemaName string) *GooseDbVersionTable { + return newGooseDbVersionTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new GooseDbVersionTable with assigned table prefix +func (a GooseDbVersionTable) WithPrefix(prefix string) *GooseDbVersionTable { + return newGooseDbVersionTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new GooseDbVersionTable with assigned table suffix +func (a GooseDbVersionTable) WithSuffix(suffix string) *GooseDbVersionTable { + return newGooseDbVersionTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newGooseDbVersionTable(schemaName, tableName, alias string) *GooseDbVersionTable { + return 
&GooseDbVersionTable{ + gooseDbVersionTable: newGooseDbVersionTableImpl(schemaName, tableName, alias), + EXCLUDED: newGooseDbVersionTableImpl("", "excluded", ""), + } +} + +func newGooseDbVersionTableImpl(schemaName, tableName, alias string) gooseDbVersionTable { + var ( + IDColumn = postgres.IntegerColumn("id") + VersionIDColumn = postgres.IntegerColumn("version_id") + IsAppliedColumn = postgres.BoolColumn("is_applied") + TstampColumn = postgres.TimestampColumn("tstamp") + allColumns = postgres.ColumnList{IDColumn, VersionIDColumn, IsAppliedColumn, TstampColumn} + mutableColumns = postgres.ColumnList{VersionIDColumn, IsAppliedColumn, TstampColumn} + defaultColumns = postgres.ColumnList{TstampColumn} + ) + + return gooseDbVersionTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ID: IDColumn, + VersionID: VersionIDColumn, + IsApplied: IsAppliedColumn, + Tstamp: TstampColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/notification/internal/adapters/postgres/jet/notification/table/malformed_intents.go b/notification/internal/adapters/postgres/jet/notification/table/malformed_intents.go new file mode 100644 index 0000000..0224883 --- /dev/null +++ b/notification/internal/adapters/postgres/jet/notification/table/malformed_intents.go @@ -0,0 +1,99 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var MalformedIntents = newMalformedIntentsTable("notification", "malformed_intents", "") + +type malformedIntentsTable struct { + postgres.Table + + // Columns + StreamEntryID postgres.ColumnString + NotificationType postgres.ColumnString + Producer postgres.ColumnString + IdempotencyKey postgres.ColumnString + FailureCode postgres.ColumnString + FailureMessage postgres.ColumnString + RawFields postgres.ColumnString + RecordedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type MalformedIntentsTable struct { + malformedIntentsTable + + EXCLUDED malformedIntentsTable +} + +// AS creates new MalformedIntentsTable with assigned alias +func (a MalformedIntentsTable) AS(alias string) *MalformedIntentsTable { + return newMalformedIntentsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new MalformedIntentsTable with assigned schema name +func (a MalformedIntentsTable) FromSchema(schemaName string) *MalformedIntentsTable { + return newMalformedIntentsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new MalformedIntentsTable with assigned table prefix +func (a MalformedIntentsTable) WithPrefix(prefix string) *MalformedIntentsTable { + return newMalformedIntentsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new MalformedIntentsTable with assigned table suffix +func (a MalformedIntentsTable) WithSuffix(suffix string) *MalformedIntentsTable { + return newMalformedIntentsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newMalformedIntentsTable(schemaName, tableName, alias string) *MalformedIntentsTable { + return &MalformedIntentsTable{ + malformedIntentsTable: 
newMalformedIntentsTableImpl(schemaName, tableName, alias), + EXCLUDED: newMalformedIntentsTableImpl("", "excluded", ""), + } +} + +func newMalformedIntentsTableImpl(schemaName, tableName, alias string) malformedIntentsTable { + var ( + StreamEntryIDColumn = postgres.StringColumn("stream_entry_id") + NotificationTypeColumn = postgres.StringColumn("notification_type") + ProducerColumn = postgres.StringColumn("producer") + IdempotencyKeyColumn = postgres.StringColumn("idempotency_key") + FailureCodeColumn = postgres.StringColumn("failure_code") + FailureMessageColumn = postgres.StringColumn("failure_message") + RawFieldsColumn = postgres.StringColumn("raw_fields") + RecordedAtColumn = postgres.TimestampzColumn("recorded_at") + allColumns = postgres.ColumnList{StreamEntryIDColumn, NotificationTypeColumn, ProducerColumn, IdempotencyKeyColumn, FailureCodeColumn, FailureMessageColumn, RawFieldsColumn, RecordedAtColumn} + mutableColumns = postgres.ColumnList{NotificationTypeColumn, ProducerColumn, IdempotencyKeyColumn, FailureCodeColumn, FailureMessageColumn, RawFieldsColumn, RecordedAtColumn} + defaultColumns = postgres.ColumnList{NotificationTypeColumn, ProducerColumn, IdempotencyKeyColumn} + ) + + return malformedIntentsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + StreamEntryID: StreamEntryIDColumn, + NotificationType: NotificationTypeColumn, + Producer: ProducerColumn, + IdempotencyKey: IdempotencyKeyColumn, + FailureCode: FailureCodeColumn, + FailureMessage: FailureMessageColumn, + RawFields: RawFieldsColumn, + RecordedAt: RecordedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/notification/internal/adapters/postgres/jet/notification/table/records.go b/notification/internal/adapters/postgres/jet/notification/table/records.go new file mode 100644 index 0000000..9f4ecf3 --- /dev/null +++ 
b/notification/internal/adapters/postgres/jet/notification/table/records.go @@ -0,0 +1,117 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Records = newRecordsTable("notification", "records", "") + +type recordsTable struct { + postgres.Table + + // Columns + NotificationID postgres.ColumnString + NotificationType postgres.ColumnString + Producer postgres.ColumnString + AudienceKind postgres.ColumnString + RecipientUserIds postgres.ColumnString + PayloadJSON postgres.ColumnString + IdempotencyKey postgres.ColumnString + RequestFingerprint postgres.ColumnString + RequestID postgres.ColumnString + TraceID postgres.ColumnString + OccurredAt postgres.ColumnTimestampz + AcceptedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + IdempotencyExpiresAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type RecordsTable struct { + recordsTable + + EXCLUDED recordsTable +} + +// AS creates new RecordsTable with assigned alias +func (a RecordsTable) AS(alias string) *RecordsTable { + return newRecordsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new RecordsTable with assigned schema name +func (a RecordsTable) FromSchema(schemaName string) *RecordsTable { + return newRecordsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new RecordsTable with assigned table prefix +func (a RecordsTable) WithPrefix(prefix string) *RecordsTable { + return newRecordsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new RecordsTable with assigned table suffix +func (a RecordsTable) WithSuffix(suffix string) *RecordsTable { + return newRecordsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func 
newRecordsTable(schemaName, tableName, alias string) *RecordsTable { + return &RecordsTable{ + recordsTable: newRecordsTableImpl(schemaName, tableName, alias), + EXCLUDED: newRecordsTableImpl("", "excluded", ""), + } +} + +func newRecordsTableImpl(schemaName, tableName, alias string) recordsTable { + var ( + NotificationIDColumn = postgres.StringColumn("notification_id") + NotificationTypeColumn = postgres.StringColumn("notification_type") + ProducerColumn = postgres.StringColumn("producer") + AudienceKindColumn = postgres.StringColumn("audience_kind") + RecipientUserIdsColumn = postgres.StringColumn("recipient_user_ids") + PayloadJSONColumn = postgres.StringColumn("payload_json") + IdempotencyKeyColumn = postgres.StringColumn("idempotency_key") + RequestFingerprintColumn = postgres.StringColumn("request_fingerprint") + RequestIDColumn = postgres.StringColumn("request_id") + TraceIDColumn = postgres.StringColumn("trace_id") + OccurredAtColumn = postgres.TimestampzColumn("occurred_at") + AcceptedAtColumn = postgres.TimestampzColumn("accepted_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + IdempotencyExpiresAtColumn = postgres.TimestampzColumn("idempotency_expires_at") + allColumns = postgres.ColumnList{NotificationIDColumn, NotificationTypeColumn, ProducerColumn, AudienceKindColumn, RecipientUserIdsColumn, PayloadJSONColumn, IdempotencyKeyColumn, RequestFingerprintColumn, RequestIDColumn, TraceIDColumn, OccurredAtColumn, AcceptedAtColumn, UpdatedAtColumn, IdempotencyExpiresAtColumn} + mutableColumns = postgres.ColumnList{NotificationTypeColumn, ProducerColumn, AudienceKindColumn, RecipientUserIdsColumn, PayloadJSONColumn, IdempotencyKeyColumn, RequestFingerprintColumn, RequestIDColumn, TraceIDColumn, OccurredAtColumn, AcceptedAtColumn, UpdatedAtColumn, IdempotencyExpiresAtColumn} + defaultColumns = postgres.ColumnList{RecipientUserIdsColumn, RequestIDColumn, TraceIDColumn} + ) + + return recordsTable{ + Table: postgres.NewTable(schemaName, 
tableName, alias, allColumns...), + + //Columns + NotificationID: NotificationIDColumn, + NotificationType: NotificationTypeColumn, + Producer: ProducerColumn, + AudienceKind: AudienceKindColumn, + RecipientUserIds: RecipientUserIdsColumn, + PayloadJSON: PayloadJSONColumn, + IdempotencyKey: IdempotencyKeyColumn, + RequestFingerprint: RequestFingerprintColumn, + RequestID: RequestIDColumn, + TraceID: TraceIDColumn, + OccurredAt: OccurredAtColumn, + AcceptedAt: AcceptedAtColumn, + UpdatedAt: UpdatedAtColumn, + IdempotencyExpiresAt: IdempotencyExpiresAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/notification/internal/adapters/postgres/jet/notification/table/routes.go b/notification/internal/adapters/postgres/jet/notification/table/routes.go new file mode 100644 index 0000000..1030826 --- /dev/null +++ b/notification/internal/adapters/postgres/jet/notification/table/routes.go @@ -0,0 +1,129 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Routes = newRoutesTable("notification", "routes", "") + +type routesTable struct { + postgres.Table + + // Columns + NotificationID postgres.ColumnString + RouteID postgres.ColumnString + Channel postgres.ColumnString + RecipientRef postgres.ColumnString + Status postgres.ColumnString + AttemptCount postgres.ColumnInteger + MaxAttempts postgres.ColumnInteger + NextAttemptAt postgres.ColumnTimestampz + ResolvedEmail postgres.ColumnString + ResolvedLocale postgres.ColumnString + LastErrorClassification postgres.ColumnString + LastErrorMessage postgres.ColumnString + LastErrorAt postgres.ColumnTimestampz + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + PublishedAt postgres.ColumnTimestampz + DeadLetteredAt postgres.ColumnTimestampz + SkippedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type RoutesTable struct { + routesTable + + EXCLUDED routesTable +} + +// AS creates new RoutesTable with assigned alias +func (a RoutesTable) AS(alias string) *RoutesTable { + return newRoutesTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new RoutesTable with assigned schema name +func (a RoutesTable) FromSchema(schemaName string) *RoutesTable { + return newRoutesTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new RoutesTable with assigned table prefix +func (a RoutesTable) WithPrefix(prefix string) *RoutesTable { + return newRoutesTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new RoutesTable with assigned table suffix +func (a RoutesTable) WithSuffix(suffix string) *RoutesTable { + return newRoutesTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func 
newRoutesTable(schemaName, tableName, alias string) *RoutesTable { + return &RoutesTable{ + routesTable: newRoutesTableImpl(schemaName, tableName, alias), + EXCLUDED: newRoutesTableImpl("", "excluded", ""), + } +} + +func newRoutesTableImpl(schemaName, tableName, alias string) routesTable { + var ( + NotificationIDColumn = postgres.StringColumn("notification_id") + RouteIDColumn = postgres.StringColumn("route_id") + ChannelColumn = postgres.StringColumn("channel") + RecipientRefColumn = postgres.StringColumn("recipient_ref") + StatusColumn = postgres.StringColumn("status") + AttemptCountColumn = postgres.IntegerColumn("attempt_count") + MaxAttemptsColumn = postgres.IntegerColumn("max_attempts") + NextAttemptAtColumn = postgres.TimestampzColumn("next_attempt_at") + ResolvedEmailColumn = postgres.StringColumn("resolved_email") + ResolvedLocaleColumn = postgres.StringColumn("resolved_locale") + LastErrorClassificationColumn = postgres.StringColumn("last_error_classification") + LastErrorMessageColumn = postgres.StringColumn("last_error_message") + LastErrorAtColumn = postgres.TimestampzColumn("last_error_at") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + PublishedAtColumn = postgres.TimestampzColumn("published_at") + DeadLetteredAtColumn = postgres.TimestampzColumn("dead_lettered_at") + SkippedAtColumn = postgres.TimestampzColumn("skipped_at") + allColumns = postgres.ColumnList{NotificationIDColumn, RouteIDColumn, ChannelColumn, RecipientRefColumn, StatusColumn, AttemptCountColumn, MaxAttemptsColumn, NextAttemptAtColumn, ResolvedEmailColumn, ResolvedLocaleColumn, LastErrorClassificationColumn, LastErrorMessageColumn, LastErrorAtColumn, CreatedAtColumn, UpdatedAtColumn, PublishedAtColumn, DeadLetteredAtColumn, SkippedAtColumn} + mutableColumns = postgres.ColumnList{ChannelColumn, RecipientRefColumn, StatusColumn, AttemptCountColumn, MaxAttemptsColumn, NextAttemptAtColumn, ResolvedEmailColumn, 
ResolvedLocaleColumn, LastErrorClassificationColumn, LastErrorMessageColumn, LastErrorAtColumn, CreatedAtColumn, UpdatedAtColumn, PublishedAtColumn, DeadLetteredAtColumn, SkippedAtColumn} + defaultColumns = postgres.ColumnList{AttemptCountColumn, ResolvedEmailColumn, ResolvedLocaleColumn, LastErrorClassificationColumn, LastErrorMessageColumn} + ) + + return routesTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + NotificationID: NotificationIDColumn, + RouteID: RouteIDColumn, + Channel: ChannelColumn, + RecipientRef: RecipientRefColumn, + Status: StatusColumn, + AttemptCount: AttemptCountColumn, + MaxAttempts: MaxAttemptsColumn, + NextAttemptAt: NextAttemptAtColumn, + ResolvedEmail: ResolvedEmailColumn, + ResolvedLocale: ResolvedLocaleColumn, + LastErrorClassification: LastErrorClassificationColumn, + LastErrorMessage: LastErrorMessageColumn, + LastErrorAt: LastErrorAtColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + PublishedAt: PublishedAtColumn, + DeadLetteredAt: DeadLetteredAtColumn, + SkippedAt: SkippedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/notification/internal/adapters/postgres/jet/notification/table/table_use_schema.go b/notification/internal/adapters/postgres/jet/notification/table/table_use_schema.go new file mode 100644 index 0000000..95f330e --- /dev/null +++ b/notification/internal/adapters/postgres/jet/notification/table/table_use_schema.go @@ -0,0 +1,18 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +// UseSchema sets a new schema name for all generated table SQL builder types. It is recommended to invoke +// this method only once at the beginning of the program. 
+func UseSchema(schema string) { + DeadLetters = DeadLetters.FromSchema(schema) + GooseDbVersion = GooseDbVersion.FromSchema(schema) + MalformedIntents = MalformedIntents.FromSchema(schema) + Records = Records.FromSchema(schema) + Routes = Routes.FromSchema(schema) +} diff --git a/notification/internal/adapters/postgres/migrations/00001_init.sql b/notification/internal/adapters/postgres/migrations/00001_init.sql new file mode 100644 index 0000000..dc0ee55 --- /dev/null +++ b/notification/internal/adapters/postgres/migrations/00001_init.sql @@ -0,0 +1,105 @@ +-- +goose Up +-- records holds one durable notification record per accepted intent. The +-- (producer, idempotency_key) UNIQUE constraint replaces the previous Redis +-- idempotency keyspace: the durable row IS the idempotency reservation. +CREATE TABLE records ( + notification_id text PRIMARY KEY, + notification_type text NOT NULL, + producer text NOT NULL, + audience_kind text NOT NULL, + recipient_user_ids jsonb NOT NULL DEFAULT '[]'::jsonb, + payload_json text NOT NULL, + idempotency_key text NOT NULL, + request_fingerprint text NOT NULL, + request_id text NOT NULL DEFAULT '', + trace_id text NOT NULL DEFAULT '', + occurred_at timestamptz NOT NULL, + accepted_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, + idempotency_expires_at timestamptz NOT NULL, + CONSTRAINT records_idempotency_unique UNIQUE (producer, idempotency_key) +); + +-- Newest-first listing index used by operator/audit reads. +CREATE INDEX records_listing_idx + ON records (accepted_at DESC, notification_id DESC); + +-- routes stores one row per (notification_id, route_id). next_attempt_at is +-- non-NULL only while the row is a scheduling candidate (status pending or +-- failed); the partial index keeps the scheduler scan tight. 
+CREATE TABLE routes ( + notification_id text NOT NULL + REFERENCES records(notification_id) ON DELETE CASCADE, + route_id text NOT NULL, + channel text NOT NULL, + recipient_ref text NOT NULL, + status text NOT NULL, + attempt_count integer NOT NULL DEFAULT 0, + max_attempts integer NOT NULL, + next_attempt_at timestamptz, + resolved_email text NOT NULL DEFAULT '', + resolved_locale text NOT NULL DEFAULT '', + last_error_classification text NOT NULL DEFAULT '', + last_error_message text NOT NULL DEFAULT '', + last_error_at timestamptz, + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, + published_at timestamptz, + dead_lettered_at timestamptz, + skipped_at timestamptz, + PRIMARY KEY (notification_id, route_id) +); + +-- Drives the publishers' due-route pull. Partial predicate keeps the index +-- narrow: terminal rows (published / dead_letter / skipped) never appear. +CREATE INDEX routes_due_idx + ON routes (next_attempt_at) + WHERE next_attempt_at IS NOT NULL; + +-- Coarse status / channel filters used by operator views. +CREATE INDEX routes_status_idx ON routes (status); +CREATE INDEX routes_channel_idx ON routes (channel); + +-- dead_letters carries the operator-visible record for one route that +-- exhausted automated handling. Cascade tied to the parent route row so a +-- record-level retention DELETE clears dependent dead-letter rows naturally. 
+CREATE TABLE dead_letters ( + notification_id text NOT NULL, + route_id text NOT NULL, + channel text NOT NULL, + recipient_ref text NOT NULL, + final_attempt_count integer NOT NULL, + max_attempts integer NOT NULL, + failure_classification text NOT NULL, + failure_message text NOT NULL, + recovery_hint text NOT NULL DEFAULT '', + created_at timestamptz NOT NULL, + PRIMARY KEY (notification_id, route_id), + FOREIGN KEY (notification_id, route_id) + REFERENCES routes(notification_id, route_id) ON DELETE CASCADE +); + +CREATE INDEX dead_letters_listing_idx + ON dead_letters (created_at DESC, notification_id DESC, route_id DESC); + +-- malformed_intents stores operator-visible records for stream entries the +-- intent validator could not accept. Independent retention pass. +CREATE TABLE malformed_intents ( + stream_entry_id text PRIMARY KEY, + notification_type text NOT NULL DEFAULT '', + producer text NOT NULL DEFAULT '', + idempotency_key text NOT NULL DEFAULT '', + failure_code text NOT NULL, + failure_message text NOT NULL, + raw_fields jsonb NOT NULL, + recorded_at timestamptz NOT NULL +); + +CREATE INDEX malformed_intents_listing_idx + ON malformed_intents (recorded_at DESC, stream_entry_id DESC); + +-- +goose Down +DROP TABLE IF EXISTS malformed_intents; +DROP TABLE IF EXISTS dead_letters; +DROP TABLE IF EXISTS routes; +DROP TABLE IF EXISTS records; diff --git a/notification/internal/adapters/postgres/migrations/migrations.go b/notification/internal/adapters/postgres/migrations/migrations.go new file mode 100644 index 0000000..d52860e --- /dev/null +++ b/notification/internal/adapters/postgres/migrations/migrations.go @@ -0,0 +1,19 @@ +// Package migrations exposes the embedded goose migration files used by +// Notification Service to provision its `notification` schema in PostgreSQL. 
+// +// The embedded filesystem is consumed by `pkg/postgres.RunMigrations` during +// notification-service startup and by `cmd/jetgen` when regenerating the +// `internal/adapters/postgres/jet/` code against a transient PostgreSQL +// instance. +package migrations + +import "embed" + +//go:embed *.sql +var fs embed.FS + +// FS returns the embedded filesystem containing every numbered goose +// migration shipped with Notification Service. +func FS() embed.FS { + return fs +} diff --git a/notification/internal/adapters/postgres/notificationstore/acceptance.go b/notification/internal/adapters/postgres/notificationstore/acceptance.go new file mode 100644 index 0000000..8b509c6 --- /dev/null +++ b/notification/internal/adapters/postgres/notificationstore/acceptance.go @@ -0,0 +1,118 @@ +package notificationstore + +import ( + "context" + "database/sql" + "errors" + "fmt" + + "galaxy/notification/internal/api/intentstream" + "galaxy/notification/internal/service/acceptintent" +) + +// Compile-time confirmation that *Store satisfies acceptintent.Store. The +// runtime wiring depends on this so the accept-intent service can consume +// the PostgreSQL adapter directly. +var _ acceptintent.Store = (*Store)(nil) + +// CreateAcceptance writes one notification record together with its derived +// route slots inside one BEGIN … COMMIT transaction. Idempotency races +// surface as `acceptintent.ErrConflict`. 
+func (store *Store) CreateAcceptance(ctx context.Context, input acceptintent.CreateAcceptanceInput) error { + if store == nil { + return errors.New("create notification acceptance: nil store") + } + if ctx == nil { + return errors.New("create notification acceptance: nil context") + } + if err := input.Validate(); err != nil { + return fmt.Errorf("create notification acceptance: %w", err) + } + + return store.withTx(ctx, "create notification acceptance", func(ctx context.Context, tx *sql.Tx) error { + if err := insertRecord(ctx, tx, input.Notification, input.Idempotency.ExpiresAt); err != nil { + if isUniqueViolation(err) { + return acceptintent.ErrConflict + } + return fmt.Errorf("create notification acceptance: insert record: %w", err) + } + for index, route := range input.Routes { + if err := insertRoute(ctx, tx, route); err != nil { + return fmt.Errorf("create notification acceptance: insert route[%d]: %w", index, err) + } + } + return nil + }) +} + +// GetIdempotency loads one accepted idempotency reservation. Because the +// records row IS the idempotency reservation, the lookup keys on +// `(producer, idempotency_key)` and projects the relevant subset of the row +// into an IdempotencyRecord. 
+func (store *Store) GetIdempotency(ctx context.Context, producer intentstream.Producer, idempotencyKey string) (acceptintent.IdempotencyRecord, bool, error) { + if store == nil { + return acceptintent.IdempotencyRecord{}, false, errors.New("get notification idempotency: nil store") + } + if ctx == nil { + return acceptintent.IdempotencyRecord{}, false, errors.New("get notification idempotency: nil context") + } + + operationCtx, cancel, err := store.operationContext(ctx, "get notification idempotency") + if err != nil { + return acceptintent.IdempotencyRecord{}, false, err + } + defer cancel() + + scanned, found, err := loadIdempotencyByKey(operationCtx, store.db, string(producer), idempotencyKey) + if err != nil { + return acceptintent.IdempotencyRecord{}, false, err + } + if !found { + return acceptintent.IdempotencyRecord{}, false, nil + } + return idempotencyRecordFromScanned(scanned), true, nil +} + +// GetNotification loads one accepted notification by NotificationID. +func (store *Store) GetNotification(ctx context.Context, notificationID string) (acceptintent.NotificationRecord, bool, error) { + if store == nil { + return acceptintent.NotificationRecord{}, false, errors.New("get notification record: nil store") + } + if ctx == nil { + return acceptintent.NotificationRecord{}, false, errors.New("get notification record: nil context") + } + + operationCtx, cancel, err := store.operationContext(ctx, "get notification record") + if err != nil { + return acceptintent.NotificationRecord{}, false, err + } + defer cancel() + + scanned, found, err := loadRecord(operationCtx, store.db, notificationID) + if err != nil { + return acceptintent.NotificationRecord{}, false, err + } + if !found { + return acceptintent.NotificationRecord{}, false, nil + } + return scanned.Record, true, nil +} + +// GetRoute loads one accepted notification route by `(notificationID, +// routeID)`. Required by the publisher worker contracts. 
+func (store *Store) GetRoute(ctx context.Context, notificationID string, routeID string) (acceptintent.NotificationRoute, bool, error) { + if store == nil { + return acceptintent.NotificationRoute{}, false, errors.New("get notification route: nil store") + } + if ctx == nil { + return acceptintent.NotificationRoute{}, false, errors.New("get notification route: nil context") + } + + operationCtx, cancel, err := store.operationContext(ctx, "get notification route") + if err != nil { + return acceptintent.NotificationRoute{}, false, err + } + defer cancel() + + return loadRoute(operationCtx, store.db, notificationID, routeID) +} diff --git a/notification/internal/adapters/postgres/notificationstore/codecs.go b/notification/internal/adapters/postgres/notificationstore/codecs.go new file mode 100644 index 0000000..3b51bda --- /dev/null +++ b/notification/internal/adapters/postgres/notificationstore/codecs.go @@ -0,0 +1,65 @@ +package notificationstore + +import ( + "encoding/json" + "fmt" +) + +// marshalRecipientUserIDs returns the JSONB bytes for the +// `records.recipient_user_ids` column. A nil/empty slice round-trips as `[]` +// to keep the column NOT NULL across equality tests. +func marshalRecipientUserIDs(userIDs []string) ([]byte, error) { + if userIDs == nil { + userIDs = []string{} + } + payload, err := json.Marshal(userIDs) + if err != nil { + return nil, fmt.Errorf("marshal recipient user ids: %w", err) + } + return payload, nil +} + +// unmarshalRecipientUserIDs decodes the JSONB recipient user-id list. nil +// payloads round-trip as a nil slice so the read path matches what the +// service layer accepts (`nil` and an empty `[]` are equivalent for +// audience_kind != user_set). 
+func unmarshalRecipientUserIDs(payload []byte) ([]string, error) { + if len(payload) == 0 { + return nil, nil + } + var userIDs []string + if err := json.Unmarshal(payload, &userIDs); err != nil { + return nil, fmt.Errorf("unmarshal recipient user ids: %w", err) + } + if len(userIDs) == 0 { + return nil, nil + } + return userIDs, nil +} + +// marshalRawFields returns the JSONB bytes for the +// `malformed_intents.raw_fields` column. The map is serialised verbatim so +// future operator queries can match arbitrary keys. +func marshalRawFields(fields map[string]any) ([]byte, error) { + if fields == nil { + fields = map[string]any{} + } + payload, err := json.Marshal(fields) + if err != nil { + return nil, fmt.Errorf("marshal raw fields: %w", err) + } + return payload, nil +} + +// unmarshalRawFields decodes the malformed_intents.raw_fields column into a +// non-nil map (empty {} when the column is null/empty). +func unmarshalRawFields(payload []byte) (map[string]any, error) { + out := map[string]any{} + if len(payload) == 0 { + return out, nil + } + if err := json.Unmarshal(payload, &out); err != nil { + return nil, fmt.Errorf("unmarshal raw fields: %w", err) + } + return out, nil +} diff --git a/notification/internal/adapters/postgres/notificationstore/dead_letters.go b/notification/internal/adapters/postgres/notificationstore/dead_letters.go new file mode 100644 index 0000000..e990d58 --- /dev/null +++ b/notification/internal/adapters/postgres/notificationstore/dead_letters.go @@ -0,0 +1,61 @@ +package notificationstore + +import ( + "context" + "database/sql" + "time" + + pgtable "galaxy/notification/internal/adapters/postgres/jet/notification/table" +) + +// deadLetterRow stores the column values written to one dead_letters row. +// Kept package-private because the public surface is the routestate +// CompleteRouteDeadLetterInput shape; this struct is only the on-disk +// projection. 
+type deadLetterRow struct { + NotificationID string + RouteID string + Channel string + RecipientRef string + FinalAttemptCount int + MaxAttempts int + FailureClassification string + FailureMessage string + RecoveryHint string + CreatedAt time.Time +} + +// insertDeadLetter writes one dead-letter audit row inside an open +// transaction. The composite PRIMARY KEY guards against duplicate inserts +// for the same `(notification_id, route_id)` pair. +func insertDeadLetter(ctx context.Context, tx *sql.Tx, row deadLetterRow) error { + stmt := pgtable.DeadLetters.INSERT( + pgtable.DeadLetters.NotificationID, + pgtable.DeadLetters.RouteID, + pgtable.DeadLetters.Channel, + pgtable.DeadLetters.RecipientRef, + pgtable.DeadLetters.FinalAttemptCount, + pgtable.DeadLetters.MaxAttempts, + pgtable.DeadLetters.FailureClassification, + pgtable.DeadLetters.FailureMessage, + pgtable.DeadLetters.RecoveryHint, + pgtable.DeadLetters.CreatedAt, + ).VALUES( + row.NotificationID, + row.RouteID, + row.Channel, + row.RecipientRef, + row.FinalAttemptCount, + row.MaxAttempts, + row.FailureClassification, + row.FailureMessage, + row.RecoveryHint, + row.CreatedAt.UTC(), + ) + + query, args := stmt.Sql() + if _, err := tx.ExecContext(ctx, query, args...); err != nil { + return err + } + return nil +} diff --git a/notification/internal/adapters/postgres/notificationstore/harness_test.go b/notification/internal/adapters/postgres/notificationstore/harness_test.go new file mode 100644 index 0000000..eaa2c16 --- /dev/null +++ b/notification/internal/adapters/postgres/notificationstore/harness_test.go @@ -0,0 +1,200 @@ +package notificationstore + +import ( + "context" + "database/sql" + "net/url" + "os" + "sync" + "testing" + "time" + + "galaxy/notification/internal/adapters/postgres/migrations" + "galaxy/postgres" + + testcontainers "github.com/testcontainers/testcontainers-go" + tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres" + 
"github.com/testcontainers/testcontainers-go/wait" +) + +const ( + pkgPostgresImage = "postgres:16-alpine" + pkgSuperUser = "galaxy" + pkgSuperPassword = "galaxy" + pkgSuperDatabase = "galaxy_notification" + pkgServiceRole = "notificationservice" + pkgServicePassword = "notificationservice" + pkgServiceSchema = "notification" + pkgContainerStartup = 90 * time.Second + pkgOperationTimeout = 10 * time.Second +) + +var ( + pkgContainerOnce sync.Once + pkgContainerErr error + pkgContainerEnv *postgresEnv +) + +type postgresEnv struct { + container *tcpostgres.PostgresContainer + dsn string + pool *sql.DB +} + +func ensurePostgresEnv(t testing.TB) *postgresEnv { + t.Helper() + pkgContainerOnce.Do(func() { + pkgContainerEnv, pkgContainerErr = startPostgresEnv() + }) + if pkgContainerErr != nil { + t.Skipf("postgres container start failed (Docker unavailable?): %v", pkgContainerErr) + } + return pkgContainerEnv +} + +func startPostgresEnv() (*postgresEnv, error) { + ctx := context.Background() + container, err := tcpostgres.Run(ctx, pkgPostgresImage, + tcpostgres.WithDatabase(pkgSuperDatabase), + tcpostgres.WithUsername(pkgSuperUser), + tcpostgres.WithPassword(pkgSuperPassword), + testcontainers.WithWaitStrategy( + wait.ForLog("database system is ready to accept connections"). + WithOccurrence(2). 
+ WithStartupTimeout(pkgContainerStartup), + ), + ) + if err != nil { + return nil, err + } + + baseDSN, err := container.ConnectionString(ctx, "sslmode=disable") + if err != nil { + _ = testcontainers.TerminateContainer(container) + return nil, err + } + + if err := provisionRoleAndSchema(ctx, baseDSN); err != nil { + _ = testcontainers.TerminateContainer(container) + return nil, err + } + + scopedDSN, err := dsnForServiceRole(baseDSN) + if err != nil { + _ = testcontainers.TerminateContainer(container) + return nil, err + } + + cfg := postgres.DefaultConfig() + cfg.PrimaryDSN = scopedDSN + cfg.OperationTimeout = pkgOperationTimeout + pool, err := postgres.OpenPrimary(ctx, cfg) + if err != nil { + _ = testcontainers.TerminateContainer(container) + return nil, err + } + if err := postgres.Ping(ctx, pool, pkgOperationTimeout); err != nil { + _ = pool.Close() + _ = testcontainers.TerminateContainer(container) + return nil, err + } + if err := postgres.RunMigrations(ctx, pool, migrations.FS(), "."); err != nil { + _ = pool.Close() + _ = testcontainers.TerminateContainer(container) + return nil, err + } + + return &postgresEnv{ + container: container, + dsn: scopedDSN, + pool: pool, + }, nil +} + +func provisionRoleAndSchema(ctx context.Context, baseDSN string) error { + cfg := postgres.DefaultConfig() + cfg.PrimaryDSN = baseDSN + cfg.OperationTimeout = pkgOperationTimeout + db, err := postgres.OpenPrimary(ctx, cfg) + if err != nil { + return err + } + defer func() { _ = db.Close() }() + + statements := []string{ + `DO $$ BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'notificationservice') THEN + CREATE ROLE notificationservice LOGIN PASSWORD 'notificationservice'; + END IF; + END $$;`, + `CREATE SCHEMA IF NOT EXISTS notification AUTHORIZATION notificationservice;`, + `GRANT USAGE ON SCHEMA notification TO notificationservice;`, + } + for _, statement := range statements { + if _, err := db.ExecContext(ctx, statement); err != nil { + return err + } + } 
+ return nil +} + +func dsnForServiceRole(baseDSN string) (string, error) { + parsed, err := url.Parse(baseDSN) + if err != nil { + return "", err + } + values := url.Values{} + values.Set("search_path", pkgServiceSchema) + values.Set("sslmode", "disable") + scoped := url.URL{ + Scheme: parsed.Scheme, + User: url.UserPassword(pkgServiceRole, pkgServicePassword), + Host: parsed.Host, + Path: parsed.Path, + RawQuery: values.Encode(), + } + return scoped.String(), nil +} + +// newTestStore returns a Store backed by the package-scoped pool. Every +// invocation truncates the notification-owned tables so individual tests +// start from a clean slate while sharing one container start. +func newTestStore(t *testing.T) *Store { + t.Helper() + env := ensurePostgresEnv(t) + truncateAll(t, env.pool) + store, err := New(Config{DB: env.pool, OperationTimeout: pkgOperationTimeout}) + if err != nil { + t.Fatalf("new store: %v", err) + } + return store +} + +func truncateAll(t *testing.T, db *sql.DB) { + t.Helper() + statement := `TRUNCATE TABLE + malformed_intents, + dead_letters, + routes, + records + RESTART IDENTITY CASCADE` + if _, err := db.ExecContext(context.Background(), statement); err != nil { + t.Fatalf("truncate tables: %v", err) + } +} + +// TestMain runs first when `go test` enters the package. We drive it +// through a TestMain so the container started by the first test is shut +// down on the way out, even when individual tests panic. 
+func TestMain(m *testing.M) { + code := m.Run() + if pkgContainerEnv != nil { + if pkgContainerEnv.pool != nil { + _ = pkgContainerEnv.pool.Close() + } + if pkgContainerEnv.container != nil { + _ = testcontainers.TerminateContainer(pkgContainerEnv.container) + } + } + os.Exit(code) +} diff --git a/notification/internal/adapters/postgres/notificationstore/helpers.go b/notification/internal/adapters/postgres/notificationstore/helpers.go new file mode 100644 index 0000000..20a348d --- /dev/null +++ b/notification/internal/adapters/postgres/notificationstore/helpers.go @@ -0,0 +1,68 @@ +package notificationstore + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + "github.com/jackc/pgx/v5/pgconn" +) + +// pgUniqueViolationCode identifies the SQLSTATE returned by PostgreSQL when +// a UNIQUE constraint is violated by INSERT or UPDATE. +const pgUniqueViolationCode = "23505" + +// isUniqueViolation reports whether err is a PostgreSQL unique-violation, +// regardless of constraint name. +func isUniqueViolation(err error) bool { + var pgErr *pgconn.PgError + if !errors.As(err, &pgErr) { + return false + } + return pgErr.Code == pgUniqueViolationCode +} + +// isNoRows reports whether err is sql.ErrNoRows. +func isNoRows(err error) bool { + return errors.Is(err, sql.ErrNoRows) +} + +// nullableTime returns t.UTC() when non-zero, otherwise nil so the column +// is bound as SQL NULL. The notification domain uses zero-valued time.Time +// to express "absent" timestamps (no pointers), so the helper centralises +// the boundary translation. +func nullableTime(t time.Time) any { + if t.IsZero() { + return nil + } + return t.UTC() +} + +// timeFromNullable copies an optional sql.NullTime read from PostgreSQL +// into a domain time.Time, applying the global UTC normalisation rule. +// Invalid (NULL) values become the zero time.Time. 
+func timeFromNullable(value sql.NullTime) time.Time { + if !value.Valid { + return time.Time{} + } + return value.Time.UTC() +} + +// withTimeout derives a child context bounded by timeout and prefixes +// context errors with operation. Callers must always invoke the returned +// cancel. +func withTimeout(ctx context.Context, operation string, timeout time.Duration) (context.Context, context.CancelFunc, error) { + if ctx == nil { + return nil, nil, fmt.Errorf("%s: nil context", operation) + } + if err := ctx.Err(); err != nil { + return nil, nil, fmt.Errorf("%s: %w", operation, err) + } + if timeout <= 0 { + return nil, nil, fmt.Errorf("%s: operation timeout must be positive", operation) + } + bounded, cancel := context.WithTimeout(ctx, timeout) + return bounded, cancel, nil +} diff --git a/notification/internal/adapters/postgres/notificationstore/malformed_intents.go b/notification/internal/adapters/postgres/notificationstore/malformed_intents.go new file mode 100644 index 0000000..fe1a673 --- /dev/null +++ b/notification/internal/adapters/postgres/notificationstore/malformed_intents.go @@ -0,0 +1,131 @@ +package notificationstore + +import ( + "context" + "errors" + "fmt" + + pgtable "galaxy/notification/internal/adapters/postgres/jet/notification/table" + "galaxy/notification/internal/service/malformedintent" + + pg "github.com/go-jet/jet/v2/postgres" +) + +// Record stores entry idempotently by stream entry id. The helper satisfies +// `worker.MalformedIntentRecorder`. Re-recording an entry with the same +// `stream_entry_id` is a silent no-op via `ON CONFLICT DO NOTHING`. 
+func (store *Store) Record(ctx context.Context, entry malformedintent.Entry) error { + if store == nil { + return errors.New("record malformed intent: nil store") + } + if ctx == nil { + return errors.New("record malformed intent: nil context") + } + if err := entry.Validate(); err != nil { + return fmt.Errorf("record malformed intent: %w", err) + } + + rawFields, err := marshalRawFields(entry.RawFields) + if err != nil { + return fmt.Errorf("record malformed intent: %w", err) + } + + operationCtx, cancel, err := store.operationContext(ctx, "record malformed intent") + if err != nil { + return err + } + defer cancel() + + stmt := pgtable.MalformedIntents.INSERT( + pgtable.MalformedIntents.StreamEntryID, + pgtable.MalformedIntents.NotificationType, + pgtable.MalformedIntents.Producer, + pgtable.MalformedIntents.IdempotencyKey, + pgtable.MalformedIntents.FailureCode, + pgtable.MalformedIntents.FailureMessage, + pgtable.MalformedIntents.RawFields, + pgtable.MalformedIntents.RecordedAt, + ).VALUES( + entry.StreamEntryID, + entry.NotificationType, + entry.Producer, + entry.IdempotencyKey, + string(entry.FailureCode), + entry.FailureMessage, + rawFields, + entry.RecordedAt.UTC(), + ).ON_CONFLICT(pgtable.MalformedIntents.StreamEntryID).DO_NOTHING() + + query, args := stmt.Sql() + if _, err := store.db.ExecContext(operationCtx, query, args...); err != nil { + return fmt.Errorf("record malformed intent: %w", err) + } + return nil +} + +// GetMalformedIntent loads one malformed-intent entry by stream entry id. +// Returns found=false when no such row exists. 
+func (store *Store) GetMalformedIntent(ctx context.Context, streamEntryID string) (malformedintent.Entry, bool, error) {
+	if store == nil {
+		return malformedintent.Entry{}, false, errors.New("get malformed intent: nil store")
+	}
+	if ctx == nil {
+		return malformedintent.Entry{}, false, errors.New("get malformed intent: nil context")
+	}
+
+	operationCtx, cancel, err := store.operationContext(ctx, "get malformed intent")
+	if err != nil {
+		return malformedintent.Entry{}, false, err
+	}
+	defer cancel()
+
+	stmt := pg.SELECT(
+		pgtable.MalformedIntents.NotificationType,
+		pgtable.MalformedIntents.Producer,
+		pgtable.MalformedIntents.IdempotencyKey,
+		pgtable.MalformedIntents.FailureCode,
+		pgtable.MalformedIntents.FailureMessage,
+		pgtable.MalformedIntents.RawFields,
+		pgtable.MalformedIntents.RecordedAt,
+	).FROM(pgtable.MalformedIntents).
+		WHERE(pgtable.MalformedIntents.StreamEntryID.EQ(pg.String(streamEntryID)))
+
+	query, args := stmt.Sql()
+	row := store.db.QueryRowContext(operationCtx, query, args...)
+
+	var (
+		notificationType string
+		producer         string
+		idempotencyKey   string
+		failureCode      string
+		failureMessage   string
+		rawFields        []byte
+	)
+	entry := malformedintent.Entry{StreamEntryID: streamEntryID}
+	if err := row.Scan(
+		&notificationType,
+		&producer,
+		&idempotencyKey,
+		&failureCode,
+		&failureMessage,
+		&rawFields,
+		&entry.RecordedAt,
+	); err != nil {
+		if isNoRows(err) {
+			// No reservation for this stream entry id: found=false, no error.
+			return malformedintent.Entry{}, false, nil
+		}
+		return malformedintent.Entry{}, false, fmt.Errorf("get malformed intent: %w", err)
+	}
+	entry.NotificationType = notificationType
+	entry.Producer = producer
+	entry.IdempotencyKey = idempotencyKey
+	entry.FailureCode = malformedintent.FailureCode(failureCode)
+	entry.FailureMessage = failureMessage
+	// Global normalisation rule: timestamps leave the store in UTC.
+	entry.RecordedAt = entry.RecordedAt.UTC()
+	fields, err := unmarshalRawFields(rawFields)
+	if err != nil {
+		return malformedintent.Entry{}, false, fmt.Errorf("get malformed intent: %w", err)
+	}
+	entry.RawFields = fields
+	return entry, true, nil
+}
diff --git a/notification/internal/adapters/postgres/notificationstore/records.go b/notification/internal/adapters/postgres/notificationstore/records.go
new file mode 100644
index 0000000..bcd0a73
--- /dev/null
+++ b/notification/internal/adapters/postgres/notificationstore/records.go
@@ -0,0 +1,223 @@
+package notificationstore
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"fmt"
+	"time"
+
+	pgtable "galaxy/notification/internal/adapters/postgres/jet/notification/table"
+	"galaxy/notification/internal/api/intentstream"
+	"galaxy/notification/internal/service/acceptintent"
+
+	pg "github.com/go-jet/jet/v2/postgres"
+)
+
+// recordSelectColumns is the canonical SELECT list for the records table,
+// matching scanRecord's column order.
+var recordSelectColumns = pg.ColumnList{ + pgtable.Records.NotificationID, + pgtable.Records.NotificationType, + pgtable.Records.Producer, + pgtable.Records.AudienceKind, + pgtable.Records.RecipientUserIds, + pgtable.Records.PayloadJSON, + pgtable.Records.IdempotencyKey, + pgtable.Records.RequestFingerprint, + pgtable.Records.RequestID, + pgtable.Records.TraceID, + pgtable.Records.OccurredAt, + pgtable.Records.AcceptedAt, + pgtable.Records.UpdatedAt, + pgtable.Records.IdempotencyExpiresAt, +} + +// rowScanner abstracts *sql.Row and *sql.Rows so scanRecord/scanRoute can be +// shared across both single-row reads and iterated reads. +type rowScanner interface { + Scan(dest ...any) error +} + +// scannedRecord stores the columns scanned from the records table plus the +// idempotency_expires_at value the service layer feeds back into the +// IdempotencyRecord constructed from the same row. +type scannedRecord struct { + Record acceptintent.NotificationRecord + IdempotencyExpiresAt time.Time +} + +// scanRecord scans one records row from rs. Returns sql.ErrNoRows verbatim +// so callers can distinguish "no row" from a hard error. 
+func scanRecord(rs rowScanner) (scannedRecord, error) { + var ( + notificationID string + notificationType string + producer string + audienceKind string + recipientUserIDs []byte + payloadJSON string + idempotencyKey string + requestFingerprint string + requestID string + traceID string + occurredAt time.Time + acceptedAt time.Time + updatedAt time.Time + idempotencyExpiresAt time.Time + ) + if err := rs.Scan( + ¬ificationID, + ¬ificationType, + &producer, + &audienceKind, + &recipientUserIDs, + &payloadJSON, + &idempotencyKey, + &requestFingerprint, + &requestID, + &traceID, + &occurredAt, + &acceptedAt, + &updatedAt, + &idempotencyExpiresAt, + ); err != nil { + return scannedRecord{}, err + } + + users, err := unmarshalRecipientUserIDs(recipientUserIDs) + if err != nil { + return scannedRecord{}, err + } + + return scannedRecord{ + Record: acceptintent.NotificationRecord{ + NotificationID: notificationID, + NotificationType: intentstream.NotificationType(notificationType), + Producer: intentstream.Producer(producer), + AudienceKind: intentstream.AudienceKind(audienceKind), + RecipientUserIDs: users, + PayloadJSON: payloadJSON, + IdempotencyKey: idempotencyKey, + RequestFingerprint: requestFingerprint, + RequestID: requestID, + TraceID: traceID, + OccurredAt: occurredAt.UTC(), + AcceptedAt: acceptedAt.UTC(), + UpdatedAt: updatedAt.UTC(), + }, + IdempotencyExpiresAt: idempotencyExpiresAt.UTC(), + }, nil +} + +// insertRecord writes one record row plus its idempotency expiry inside an +// open transaction. The (producer, idempotency_key) UNIQUE constraint is +// the idempotency reservation; the caller maps `isUniqueViolation` errors +// to `acceptintent.ErrConflict`. 
+func insertRecord(ctx context.Context, tx *sql.Tx, record acceptintent.NotificationRecord, idempotencyExpiresAt time.Time) error { + if err := record.Validate(); err != nil { + return fmt.Errorf("insert record: %w", err) + } + + users, err := marshalRecipientUserIDs(record.RecipientUserIDs) + if err != nil { + return fmt.Errorf("insert record: %w", err) + } + + stmt := pgtable.Records.INSERT( + pgtable.Records.NotificationID, + pgtable.Records.NotificationType, + pgtable.Records.Producer, + pgtable.Records.AudienceKind, + pgtable.Records.RecipientUserIds, + pgtable.Records.PayloadJSON, + pgtable.Records.IdempotencyKey, + pgtable.Records.RequestFingerprint, + pgtable.Records.RequestID, + pgtable.Records.TraceID, + pgtable.Records.OccurredAt, + pgtable.Records.AcceptedAt, + pgtable.Records.UpdatedAt, + pgtable.Records.IdempotencyExpiresAt, + ).VALUES( + record.NotificationID, + string(record.NotificationType), + string(record.Producer), + string(record.AudienceKind), + users, + record.PayloadJSON, + record.IdempotencyKey, + record.RequestFingerprint, + record.RequestID, + record.TraceID, + record.OccurredAt.UTC(), + record.AcceptedAt.UTC(), + record.UpdatedAt.UTC(), + idempotencyExpiresAt.UTC(), + ) + + query, args := stmt.Sql() + if _, err := tx.ExecContext(ctx, query, args...); err != nil { + return err + } + return nil +} + +// loadRecord returns the record row for notificationID using the store's +// default pool. found is false when no such row exists. +func loadRecord(ctx context.Context, db *sql.DB, notificationID string) (scannedRecord, bool, error) { + stmt := pg.SELECT(recordSelectColumns). + FROM(pgtable.Records). + WHERE(pgtable.Records.NotificationID.EQ(pg.String(notificationID))) + + query, args := stmt.Sql() + row := db.QueryRowContext(ctx, query, args...) 
+ scanned, err := scanRecord(row) + if isNoRows(err) { + return scannedRecord{}, false, nil + } + if err != nil { + return scannedRecord{}, false, fmt.Errorf("load notification record: %w", err) + } + return scanned, true, nil +} + +// loadIdempotencyByKey returns the records row that owns one +// `(producer, idempotency_key)` reservation. found is false when no match. +func loadIdempotencyByKey(ctx context.Context, db *sql.DB, producer string, idempotencyKey string) (scannedRecord, bool, error) { + stmt := pg.SELECT(recordSelectColumns). + FROM(pgtable.Records). + WHERE(pg.AND( + pgtable.Records.Producer.EQ(pg.String(producer)), + pgtable.Records.IdempotencyKey.EQ(pg.String(idempotencyKey)), + )) + + query, args := stmt.Sql() + row := db.QueryRowContext(ctx, query, args...) + scanned, err := scanRecord(row) + if isNoRows(err) { + return scannedRecord{}, false, nil + } + if err != nil { + return scannedRecord{}, false, fmt.Errorf("load notification idempotency: %w", err) + } + return scanned, true, nil +} + +// idempotencyRecordFromScanned constructs an IdempotencyRecord shape from +// the scanned record. CreatedAt mirrors AcceptedAt because the durable row +// is the idempotency reservation. +func idempotencyRecordFromScanned(scanned scannedRecord) acceptintent.IdempotencyRecord { + return acceptintent.IdempotencyRecord{ + Producer: scanned.Record.Producer, + IdempotencyKey: scanned.Record.IdempotencyKey, + NotificationID: scanned.Record.NotificationID, + RequestFingerprint: scanned.Record.RequestFingerprint, + CreatedAt: scanned.Record.AcceptedAt, + ExpiresAt: scanned.IdempotencyExpiresAt, + } +} + +// errRecordNotFound is the package-private sentinel returned by helpers +// when a row required by an in-progress transaction is not found. 
+var errRecordNotFound = errors.New("record not found") diff --git a/notification/internal/adapters/postgres/notificationstore/retention.go b/notification/internal/adapters/postgres/notificationstore/retention.go new file mode 100644 index 0000000..cc1f3bc --- /dev/null +++ b/notification/internal/adapters/postgres/notificationstore/retention.go @@ -0,0 +1,67 @@ +package notificationstore + +import ( + "context" + "errors" + "fmt" + "time" + + pgtable "galaxy/notification/internal/adapters/postgres/jet/notification/table" + + pg "github.com/go-jet/jet/v2/postgres" +) + +// DeleteRecordsOlderThan removes records rows whose `accepted_at` predates +// cutoff. The records FK CASCADE clears the dependent routes and +// dead_letters rows in the same statement. +func (store *Store) DeleteRecordsOlderThan(ctx context.Context, cutoff time.Time) (int64, error) { + if store == nil { + return 0, errors.New("delete notification records: nil store") + } + operationCtx, cancel, err := store.operationContext(ctx, "delete notification records") + if err != nil { + return 0, err + } + defer cancel() + + stmt := pgtable.Records.DELETE(). + WHERE(pgtable.Records.AcceptedAt.LT(pg.TimestampzT(cutoff.UTC()))) + + query, args := stmt.Sql() + result, err := store.db.ExecContext(operationCtx, query, args...) + if err != nil { + return 0, fmt.Errorf("delete notification records: %w", err) + } + rows, err := result.RowsAffected() + if err != nil { + return 0, fmt.Errorf("delete notification records: rows affected: %w", err) + } + return rows, nil +} + +// DeleteMalformedIntentsOlderThan removes malformed-intent rows whose +// `recorded_at` predates cutoff. 
+func (store *Store) DeleteMalformedIntentsOlderThan(ctx context.Context, cutoff time.Time) (int64, error) { + if store == nil { + return 0, errors.New("delete malformed intents: nil store") + } + operationCtx, cancel, err := store.operationContext(ctx, "delete malformed intents") + if err != nil { + return 0, err + } + defer cancel() + + stmt := pgtable.MalformedIntents.DELETE(). + WHERE(pgtable.MalformedIntents.RecordedAt.LT(pg.TimestampzT(cutoff.UTC()))) + + query, args := stmt.Sql() + result, err := store.db.ExecContext(operationCtx, query, args...) + if err != nil { + return 0, fmt.Errorf("delete malformed intents: %w", err) + } + rows, err := result.RowsAffected() + if err != nil { + return 0, fmt.Errorf("delete malformed intents: rows affected: %w", err) + } + return rows, nil +} diff --git a/notification/internal/adapters/postgres/notificationstore/routes.go b/notification/internal/adapters/postgres/notificationstore/routes.go new file mode 100644 index 0000000..14c0a84 --- /dev/null +++ b/notification/internal/adapters/postgres/notificationstore/routes.go @@ -0,0 +1,248 @@ +package notificationstore + +import ( + "context" + "database/sql" + "fmt" + "time" + + "galaxy/notification/internal/api/intentstream" + pgtable "galaxy/notification/internal/adapters/postgres/jet/notification/table" + "galaxy/notification/internal/service/acceptintent" + + pg "github.com/go-jet/jet/v2/postgres" +) + +// routeSelectColumns is the canonical SELECT list for the routes table, +// matching scanRoute's column order. 
+var routeSelectColumns = pg.ColumnList{ + pgtable.Routes.NotificationID, + pgtable.Routes.RouteID, + pgtable.Routes.Channel, + pgtable.Routes.RecipientRef, + pgtable.Routes.Status, + pgtable.Routes.AttemptCount, + pgtable.Routes.MaxAttempts, + pgtable.Routes.NextAttemptAt, + pgtable.Routes.ResolvedEmail, + pgtable.Routes.ResolvedLocale, + pgtable.Routes.LastErrorClassification, + pgtable.Routes.LastErrorMessage, + pgtable.Routes.LastErrorAt, + pgtable.Routes.CreatedAt, + pgtable.Routes.UpdatedAt, + pgtable.Routes.PublishedAt, + pgtable.Routes.DeadLetteredAt, + pgtable.Routes.SkippedAt, +} + +// scanRoute scans one routes row from rs. +func scanRoute(rs rowScanner) (acceptintent.NotificationRoute, error) { + var ( + notificationID string + routeID string + channel string + recipientRef string + status string + attemptCount int + maxAttempts int + nextAttemptAt sql.NullTime + resolvedEmail string + resolvedLocale string + lastErrorClassification string + lastErrorMessage string + lastErrorAt sql.NullTime + createdAt time.Time + updatedAt time.Time + publishedAt sql.NullTime + deadLetteredAt sql.NullTime + skippedAt sql.NullTime + ) + if err := rs.Scan( + ¬ificationID, + &routeID, + &channel, + &recipientRef, + &status, + &attemptCount, + &maxAttempts, + &nextAttemptAt, + &resolvedEmail, + &resolvedLocale, + &lastErrorClassification, + &lastErrorMessage, + &lastErrorAt, + &createdAt, + &updatedAt, + &publishedAt, + &deadLetteredAt, + &skippedAt, + ); err != nil { + return acceptintent.NotificationRoute{}, err + } + + return acceptintent.NotificationRoute{ + NotificationID: notificationID, + RouteID: routeID, + Channel: intentstream.Channel(channel), + RecipientRef: recipientRef, + Status: acceptintent.RouteStatus(status), + AttemptCount: attemptCount, + MaxAttempts: maxAttempts, + NextAttemptAt: timeFromNullable(nextAttemptAt), + ResolvedEmail: resolvedEmail, + ResolvedLocale: resolvedLocale, + LastErrorClassification: lastErrorClassification, + LastErrorMessage: 
lastErrorMessage, + LastErrorAt: timeFromNullable(lastErrorAt), + CreatedAt: createdAt.UTC(), + UpdatedAt: updatedAt.UTC(), + PublishedAt: timeFromNullable(publishedAt), + DeadLetteredAt: timeFromNullable(deadLetteredAt), + SkippedAt: timeFromNullable(skippedAt), + }, nil +} + +// insertRoute writes one route row inside an open transaction. +func insertRoute(ctx context.Context, tx *sql.Tx, route acceptintent.NotificationRoute) error { + if err := route.Validate(); err != nil { + return fmt.Errorf("insert route: %w", err) + } + + stmt := pgtable.Routes.INSERT( + pgtable.Routes.NotificationID, + pgtable.Routes.RouteID, + pgtable.Routes.Channel, + pgtable.Routes.RecipientRef, + pgtable.Routes.Status, + pgtable.Routes.AttemptCount, + pgtable.Routes.MaxAttempts, + pgtable.Routes.NextAttemptAt, + pgtable.Routes.ResolvedEmail, + pgtable.Routes.ResolvedLocale, + pgtable.Routes.LastErrorClassification, + pgtable.Routes.LastErrorMessage, + pgtable.Routes.LastErrorAt, + pgtable.Routes.CreatedAt, + pgtable.Routes.UpdatedAt, + pgtable.Routes.PublishedAt, + pgtable.Routes.DeadLetteredAt, + pgtable.Routes.SkippedAt, + ).VALUES( + route.NotificationID, + route.RouteID, + string(route.Channel), + route.RecipientRef, + string(route.Status), + route.AttemptCount, + route.MaxAttempts, + nullableTime(route.NextAttemptAt), + route.ResolvedEmail, + route.ResolvedLocale, + route.LastErrorClassification, + route.LastErrorMessage, + nullableTime(route.LastErrorAt), + route.CreatedAt.UTC(), + route.UpdatedAt.UTC(), + nullableTime(route.PublishedAt), + nullableTime(route.DeadLetteredAt), + nullableTime(route.SkippedAt), + ) + + query, args := stmt.Sql() + if _, err := tx.ExecContext(ctx, query, args...); err != nil { + return err + } + return nil +} + +// loadRoute returns one route row by composite key. found is false when no +// matching row exists. 
+func loadRoute(ctx context.Context, db *sql.DB, notificationID string, routeID string) (acceptintent.NotificationRoute, bool, error) { + stmt := pg.SELECT(routeSelectColumns). + FROM(pgtable.Routes). + WHERE(pg.AND( + pgtable.Routes.NotificationID.EQ(pg.String(notificationID)), + pgtable.Routes.RouteID.EQ(pg.String(routeID)), + )) + query, args := stmt.Sql() + row := db.QueryRowContext(ctx, query, args...) + route, err := scanRoute(row) + if isNoRows(err) { + return acceptintent.NotificationRoute{}, false, nil + } + if err != nil { + return acceptintent.NotificationRoute{}, false, fmt.Errorf("load notification route: %w", err) + } + return route, true, nil +} + +// loadRouteTx returns one route row by composite key inside an open +// transaction. +func loadRouteTx(ctx context.Context, tx *sql.Tx, notificationID string, routeID string) (acceptintent.NotificationRoute, bool, error) { + stmt := pg.SELECT(routeSelectColumns). + FROM(pgtable.Routes). + WHERE(pg.AND( + pgtable.Routes.NotificationID.EQ(pg.String(notificationID)), + pgtable.Routes.RouteID.EQ(pg.String(routeID)), + )) + query, args := stmt.Sql() + row := tx.QueryRowContext(ctx, query, args...) + route, err := scanRoute(row) + if isNoRows(err) { + return acceptintent.NotificationRoute{}, false, nil + } + if err != nil { + return acceptintent.NotificationRoute{}, false, fmt.Errorf("load notification route: %w", err) + } + return route, true, nil +} + +// updateRouteIfMatching writes the route columns back inside an open +// transaction, gated on `updated_at = expectedUpdatedAt`. Returns the +// number of rows actually updated; zero indicates an optimistic-concurrency +// loss. 
+func updateRouteIfMatching(ctx context.Context, tx *sql.Tx, route acceptintent.NotificationRoute, expectedUpdatedAt time.Time) (int64, error) { + stmt := pgtable.Routes.UPDATE( + pgtable.Routes.Status, + pgtable.Routes.AttemptCount, + pgtable.Routes.NextAttemptAt, + pgtable.Routes.ResolvedEmail, + pgtable.Routes.ResolvedLocale, + pgtable.Routes.LastErrorClassification, + pgtable.Routes.LastErrorMessage, + pgtable.Routes.LastErrorAt, + pgtable.Routes.UpdatedAt, + pgtable.Routes.PublishedAt, + pgtable.Routes.DeadLetteredAt, + pgtable.Routes.SkippedAt, + ).SET( + string(route.Status), + route.AttemptCount, + nullableTime(route.NextAttemptAt), + route.ResolvedEmail, + route.ResolvedLocale, + route.LastErrorClassification, + route.LastErrorMessage, + nullableTime(route.LastErrorAt), + route.UpdatedAt.UTC(), + nullableTime(route.PublishedAt), + nullableTime(route.DeadLetteredAt), + nullableTime(route.SkippedAt), + ).WHERE(pg.AND( + pgtable.Routes.NotificationID.EQ(pg.String(route.NotificationID)), + pgtable.Routes.RouteID.EQ(pg.String(route.RouteID)), + pgtable.Routes.UpdatedAt.EQ(pg.TimestampzT(expectedUpdatedAt.UTC())), + )) + + query, args := stmt.Sql() + result, err := tx.ExecContext(ctx, query, args...) 
+ if err != nil { + return 0, err + } + rows, err := result.RowsAffected() + if err != nil { + return 0, err + } + return rows, nil +} diff --git a/notification/internal/adapters/postgres/notificationstore/scheduler.go b/notification/internal/adapters/postgres/notificationstore/scheduler.go new file mode 100644 index 0000000..73094a0 --- /dev/null +++ b/notification/internal/adapters/postgres/notificationstore/scheduler.go @@ -0,0 +1,262 @@ +package notificationstore + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + pgtable "galaxy/notification/internal/adapters/postgres/jet/notification/table" + "galaxy/notification/internal/service/acceptintent" + "galaxy/notification/internal/service/routestate" + "galaxy/notification/internal/telemetry" + + pg "github.com/go-jet/jet/v2/postgres" +) + +// scheduledRouteKey synthesises a stable, human-readable key for one +// ScheduledRoute. Notification publishers do not interpret the key beyond +// requiring it to be non-empty (`ScheduledRoute.Validate`). +func scheduledRouteKey(notificationID string, routeID string) string { + return notificationID + "/" + routeID +} + +// ListDueRoutes returns up to limit routes whose `next_attempt_at` is at or +// before now. The query is non-locking; per-row contention is resolved by +// the lease (Redis) plus the optimistic-concurrency check inside `Complete*`. 
+func (store *Store) ListDueRoutes(ctx context.Context, now time.Time, limit int64) ([]routestate.ScheduledRoute, error) { + if store == nil { + return nil, errors.New("list due routes: nil store") + } + if ctx == nil { + return nil, errors.New("list due routes: nil context") + } + if err := routestate.ValidateUTCMillisecondTimestamp("list due routes now", now); err != nil { + return nil, err + } + if limit <= 0 { + return nil, errors.New("list due routes: limit must be positive") + } + + operationCtx, cancel, err := store.operationContext(ctx, "list due routes") + if err != nil { + return nil, err + } + defer cancel() + + stmt := pg.SELECT(pgtable.Routes.NotificationID, pgtable.Routes.RouteID). + FROM(pgtable.Routes). + WHERE(pg.AND( + pgtable.Routes.NextAttemptAt.IS_NOT_NULL(), + pgtable.Routes.NextAttemptAt.LT_EQ(pg.TimestampzT(now.UTC())), + )). + ORDER_BY(pgtable.Routes.NextAttemptAt.ASC()). + LIMIT(limit) + + query, args := stmt.Sql() + rows, err := store.db.QueryContext(operationCtx, query, args...) + if err != nil { + return nil, fmt.Errorf("list due routes: %w", err) + } + defer rows.Close() + + out := make([]routestate.ScheduledRoute, 0, limit) + for rows.Next() { + var ( + notificationID string + routeID string + ) + if err := rows.Scan(¬ificationID, &routeID); err != nil { + return nil, fmt.Errorf("list due routes: scan: %w", err) + } + out = append(out, routestate.ScheduledRoute{ + RouteKey: scheduledRouteKey(notificationID, routeID), + NotificationID: notificationID, + RouteID: routeID, + }) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("list due routes: %w", err) + } + return out, nil +} + +// ReadRouteScheduleSnapshot returns the current depth of the route schedule +// (rows with non-NULL `next_attempt_at`) together with the oldest scheduled +// timestamp when one exists. The runtime exposes this through the telemetry +// snapshot reader. 
+func (store *Store) ReadRouteScheduleSnapshot(ctx context.Context) (telemetry.RouteScheduleSnapshot, error) { + if store == nil { + return telemetry.RouteScheduleSnapshot{}, errors.New("read route schedule snapshot: nil store") + } + if ctx == nil { + return telemetry.RouteScheduleSnapshot{}, errors.New("read route schedule snapshot: nil context") + } + + operationCtx, cancel, err := store.operationContext(ctx, "read route schedule snapshot") + if err != nil { + return telemetry.RouteScheduleSnapshot{}, err + } + defer cancel() + + stmt := pg.SELECT( + pg.COUNT(pg.STAR), + pg.MIN(pgtable.Routes.NextAttemptAt), + ). + FROM(pgtable.Routes). + WHERE(pgtable.Routes.NextAttemptAt.IS_NOT_NULL()) + + query, args := stmt.Sql() + row := store.db.QueryRowContext(operationCtx, query, args...) + var ( + depth int64 + oldest sql.NullTime + summary telemetry.RouteScheduleSnapshot + ) + if err := row.Scan(&depth, &oldest); err != nil { + return telemetry.RouteScheduleSnapshot{}, fmt.Errorf("read route schedule snapshot: %w", err) + } + summary.Depth = depth + if oldest.Valid { + oldestUTC := oldest.Time.UTC() + summary.OldestScheduledFor = &oldestUTC + } + return summary, nil +} + +// CompleteRoutePublished marks the expected route as `published`, +// increments attempt_count, and clears retry/error fields. Optimistic +// concurrency on `updated_at` rejects races that happened since the +// publisher loaded the row; a mismatch surfaces as `routestate.ErrConflict`. +// +// Note: the outbound stream emission (XADD) happens in the publisher +// before this call. The store deliberately ignores the input.Stream and +// input.StreamValues fields — they are kept on the input only so the +// publisher can pass one struct around through its state machine. 
+func (store *Store) CompleteRoutePublished(ctx context.Context, input routestate.CompleteRoutePublishedInput) error { + if store == nil { + return errors.New("complete route published: nil store") + } + if ctx == nil { + return errors.New("complete route published: nil context") + } + if err := input.Validate(); err != nil { + return fmt.Errorf("complete route published: %w", err) + } + + updated := input.ExpectedRoute + updated.Status = acceptintent.RouteStatusPublished + updated.AttemptCount++ + updated.NextAttemptAt = time.Time{} + updated.LastErrorClassification = "" + updated.LastErrorMessage = "" + updated.LastErrorAt = time.Time{} + updated.UpdatedAt = input.PublishedAt + updated.PublishedAt = input.PublishedAt + updated.DeadLetteredAt = time.Time{} + + return store.withTx(ctx, "complete route published", func(ctx context.Context, tx *sql.Tx) error { + rows, err := updateRouteIfMatching(ctx, tx, updated, input.ExpectedRoute.UpdatedAt) + if err != nil { + return fmt.Errorf("complete route published: %w", err) + } + if rows == 0 { + return routestate.ErrConflict + } + return nil + }) +} + +// CompleteRouteFailed records one retryable publication failure: increments +// attempt_count, populates the last-error fields, and reschedules the row +// at `NextAttemptAt`. 
+func (store *Store) CompleteRouteFailed(ctx context.Context, input routestate.CompleteRouteFailedInput) error { + if store == nil { + return errors.New("complete route failed: nil store") + } + if ctx == nil { + return errors.New("complete route failed: nil context") + } + if err := input.Validate(); err != nil { + return fmt.Errorf("complete route failed: %w", err) + } + + updated := input.ExpectedRoute + updated.Status = acceptintent.RouteStatusFailed + updated.AttemptCount++ + updated.NextAttemptAt = input.NextAttemptAt + updated.LastErrorClassification = input.FailureClassification + updated.LastErrorMessage = input.FailureMessage + updated.LastErrorAt = input.FailedAt + updated.UpdatedAt = input.FailedAt + + return store.withTx(ctx, "complete route failed", func(ctx context.Context, tx *sql.Tx) error { + rows, err := updateRouteIfMatching(ctx, tx, updated, input.ExpectedRoute.UpdatedAt) + if err != nil { + return fmt.Errorf("complete route failed: %w", err) + } + if rows == 0 { + return routestate.ErrConflict + } + return nil + }) +} + +// CompleteRouteDeadLetter records one terminal publication failure: +// marks the route `dead_letter`, clears the schedule, and inserts the +// dead-letter audit row. 
+func (store *Store) CompleteRouteDeadLetter(ctx context.Context, input routestate.CompleteRouteDeadLetterInput) error { + if store == nil { + return errors.New("complete route dead letter: nil store") + } + if ctx == nil { + return errors.New("complete route dead letter: nil context") + } + if err := input.Validate(); err != nil { + return fmt.Errorf("complete route dead letter: %w", err) + } + + updated := input.ExpectedRoute + updated.Status = acceptintent.RouteStatusDeadLetter + updated.AttemptCount++ + updated.NextAttemptAt = time.Time{} + updated.LastErrorClassification = input.FailureClassification + updated.LastErrorMessage = input.FailureMessage + updated.LastErrorAt = input.DeadLetteredAt + updated.UpdatedAt = input.DeadLetteredAt + updated.DeadLetteredAt = input.DeadLetteredAt + + if updated.AttemptCount < updated.MaxAttempts { + return fmt.Errorf( + "complete route dead letter: final attempt count %d is below max attempts %d", + updated.AttemptCount, + updated.MaxAttempts, + ) + } + + return store.withTx(ctx, "complete route dead letter", func(ctx context.Context, tx *sql.Tx) error { + rows, err := updateRouteIfMatching(ctx, tx, updated, input.ExpectedRoute.UpdatedAt) + if err != nil { + return fmt.Errorf("complete route dead letter: %w", err) + } + if rows == 0 { + return routestate.ErrConflict + } + if err := insertDeadLetter(ctx, tx, deadLetterRow{ + NotificationID: updated.NotificationID, + RouteID: updated.RouteID, + Channel: string(updated.Channel), + RecipientRef: updated.RecipientRef, + FinalAttemptCount: updated.AttemptCount, + MaxAttempts: updated.MaxAttempts, + FailureClassification: input.FailureClassification, + FailureMessage: input.FailureMessage, + RecoveryHint: input.RecoveryHint, + CreatedAt: input.DeadLetteredAt, + }); err != nil { + return fmt.Errorf("complete route dead letter: %w", err) + } + return nil + }) +} diff --git a/notification/internal/adapters/postgres/notificationstore/store.go 
b/notification/internal/adapters/postgres/notificationstore/store.go new file mode 100644 index 0000000..c3dcc77 --- /dev/null +++ b/notification/internal/adapters/postgres/notificationstore/store.go @@ -0,0 +1,126 @@ +// Package notificationstore implements the PostgreSQL-backed source-of-truth +// persistence used by Notification Service. +// +// The package owns the on-disk shape of the `notification` schema (defined +// in `galaxy/notification/internal/adapters/postgres/migrations`) and +// translates the schema-agnostic Store interfaces declared by the +// `internal/service/acceptintent` use case and the route publishers into +// concrete `database/sql` operations driven by the pgx driver. Atomic +// composite operations (acceptance, route-completion transitions) execute +// inside explicit `BEGIN … COMMIT` transactions; per-row lifecycle +// transitions use optimistic concurrency on the `updated_at` token rather +// than retaining a `FOR UPDATE` lock across the publisher's outbound stream +// emission. +// +// Stage 5 of `PG_PLAN.md` migrates Notification Service away from +// Redis-backed durable state. The inbound `notification:intents` Redis +// Stream and its consumer offset, the outbound `gateway:client-events` and +// `mail:delivery_commands` Redis Streams, and the short-lived +// `route_leases:*` exclusivity hint all remain on Redis; this store is no +// longer aware of any of them. +package notificationstore + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" +) + +// Config configures one PostgreSQL-backed notification store instance. The +// store does not own the underlying *sql.DB lifecycle: the caller (typically +// the service runtime) opens, instruments, migrates, and closes the pool. +// The store only borrows the pool and bounds individual round trips with +// OperationTimeout. +type Config struct { + // DB stores the connection pool the store uses for every query. + DB *sql.DB + + // OperationTimeout bounds one round trip. 
The store creates a derived + // context for each operation so callers cannot starve the pool with an + // unbounded ctx. Multi-statement transactions inherit this bound for the + // whole BEGIN … COMMIT span. + OperationTimeout time.Duration +} + +// Store persists Notification Service durable state in PostgreSQL and +// exposes the per-use-case Store interfaces required by acceptance, +// publication completion, malformed-intent recording, and the periodic +// retention worker. +type Store struct { + db *sql.DB + operationTimeout time.Duration +} + +// New constructs one PostgreSQL-backed notification store from cfg. +func New(cfg Config) (*Store, error) { + if cfg.DB == nil { + return nil, errors.New("new postgres notification store: db must not be nil") + } + if cfg.OperationTimeout <= 0 { + return nil, errors.New("new postgres notification store: operation timeout must be positive") + } + return &Store{ + db: cfg.DB, + operationTimeout: cfg.OperationTimeout, + }, nil +} + +// Close is a no-op for the PostgreSQL-backed store: the connection pool is +// owned by the caller (the runtime) and closed once the runtime shuts down. +// This method remains so the runtime wiring can treat the store like the +// previous Redis-backed implementation. +func (store *Store) Close() error { + return nil +} + +// Ping verifies that the configured PostgreSQL backend is reachable. It +// runs `db.PingContext` under the configured operation timeout. +func (store *Store) Ping(ctx context.Context) error { + operationCtx, cancel, err := withTimeout(ctx, "ping postgres notification store", store.operationTimeout) + if err != nil { + return err + } + defer cancel() + + if err := store.db.PingContext(operationCtx); err != nil { + return fmt.Errorf("ping postgres notification store: %w", err) + } + return nil +} + +// withTx runs fn inside a BEGIN … COMMIT transaction bounded by the store's +// operation timeout. It rolls back when fn returns an error and returns +// whatever fn returned. 
The transaction uses the default isolation level +// (`READ COMMITTED`); per-row contention is resolved through optimistic +// concurrency on `updated_at` rather than `SELECT … FOR UPDATE`. +func (store *Store) withTx(ctx context.Context, operation string, fn func(ctx context.Context, tx *sql.Tx) error) error { + operationCtx, cancel, err := withTimeout(ctx, operation, store.operationTimeout) + if err != nil { + return err + } + defer cancel() + + tx, err := store.db.BeginTx(operationCtx, nil) + if err != nil { + return fmt.Errorf("%s: begin: %w", operation, err) + } + + if err := fn(operationCtx, tx); err != nil { + _ = tx.Rollback() + return err + } + + if err := tx.Commit(); err != nil { + return fmt.Errorf("%s: commit: %w", operation, err) + } + return nil +} + +// operationContext bounds one read or write that does not need a +// transaction envelope (single statement). It mirrors store.withTx for +// non-transactional callers. +func (store *Store) operationContext(ctx context.Context, operation string) (context.Context, context.CancelFunc, error) { + return withTimeout(ctx, operation, store.operationTimeout) +} diff --git a/notification/internal/adapters/postgres/notificationstore/store_test.go b/notification/internal/adapters/postgres/notificationstore/store_test.go new file mode 100644 index 0000000..eb8c6f0 --- /dev/null +++ b/notification/internal/adapters/postgres/notificationstore/store_test.go @@ -0,0 +1,567 @@ +package notificationstore + +import ( + "context" + "errors" + "testing" + "time" + + "galaxy/notification/internal/api/intentstream" + "galaxy/notification/internal/service/acceptintent" + "galaxy/notification/internal/service/malformedintent" + "galaxy/notification/internal/service/routestate" +) + +func TestPing(t *testing.T) { + store := newTestStore(t) + if err := store.Ping(context.Background()); err != nil { + t.Fatalf("ping: %v", err) + } +} + +func TestCreateAcceptanceAndReads(t *testing.T) { + store := newTestStore(t) + ctx := 
context.Background() + now := time.Now().UTC().Truncate(time.Millisecond) + + notification := newNotification(t, "n-1", now) + pushRoute := newPendingRoute(notification.NotificationID, "push:user-1", intentstream.ChannelPush, "user-1", now) + emailRoute := newPendingRoute(notification.NotificationID, "email:user-1", intentstream.ChannelEmail, "user-1", now) + idem := newIdempotency(notification, now) + + if err := store.CreateAcceptance(ctx, acceptintent.CreateAcceptanceInput{ + Notification: notification, + Routes: []acceptintent.NotificationRoute{pushRoute, emailRoute}, + Idempotency: idem, + }); err != nil { + t.Fatalf("create acceptance: %v", err) + } + + gotNotification, found, err := store.GetNotification(ctx, notification.NotificationID) + if err != nil || !found { + t.Fatalf("get notification: found=%v err=%v", found, err) + } + if gotNotification.PayloadJSON != notification.PayloadJSON { + t.Fatalf("notification payload mismatch: got %q want %q", gotNotification.PayloadJSON, notification.PayloadJSON) + } + if len(gotNotification.RecipientUserIDs) != 1 || gotNotification.RecipientUserIDs[0] != "user-1" { + t.Fatalf("recipient_user_ids round-trip: %#v", gotNotification.RecipientUserIDs) + } + + gotIdem, found, err := store.GetIdempotency(ctx, notification.Producer, notification.IdempotencyKey) + if err != nil || !found { + t.Fatalf("get idempotency: found=%v err=%v", found, err) + } + if gotIdem.NotificationID != notification.NotificationID { + t.Fatalf("idempotency notification id mismatch: got %q want %q", gotIdem.NotificationID, notification.NotificationID) + } + if !gotIdem.ExpiresAt.Equal(idem.ExpiresAt) { + t.Fatalf("idempotency expires_at mismatch: got %v want %v", gotIdem.ExpiresAt, idem.ExpiresAt) + } + + gotRoute, found, err := store.GetRoute(ctx, notification.NotificationID, pushRoute.RouteID) + if err != nil || !found { + t.Fatalf("get push route: found=%v err=%v", found, err) + } + if gotRoute.Channel != intentstream.ChannelPush { + 
t.Fatalf("push route channel mismatch: got %q", gotRoute.Channel) + } + if !gotRoute.NextAttemptAt.Equal(pushRoute.NextAttemptAt) { + t.Fatalf("push route next_attempt_at mismatch: got %v want %v", gotRoute.NextAttemptAt, pushRoute.NextAttemptAt) + } +} + +func TestCreateAcceptanceIdempotencyConflict(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + now := time.Now().UTC().Truncate(time.Millisecond) + + notification := newNotification(t, "n-1", now) + route := newPendingRoute(notification.NotificationID, "push:user-1", intentstream.ChannelPush, "user-1", now) + + first := acceptintent.CreateAcceptanceInput{ + Notification: notification, + Routes: []acceptintent.NotificationRoute{route}, + Idempotency: newIdempotency(notification, now), + } + if err := store.CreateAcceptance(ctx, first); err != nil { + t.Fatalf("first acceptance: %v", err) + } + + clone := notification + clone.NotificationID = "n-2" + cloneRoute := route + cloneRoute.NotificationID = clone.NotificationID + clone.AcceptedAt = now.Add(time.Second) + clone.UpdatedAt = clone.AcceptedAt + cloneIdem := newIdempotency(clone, now.Add(time.Second)) + cloneIdem.IdempotencyKey = notification.IdempotencyKey + + err := store.CreateAcceptance(ctx, acceptintent.CreateAcceptanceInput{ + Notification: clone, + Routes: []acceptintent.NotificationRoute{cloneRoute}, + Idempotency: cloneIdem, + }) + if !errors.Is(err, acceptintent.ErrConflict) { + t.Fatalf("expected acceptintent.ErrConflict, got %v", err) + } +} + +func TestListDueRoutes(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + base := time.Now().UTC().Truncate(time.Millisecond) + + pastNotification := newNotification(t, "past", base) + pastRoute := newPendingRoute(pastNotification.NotificationID, "push:past", intentstream.ChannelPush, "user-1", base.Add(-time.Minute)) + if err := store.CreateAcceptance(ctx, acceptintent.CreateAcceptanceInput{ + Notification: pastNotification, + Routes: 
[]acceptintent.NotificationRoute{pastRoute}, + Idempotency: newIdempotency(pastNotification, base), + }); err != nil { + t.Fatalf("acceptance past: %v", err) + } + + futureNotification := newNotification(t, "future", base) + futureNotification.IdempotencyKey = "key-future" + futureRoute := newPendingRoute(futureNotification.NotificationID, "push:future", intentstream.ChannelPush, "user-2", base.Add(time.Hour)) + if err := store.CreateAcceptance(ctx, acceptintent.CreateAcceptanceInput{ + Notification: futureNotification, + Routes: []acceptintent.NotificationRoute{futureRoute}, + Idempotency: newIdempotency(futureNotification, base), + }); err != nil { + t.Fatalf("acceptance future: %v", err) + } + + due, err := store.ListDueRoutes(ctx, base, 10) + if err != nil { + t.Fatalf("list due routes: %v", err) + } + if len(due) != 1 { + t.Fatalf("expected one due route, got %d", len(due)) + } + if due[0].NotificationID != "past" || due[0].RouteID != "push:past" { + t.Fatalf("unexpected due route: %#v", due[0]) + } +} + +func TestCompleteRoutePublishedHappyPath(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + now := time.Now().UTC().Truncate(time.Millisecond) + + notification := newNotification(t, "n-1", now) + route := newPendingRoute(notification.NotificationID, "email:user-1", intentstream.ChannelEmail, "user-1", now) + if err := store.CreateAcceptance(ctx, acceptintent.CreateAcceptanceInput{ + Notification: notification, + Routes: []acceptintent.NotificationRoute{route}, + Idempotency: newIdempotency(notification, now), + }); err != nil { + t.Fatalf("acceptance: %v", err) + } + + publishedAt := now.Add(time.Second) + err := store.CompleteRoutePublished(ctx, routestate.CompleteRoutePublishedInput{ + ExpectedRoute: route, + LeaseToken: "token", + PublishedAt: publishedAt, + Stream: "mail:delivery_commands", + StreamValues: map[string]any{"k": "v"}, + }) + if err != nil { + t.Fatalf("complete published: %v", err) + } + + got, _, err := 
store.GetRoute(ctx, route.NotificationID, route.RouteID) + if err != nil { + t.Fatalf("get route: %v", err) + } + if got.Status != acceptintent.RouteStatusPublished { + t.Fatalf("expected status published, got %q", got.Status) + } + if got.AttemptCount != 1 { + t.Fatalf("expected attempt_count 1, got %d", got.AttemptCount) + } + if !got.NextAttemptAt.IsZero() { + t.Fatalf("expected next_attempt_at cleared, got %v", got.NextAttemptAt) + } + if !got.PublishedAt.Equal(publishedAt) { + t.Fatalf("expected published_at %v, got %v", publishedAt, got.PublishedAt) + } +} + +func TestCompleteRoutePublishedConflictOnUpdatedAtMismatch(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + now := time.Now().UTC().Truncate(time.Millisecond) + + notification := newNotification(t, "n-1", now) + route := newPendingRoute(notification.NotificationID, "email:user-1", intentstream.ChannelEmail, "user-1", now) + if err := store.CreateAcceptance(ctx, acceptintent.CreateAcceptanceInput{ + Notification: notification, + Routes: []acceptintent.NotificationRoute{route}, + Idempotency: newIdempotency(notification, now), + }); err != nil { + t.Fatalf("acceptance: %v", err) + } + + stale := route + stale.UpdatedAt = now.Add(-time.Minute) // mismatch on purpose + + err := store.CompleteRoutePublished(ctx, routestate.CompleteRoutePublishedInput{ + ExpectedRoute: stale, + LeaseToken: "token", + PublishedAt: now.Add(time.Second), + Stream: "mail:delivery_commands", + StreamValues: map[string]any{"k": "v"}, + }) + if !errors.Is(err, routestate.ErrConflict) { + t.Fatalf("expected routestate.ErrConflict, got %v", err) + } +} + +func TestCompleteRouteFailedReschedule(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + now := time.Now().UTC().Truncate(time.Millisecond) + + notification := newNotification(t, "n-1", now) + route := newPendingRoute(notification.NotificationID, "email:user-1", intentstream.ChannelEmail, "user-1", now) + if err := 
store.CreateAcceptance(ctx, acceptintent.CreateAcceptanceInput{ + Notification: notification, + Routes: []acceptintent.NotificationRoute{route}, + Idempotency: newIdempotency(notification, now), + }); err != nil { + t.Fatalf("acceptance: %v", err) + } + + failedAt := now.Add(time.Second) + nextAttemptAt := now.Add(2 * time.Minute) + err := store.CompleteRouteFailed(ctx, routestate.CompleteRouteFailedInput{ + ExpectedRoute: route, + LeaseToken: "token", + FailedAt: failedAt, + NextAttemptAt: nextAttemptAt, + FailureClassification: "smtp_temporary_failure", + FailureMessage: "graylisted", + }) + if err != nil { + t.Fatalf("complete failed: %v", err) + } + + got, _, err := store.GetRoute(ctx, route.NotificationID, route.RouteID) + if err != nil { + t.Fatalf("get route: %v", err) + } + if got.Status != acceptintent.RouteStatusFailed { + t.Fatalf("expected status failed, got %q", got.Status) + } + if got.AttemptCount != 1 { + t.Fatalf("expected attempt_count 1, got %d", got.AttemptCount) + } + if !got.NextAttemptAt.Equal(nextAttemptAt) { + t.Fatalf("expected next_attempt_at %v, got %v", nextAttemptAt, got.NextAttemptAt) + } + if got.LastErrorClassification != "smtp_temporary_failure" { + t.Fatalf("expected error classification, got %q", got.LastErrorClassification) + } +} + +func TestCompleteRouteDeadLetter(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + now := time.Now().UTC().Truncate(time.Millisecond) + + notification := newNotification(t, "n-1", now) + route := newPendingRoute(notification.NotificationID, "email:user-1", intentstream.ChannelEmail, "user-1", now) + route.MaxAttempts = 1 // single attempt budget so the first failure is terminal. 
+ if err := store.CreateAcceptance(ctx, acceptintent.CreateAcceptanceInput{ + Notification: notification, + Routes: []acceptintent.NotificationRoute{route}, + Idempotency: newIdempotency(notification, now), + }); err != nil { + t.Fatalf("acceptance: %v", err) + } + + deadAt := now.Add(time.Second) + err := store.CompleteRouteDeadLetter(ctx, routestate.CompleteRouteDeadLetterInput{ + ExpectedRoute: route, + LeaseToken: "token", + DeadLetteredAt: deadAt, + FailureClassification: "smtp_permanent_failure", + FailureMessage: "rejected", + RecoveryHint: "manual review", + }) + if err != nil { + t.Fatalf("complete dead letter: %v", err) + } + + got, _, err := store.GetRoute(ctx, route.NotificationID, route.RouteID) + if err != nil { + t.Fatalf("get route: %v", err) + } + if got.Status != acceptintent.RouteStatusDeadLetter { + t.Fatalf("expected status dead_letter, got %q", got.Status) + } + if !got.DeadLetteredAt.Equal(deadAt) { + t.Fatalf("expected dead_lettered_at %v, got %v", deadAt, got.DeadLetteredAt) + } + + // Check that the dead_letters audit row was inserted. 
+ row := store.db.QueryRow(`SELECT failure_classification, recovery_hint FROM dead_letters WHERE notification_id = $1 AND route_id = $2`, + route.NotificationID, route.RouteID) + var classification string + var hint string + if err := row.Scan(&classification, &hint); err != nil { + t.Fatalf("scan dead_letter row: %v", err) + } + if classification != "smtp_permanent_failure" || hint != "manual review" { + t.Fatalf("dead_letter row mismatch: classification=%q hint=%q", classification, hint) + } +} + +func TestReadRouteScheduleSnapshot(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + base := time.Now().UTC().Truncate(time.Millisecond) + + for index, offset := range []time.Duration{-time.Hour, time.Minute, 2 * time.Minute} { + notification := newNotification(t, idString("n-", index), base) + notification.IdempotencyKey = idString("key-", index) + route := newPendingRoute(notification.NotificationID, idString("push:user-", index), intentstream.ChannelPush, idString("user-", index), base.Add(offset)) + if err := store.CreateAcceptance(ctx, acceptintent.CreateAcceptanceInput{ + Notification: notification, + Routes: []acceptintent.NotificationRoute{route}, + Idempotency: newIdempotency(notification, base), + }); err != nil { + t.Fatalf("acceptance %d: %v", index, err) + } + } + + snap, err := store.ReadRouteScheduleSnapshot(ctx) + if err != nil { + t.Fatalf("read snapshot: %v", err) + } + if snap.Depth != 3 { + t.Fatalf("expected depth 3, got %d", snap.Depth) + } + if snap.OldestScheduledFor == nil { + t.Fatalf("expected oldest scheduled time, got nil") + } + if !snap.OldestScheduledFor.Equal(base.Add(-time.Hour)) { + t.Fatalf("expected oldest %v, got %v", base.Add(-time.Hour), *snap.OldestScheduledFor) + } +} + +func TestMalformedIntentRecordAndGet(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + now := time.Now().UTC().Truncate(time.Millisecond) + + entry := malformedintent.Entry{ + StreamEntryID: "stream-1", + 
NotificationType: "game.turn.ready", + Producer: "game-master", + IdempotencyKey: "key-1", + FailureCode: malformedintent.FailureCodeInvalidPayload, + FailureMessage: "decode failed", + RawFields: map[string]any{"raw_payload": "abc"}, + RecordedAt: now, + } + if err := store.Record(ctx, entry); err != nil { + t.Fatalf("record malformed: %v", err) + } + + // idempotent re-record + if err := store.Record(ctx, entry); err != nil { + t.Fatalf("record malformed twice: %v", err) + } + + got, found, err := store.GetMalformedIntent(ctx, entry.StreamEntryID) + if err != nil || !found { + t.Fatalf("get malformed: found=%v err=%v", found, err) + } + if got.FailureCode != malformedintent.FailureCodeInvalidPayload { + t.Fatalf("failure_code mismatch: %q", got.FailureCode) + } + if got.FailureMessage != entry.FailureMessage { + t.Fatalf("failure_message mismatch: %q", got.FailureMessage) + } +} + +func TestRetentionDeletesAndCascade(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + old := time.Now().UTC().Add(-30 * 24 * time.Hour).Truncate(time.Millisecond) + fresh := time.Now().UTC().Truncate(time.Millisecond) + + oldNotification := newNotification(t, "old", old) + oldNotification.IdempotencyKey = "key-old" + oldRoute := newPendingRoute(oldNotification.NotificationID, "push:user-old", intentstream.ChannelPush, "user-old", old) + oldRoute.MaxAttempts = 1 + if err := store.CreateAcceptance(ctx, acceptintent.CreateAcceptanceInput{ + Notification: oldNotification, + Routes: []acceptintent.NotificationRoute{oldRoute}, + Idempotency: newIdempotency(oldNotification, old), + }); err != nil { + t.Fatalf("acceptance old: %v", err) + } + if err := store.CompleteRouteDeadLetter(ctx, routestate.CompleteRouteDeadLetterInput{ + ExpectedRoute: oldRoute, + LeaseToken: "token", + DeadLetteredAt: old.Add(time.Second), + FailureClassification: "smtp_permanent_failure", + FailureMessage: "rejected", + }); err != nil { + t.Fatalf("dead letter old: %v", err) + } + + 
freshNotification := newNotification(t, "fresh", fresh) + freshNotification.IdempotencyKey = "key-fresh" + freshRoute := newPendingRoute(freshNotification.NotificationID, "push:user-fresh", intentstream.ChannelPush, "user-fresh", fresh) + if err := store.CreateAcceptance(ctx, acceptintent.CreateAcceptanceInput{ + Notification: freshNotification, + Routes: []acceptintent.NotificationRoute{freshRoute}, + Idempotency: newIdempotency(freshNotification, fresh), + }); err != nil { + t.Fatalf("acceptance fresh: %v", err) + } + + cutoff := time.Now().UTC().Add(-7 * 24 * time.Hour) + deleted, err := store.DeleteRecordsOlderThan(ctx, cutoff) + if err != nil { + t.Fatalf("delete records: %v", err) + } + if deleted != 1 { + t.Fatalf("expected 1 deleted, got %d", deleted) + } + + if _, found, err := store.GetNotification(ctx, "old"); err != nil || found { + t.Fatalf("old notification should be gone: found=%v err=%v", found, err) + } + + // Confirm cascade emptied routes/dead_letters for old notification. + var routeCount int + if err := store.db.QueryRow(`SELECT COUNT(*) FROM routes WHERE notification_id = 'old'`).Scan(&routeCount); err != nil { + t.Fatalf("count routes: %v", err) + } + if routeCount != 0 { + t.Fatalf("expected 0 cascaded routes, got %d", routeCount) + } + var deadCount int + if err := store.db.QueryRow(`SELECT COUNT(*) FROM dead_letters WHERE notification_id = 'old'`).Scan(&deadCount); err != nil { + t.Fatalf("count dead letters: %v", err) + } + if deadCount != 0 { + t.Fatalf("expected 0 cascaded dead letters, got %d", deadCount) + } + + // Fresh notification stays. 
+ if _, found, err := store.GetNotification(ctx, "fresh"); err != nil || !found { + t.Fatalf("fresh notification missing: found=%v err=%v", found, err) + } +} + +func TestDeleteMalformedIntentsOlderThan(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + old := time.Now().UTC().Add(-30 * 24 * time.Hour).Truncate(time.Millisecond) + fresh := time.Now().UTC().Truncate(time.Millisecond) + + oldEntry := malformedintent.Entry{ + StreamEntryID: "stream-old", + FailureCode: malformedintent.FailureCodeInvalidPayload, + FailureMessage: "decode failed", + RawFields: map[string]any{}, + RecordedAt: old, + } + if err := store.Record(ctx, oldEntry); err != nil { + t.Fatalf("record old: %v", err) + } + freshEntry := malformedintent.Entry{ + StreamEntryID: "stream-fresh", + FailureCode: malformedintent.FailureCodeInvalidPayload, + FailureMessage: "decode failed", + RawFields: map[string]any{}, + RecordedAt: fresh, + } + if err := store.Record(ctx, freshEntry); err != nil { + t.Fatalf("record fresh: %v", err) + } + + cutoff := time.Now().UTC().Add(-7 * 24 * time.Hour) + deleted, err := store.DeleteMalformedIntentsOlderThan(ctx, cutoff) + if err != nil { + t.Fatalf("delete: %v", err) + } + if deleted != 1 { + t.Fatalf("expected 1 deleted, got %d", deleted) + } + + if _, found, err := store.GetMalformedIntent(ctx, "stream-old"); err != nil || found { + t.Fatalf("old malformed intent should be gone: found=%v err=%v", found, err) + } + if _, found, err := store.GetMalformedIntent(ctx, "stream-fresh"); err != nil || !found { + t.Fatalf("fresh malformed intent missing: found=%v err=%v", found, err) + } +} + +// ---- helpers ---- + +func newNotification(t testing.TB, id string, occurred time.Time) acceptintent.NotificationRecord { + t.Helper() + return acceptintent.NotificationRecord{ + NotificationID: id, + NotificationType: intentstream.NotificationTypeGameTurnReady, + Producer: intentstream.ProducerGameMaster, + AudienceKind: intentstream.AudienceKindUser, + 
RecipientUserIDs: []string{"user-1"}, + PayloadJSON: `{"a":1}`, + IdempotencyKey: "key-" + id, + RequestFingerprint: "fp-" + id, + OccurredAt: occurred, + AcceptedAt: occurred, + UpdatedAt: occurred, + } +} + +func newIdempotency(record acceptintent.NotificationRecord, createdAt time.Time) acceptintent.IdempotencyRecord { + return acceptintent.IdempotencyRecord{ + Producer: record.Producer, + IdempotencyKey: record.IdempotencyKey, + NotificationID: record.NotificationID, + RequestFingerprint: record.RequestFingerprint, + CreatedAt: createdAt, + ExpiresAt: createdAt.Add(7 * 24 * time.Hour), + } +} + +func newPendingRoute(notificationID string, routeID string, channel intentstream.Channel, recipient string, dueAt time.Time) acceptintent.NotificationRoute { + return acceptintent.NotificationRoute{ + NotificationID: notificationID, + RouteID: routeID, + Channel: channel, + RecipientRef: "user:" + recipient, + Status: acceptintent.RouteStatusPending, + AttemptCount: 0, + MaxAttempts: 3, + NextAttemptAt: dueAt, + ResolvedEmail: recipient + "@example.com", + ResolvedLocale: "en", + CreatedAt: dueAt, + UpdatedAt: dueAt, + } +} + +func idString(prefix string, index int) string { + switch index { + case 0: + return prefix + "0" + case 1: + return prefix + "1" + case 2: + return prefix + "2" + default: + return prefix + "n" + } +} diff --git a/notification/internal/adapters/postgres/routepublisher/store.go b/notification/internal/adapters/postgres/routepublisher/store.go new file mode 100644 index 0000000..733a282 --- /dev/null +++ b/notification/internal/adapters/postgres/routepublisher/store.go @@ -0,0 +1,86 @@ +// Package routepublisher composes one PostgreSQL-backed route-state store +// (notificationstore) with one Redis-backed lease store (redisstate.LeaseStore) +// behind the publisher worker contracts. 
The composition lets push and email +// publishers keep their existing one-store dependency while Stage 5 of +// `PG_PLAN.md` splits durable state to PostgreSQL and the short-lived +// per-replica exclusivity lease to Redis. +package routepublisher + +import ( + "context" + "errors" + "time" + + "galaxy/notification/internal/adapters/postgres/notificationstore" + "galaxy/notification/internal/adapters/redisstate" + "galaxy/notification/internal/service/acceptintent" + "galaxy/notification/internal/service/routestate" + "galaxy/notification/internal/telemetry" +) + +// Store delegates each route-publisher method to either the durable state +// store (PostgreSQL) or the lease store (Redis), preserving the umbrella +// contract consumed by `worker.PushPublisher` and `worker.EmailPublisher`. +type Store struct { + state *notificationstore.Store + leases *redisstate.LeaseStore +} + +// New constructs one composite route-publisher store. Both dependencies are +// required: the SQL store owns route lifecycle and dead-letter persistence, +// and the lease store owns the short-lived per-replica exclusivity hint +// retained on Redis per PG_PLAN.md §5. +func New(state *notificationstore.Store, leases *redisstate.LeaseStore) (*Store, error) { + if state == nil { + return nil, errors.New("new route publisher store: nil notification state store") + } + if leases == nil { + return nil, errors.New("new route publisher store: nil lease store") + } + return &Store{state: state, leases: leases}, nil +} + +// ListDueRoutes delegates to the SQL store. +func (store *Store) ListDueRoutes(ctx context.Context, now time.Time, limit int64) ([]routestate.ScheduledRoute, error) { + return store.state.ListDueRoutes(ctx, now, limit) +} + +// TryAcquireRouteLease delegates to the Redis lease store. 
+func (store *Store) TryAcquireRouteLease(ctx context.Context, notificationID string, routeID string, token string, ttl time.Duration) (bool, error) { + return store.leases.TryAcquireRouteLease(ctx, notificationID, routeID, token, ttl) +} + +// ReleaseRouteLease delegates to the Redis lease store. +func (store *Store) ReleaseRouteLease(ctx context.Context, notificationID string, routeID string, token string) error { + return store.leases.ReleaseRouteLease(ctx, notificationID, routeID, token) +} + +// GetNotification delegates to the SQL store. +func (store *Store) GetNotification(ctx context.Context, notificationID string) (acceptintent.NotificationRecord, bool, error) { + return store.state.GetNotification(ctx, notificationID) +} + +// GetRoute delegates to the SQL store. +func (store *Store) GetRoute(ctx context.Context, notificationID string, routeID string) (acceptintent.NotificationRoute, bool, error) { + return store.state.GetRoute(ctx, notificationID, routeID) +} + +// CompleteRoutePublished delegates to the SQL store. +func (store *Store) CompleteRoutePublished(ctx context.Context, input routestate.CompleteRoutePublishedInput) error { + return store.state.CompleteRoutePublished(ctx, input) +} + +// CompleteRouteFailed delegates to the SQL store. +func (store *Store) CompleteRouteFailed(ctx context.Context, input routestate.CompleteRouteFailedInput) error { + return store.state.CompleteRouteFailed(ctx, input) +} + +// CompleteRouteDeadLetter delegates to the SQL store. +func (store *Store) CompleteRouteDeadLetter(ctx context.Context, input routestate.CompleteRouteDeadLetterInput) error { + return store.state.CompleteRouteDeadLetter(ctx, input) +} + +// ReadRouteScheduleSnapshot delegates to the SQL store. 
+func (store *Store) ReadRouteScheduleSnapshot(ctx context.Context) (telemetry.RouteScheduleSnapshot, error) { + return store.state.ReadRouteScheduleSnapshot(ctx) +} diff --git a/notification/internal/adapters/redis/client.go b/notification/internal/adapters/redis/client.go index ba9a9f9..aeebf10 100644 --- a/notification/internal/adapters/redis/client.go +++ b/notification/internal/adapters/redis/client.go @@ -1,5 +1,6 @@ // Package redisadapter provides the Redis client helpers used by Notification -// Service runtime wiring. +// Service runtime wiring. The helpers wrap `pkg/redisconn` so the runtime +// keeps the same construction surface across the Stage 5 migration. package redisadapter import ( @@ -8,27 +9,21 @@ import ( "galaxy/notification/internal/config" "galaxy/notification/internal/telemetry" + "galaxy/redisconn" "github.com/redis/go-redis/extra/redisotel/v9" "github.com/redis/go-redis/v9" ) -// NewClient constructs one Redis client from cfg. +// NewClient constructs one Redis client from cfg using the shared +// `pkg/redisconn` helper, which enforces the master/replica/password env-var +// shape. func NewClient(cfg config.RedisConfig) *redis.Client { - return redis.NewClient(&redis.Options{ - Addr: cfg.Addr, - Username: cfg.Username, - Password: cfg.Password, - DB: cfg.DB, - TLSConfig: cfg.TLSConfig(), - DialTimeout: cfg.OperationTimeout, - ReadTimeout: cfg.OperationTimeout, - WriteTimeout: cfg.OperationTimeout, - }) + return redisconn.NewMasterClient(cfg.Conn) } -// InstrumentClient attaches Redis tracing and metrics exporters to client when -// telemetryRuntime is available. +// InstrumentClient attaches Redis tracing and metrics exporters to client +// when telemetryRuntime is available. 
func InstrumentClient(client *redis.Client, telemetryRuntime *telemetry.Runtime) error { if client == nil { return fmt.Errorf("instrument redis client: nil client") @@ -55,13 +50,13 @@ func InstrumentClient(client *redis.Client, telemetryRuntime *telemetry.Runtime) } // Ping performs the startup Redis connectivity check bounded by -// cfg.OperationTimeout. +// cfg.Conn.OperationTimeout. func Ping(ctx context.Context, cfg config.RedisConfig, client *redis.Client) error { if client == nil { return fmt.Errorf("ping redis: nil client") } - pingCtx, cancel := context.WithTimeout(ctx, cfg.OperationTimeout) + pingCtx, cancel := context.WithTimeout(ctx, cfg.Conn.OperationTimeout) defer cancel() if err := client.Ping(pingCtx).Err(); err != nil { diff --git a/notification/internal/adapters/redisstate/acceptance_store.go b/notification/internal/adapters/redisstate/acceptance_store.go deleted file mode 100644 index 4a75625..0000000 --- a/notification/internal/adapters/redisstate/acceptance_store.go +++ /dev/null @@ -1,140 +0,0 @@ -package redisstate - -import ( - "context" - "errors" - "fmt" - - "galaxy/notification/internal/api/intentstream" - "galaxy/notification/internal/service/acceptintent" - - "github.com/redis/go-redis/v9" -) - -// AcceptanceStore provides the Redis-backed durable storage used by the -// intent-acceptance use case. -type AcceptanceStore struct { - client *redis.Client - writer *AtomicWriter - keys Keyspace - cfg AcceptanceConfig -} - -// NewAcceptanceStore constructs one Redis-backed acceptance store. 
-func NewAcceptanceStore(client *redis.Client, cfg AcceptanceConfig) (*AcceptanceStore, error) { - if client == nil { - return nil, errors.New("new notification acceptance store: nil redis client") - } - - writer, err := NewAtomicWriter(client, cfg) - if err != nil { - return nil, fmt.Errorf("new notification acceptance store: %w", err) - } - - return &AcceptanceStore{ - client: client, - writer: writer, - keys: Keyspace{}, - cfg: cfg, - }, nil -} - -// CreateAcceptance stores one complete accepted notification write set in -// Redis. -func (store *AcceptanceStore) CreateAcceptance(ctx context.Context, input acceptintent.CreateAcceptanceInput) error { - if store == nil || store.client == nil || store.writer == nil { - return errors.New("create notification acceptance: nil store") - } - if ctx == nil { - return errors.New("create notification acceptance: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("create notification acceptance: %w", err) - } - - err := store.writer.CreateAcceptance(ctx, input) - if errors.Is(err, ErrConflict) { - return fmt.Errorf("create notification acceptance: %w", acceptintent.ErrConflict) - } - if err != nil { - return fmt.Errorf("create notification acceptance: %w", err) - } - - return nil -} - -// GetIdempotency loads one accepted idempotency scope from Redis. 
-func (store *AcceptanceStore) GetIdempotency(ctx context.Context, producer intentstream.Producer, idempotencyKey string) (acceptintent.IdempotencyRecord, bool, error) { - if store == nil || store.client == nil { - return acceptintent.IdempotencyRecord{}, false, errors.New("get notification idempotency: nil store") - } - if ctx == nil { - return acceptintent.IdempotencyRecord{}, false, errors.New("get notification idempotency: nil context") - } - - payload, err := store.client.Get(ctx, store.keys.Idempotency(producer, idempotencyKey)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return acceptintent.IdempotencyRecord{}, false, nil - case err != nil: - return acceptintent.IdempotencyRecord{}, false, fmt.Errorf("get notification idempotency: %w", err) - } - - record, err := UnmarshalIdempotency(payload) - if err != nil { - return acceptintent.IdempotencyRecord{}, false, fmt.Errorf("get notification idempotency: %w", err) - } - - return record, true, nil -} - -// GetNotification loads one accepted notification record from Redis. 
-func (store *AcceptanceStore) GetNotification(ctx context.Context, notificationID string) (acceptintent.NotificationRecord, bool, error) { - if store == nil || store.client == nil { - return acceptintent.NotificationRecord{}, false, errors.New("get notification record: nil store") - } - if ctx == nil { - return acceptintent.NotificationRecord{}, false, errors.New("get notification record: nil context") - } - - payload, err := store.client.Get(ctx, store.keys.Notification(notificationID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return acceptintent.NotificationRecord{}, false, nil - case err != nil: - return acceptintent.NotificationRecord{}, false, fmt.Errorf("get notification record: %w", err) - } - - record, err := UnmarshalNotification(payload) - if err != nil { - return acceptintent.NotificationRecord{}, false, fmt.Errorf("get notification record: %w", err) - } - - return record, true, nil -} - -// GetRoute loads one accepted notification route by NotificationID and -// RouteID. 
-func (store *AcceptanceStore) GetRoute(ctx context.Context, notificationID string, routeID string) (acceptintent.NotificationRoute, bool, error) { - if store == nil || store.client == nil { - return acceptintent.NotificationRoute{}, false, errors.New("get notification route: nil store") - } - if ctx == nil { - return acceptintent.NotificationRoute{}, false, errors.New("get notification route: nil context") - } - - payload, err := store.client.Get(ctx, store.keys.Route(notificationID, routeID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return acceptintent.NotificationRoute{}, false, nil - case err != nil: - return acceptintent.NotificationRoute{}, false, fmt.Errorf("get notification route: %w", err) - } - - record, err := UnmarshalRoute(payload) - if err != nil { - return acceptintent.NotificationRoute{}, false, fmt.Errorf("get notification route: %w", err) - } - - return record, true, nil -} diff --git a/notification/internal/adapters/redisstate/acceptance_store_test.go b/notification/internal/adapters/redisstate/acceptance_store_test.go deleted file mode 100644 index d4e8793..0000000 --- a/notification/internal/adapters/redisstate/acceptance_store_test.go +++ /dev/null @@ -1,311 +0,0 @@ -package redisstate - -import ( - "context" - "io" - "log/slog" - "testing" - "time" - - "galaxy/notification/internal/api/intentstream" - "galaxy/notification/internal/config" - "galaxy/notification/internal/service/acceptintent" - "galaxy/notification/internal/service/malformedintent" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/require" -) - -func TestAcceptanceStoreCreateAcceptancePersistsNotificationRoutesAndSchedule(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := newTestRedisClient(t, server) - - store, err := NewAcceptanceStore(client, AcceptanceConfig{ - RecordTTL: 24 * time.Hour, - DeadLetterTTL: 72 * time.Hour, - IdempotencyTTL: 7 * 24 * time.Hour, - }) - require.NoError(t, 
err) - - now := time.UnixMilli(1775121700000).UTC() - input := validAdminAcceptanceInput(now) - - require.NoError(t, store.CreateAcceptance(context.Background(), input)) - - notificationRecord, found, err := store.GetNotification(context.Background(), input.Notification.NotificationID) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, input.Notification.NotificationID, notificationRecord.NotificationID) - - idempotencyRecord, found, err := store.GetIdempotency(context.Background(), input.Idempotency.Producer, input.Idempotency.IdempotencyKey) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, input.Idempotency.RequestFingerprint, idempotencyRecord.RequestFingerprint) - - pushRoutePayload, err := client.Get(context.Background(), Keyspace{}.Route(input.Notification.NotificationID, "push:email:owner@example.com")).Bytes() - require.NoError(t, err) - pushRoute, err := UnmarshalRoute(pushRoutePayload) - require.NoError(t, err) - require.Equal(t, acceptintent.RouteStatusSkipped, pushRoute.Status) - - emailRouteKey := Keyspace{}.Route(input.Notification.NotificationID, "email:email:owner@example.com") - emailRoutePayload, err := client.Get(context.Background(), emailRouteKey).Bytes() - require.NoError(t, err) - emailRoute, err := UnmarshalRoute(emailRoutePayload) - require.NoError(t, err) - require.Equal(t, acceptintent.RouteStatusPending, emailRoute.Status) - - scheduled, err := client.ZRangeWithScores(context.Background(), Keyspace{}.RouteSchedule(), 0, -1).Result() - require.NoError(t, err) - require.Len(t, scheduled, 1) - require.Equal(t, emailRouteKey, scheduled[0].Member) - require.Equal(t, float64(now.UnixMilli()), scheduled[0].Score) - - notificationTTL, err := client.PTTL(context.Background(), Keyspace{}.Notification(input.Notification.NotificationID)).Result() - require.NoError(t, err) - require.Greater(t, notificationTTL, 23*time.Hour) - require.LessOrEqual(t, notificationTTL, 24*time.Hour) - - routeTTL, err := 
client.PTTL(context.Background(), emailRouteKey).Result() - require.NoError(t, err) - require.Greater(t, routeTTL, 23*time.Hour) - require.LessOrEqual(t, routeTTL, 24*time.Hour) - - idempotencyTTL, err := client.PTTL(context.Background(), Keyspace{}.Idempotency(input.Idempotency.Producer, input.Idempotency.IdempotencyKey)).Result() - require.NoError(t, err) - require.Greater(t, idempotencyTTL, 6*24*time.Hour) - require.LessOrEqual(t, idempotencyTTL, 7*24*time.Hour) -} - -func TestMalformedIntentStoreRecordPersistsEntry(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := newTestRedisClient(t, server) - - store, err := NewMalformedIntentStore(client, 72*time.Hour) - require.NoError(t, err) - - entry := malformedintent.Entry{ - StreamEntryID: "1775121700000-0", - NotificationType: "game.turn.ready", - Producer: "game_master", - IdempotencyKey: "game-123:turn-54", - FailureCode: malformedintent.FailureCodeInvalidPayload, - FailureMessage: "payload_json.turn_number is required", - RawFields: map[string]any{ - "notification_type": "game.turn.ready", - }, - RecordedAt: time.UnixMilli(1775121700000).UTC(), - } - - require.NoError(t, store.Record(context.Background(), entry)) - - payload, err := client.Get(context.Background(), Keyspace{}.MalformedIntent(entry.StreamEntryID)).Bytes() - require.NoError(t, err) - recordedEntry, err := UnmarshalMalformedIntent(payload) - require.NoError(t, err) - require.Equal(t, entry.StreamEntryID, recordedEntry.StreamEntryID) - require.Equal(t, entry.FailureCode, recordedEntry.FailureCode) - - ttl, err := client.PTTL(context.Background(), Keyspace{}.MalformedIntent(entry.StreamEntryID)).Result() - require.NoError(t, err) - require.Greater(t, ttl, 71*time.Hour) - require.LessOrEqual(t, ttl, 72*time.Hour) -} - -func TestStreamOffsetStoreLoadAndSave(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := newTestRedisClient(t, server) - - store, err := NewStreamOffsetStore(client) - require.NoError(t, 
err) - - _, found, err := store.Load(context.Background(), "notification:intents") - require.NoError(t, err) - require.False(t, found) - - require.NoError(t, store.Save(context.Background(), "notification:intents", "1775121700000-0")) - - entryID, found, err := store.Load(context.Background(), "notification:intents") - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "1775121700000-0", entryID) -} - -func TestIntentStreamLagReaderReadsOldestUnprocessedEntry(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := newTestRedisClient(t, server) - - store, err := NewStreamOffsetStore(client) - require.NoError(t, err) - reader, err := NewIntentStreamLagReader(store, "notification:intents") - require.NoError(t, err) - - firstID, err := client.XAdd(context.Background(), &redis.XAddArgs{ - Stream: "notification:intents", - ID: "1775121700000-0", - Values: map[string]any{"payload": "first"}, - }).Result() - require.NoError(t, err) - secondID, err := client.XAdd(context.Background(), &redis.XAddArgs{ - Stream: "notification:intents", - ID: "1775121701000-0", - Values: map[string]any{"payload": "second"}, - }).Result() - require.NoError(t, err) - - snapshot, err := reader.ReadIntentStreamLagSnapshot(context.Background()) - require.NoError(t, err) - require.NotNil(t, snapshot.OldestUnprocessedAt) - require.Equal(t, time.UnixMilli(1775121700000).UTC(), *snapshot.OldestUnprocessedAt) - - require.NoError(t, store.Save(context.Background(), "notification:intents", firstID)) - snapshot, err = reader.ReadIntentStreamLagSnapshot(context.Background()) - require.NoError(t, err) - require.NotNil(t, snapshot.OldestUnprocessedAt) - require.Equal(t, time.UnixMilli(1775121701000).UTC(), *snapshot.OldestUnprocessedAt) - - require.NoError(t, store.Save(context.Background(), "notification:intents", secondID)) - snapshot, err = reader.ReadIntentStreamLagSnapshot(context.Background()) - require.NoError(t, err) - require.Nil(t, snapshot.OldestUnprocessedAt) 
-} - -func TestAcceptanceStoreWorksWithAcceptIntentService(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := newTestRedisClient(t, server) - - store, err := NewAcceptanceStore(client, AcceptanceConfig{ - RecordTTL: 24 * time.Hour, - DeadLetterTTL: 72 * time.Hour, - IdempotencyTTL: 7 * 24 * time.Hour, - }) - require.NoError(t, err) - - service, err := acceptintent.New(acceptintent.Config{ - Store: store, - UserDirectory: staticUserDirectory{}, - Clock: fixedClock{now: time.UnixMilli(1775121700000).UTC()}, - Logger: slog.New(slog.NewTextHandler(io.Discard, nil)), - PushMaxAttempts: 3, - EmailMaxAttempts: 7, - IdempotencyTTL: 7 * 24 * time.Hour, - AdminRouting: config.AdminRoutingConfig{ - LobbyApplicationSubmitted: []string{"owner@example.com"}, - }, - }) - require.NoError(t, err) - - result, err := service.Execute(context.Background(), acceptintent.AcceptInput{ - NotificationID: "1775121700000-0", - Intent: intentstream.Intent{ - NotificationType: intentstream.NotificationTypeLobbyApplicationSubmitted, - Producer: intentstream.ProducerGameLobby, - AudienceKind: intentstream.AudienceKindAdminEmail, - IdempotencyKey: "game-456:application-submitted:user-42", - OccurredAt: time.UnixMilli(1775121700002).UTC(), - PayloadJSON: `{"applicant_name":"Nova Pilot","applicant_user_id":"user-42","game_id":"game-456","game_name":"Orion Front"}`, - }, - }) - require.NoError(t, err) - require.Equal(t, acceptintent.OutcomeAccepted, result.Outcome) - - record, found, err := store.GetNotification(context.Background(), "1775121700000-0") - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "1775121700000-0", record.NotificationID) -} - -type fixedClock struct { - now time.Time -} - -func (clock fixedClock) Now() time.Time { - return clock.now -} - -func validAdminAcceptanceInput(now time.Time) acceptintent.CreateAcceptanceInput { - return acceptintent.CreateAcceptanceInput{ - Notification: acceptintent.NotificationRecord{ - NotificationID: 
"1775121700000-0", - NotificationType: intentstream.NotificationTypeLobbyApplicationSubmitted, - Producer: intentstream.ProducerGameLobby, - AudienceKind: intentstream.AudienceKindAdminEmail, - PayloadJSON: `{"applicant_name":"Nova Pilot","applicant_user_id":"user-42","game_id":"game-456","game_name":"Orion Front"}`, - IdempotencyKey: "game-456:application-submitted:user-42", - RequestFingerprint: "sha256:deadbeef", - OccurredAt: now, - AcceptedAt: now, - UpdatedAt: now, - }, - Routes: []acceptintent.NotificationRoute{ - { - NotificationID: "1775121700000-0", - RouteID: "push:email:owner@example.com", - Channel: intentstream.ChannelPush, - RecipientRef: "email:owner@example.com", - Status: acceptintent.RouteStatusSkipped, - AttemptCount: 0, - MaxAttempts: 3, - ResolvedEmail: "owner@example.com", - ResolvedLocale: "en", - CreatedAt: now, - UpdatedAt: now, - SkippedAt: now, - }, - { - NotificationID: "1775121700000-0", - RouteID: "email:email:owner@example.com", - Channel: intentstream.ChannelEmail, - RecipientRef: "email:owner@example.com", - Status: acceptintent.RouteStatusPending, - AttemptCount: 0, - MaxAttempts: 7, - NextAttemptAt: now, - ResolvedEmail: "owner@example.com", - ResolvedLocale: "en", - CreatedAt: now, - UpdatedAt: now, - }, - }, - Idempotency: acceptintent.IdempotencyRecord{ - Producer: intentstream.ProducerGameLobby, - IdempotencyKey: "game-456:application-submitted:user-42", - NotificationID: "1775121700000-0", - RequestFingerprint: "sha256:deadbeef", - CreatedAt: now, - ExpiresAt: now.Add(7 * 24 * time.Hour), - }, - } -} - -func newTestRedisClient(t *testing.T, server *miniredis.Miniredis) *redis.Client { - t.Helper() - - client := redis.NewClient(&redis.Options{ - Addr: server.Addr(), - Protocol: 2, - DisableIdentity: true, - }) - t.Cleanup(func() { - require.NoError(t, client.Close()) - }) - - return client -} - -type staticUserDirectory struct{} - -func (staticUserDirectory) GetUserByID(context.Context, string) (acceptintent.UserRecord, 
error) { - return acceptintent.UserRecord{}, acceptintent.ErrRecipientNotFound -} diff --git a/notification/internal/adapters/redisstate/atomic_writer.go b/notification/internal/adapters/redisstate/atomic_writer.go deleted file mode 100644 index 6e6f660..0000000 --- a/notification/internal/adapters/redisstate/atomic_writer.go +++ /dev/null @@ -1,157 +0,0 @@ -package redisstate - -import ( - "context" - "errors" - "fmt" - "time" - - "galaxy/notification/internal/service/acceptintent" - - "github.com/redis/go-redis/v9" -) - -// AcceptanceConfig stores the retention settings applied to accepted durable -// notification state. -type AcceptanceConfig struct { - // RecordTTL stores the retention period applied to notification and route - // records. - RecordTTL time.Duration - - // DeadLetterTTL stores the retention period applied to route dead-letter - // entries. - DeadLetterTTL time.Duration - - // IdempotencyTTL stores the retention period applied to idempotency - // reservations. - IdempotencyTTL time.Duration -} - -// Validate reports whether cfg contains usable retention settings. -func (cfg AcceptanceConfig) Validate() error { - switch { - case cfg.RecordTTL <= 0: - return fmt.Errorf("record ttl must be positive") - case cfg.DeadLetterTTL <= 0: - return fmt.Errorf("dead-letter ttl must be positive") - case cfg.IdempotencyTTL <= 0: - return fmt.Errorf("idempotency ttl must be positive") - default: - return nil - } -} - -// AtomicWriter performs the minimal multi-key Redis mutations required by -// notification intent acceptance. -type AtomicWriter struct { - client *redis.Client - keys Keyspace - cfg AcceptanceConfig -} - -// NewAtomicWriter constructs a low-level Redis mutation helper. 
-func NewAtomicWriter(client *redis.Client, cfg AcceptanceConfig) (*AtomicWriter, error) { - if client == nil { - return nil, errors.New("new notification redis atomic writer: nil client") - } - if err := cfg.Validate(); err != nil { - return nil, fmt.Errorf("new notification redis atomic writer: %w", err) - } - - return &AtomicWriter{ - client: client, - keys: Keyspace{}, - cfg: cfg, - }, nil -} - -// CreateAcceptance stores one notification record, all derived routes, and -// the matching idempotency reservation in one optimistic Redis transaction. -func (writer *AtomicWriter) CreateAcceptance(ctx context.Context, input acceptintent.CreateAcceptanceInput) error { - if writer == nil || writer.client == nil { - return errors.New("create notification acceptance in redis: nil writer") - } - if ctx == nil { - return errors.New("create notification acceptance in redis: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("create notification acceptance in redis: %w", err) - } - - notificationPayload, err := MarshalNotification(input.Notification) - if err != nil { - return fmt.Errorf("create notification acceptance in redis: %w", err) - } - idempotencyPayload, err := MarshalIdempotency(input.Idempotency) - if err != nil { - return fmt.Errorf("create notification acceptance in redis: %w", err) - } - - routePayloads := make([][]byte, len(input.Routes)) - routeKeys := make([]string, len(input.Routes)) - scheduledRouteKeys := make([]string, 0, len(input.Routes)) - scheduledRouteScores := make([]float64, 0, len(input.Routes)) - for index, route := range input.Routes { - payload, err := MarshalRoute(route) - if err != nil { - return fmt.Errorf("create notification acceptance in redis: route %d: %w", index, err) - } - routePayloads[index] = payload - routeKeys[index] = writer.keys.Route(route.NotificationID, route.RouteID) - if route.Status == acceptintent.RouteStatusPending { - scheduledRouteKeys = append(scheduledRouteKeys, routeKeys[index]) - 
scheduledRouteScores = append(scheduledRouteScores, float64(route.NextAttemptAt.UTC().UnixMilli())) - } - } - - notificationKey := writer.keys.Notification(input.Notification.NotificationID) - idempotencyKey := writer.keys.Idempotency(input.Idempotency.Producer, input.Idempotency.IdempotencyKey) - watchKeys := append([]string{notificationKey, idempotencyKey}, routeKeys...) - - watchErr := writer.client.Watch(ctx, func(tx *redis.Tx) error { - for _, key := range watchKeys { - if err := ensureKeyAbsent(ctx, tx, key); err != nil { - return err - } - } - - _, err := tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - pipe.Set(ctx, notificationKey, notificationPayload, writer.cfg.RecordTTL) - pipe.Set(ctx, idempotencyKey, idempotencyPayload, writer.cfg.IdempotencyTTL) - for index, routeKey := range routeKeys { - pipe.Set(ctx, routeKey, routePayloads[index], writer.cfg.RecordTTL) - } - for index, routeKey := range scheduledRouteKeys { - pipe.ZAdd(ctx, writer.keys.RouteSchedule(), redis.Z{ - Score: scheduledRouteScores[index], - Member: routeKey, - }) - } - - return nil - }) - - return err - }, watchKeys...) 
- - switch { - case errors.Is(watchErr, ErrConflict), errors.Is(watchErr, redis.TxFailedErr): - return ErrConflict - case watchErr != nil: - return fmt.Errorf("create notification acceptance in redis: %w", watchErr) - default: - return nil - } -} - -func ensureKeyAbsent(ctx context.Context, tx *redis.Tx, key string) error { - exists, err := tx.Exists(ctx, key).Result() - if err != nil { - return err - } - if exists > 0 { - return ErrConflict - } - - return nil -} diff --git a/notification/internal/adapters/redisstate/codecs.go b/notification/internal/adapters/redisstate/codecs.go index 65979a6..90e3c24 100644 --- a/notification/internal/adapters/redisstate/codecs.go +++ b/notification/internal/adapters/redisstate/codecs.go @@ -6,10 +6,6 @@ import ( "fmt" "io" "time" - - "galaxy/notification/internal/api/intentstream" - "galaxy/notification/internal/service/acceptintent" - "galaxy/notification/internal/service/malformedintent" ) // StreamOffset stores the persisted progress of the plain-XREAD intent @@ -18,412 +14,14 @@ type StreamOffset struct { // Stream stores the Redis Stream name. Stream string - // LastProcessedEntryID stores the last durably processed Redis Stream entry - // identifier. + // LastProcessedEntryID stores the last durably processed Redis Stream + // entry identifier. LastProcessedEntryID string // UpdatedAt stores when the offset record was last updated. UpdatedAt time.Time } -// DeadLetterEntry stores one terminal route-publication failure recorded for -// later operator inspection. -type DeadLetterEntry struct { - // NotificationID stores the owning notification identifier. - NotificationID string - - // RouteID stores the exhausted route identifier. - RouteID string - - // Channel stores the failed route channel. - Channel intentstream.Channel - - // RecipientRef stores the stable failed recipient slot identifier. - RecipientRef string - - // FinalAttemptCount stores how many publication attempts were consumed. 
- FinalAttemptCount int - - // MaxAttempts stores the configured retry budget for Channel. - MaxAttempts int - - // FailureClassification stores the stable classified failure reason. - FailureClassification string - - // FailureMessage stores the last failure detail. - FailureMessage string - - // CreatedAt stores when the route moved to dead_letter. - CreatedAt time.Time - - // RecoveryHint stores the optional operator-facing recovery hint. - RecoveryHint string -} - -type notificationRecordJSON struct { - NotificationID string `json:"notification_id"` - NotificationType intentstream.NotificationType `json:"notification_type"` - Producer intentstream.Producer `json:"producer"` - AudienceKind intentstream.AudienceKind `json:"audience_kind"` - RecipientUserIDs []string `json:"recipient_user_ids,omitempty"` - PayloadJSON string `json:"payload_json"` - IdempotencyKey string `json:"idempotency_key"` - RequestFingerprint string `json:"request_fingerprint"` - RequestID string `json:"request_id,omitempty"` - TraceID string `json:"trace_id,omitempty"` - OccurredAtMS int64 `json:"occurred_at_ms"` - AcceptedAtMS int64 `json:"accepted_at_ms"` - UpdatedAtMS int64 `json:"updated_at_ms"` -} - -type notificationRouteJSON struct { - NotificationID string `json:"notification_id"` - RouteID string `json:"route_id"` - Channel intentstream.Channel `json:"channel"` - RecipientRef string `json:"recipient_ref"` - Status acceptintent.RouteStatus `json:"status"` - AttemptCount int `json:"attempt_count"` - MaxAttempts int `json:"max_attempts"` - NextAttemptAtMS *int64 `json:"next_attempt_at_ms,omitempty"` - ResolvedEmail string `json:"resolved_email,omitempty"` - ResolvedLocale string `json:"resolved_locale,omitempty"` - LastErrorClassification string `json:"last_error_classification,omitempty"` - LastErrorMessage string `json:"last_error_message,omitempty"` - LastErrorAtMS *int64 `json:"last_error_at_ms,omitempty"` - CreatedAtMS int64 `json:"created_at_ms"` - UpdatedAtMS int64 
`json:"updated_at_ms"` - PublishedAtMS *int64 `json:"published_at_ms,omitempty"` - DeadLetteredAtMS *int64 `json:"dead_lettered_at_ms,omitempty"` - SkippedAtMS *int64 `json:"skipped_at_ms,omitempty"` -} - -type idempotencyRecordJSON struct { - Producer intentstream.Producer `json:"producer"` - IdempotencyKey string `json:"idempotency_key"` - NotificationID string `json:"notification_id"` - RequestFingerprint string `json:"request_fingerprint"` - CreatedAtMS int64 `json:"created_at_ms"` - ExpiresAtMS int64 `json:"expires_at_ms"` -} - -type malformedIntentJSON struct { - StreamEntryID string `json:"stream_entry_id"` - NotificationType string `json:"notification_type,omitempty"` - Producer string `json:"producer,omitempty"` - IdempotencyKey string `json:"idempotency_key,omitempty"` - FailureCode malformedintent.FailureCode `json:"failure_code"` - FailureMessage string `json:"failure_message"` - RawFields map[string]any `json:"raw_fields_json"` - RecordedAtMS int64 `json:"recorded_at_ms"` -} - -type streamOffsetJSON struct { - Stream string `json:"stream"` - LastProcessedEntryID string `json:"last_processed_entry_id"` - UpdatedAtMS int64 `json:"updated_at_ms"` -} - -type deadLetterEntryJSON struct { - NotificationID string `json:"notification_id"` - RouteID string `json:"route_id"` - Channel intentstream.Channel `json:"channel"` - RecipientRef string `json:"recipient_ref"` - FinalAttemptCount int `json:"final_attempt_count"` - MaxAttempts int `json:"max_attempts"` - FailureClassification string `json:"failure_classification"` - FailureMessage string `json:"failure_message"` - CreatedAtMS int64 `json:"created_at_ms"` - RecoveryHint string `json:"recovery_hint,omitempty"` -} - -// MarshalNotification marshals one notification record into the strict JSON -// representation owned by Notification Service. 
-func MarshalNotification(record acceptintent.NotificationRecord) ([]byte, error) { - if err := record.Validate(); err != nil { - return nil, fmt.Errorf("marshal notification record: %w", err) - } - - return marshalStrictJSON(notificationRecordJSON{ - NotificationID: record.NotificationID, - NotificationType: record.NotificationType, - Producer: record.Producer, - AudienceKind: record.AudienceKind, - RecipientUserIDs: append([]string(nil), record.RecipientUserIDs...), - PayloadJSON: record.PayloadJSON, - IdempotencyKey: record.IdempotencyKey, - RequestFingerprint: record.RequestFingerprint, - RequestID: record.RequestID, - TraceID: record.TraceID, - OccurredAtMS: unixMilli(record.OccurredAt), - AcceptedAtMS: unixMilli(record.AcceptedAt), - UpdatedAtMS: unixMilli(record.UpdatedAt), - }) -} - -// UnmarshalNotification unmarshals one strict JSON notification record. -func UnmarshalNotification(payload []byte) (acceptintent.NotificationRecord, error) { - var wire notificationRecordJSON - if err := unmarshalStrictJSON(payload, &wire); err != nil { - return acceptintent.NotificationRecord{}, fmt.Errorf("unmarshal notification record: %w", err) - } - - record := acceptintent.NotificationRecord{ - NotificationID: wire.NotificationID, - NotificationType: wire.NotificationType, - Producer: wire.Producer, - AudienceKind: wire.AudienceKind, - RecipientUserIDs: append([]string(nil), wire.RecipientUserIDs...), - PayloadJSON: wire.PayloadJSON, - IdempotencyKey: wire.IdempotencyKey, - RequestFingerprint: wire.RequestFingerprint, - RequestID: wire.RequestID, - TraceID: wire.TraceID, - OccurredAt: time.UnixMilli(wire.OccurredAtMS).UTC(), - AcceptedAt: time.UnixMilli(wire.AcceptedAtMS).UTC(), - UpdatedAt: time.UnixMilli(wire.UpdatedAtMS).UTC(), - } - if err := record.Validate(); err != nil { - return acceptintent.NotificationRecord{}, fmt.Errorf("unmarshal notification record: %w", err) - } - - return record, nil -} - -// MarshalRoute marshals one notification route into the strict 
JSON -// representation owned by Notification Service. -func MarshalRoute(route acceptintent.NotificationRoute) ([]byte, error) { - if err := route.Validate(); err != nil { - return nil, fmt.Errorf("marshal notification route: %w", err) - } - - return marshalStrictJSON(notificationRouteJSON{ - NotificationID: route.NotificationID, - RouteID: route.RouteID, - Channel: route.Channel, - RecipientRef: route.RecipientRef, - Status: route.Status, - AttemptCount: route.AttemptCount, - MaxAttempts: route.MaxAttempts, - NextAttemptAtMS: optionalUnixMilli(route.NextAttemptAt), - ResolvedEmail: route.ResolvedEmail, - ResolvedLocale: route.ResolvedLocale, - LastErrorClassification: route.LastErrorClassification, - LastErrorMessage: route.LastErrorMessage, - LastErrorAtMS: optionalUnixMilli(route.LastErrorAt), - CreatedAtMS: unixMilli(route.CreatedAt), - UpdatedAtMS: unixMilli(route.UpdatedAt), - PublishedAtMS: optionalUnixMilli(route.PublishedAt), - DeadLetteredAtMS: optionalUnixMilli(route.DeadLetteredAt), - SkippedAtMS: optionalUnixMilli(route.SkippedAt), - }) -} - -// UnmarshalRoute unmarshals one strict JSON notification route. 
-func UnmarshalRoute(payload []byte) (acceptintent.NotificationRoute, error) { - var wire notificationRouteJSON - if err := unmarshalStrictJSON(payload, &wire); err != nil { - return acceptintent.NotificationRoute{}, fmt.Errorf("unmarshal notification route: %w", err) - } - - route := acceptintent.NotificationRoute{ - NotificationID: wire.NotificationID, - RouteID: wire.RouteID, - Channel: wire.Channel, - RecipientRef: wire.RecipientRef, - Status: wire.Status, - AttemptCount: wire.AttemptCount, - MaxAttempts: wire.MaxAttempts, - ResolvedEmail: wire.ResolvedEmail, - ResolvedLocale: wire.ResolvedLocale, - LastErrorClassification: wire.LastErrorClassification, - LastErrorMessage: wire.LastErrorMessage, - CreatedAt: time.UnixMilli(wire.CreatedAtMS).UTC(), - UpdatedAt: time.UnixMilli(wire.UpdatedAtMS).UTC(), - } - if wire.NextAttemptAtMS != nil { - route.NextAttemptAt = time.UnixMilli(*wire.NextAttemptAtMS).UTC() - } - if wire.LastErrorAtMS != nil { - route.LastErrorAt = time.UnixMilli(*wire.LastErrorAtMS).UTC() - } - if wire.PublishedAtMS != nil { - route.PublishedAt = time.UnixMilli(*wire.PublishedAtMS).UTC() - } - if wire.DeadLetteredAtMS != nil { - route.DeadLetteredAt = time.UnixMilli(*wire.DeadLetteredAtMS).UTC() - } - if wire.SkippedAtMS != nil { - route.SkippedAt = time.UnixMilli(*wire.SkippedAtMS).UTC() - } - if err := route.Validate(); err != nil { - return acceptintent.NotificationRoute{}, fmt.Errorf("unmarshal notification route: %w", err) - } - - return route, nil -} - -// MarshalIdempotency marshals one idempotency record into the strict JSON -// representation owned by Notification Service. 
-func MarshalIdempotency(record acceptintent.IdempotencyRecord) ([]byte, error) { - if err := record.Validate(); err != nil { - return nil, fmt.Errorf("marshal notification idempotency record: %w", err) - } - - return marshalStrictJSON(idempotencyRecordJSON{ - Producer: record.Producer, - IdempotencyKey: record.IdempotencyKey, - NotificationID: record.NotificationID, - RequestFingerprint: record.RequestFingerprint, - CreatedAtMS: unixMilli(record.CreatedAt), - ExpiresAtMS: unixMilli(record.ExpiresAt), - }) -} - -// UnmarshalIdempotency unmarshals one strict JSON idempotency record. -func UnmarshalIdempotency(payload []byte) (acceptintent.IdempotencyRecord, error) { - var wire idempotencyRecordJSON - if err := unmarshalStrictJSON(payload, &wire); err != nil { - return acceptintent.IdempotencyRecord{}, fmt.Errorf("unmarshal notification idempotency record: %w", err) - } - - record := acceptintent.IdempotencyRecord{ - Producer: wire.Producer, - IdempotencyKey: wire.IdempotencyKey, - NotificationID: wire.NotificationID, - RequestFingerprint: wire.RequestFingerprint, - CreatedAt: time.UnixMilli(wire.CreatedAtMS).UTC(), - ExpiresAt: time.UnixMilli(wire.ExpiresAtMS).UTC(), - } - if err := record.Validate(); err != nil { - return acceptintent.IdempotencyRecord{}, fmt.Errorf("unmarshal notification idempotency record: %w", err) - } - - return record, nil -} - -// MarshalDeadLetter marshals one dead-letter entry into the strict JSON -// representation owned by Notification Service. 
-func MarshalDeadLetter(entry DeadLetterEntry) ([]byte, error) { - if err := entry.Validate(); err != nil { - return nil, fmt.Errorf("marshal dead letter entry: %w", err) - } - - return marshalStrictJSON(deadLetterEntryJSON{ - NotificationID: entry.NotificationID, - RouteID: entry.RouteID, - Channel: entry.Channel, - RecipientRef: entry.RecipientRef, - FinalAttemptCount: entry.FinalAttemptCount, - MaxAttempts: entry.MaxAttempts, - FailureClassification: entry.FailureClassification, - FailureMessage: entry.FailureMessage, - CreatedAtMS: unixMilli(entry.CreatedAt), - RecoveryHint: entry.RecoveryHint, - }) -} - -// UnmarshalDeadLetter unmarshals one strict JSON dead-letter entry. -func UnmarshalDeadLetter(payload []byte) (DeadLetterEntry, error) { - var wire deadLetterEntryJSON - if err := unmarshalStrictJSON(payload, &wire); err != nil { - return DeadLetterEntry{}, fmt.Errorf("unmarshal dead letter entry: %w", err) - } - - entry := DeadLetterEntry{ - NotificationID: wire.NotificationID, - RouteID: wire.RouteID, - Channel: wire.Channel, - RecipientRef: wire.RecipientRef, - FinalAttemptCount: wire.FinalAttemptCount, - MaxAttempts: wire.MaxAttempts, - FailureClassification: wire.FailureClassification, - FailureMessage: wire.FailureMessage, - CreatedAt: time.UnixMilli(wire.CreatedAtMS).UTC(), - RecoveryHint: wire.RecoveryHint, - } - if err := entry.Validate(); err != nil { - return DeadLetterEntry{}, fmt.Errorf("unmarshal dead letter entry: %w", err) - } - - return entry, nil -} - -// MarshalMalformedIntent marshals one malformed-intent entry into the strict -// JSON representation owned by Notification Service. 
-func MarshalMalformedIntent(entry malformedintent.Entry) ([]byte, error) { - if err := entry.Validate(); err != nil { - return nil, fmt.Errorf("marshal malformed intent: %w", err) - } - - return marshalStrictJSON(malformedIntentJSON{ - StreamEntryID: entry.StreamEntryID, - NotificationType: entry.NotificationType, - Producer: entry.Producer, - IdempotencyKey: entry.IdempotencyKey, - FailureCode: entry.FailureCode, - FailureMessage: entry.FailureMessage, - RawFields: cloneJSONObject(entry.RawFields), - RecordedAtMS: unixMilli(entry.RecordedAt), - }) -} - -// UnmarshalMalformedIntent unmarshals one strict JSON malformed-intent entry. -func UnmarshalMalformedIntent(payload []byte) (malformedintent.Entry, error) { - var wire malformedIntentJSON - if err := unmarshalStrictJSON(payload, &wire); err != nil { - return malformedintent.Entry{}, fmt.Errorf("unmarshal malformed intent: %w", err) - } - - entry := malformedintent.Entry{ - StreamEntryID: wire.StreamEntryID, - NotificationType: wire.NotificationType, - Producer: wire.Producer, - IdempotencyKey: wire.IdempotencyKey, - FailureCode: wire.FailureCode, - FailureMessage: wire.FailureMessage, - RawFields: cloneJSONObject(wire.RawFields), - RecordedAt: time.UnixMilli(wire.RecordedAtMS).UTC(), - } - if err := entry.Validate(); err != nil { - return malformedintent.Entry{}, fmt.Errorf("unmarshal malformed intent: %w", err) - } - - return entry, nil -} - -// MarshalStreamOffset marshals one stream-offset record into the strict JSON -// representation owned by Notification Service. -func MarshalStreamOffset(offset StreamOffset) ([]byte, error) { - if err := offset.Validate(); err != nil { - return nil, fmt.Errorf("marshal stream offset: %w", err) - } - - return marshalStrictJSON(streamOffsetJSON{ - Stream: offset.Stream, - LastProcessedEntryID: offset.LastProcessedEntryID, - UpdatedAtMS: unixMilli(offset.UpdatedAt), - }) -} - -// UnmarshalStreamOffset unmarshals one strict JSON stream-offset record. 
-func UnmarshalStreamOffset(payload []byte) (StreamOffset, error) { - var wire streamOffsetJSON - if err := unmarshalStrictJSON(payload, &wire); err != nil { - return StreamOffset{}, fmt.Errorf("unmarshal stream offset: %w", err) - } - - offset := StreamOffset{ - Stream: wire.Stream, - LastProcessedEntryID: wire.LastProcessedEntryID, - UpdatedAt: time.UnixMilli(wire.UpdatedAtMS).UTC(), - } - if err := offset.Validate(); err != nil { - return StreamOffset{}, fmt.Errorf("unmarshal stream offset: %w", err) - } - - return offset, nil -} - // Validate reports whether offset contains a complete persisted consumer // progress record. func (offset StreamOffset) Validate() error { @@ -446,43 +44,43 @@ func (offset StreamOffset) Validate() error { return nil } -// Validate reports whether entry contains a complete dead-letter record. -func (entry DeadLetterEntry) Validate() error { - if entry.NotificationID == "" { - return fmt.Errorf("dead letter entry notification id must not be empty") - } - if entry.RouteID == "" { - return fmt.Errorf("dead letter entry route id must not be empty") - } - if !entry.Channel.IsKnown() { - return fmt.Errorf("dead letter entry channel %q is unsupported", entry.Channel) - } - if entry.RecipientRef == "" { - return fmt.Errorf("dead letter entry recipient ref must not be empty") - } - if entry.FinalAttemptCount <= 0 { - return fmt.Errorf("dead letter entry final attempt count must be positive") - } - if entry.MaxAttempts <= 0 { - return fmt.Errorf("dead letter entry max attempts must be positive") - } - if entry.FailureClassification == "" { - return fmt.Errorf("dead letter entry failure classification must not be empty") - } - if entry.FailureMessage == "" { - return fmt.Errorf("dead letter entry failure message must not be empty") - } - if entry.CreatedAt.IsZero() { - return fmt.Errorf("dead letter entry created at must not be zero") - } - if !entry.CreatedAt.Equal(entry.CreatedAt.UTC()) { - return fmt.Errorf("dead letter entry created at must 
be UTC") - } - if !entry.CreatedAt.Equal(entry.CreatedAt.Truncate(time.Millisecond)) { - return fmt.Errorf("dead letter entry created at must use millisecond precision") +type streamOffsetJSON struct { + Stream string `json:"stream"` + LastProcessedEntryID string `json:"last_processed_entry_id"` + UpdatedAtMS int64 `json:"updated_at_ms"` +} + +// MarshalStreamOffset marshals one stream-offset record into the strict JSON +// representation owned by Notification Service. +func MarshalStreamOffset(offset StreamOffset) ([]byte, error) { + if err := offset.Validate(); err != nil { + return nil, fmt.Errorf("marshal stream offset: %w", err) } - return nil + return marshalStrictJSON(streamOffsetJSON{ + Stream: offset.Stream, + LastProcessedEntryID: offset.LastProcessedEntryID, + UpdatedAtMS: offset.UpdatedAt.UTC().UnixMilli(), + }) +} + +// UnmarshalStreamOffset unmarshals one strict JSON stream-offset record. +func UnmarshalStreamOffset(payload []byte) (StreamOffset, error) { + var wire streamOffsetJSON + if err := unmarshalStrictJSON(payload, &wire); err != nil { + return StreamOffset{}, fmt.Errorf("unmarshal stream offset: %w", err) + } + + offset := StreamOffset{ + Stream: wire.Stream, + LastProcessedEntryID: wire.LastProcessedEntryID, + UpdatedAt: time.UnixMilli(wire.UpdatedAtMS).UTC(), + } + if err := offset.Validate(); err != nil { + return StreamOffset{}, fmt.Errorf("unmarshal stream offset: %w", err) + } + + return offset, nil } func marshalStrictJSON(value any) ([]byte, error) { @@ -505,43 +103,3 @@ func unmarshalStrictJSON(payload []byte, target any) error { return nil } - -func unixMilli(value time.Time) int64 { - return value.UTC().UnixMilli() -} - -func optionalUnixMilli(value time.Time) *int64 { - if value.IsZero() { - return nil - } - millis := unixMilli(value) - return &millis -} - -func cloneJSONObject(value map[string]any) map[string]any { - if value == nil { - return map[string]any{} - } - - cloned := make(map[string]any, len(value)) - for key, raw := 
range value { - cloned[key] = cloneJSONValue(raw) - } - - return cloned -} - -func cloneJSONValue(value any) any { - switch typed := value.(type) { - case map[string]any: - return cloneJSONObject(typed) - case []any: - cloned := make([]any, len(typed)) - for index, item := range typed { - cloned[index] = cloneJSONValue(item) - } - return cloned - default: - return typed - } -} diff --git a/notification/internal/adapters/redisstate/errors.go b/notification/internal/adapters/redisstate/errors.go index 0ebd17f..ce0f263 100644 --- a/notification/internal/adapters/redisstate/errors.go +++ b/notification/internal/adapters/redisstate/errors.go @@ -1,10 +1,10 @@ package redisstate -import "errors" +import "galaxy/notification/internal/service/routestate" -var ( - // ErrConflict reports that a Redis mutation could not be applied because - // one of the watched or newly created keys already existed or changed - // concurrently. - ErrConflict = errors.New("redis state conflict") -) +// ErrConflict reports that a Redis mutation could not be applied because +// one of the watched or newly created keys already existed or changed +// concurrently. Aliased to routestate.ErrConflict so the publisher +// boundary uses one stable sentinel regardless of which storage backend +// drives the mutation. +var ErrConflict = routestate.ErrConflict diff --git a/notification/internal/adapters/redisstate/keyspace.go b/notification/internal/adapters/redisstate/keyspace.go index 9ec7f81..3b17b1e 100644 --- a/notification/internal/adapters/redisstate/keyspace.go +++ b/notification/internal/adapters/redisstate/keyspace.go @@ -2,79 +2,25 @@ package redisstate import ( "encoding/base64" - "fmt" - "strings" - - "galaxy/notification/internal/api/intentstream" ) const defaultPrefix = "notification:" -// Keyspace builds the frozen Notification Service Redis keys. All dynamic key -// segments are encoded with base64url so raw key structure does not depend on -// caller-provided characters. 
+// Keyspace builds the Notification Service Redis keys retained after the +// Stage 5 PostgreSQL migration: only the route lease, the persisted stream +// offset, and the inbound intent stream key are managed here. Durable +// notification state lives in the `notification` PostgreSQL schema. +// +// Dynamic key segments are encoded with base64url so raw key structure +// does not depend on caller-provided characters. type Keyspace struct{} -// Notification returns the primary Redis key for one notification_record. -func (Keyspace) Notification(notificationID string) string { - return defaultPrefix + "records:" + encodeKeyComponent(notificationID) -} - -// Route returns the primary Redis key for one notification_route. -func (Keyspace) Route(notificationID string, routeID string) string { - return defaultPrefix + "routes:" + encodeKeyComponent(notificationID) + ":" + encodeKeyComponent(routeID) -} - -// ParseRoute returns the notification identifier and route identifier encoded -// inside routeKey. -func (Keyspace) ParseRoute(routeKey string) (string, string, error) { - trimmedPrefix := defaultPrefix + "routes:" - if !strings.HasPrefix(routeKey, trimmedPrefix) { - return "", "", fmt.Errorf("parse route key: %q does not use %q prefix", routeKey, trimmedPrefix) - } - - encoded := strings.TrimPrefix(routeKey, trimmedPrefix) - parts := strings.Split(encoded, ":") - if len(parts) != 2 { - return "", "", fmt.Errorf("parse route key: %q must contain exactly two encoded segments", routeKey) - } - - notificationID, err := decodeKeyComponent(parts[0]) - if err != nil { - return "", "", fmt.Errorf("parse route key: notification id: %w", err) - } - routeID, err := decodeKeyComponent(parts[1]) - if err != nil { - return "", "", fmt.Errorf("parse route key: route id: %w", err) - } - - return notificationID, routeID, nil -} - -// Idempotency returns the primary Redis key for one -// notification_idempotency_record. 
-func (Keyspace) Idempotency(producer intentstream.Producer, idempotencyKey string) string { - return defaultPrefix + "idempotency:" + encodeKeyComponent(string(producer)) + ":" + encodeKeyComponent(idempotencyKey) -} - -// DeadLetter returns the primary Redis key for one -// notification_dead_letter_entry. -func (Keyspace) DeadLetter(notificationID string, routeID string) string { - return defaultPrefix + "dead_letters:" + encodeKeyComponent(notificationID) + ":" + encodeKeyComponent(routeID) -} - // RouteLease returns the temporary Redis key used to coordinate exclusive // publication of one notification_route across replicas. func (Keyspace) RouteLease(notificationID string, routeID string) string { return defaultPrefix + "route_leases:" + encodeKeyComponent(notificationID) + ":" + encodeKeyComponent(routeID) } -// MalformedIntent returns the primary Redis key for one malformed-intent -// record. -func (Keyspace) MalformedIntent(streamEntryID string) string { - return defaultPrefix + "malformed_intents:" + encodeKeyComponent(streamEntryID) -} - // StreamOffset returns the primary Redis key for one persisted intent-consumer // offset. func (Keyspace) StreamOffset(stream string) string { @@ -86,20 +32,6 @@ func (Keyspace) Intents() string { return defaultPrefix + "intents" } -// RouteSchedule returns the frozen route schedule sorted-set key. 
-func (Keyspace) RouteSchedule() string { - return defaultPrefix + "route_schedule" -} - func encodeKeyComponent(value string) string { return base64.RawURLEncoding.EncodeToString([]byte(value)) } - -func decodeKeyComponent(value string) (string, error) { - decoded, err := base64.RawURLEncoding.DecodeString(value) - if err != nil { - return "", err - } - - return string(decoded), nil -} diff --git a/notification/internal/adapters/redisstate/lease_store.go b/notification/internal/adapters/redisstate/lease_store.go new file mode 100644 index 0000000..932a762 --- /dev/null +++ b/notification/internal/adapters/redisstate/lease_store.go @@ -0,0 +1,108 @@ +package redisstate + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/redis/go-redis/v9" +) + +// releaseRouteLeaseScript releases the route lease only when the supplied +// token still owns it. The Lua script gates the DEL on the SET value match +// so a publisher that lost the lease (TTL expiry, replica swap) cannot +// clear another worker's claim. +var releaseRouteLeaseScript = redis.NewScript(` +if redis.call("GET", KEYS[1]) == ARGV[1] then + return redis.call("DEL", KEYS[1]) +end +return 0 +`) + +// LeaseStore owns the short-lived route lease keys that coordinate exclusive +// route publication across replicas. The lease lives on Redis as a per-route +// SETNX-with-TTL token; releasing it requires the same token via a Lua +// script that compares the stored value before deleting it. +// +// LeaseStore is intentionally separate from the durable route-state storage +// so the publishers can compose one storage-layer adapter (PostgreSQL since +// Stage 5) with the runtime-coordination layer that stays on Redis per +// `ARCHITECTURE.md §Persistence Backends`. +type LeaseStore struct { + client *redis.Client + keys Keyspace +} + +// NewLeaseStore constructs one Redis-backed lease store. 
+func NewLeaseStore(client *redis.Client) (*LeaseStore, error) { + if client == nil { + return nil, errors.New("new notification lease store: nil redis client") + } + + return &LeaseStore{client: client, keys: Keyspace{}}, nil +} + +// TryAcquireRouteLease attempts to acquire one temporary route lease owned +// by token for ttl. The lease is stored at the route-lease keyspace key and +// auto-expires; a publisher whose work outlives the TTL must accept that +// another replica may pick the route up. +func (store *LeaseStore) TryAcquireRouteLease(ctx context.Context, notificationID string, routeID string, token string, ttl time.Duration) (bool, error) { + if store == nil || store.client == nil { + return false, errors.New("try acquire route lease: nil store") + } + if ctx == nil { + return false, errors.New("try acquire route lease: nil context") + } + if notificationID == "" { + return false, errors.New("try acquire route lease: notification id must not be empty") + } + if routeID == "" { + return false, errors.New("try acquire route lease: route id must not be empty") + } + if token == "" { + return false, errors.New("try acquire route lease: token must not be empty") + } + if ttl <= 0 { + return false, errors.New("try acquire route lease: ttl must be positive") + } + + acquired, err := store.client.SetNX(ctx, store.keys.RouteLease(notificationID, routeID), token, ttl).Result() + if err != nil { + return false, fmt.Errorf("try acquire route lease: %w", err) + } + + return acquired, nil +} + +// ReleaseRouteLease releases one temporary route lease only when token still +// matches the stored owner value. Releasing a lease the caller no longer +// owns is a silent no-op. 
+func (store *LeaseStore) ReleaseRouteLease(ctx context.Context, notificationID string, routeID string, token string) error { + if store == nil || store.client == nil { + return errors.New("release route lease: nil store") + } + if ctx == nil { + return errors.New("release route lease: nil context") + } + if notificationID == "" { + return errors.New("release route lease: notification id must not be empty") + } + if routeID == "" { + return errors.New("release route lease: route id must not be empty") + } + if token == "" { + return errors.New("release route lease: token must not be empty") + } + + if err := releaseRouteLeaseScript.Run( + ctx, + store.client, + []string{store.keys.RouteLease(notificationID, routeID)}, + token, + ).Err(); err != nil { + return fmt.Errorf("release route lease: %w", err) + } + + return nil +} diff --git a/notification/internal/adapters/redisstate/malformed_intent_store.go b/notification/internal/adapters/redisstate/malformed_intent_store.go deleted file mode 100644 index 50fc52e..0000000 --- a/notification/internal/adapters/redisstate/malformed_intent_store.go +++ /dev/null @@ -1,59 +0,0 @@ -package redisstate - -import ( - "context" - "errors" - "fmt" - "time" - - "galaxy/notification/internal/service/malformedintent" - - "github.com/redis/go-redis/v9" -) - -// MalformedIntentStore provides the Redis-backed storage used for -// operator-visible malformed-intent records. -type MalformedIntentStore struct { - client *redis.Client - keys Keyspace - ttl time.Duration -} - -// NewMalformedIntentStore constructs one Redis-backed malformed-intent store. 
-func NewMalformedIntentStore(client *redis.Client, ttl time.Duration) (*MalformedIntentStore, error) { - if client == nil { - return nil, errors.New("new malformed intent store: nil redis client") - } - if ttl <= 0 { - return nil, errors.New("new malformed intent store: non-positive ttl") - } - - return &MalformedIntentStore{ - client: client, - keys: Keyspace{}, - ttl: ttl, - }, nil -} - -// Record stores entry idempotently by its Redis Stream entry identifier. -func (store *MalformedIntentStore) Record(ctx context.Context, entry malformedintent.Entry) error { - if store == nil || store.client == nil { - return errors.New("record malformed intent: nil store") - } - if ctx == nil { - return errors.New("record malformed intent: nil context") - } - if err := entry.Validate(); err != nil { - return fmt.Errorf("record malformed intent: %w", err) - } - - payload, err := MarshalMalformedIntent(entry) - if err != nil { - return fmt.Errorf("record malformed intent: %w", err) - } - if err := store.client.Set(ctx, store.keys.MalformedIntent(entry.StreamEntryID), payload, store.ttl).Err(); err != nil { - return fmt.Errorf("record malformed intent: %w", err) - } - - return nil -} diff --git a/notification/internal/adapters/redisstate/route_state_store.go b/notification/internal/adapters/redisstate/route_state_store.go deleted file mode 100644 index f70a8d9..0000000 --- a/notification/internal/adapters/redisstate/route_state_store.go +++ /dev/null @@ -1,657 +0,0 @@ -package redisstate - -import ( - "bytes" - "context" - "errors" - "fmt" - "sort" - "strconv" - "time" - - "galaxy/notification/internal/service/acceptintent" - "galaxy/notification/internal/telemetry" - - "github.com/redis/go-redis/v9" -) - -var releaseRouteLeaseScript = redis.NewScript(` -if redis.call("GET", KEYS[1]) == ARGV[1] then - return redis.call("DEL", KEYS[1]) -end -return 0 -`) - -var completePublishedRouteScript = redis.NewScript(` -if redis.call("GET", KEYS[1]) ~= ARGV[1] then - return 0 -end -if 
redis.call("GET", KEYS[2]) ~= ARGV[2] then - return 0 -end -local field_count = tonumber(ARGV[6]) -local values = {} -local index = 7 -for _ = 1, field_count do - table.insert(values, ARGV[index]) - table.insert(values, ARGV[index + 1]) - index = index + 2 -end -if tonumber(ARGV[4]) > 0 then - redis.call("XADD", ARGV[3], "MAXLEN", "~", ARGV[4], "*", unpack(values)) -else - redis.call("XADD", ARGV[3], "*", unpack(values)) -end -redis.call("SET", KEYS[1], ARGV[5], "KEEPTTL") -redis.call("ZREM", KEYS[3], KEYS[1]) -redis.call("DEL", KEYS[2]) -return 1 -`) - -// ScheduledRoute stores one due route reference loaded from -// `notification:route_schedule`. -type ScheduledRoute struct { - // RouteKey stores the full Redis route key scheduled for processing. - RouteKey string - - // NotificationID stores the owning notification identifier. - NotificationID string - - // RouteID stores the scheduled route identifier. - RouteID string -} - -// CompleteRoutePublishedInput stores the data required to mark one route as -// published while atomically appending one outbound stream entry. -type CompleteRoutePublishedInput struct { - // ExpectedRoute stores the current route state previously loaded by the - // caller. - ExpectedRoute acceptintent.NotificationRoute - - // LeaseToken stores the route-lease owner token that must still be held. - LeaseToken string - - // PublishedAt stores when the publication attempt succeeded. - PublishedAt time.Time - - // Stream stores the outbound Redis Stream name. - Stream string - - // StreamMaxLen bounds Stream with approximate trimming when positive. Zero - // disables trimming. - StreamMaxLen int64 - - // StreamValues stores the exact Redis Stream fields appended to Stream. - StreamValues map[string]any -} - -// CompleteRouteFailedInput stores the data required to record one retryable -// publication failure. -type CompleteRouteFailedInput struct { - // ExpectedRoute stores the current route state previously loaded by the - // caller. 
- ExpectedRoute acceptintent.NotificationRoute - - // LeaseToken stores the route-lease owner token that must still be held. - LeaseToken string - - // FailedAt stores when the publication attempt failed. - FailedAt time.Time - - // NextAttemptAt stores the next scheduled retry time. - NextAttemptAt time.Time - - // FailureClassification stores the classified publication failure kind. - FailureClassification string - - // FailureMessage stores the detailed publication failure text. - FailureMessage string -} - -// CompleteRouteDeadLetterInput stores the data required to record one -// exhausted publication failure. -type CompleteRouteDeadLetterInput struct { - // ExpectedRoute stores the current route state previously loaded by the - // caller. - ExpectedRoute acceptintent.NotificationRoute - - // LeaseToken stores the route-lease owner token that must still be held. - LeaseToken string - - // DeadLetteredAt stores when the route exhausted its retry budget. - DeadLetteredAt time.Time - - // FailureClassification stores the classified terminal failure kind. - FailureClassification string - - // FailureMessage stores the detailed terminal failure text. - FailureMessage string - - // RecoveryHint stores the optional operator-facing recovery guidance. - RecoveryHint string -} - -// ListDueRoutes loads up to limit scheduled routes whose next-attempt score is -// due at or before now. 
-func (store *AcceptanceStore) ListDueRoutes(ctx context.Context, now time.Time, limit int64) ([]ScheduledRoute, error) { - if store == nil || store.client == nil { - return nil, errors.New("list due routes: nil store") - } - if ctx == nil { - return nil, errors.New("list due routes: nil context") - } - if err := validateRouteStateTimestamp("list due routes now", now); err != nil { - return nil, err - } - if limit <= 0 { - return nil, errors.New("list due routes: limit must be positive") - } - - members, err := store.client.ZRangeByScore(ctx, store.keys.RouteSchedule(), &redis.ZRangeBy{ - Min: "-inf", - Max: strconv.FormatInt(now.UnixMilli(), 10), - Count: limit, - }).Result() - if err != nil { - return nil, fmt.Errorf("list due routes: %w", err) - } - - routes := make([]ScheduledRoute, 0, len(members)) - for _, member := range members { - notificationID, routeID, err := store.keys.ParseRoute(member) - if err != nil { - return nil, fmt.Errorf("list due routes: %w", err) - } - routes = append(routes, ScheduledRoute{ - RouteKey: member, - NotificationID: notificationID, - RouteID: routeID, - }) - } - - return routes, nil -} - -// ReadRouteScheduleSnapshot returns the current depth of the durable route -// schedule together with its oldest scheduled timestamp when one exists. 
-func (store *AcceptanceStore) ReadRouteScheduleSnapshot(ctx context.Context) (telemetry.RouteScheduleSnapshot, error) { - if store == nil || store.client == nil { - return telemetry.RouteScheduleSnapshot{}, errors.New("read route schedule snapshot: nil store") - } - if ctx == nil { - return telemetry.RouteScheduleSnapshot{}, errors.New("read route schedule snapshot: nil context") - } - - depth, err := store.client.ZCard(ctx, store.keys.RouteSchedule()).Result() - if err != nil { - return telemetry.RouteScheduleSnapshot{}, fmt.Errorf("read route schedule snapshot: depth: %w", err) - } - - snapshot := telemetry.RouteScheduleSnapshot{ - Depth: depth, - } - if depth == 0 { - return snapshot, nil - } - - values, err := store.client.ZRangeWithScores(ctx, store.keys.RouteSchedule(), 0, 0).Result() - if err != nil { - return telemetry.RouteScheduleSnapshot{}, fmt.Errorf("read route schedule snapshot: oldest scheduled entry: %w", err) - } - if len(values) == 0 { - return snapshot, nil - } - - oldestScheduledFor := time.UnixMilli(int64(values[0].Score)).UTC() - snapshot.OldestScheduledFor = &oldestScheduledFor - return snapshot, nil -} - -// TryAcquireRouteLease attempts to acquire one temporary route lease owned by -// token for ttl. 
-func (store *AcceptanceStore) TryAcquireRouteLease(ctx context.Context, notificationID string, routeID string, token string, ttl time.Duration) (bool, error) { - if store == nil || store.client == nil { - return false, errors.New("try acquire route lease: nil store") - } - if ctx == nil { - return false, errors.New("try acquire route lease: nil context") - } - if notificationID == "" { - return false, errors.New("try acquire route lease: notification id must not be empty") - } - if routeID == "" { - return false, errors.New("try acquire route lease: route id must not be empty") - } - if token == "" { - return false, errors.New("try acquire route lease: token must not be empty") - } - if ttl <= 0 { - return false, errors.New("try acquire route lease: ttl must be positive") - } - - acquired, err := store.client.SetNX(ctx, store.keys.RouteLease(notificationID, routeID), token, ttl).Result() - if err != nil { - return false, fmt.Errorf("try acquire route lease: %w", err) - } - - return acquired, nil -} - -// ReleaseRouteLease releases one temporary route lease only when token still -// matches the stored owner value. 
-func (store *AcceptanceStore) ReleaseRouteLease(ctx context.Context, notificationID string, routeID string, token string) error { - if store == nil || store.client == nil { - return errors.New("release route lease: nil store") - } - if ctx == nil { - return errors.New("release route lease: nil context") - } - if notificationID == "" { - return errors.New("release route lease: notification id must not be empty") - } - if routeID == "" { - return errors.New("release route lease: route id must not be empty") - } - if token == "" { - return errors.New("release route lease: token must not be empty") - } - - if err := releaseRouteLeaseScript.Run( - ctx, - store.client, - []string{store.keys.RouteLease(notificationID, routeID)}, - token, - ).Err(); err != nil { - return fmt.Errorf("release route lease: %w", err) - } - - return nil -} - -// CompleteRoutePublished atomically appends one outbound stream entry and -// marks the corresponding route as published. -func (store *AcceptanceStore) CompleteRoutePublished(ctx context.Context, input CompleteRoutePublishedInput) error { - if store == nil || store.client == nil { - return errors.New("complete route published: nil store") - } - if ctx == nil { - return errors.New("complete route published: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("complete route published: %w", err) - } - - updatedRoute := input.ExpectedRoute - updatedRoute.Status = acceptintent.RouteStatusPublished - updatedRoute.AttemptCount++ - updatedRoute.NextAttemptAt = time.Time{} - updatedRoute.LastErrorClassification = "" - updatedRoute.LastErrorMessage = "" - updatedRoute.LastErrorAt = time.Time{} - updatedRoute.UpdatedAt = input.PublishedAt - updatedRoute.PublishedAt = input.PublishedAt - updatedRoute.DeadLetteredAt = time.Time{} - payload, err := MarshalRoute(updatedRoute) - if err != nil { - return fmt.Errorf("complete route published: %w", err) - } - expectedPayload, err := MarshalRoute(input.ExpectedRoute) - if err 
!= nil { - return fmt.Errorf("complete route published: %w", err) - } - streamArgs, err := flattenStreamValues(input.StreamValues) - if err != nil { - return fmt.Errorf("complete route published: %w", err) - } - - result, err := completePublishedRouteScript.Run( - ctx, - store.client, - []string{ - store.keys.Route(updatedRoute.NotificationID, updatedRoute.RouteID), - store.keys.RouteLease(updatedRoute.NotificationID, updatedRoute.RouteID), - store.keys.RouteSchedule(), - }, - append([]any{ - string(expectedPayload), - input.LeaseToken, - input.Stream, - input.StreamMaxLen, - string(payload), - len(streamArgs) / 2, - }, streamArgs...)..., - ).Int() - switch { - case errors.Is(err, redis.Nil): - return ErrConflict - case err != nil: - return err - case result != 1: - return ErrConflict - default: - return nil - } -} - -// CompleteRouteFailed atomically records one retryable publication failure and -// reschedules the route. -func (store *AcceptanceStore) CompleteRouteFailed(ctx context.Context, input CompleteRouteFailedInput) error { - if store == nil || store.client == nil { - return errors.New("complete route failed: nil store") - } - if ctx == nil { - return errors.New("complete route failed: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("complete route failed: %w", err) - } - - updatedRoute := input.ExpectedRoute - updatedRoute.Status = acceptintent.RouteStatusFailed - updatedRoute.AttemptCount++ - updatedRoute.NextAttemptAt = input.NextAttemptAt - updatedRoute.LastErrorClassification = input.FailureClassification - updatedRoute.LastErrorMessage = input.FailureMessage - updatedRoute.LastErrorAt = input.FailedAt - updatedRoute.UpdatedAt = input.FailedAt - payload, err := MarshalRoute(updatedRoute) - if err != nil { - return fmt.Errorf("complete route failed: %w", err) - } - - return store.completeRouteMutation(ctx, input.ExpectedRoute, input.LeaseToken, func(pipe redis.Pipeliner) error { - pipe.SetArgs(ctx, 
store.keys.Route(updatedRoute.NotificationID, updatedRoute.RouteID), payload, redis.SetArgs{KeepTTL: true}) - pipe.ZAdd(ctx, store.keys.RouteSchedule(), redis.Z{ - Score: float64(input.NextAttemptAt.UnixMilli()), - Member: store.keys.Route(updatedRoute.NotificationID, updatedRoute.RouteID), - }) - pipe.Del(ctx, store.keys.RouteLease(updatedRoute.NotificationID, updatedRoute.RouteID)) - return nil - }) -} - -// CompleteRouteDeadLetter atomically records one exhausted publication -// failure, stores the dead-letter entry, and removes the route from the -// retry schedule. -func (store *AcceptanceStore) CompleteRouteDeadLetter(ctx context.Context, input CompleteRouteDeadLetterInput) error { - if store == nil || store.client == nil { - return errors.New("complete route dead letter: nil store") - } - if ctx == nil { - return errors.New("complete route dead letter: nil context") - } - if err := input.Validate(); err != nil { - return fmt.Errorf("complete route dead letter: %w", err) - } - - updatedRoute := input.ExpectedRoute - updatedRoute.Status = acceptintent.RouteStatusDeadLetter - updatedRoute.AttemptCount++ - updatedRoute.NextAttemptAt = time.Time{} - updatedRoute.LastErrorClassification = input.FailureClassification - updatedRoute.LastErrorMessage = input.FailureMessage - updatedRoute.LastErrorAt = input.DeadLetteredAt - updatedRoute.UpdatedAt = input.DeadLetteredAt - updatedRoute.DeadLetteredAt = input.DeadLetteredAt - if updatedRoute.AttemptCount < updatedRoute.MaxAttempts { - return fmt.Errorf( - "complete route dead letter: final attempt count %d is below max attempts %d", - updatedRoute.AttemptCount, - updatedRoute.MaxAttempts, - ) - } - - routePayload, err := MarshalRoute(updatedRoute) - if err != nil { - return fmt.Errorf("complete route dead letter: %w", err) - } - deadLetterPayload, err := MarshalDeadLetter(DeadLetterEntry{ - NotificationID: updatedRoute.NotificationID, - RouteID: updatedRoute.RouteID, - Channel: updatedRoute.Channel, - RecipientRef: 
updatedRoute.RecipientRef, - FinalAttemptCount: updatedRoute.AttemptCount, - MaxAttempts: updatedRoute.MaxAttempts, - FailureClassification: input.FailureClassification, - FailureMessage: input.FailureMessage, - CreatedAt: input.DeadLetteredAt, - RecoveryHint: input.RecoveryHint, - }) - if err != nil { - return fmt.Errorf("complete route dead letter: %w", err) - } - - return store.completeRouteMutation(ctx, input.ExpectedRoute, input.LeaseToken, func(pipe redis.Pipeliner) error { - pipe.SetArgs(ctx, store.keys.Route(updatedRoute.NotificationID, updatedRoute.RouteID), routePayload, redis.SetArgs{KeepTTL: true}) - pipe.Set(ctx, store.keys.DeadLetter(updatedRoute.NotificationID, updatedRoute.RouteID), deadLetterPayload, store.cfg.DeadLetterTTL) - pipe.ZRem(ctx, store.keys.RouteSchedule(), store.keys.Route(updatedRoute.NotificationID, updatedRoute.RouteID)) - pipe.Del(ctx, store.keys.RouteLease(updatedRoute.NotificationID, updatedRoute.RouteID)) - return nil - }) -} - -func (store *AcceptanceStore) completeRouteMutation( - ctx context.Context, - expectedRoute acceptintent.NotificationRoute, - leaseToken string, - mutate func(redis.Pipeliner) error, -) error { - routeKey := store.keys.Route(expectedRoute.NotificationID, expectedRoute.RouteID) - leaseKey := store.keys.RouteLease(expectedRoute.NotificationID, expectedRoute.RouteID) - - watchErr := store.client.Watch(ctx, func(tx *redis.Tx) error { - currentRoute, err := loadWatchedRoute(ctx, tx, routeKey) - switch { - case errors.Is(err, redis.Nil): - return ErrConflict - case err != nil: - return err - } - if err := ensureRoutesEqual(expectedRoute, currentRoute); err != nil { - return err - } - - leaseValue, err := tx.Get(ctx, leaseKey).Result() - switch { - case errors.Is(err, redis.Nil): - return ErrConflict - case err != nil: - return err - case leaseValue != leaseToken: - return ErrConflict - } - - _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { - return mutate(pipe) - }) - - return err - }, routeKey, 
leaseKey) - - switch { - case errors.Is(watchErr, ErrConflict), errors.Is(watchErr, redis.TxFailedErr): - return ErrConflict - case watchErr != nil: - return watchErr - default: - return nil - } -} - -func loadWatchedRoute(ctx context.Context, tx *redis.Tx, routeKey string) (acceptintent.NotificationRoute, error) { - payload, err := tx.Get(ctx, routeKey).Bytes() - if err != nil { - return acceptintent.NotificationRoute{}, err - } - - return UnmarshalRoute(payload) -} - -func ensureRoutesEqual(expected acceptintent.NotificationRoute, actual acceptintent.NotificationRoute) error { - expectedPayload, err := MarshalRoute(expected) - if err != nil { - return fmt.Errorf("marshal expected route: %w", err) - } - actualPayload, err := MarshalRoute(actual) - if err != nil { - return fmt.Errorf("marshal current route: %w", err) - } - if !bytes.Equal(expectedPayload, actualPayload) { - return ErrConflict - } - - return nil -} - -func validateCompletionRoute(route acceptintent.NotificationRoute) error { - if err := route.Validate(); err != nil { - return err - } - switch route.Status { - case acceptintent.RouteStatusPending, acceptintent.RouteStatusFailed: - return nil - default: - return fmt.Errorf("route status %q is not completable", route.Status) - } -} - -func validateStreamValues(values map[string]any) error { - if len(values) == 0 { - return fmt.Errorf("stream values must not be empty") - } - - for key, raw := range values { - if key == "" { - return fmt.Errorf("stream values key must not be empty") - } - switch typed := raw.(type) { - case string: - if typed == "" { - return fmt.Errorf("stream values %q must not be empty", key) - } - case []byte: - if len(typed) == 0 { - return fmt.Errorf("stream values %q must not be empty", key) - } - default: - return fmt.Errorf("stream values %q must be string or []byte", key) - } - } - - return nil -} - -func flattenStreamValues(values map[string]any) ([]any, error) { - keys := make([]string, 0, len(values)) - for key := range 
values { - keys = append(keys, key) - } - sort.Strings(keys) - - args := make([]any, 0, len(values)*2) - for _, key := range keys { - args = append(args, key, values[key]) - } - - return args, nil -} - -func validateRouteStateTimestamp(name string, value time.Time) error { - if value.IsZero() { - return fmt.Errorf("%s must not be zero", name) - } - if !value.Equal(value.UTC()) { - return fmt.Errorf("%s must be UTC", name) - } - if !value.Equal(value.Truncate(time.Millisecond)) { - return fmt.Errorf("%s must use millisecond precision", name) - } - - return nil -} - -// Validate reports whether route contains a complete due-route reference. -func (route ScheduledRoute) Validate() error { - if route.RouteKey == "" { - return fmt.Errorf("scheduled route key must not be empty") - } - if route.NotificationID == "" { - return fmt.Errorf("scheduled route notification id must not be empty") - } - if route.RouteID == "" { - return fmt.Errorf("scheduled route route id must not be empty") - } - - return nil -} - -// Validate reports whether input contains a complete published-route -// transition. -func (input CompleteRoutePublishedInput) Validate() error { - if err := validateCompletionRoute(input.ExpectedRoute); err != nil { - return err - } - if input.LeaseToken == "" { - return fmt.Errorf("lease token must not be empty") - } - if err := validateRouteStateTimestamp("published at", input.PublishedAt); err != nil { - return err - } - if input.Stream == "" { - return fmt.Errorf("stream must not be empty") - } - if input.StreamMaxLen < 0 { - return fmt.Errorf("stream max len must not be negative") - } - if err := validateStreamValues(input.StreamValues); err != nil { - return err - } - - return nil -} - -// Validate reports whether input contains a complete retryable failure -// transition. 
-func (input CompleteRouteFailedInput) Validate() error { - if err := validateCompletionRoute(input.ExpectedRoute); err != nil { - return err - } - if input.LeaseToken == "" { - return fmt.Errorf("lease token must not be empty") - } - if err := validateRouteStateTimestamp("failed at", input.FailedAt); err != nil { - return err - } - if err := validateRouteStateTimestamp("next attempt at", input.NextAttemptAt); err != nil { - return err - } - if input.FailureClassification == "" { - return fmt.Errorf("failure classification must not be empty") - } - if input.FailureMessage == "" { - return fmt.Errorf("failure message must not be empty") - } - - return nil -} - -// Validate reports whether input contains a complete dead-letter transition. -func (input CompleteRouteDeadLetterInput) Validate() error { - if err := validateCompletionRoute(input.ExpectedRoute); err != nil { - return err - } - if input.LeaseToken == "" { - return fmt.Errorf("lease token must not be empty") - } - if err := validateRouteStateTimestamp("dead lettered at", input.DeadLetteredAt); err != nil { - return err - } - if input.FailureClassification == "" { - return fmt.Errorf("failure classification must not be empty") - } - if input.FailureMessage == "" { - return fmt.Errorf("failure message must not be empty") - } - - return nil -} diff --git a/notification/internal/adapters/redisstate/route_state_store_test.go b/notification/internal/adapters/redisstate/route_state_store_test.go deleted file mode 100644 index f2a17a9..0000000 --- a/notification/internal/adapters/redisstate/route_state_store_test.go +++ /dev/null @@ -1,465 +0,0 @@ -package redisstate - -import ( - "context" - "testing" - "time" - - "galaxy/notification/internal/api/intentstream" - "galaxy/notification/internal/service/acceptintent" - - "github.com/alicebob/miniredis/v2" - "github.com/stretchr/testify/require" -) - -func TestAcceptanceStoreListDueRoutesLoadsScheduledMembers(t *testing.T) { - t.Parallel() - - server := 
miniredis.RunT(t) - client := newTestRedisClient(t, server) - - store, err := NewAcceptanceStore(client, AcceptanceConfig{ - RecordTTL: 24 * time.Hour, - DeadLetterTTL: 72 * time.Hour, - IdempotencyTTL: 7 * 24 * time.Hour, - }) - require.NoError(t, err) - - now := time.UnixMilli(1775121700000).UTC() - require.NoError(t, store.CreateAcceptance(context.Background(), validUserAcceptanceInput(now, 0))) - - routes, err := store.ListDueRoutes(context.Background(), now, 10) - require.NoError(t, err) - require.Len(t, routes, 2) - require.ElementsMatch(t, []string{"push:user:user-1", "email:user:user-1"}, []string{routes[0].RouteID, routes[1].RouteID}) - - for _, route := range routes { - require.NoError(t, route.Validate()) - } -} - -func TestAcceptanceStoreReadRouteScheduleSnapshot(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := newTestRedisClient(t, server) - - store, err := NewAcceptanceStore(client, AcceptanceConfig{ - RecordTTL: 24 * time.Hour, - DeadLetterTTL: 72 * time.Hour, - IdempotencyTTL: 7 * 24 * time.Hour, - }) - require.NoError(t, err) - - now := time.UnixMilli(1775121700000).UTC() - require.NoError(t, store.CreateAcceptance(context.Background(), validUserAcceptanceInput(now, 0))) - - snapshot, err := store.ReadRouteScheduleSnapshot(context.Background()) - require.NoError(t, err) - require.Equal(t, int64(2), snapshot.Depth) - require.NotNil(t, snapshot.OldestScheduledFor) - require.Equal(t, now, *snapshot.OldestScheduledFor) -} - -func TestAcceptanceStoreRouteLeaseAcquireReleaseAndExpire(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := newTestRedisClient(t, server) - - store, err := NewAcceptanceStore(client, AcceptanceConfig{ - RecordTTL: 24 * time.Hour, - DeadLetterTTL: 72 * time.Hour, - IdempotencyTTL: 7 * 24 * time.Hour, - }) - require.NoError(t, err) - - acquired, err := store.TryAcquireRouteLease(context.Background(), "1775121700000-0", "push:user:user-1", "token-1", 2*time.Second) - 
require.NoError(t, err) - require.True(t, acquired) - - acquired, err = store.TryAcquireRouteLease(context.Background(), "1775121700000-0", "push:user:user-1", "token-2", 2*time.Second) - require.NoError(t, err) - require.False(t, acquired) - - require.NoError(t, store.ReleaseRouteLease(context.Background(), "1775121700000-0", "push:user:user-1", "token-1")) - acquired, err = store.TryAcquireRouteLease(context.Background(), "1775121700000-0", "push:user:user-1", "token-3", 2*time.Second) - require.NoError(t, err) - require.True(t, acquired) - - server.FastForward(3 * time.Second) - acquired, err = store.TryAcquireRouteLease(context.Background(), "1775121700000-0", "push:user:user-1", "token-4", 2*time.Second) - require.NoError(t, err) - require.True(t, acquired) -} - -func TestAcceptanceStoreCompleteRoutePublishedAppendsTrimmedStreamEntryAndMarksRoutePublished(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := newTestRedisClient(t, server) - - store, err := NewAcceptanceStore(client, AcceptanceConfig{ - RecordTTL: 24 * time.Hour, - DeadLetterTTL: 72 * time.Hour, - IdempotencyTTL: 7 * 24 * time.Hour, - }) - require.NoError(t, err) - - now := time.UnixMilli(1775121700000).UTC() - input := validUserAcceptanceInput(now, 0) - require.NoError(t, store.CreateAcceptance(context.Background(), input)) - - acquired, err := store.TryAcquireRouteLease(context.Background(), input.Notification.NotificationID, "push:user:user-1", "token-1", 5*time.Second) - require.NoError(t, err) - require.True(t, acquired) - - route, found, err := store.GetRoute(context.Background(), input.Notification.NotificationID, "push:user:user-1") - require.NoError(t, err) - require.True(t, found) - - publishedAt := now.Add(time.Second).UTC().Truncate(time.Millisecond) - require.NoError(t, store.CompleteRoutePublished(context.Background(), CompleteRoutePublishedInput{ - ExpectedRoute: route, - LeaseToken: "token-1", - PublishedAt: publishedAt, - Stream: "gateway:client-events", - 
StreamMaxLen: 1024, - StreamValues: map[string]any{ - "user_id": "user-1", - "event_type": "game.turn.ready", - "event_id": input.Notification.NotificationID + "/push:user:user-1", - "payload_bytes": []byte("payload-1"), - "request_id": "request-1", - "trace_id": "trace-1", - }, - })) - - updatedRoute, found, err := store.GetRoute(context.Background(), input.Notification.NotificationID, "push:user:user-1") - require.NoError(t, err) - require.True(t, found) - require.Equal(t, acceptintent.RouteStatusPublished, updatedRoute.Status) - require.Equal(t, 1, updatedRoute.AttemptCount) - require.Equal(t, publishedAt, updatedRoute.PublishedAt) - - scheduled, err := client.ZRange(context.Background(), Keyspace{}.RouteSchedule(), 0, -1).Result() - require.NoError(t, err) - require.Equal(t, []string{Keyspace{}.Route(input.Notification.NotificationID, "email:user:user-1")}, scheduled) - - messages, err := client.XRange(context.Background(), "gateway:client-events", "-", "+").Result() - require.NoError(t, err) - require.Len(t, messages, 1) - require.Equal(t, "user-1", messages[0].Values["user_id"]) - require.Equal(t, "game.turn.ready", messages[0].Values["event_type"]) - - leaseKey := Keyspace{}.RouteLease(input.Notification.NotificationID, "push:user:user-1") - _, err = client.Get(context.Background(), leaseKey).Result() - require.Error(t, err) -} - -func TestAcceptanceStoreCompleteRoutePublishedAppendsUntrimmedMailCommand(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := newTestRedisClient(t, server) - - store, err := NewAcceptanceStore(client, AcceptanceConfig{ - RecordTTL: 24 * time.Hour, - DeadLetterTTL: 72 * time.Hour, - IdempotencyTTL: 7 * 24 * time.Hour, - }) - require.NoError(t, err) - - now := time.UnixMilli(1775121700000).UTC() - input := validUserAcceptanceInput(now, 0) - require.NoError(t, store.CreateAcceptance(context.Background(), input)) - - acquired, err := store.TryAcquireRouteLease(context.Background(), 
input.Notification.NotificationID, "email:user:user-1", "token-1", 5*time.Second) - require.NoError(t, err) - require.True(t, acquired) - - route, found, err := store.GetRoute(context.Background(), input.Notification.NotificationID, "email:user:user-1") - require.NoError(t, err) - require.True(t, found) - - publishedAt := now.Add(time.Second).UTC().Truncate(time.Millisecond) - require.NoError(t, store.CompleteRoutePublished(context.Background(), CompleteRoutePublishedInput{ - ExpectedRoute: route, - LeaseToken: "token-1", - PublishedAt: publishedAt, - Stream: "mail:delivery_commands", - StreamMaxLen: 0, - StreamValues: map[string]any{ - "delivery_id": input.Notification.NotificationID + "/email:user:user-1", - "source": "notification", - "payload_mode": "template", - "idempotency_key": "notification:" + input.Notification.NotificationID + "/email:user:user-1", - "requested_at_ms": "1775121700000", - "payload_json": `{"to":["pilot@example.com"],"cc":[],"bcc":[],"reply_to":[],"template_id":"game.turn.ready","locale":"en","variables":{"game_id":"game-123","game_name":"Nebula Clash","turn_number":54},"attachments":[]}`, - }, - })) - - updatedRoute, found, err := store.GetRoute(context.Background(), input.Notification.NotificationID, "email:user:user-1") - require.NoError(t, err) - require.True(t, found) - require.Equal(t, acceptintent.RouteStatusPublished, updatedRoute.Status) - require.Equal(t, 1, updatedRoute.AttemptCount) - require.Equal(t, publishedAt, updatedRoute.PublishedAt) - - messages, err := client.XRange(context.Background(), "mail:delivery_commands", "-", "+").Result() - require.NoError(t, err) - require.Len(t, messages, 1) - require.Equal(t, "notification", messages[0].Values["source"]) - require.Equal(t, "template", messages[0].Values["payload_mode"]) - require.Equal(t, "1775121700000-0/email:user:user-1", messages[0].Values["delivery_id"]) -} - -func TestAcceptanceStoreCompleteRouteFailedReschedulesRoute(t *testing.T) { - t.Parallel() - - server := 
miniredis.RunT(t) - client := newTestRedisClient(t, server) - - store, err := NewAcceptanceStore(client, AcceptanceConfig{ - RecordTTL: 24 * time.Hour, - DeadLetterTTL: 72 * time.Hour, - IdempotencyTTL: 7 * 24 * time.Hour, - }) - require.NoError(t, err) - - now := time.UnixMilli(1775121700000).UTC() - input := validUserAcceptanceInput(now, 0) - require.NoError(t, store.CreateAcceptance(context.Background(), input)) - - acquired, err := store.TryAcquireRouteLease(context.Background(), input.Notification.NotificationID, "push:user:user-1", "token-1", 5*time.Second) - require.NoError(t, err) - require.True(t, acquired) - - route, found, err := store.GetRoute(context.Background(), input.Notification.NotificationID, "push:user:user-1") - require.NoError(t, err) - require.True(t, found) - - failedAt := now.Add(time.Second).UTC().Truncate(time.Millisecond) - nextAttemptAt := failedAt.Add(2 * time.Second).UTC().Truncate(time.Millisecond) - require.NoError(t, store.CompleteRouteFailed(context.Background(), CompleteRouteFailedInput{ - ExpectedRoute: route, - LeaseToken: "token-1", - FailedAt: failedAt, - NextAttemptAt: nextAttemptAt, - FailureClassification: "gateway_stream_publish_failed", - FailureMessage: "temporary outage", - })) - - updatedRoute, found, err := store.GetRoute(context.Background(), input.Notification.NotificationID, "push:user:user-1") - require.NoError(t, err) - require.True(t, found) - require.Equal(t, acceptintent.RouteStatusFailed, updatedRoute.Status) - require.Equal(t, 1, updatedRoute.AttemptCount) - require.Equal(t, nextAttemptAt, updatedRoute.NextAttemptAt) - require.Equal(t, "gateway_stream_publish_failed", updatedRoute.LastErrorClassification) - - scheduled, err := client.ZRangeWithScores(context.Background(), Keyspace{}.RouteSchedule(), 0, -1).Result() - require.NoError(t, err) - require.Len(t, scheduled, 2) - require.Contains(t, []string{ - scheduled[0].Member.(string), - scheduled[1].Member.(string), - }, 
Keyspace{}.Route(input.Notification.NotificationID, "push:user:user-1")) -} - -func TestAcceptanceStoreCompleteRouteDeadLetterStoresTerminalFailure(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := newTestRedisClient(t, server) - - store, err := NewAcceptanceStore(client, AcceptanceConfig{ - RecordTTL: 24 * time.Hour, - DeadLetterTTL: 72 * time.Hour, - IdempotencyTTL: 7 * 24 * time.Hour, - }) - require.NoError(t, err) - - now := time.UnixMilli(1775121700000).UTC() - input := validUserAcceptanceInput(now, 2) - require.NoError(t, store.CreateAcceptance(context.Background(), input)) - - acquired, err := store.TryAcquireRouteLease(context.Background(), input.Notification.NotificationID, "push:user:user-1", "token-1", 5*time.Second) - require.NoError(t, err) - require.True(t, acquired) - - route, found, err := store.GetRoute(context.Background(), input.Notification.NotificationID, "push:user:user-1") - require.NoError(t, err) - require.True(t, found) - - deadLetteredAt := now.Add(time.Second).UTC().Truncate(time.Millisecond) - require.NoError(t, store.CompleteRouteDeadLetter(context.Background(), CompleteRouteDeadLetterInput{ - ExpectedRoute: route, - LeaseToken: "token-1", - DeadLetteredAt: deadLetteredAt, - FailureClassification: "payload_encoding_failed", - FailureMessage: "payload is invalid", - })) - - updatedRoute, found, err := store.GetRoute(context.Background(), input.Notification.NotificationID, "push:user:user-1") - require.NoError(t, err) - require.True(t, found) - require.Equal(t, acceptintent.RouteStatusDeadLetter, updatedRoute.Status) - require.Equal(t, 3, updatedRoute.AttemptCount) - require.Equal(t, deadLetteredAt, updatedRoute.DeadLetteredAt) - - payload, err := client.Get(context.Background(), Keyspace{}.DeadLetter(input.Notification.NotificationID, "push:user:user-1")).Bytes() - require.NoError(t, err) - entry, err := UnmarshalDeadLetter(payload) - require.NoError(t, err) - require.Equal(t, "payload_encoding_failed", 
entry.FailureClassification) - require.Equal(t, 3, entry.FinalAttemptCount) - - scheduled, err := client.ZRange(context.Background(), Keyspace{}.RouteSchedule(), 0, -1).Result() - require.NoError(t, err) - require.Equal(t, []string{Keyspace{}.Route(input.Notification.NotificationID, "email:user:user-1")}, scheduled) -} - -func TestAcceptanceStoreDeadLetterIsIsolatedByChannelAndRecipient(t *testing.T) { - t.Parallel() - - server := miniredis.RunT(t) - client := newTestRedisClient(t, server) - - store, err := NewAcceptanceStore(client, AcceptanceConfig{ - RecordTTL: 24 * time.Hour, - DeadLetterTTL: 72 * time.Hour, - IdempotencyTTL: 7 * 24 * time.Hour, - }) - require.NoError(t, err) - - now := time.UnixMilli(1775121700000).UTC() - input := validUserAcceptanceInput(now, 2) - input.Notification.RecipientUserIDs = []string{"user-1", "user-2"} - input.Routes = append(input.Routes, - acceptintent.NotificationRoute{ - NotificationID: input.Notification.NotificationID, - RouteID: "push:user:user-2", - Channel: intentstream.ChannelPush, - RecipientRef: "user:user-2", - Status: acceptintent.RouteStatusPending, - AttemptCount: 0, - MaxAttempts: 3, - NextAttemptAt: now, - ResolvedEmail: "second@example.com", - ResolvedLocale: "en", - CreatedAt: now, - UpdatedAt: now, - }, - acceptintent.NotificationRoute{ - NotificationID: input.Notification.NotificationID, - RouteID: "email:user:user-2", - Channel: intentstream.ChannelEmail, - RecipientRef: "user:user-2", - Status: acceptintent.RouteStatusPending, - AttemptCount: 0, - MaxAttempts: 7, - NextAttemptAt: now, - ResolvedEmail: "second@example.com", - ResolvedLocale: "en", - CreatedAt: now, - UpdatedAt: now, - }, - ) - require.NoError(t, store.CreateAcceptance(context.Background(), input)) - - acquired, err := store.TryAcquireRouteLease(context.Background(), input.Notification.NotificationID, "push:user:user-1", "token-1", 5*time.Second) - require.NoError(t, err) - require.True(t, acquired) - - route, found, err := 
store.GetRoute(context.Background(), input.Notification.NotificationID, "push:user:user-1") - require.NoError(t, err) - require.True(t, found) - - deadLetteredAt := now.Add(time.Second).UTC().Truncate(time.Millisecond) - require.NoError(t, store.CompleteRouteDeadLetter(context.Background(), CompleteRouteDeadLetterInput{ - ExpectedRoute: route, - LeaseToken: "token-1", - DeadLetteredAt: deadLetteredAt, - FailureClassification: "gateway_stream_publish_failed", - FailureMessage: "gateway unavailable", - })) - - deadLetterRoute, found, err := store.GetRoute(context.Background(), input.Notification.NotificationID, "push:user:user-1") - require.NoError(t, err) - require.True(t, found) - require.Equal(t, acceptintent.RouteStatusDeadLetter, deadLetterRoute.Status) - - for _, routeID := range []string{"email:user:user-1", "push:user:user-2", "email:user:user-2"} { - route, found, err := store.GetRoute(context.Background(), input.Notification.NotificationID, routeID) - require.NoError(t, err) - require.True(t, found, "route %s should remain stored", routeID) - require.Equal(t, acceptintent.RouteStatusPending, route.Status, "route %s should remain pending", routeID) - } - - scheduled, err := client.ZRange(context.Background(), Keyspace{}.RouteSchedule(), 0, -1).Result() - require.NoError(t, err) - require.ElementsMatch(t, []string{ - Keyspace{}.Route(input.Notification.NotificationID, "email:user:user-1"), - Keyspace{}.Route(input.Notification.NotificationID, "push:user:user-2"), - Keyspace{}.Route(input.Notification.NotificationID, "email:user:user-2"), - }, scheduled) -} - -func validUserAcceptanceInput(now time.Time, pushAttemptCount int) acceptintent.CreateAcceptanceInput { - return acceptintent.CreateAcceptanceInput{ - Notification: acceptintent.NotificationRecord{ - NotificationID: "1775121700000-0", - NotificationType: intentstream.NotificationTypeGameTurnReady, - Producer: intentstream.ProducerGameMaster, - AudienceKind: intentstream.AudienceKindUser, - 
RecipientUserIDs: []string{"user-1"}, - PayloadJSON: `{"game_id":"game-123","game_name":"Nebula Clash","turn_number":54}`, - IdempotencyKey: "game-123:turn-54", - RequestFingerprint: "sha256:deadbeef", - RequestID: "request-1", - TraceID: "trace-1", - OccurredAt: now, - AcceptedAt: now, - UpdatedAt: now, - }, - Routes: []acceptintent.NotificationRoute{ - { - NotificationID: "1775121700000-0", - RouteID: "push:user:user-1", - Channel: intentstream.ChannelPush, - RecipientRef: "user:user-1", - Status: acceptintent.RouteStatusPending, - AttemptCount: pushAttemptCount, - MaxAttempts: 3, - NextAttemptAt: now, - ResolvedEmail: "pilot@example.com", - ResolvedLocale: "en", - CreatedAt: now, - UpdatedAt: now, - }, - { - NotificationID: "1775121700000-0", - RouteID: "email:user:user-1", - Channel: intentstream.ChannelEmail, - RecipientRef: "user:user-1", - Status: acceptintent.RouteStatusPending, - AttemptCount: 0, - MaxAttempts: 7, - NextAttemptAt: now, - ResolvedEmail: "pilot@example.com", - ResolvedLocale: "en", - CreatedAt: now, - UpdatedAt: now, - }, - }, - Idempotency: acceptintent.IdempotencyRecord{ - Producer: intentstream.ProducerGameMaster, - IdempotencyKey: "game-123:turn-54", - NotificationID: "1775121700000-0", - RequestFingerprint: "sha256:deadbeef", - CreatedAt: now, - ExpiresAt: now.Add(7 * 24 * time.Hour), - }, - } -} diff --git a/notification/internal/app/runtime.go b/notification/internal/app/runtime.go index f7ca904..5ac9008 100644 --- a/notification/internal/app/runtime.go +++ b/notification/internal/app/runtime.go @@ -5,7 +5,11 @@ import ( "errors" "fmt" "log/slog" + "time" + "galaxy/notification/internal/adapters/postgres/migrations" + "galaxy/notification/internal/adapters/postgres/notificationstore" + "galaxy/notification/internal/adapters/postgres/routepublisher" redisadapter "galaxy/notification/internal/adapters/redis" "galaxy/notification/internal/adapters/redisstate" userserviceadapter "galaxy/notification/internal/adapters/userservice" @@ 
-14,10 +18,16 @@ import ( "galaxy/notification/internal/service/acceptintent" "galaxy/notification/internal/telemetry" "galaxy/notification/internal/worker" + "galaxy/postgres" "github.com/redis/go-redis/v9" ) +// systemClock satisfies the worker.Clock contract for runtime wiring. +type systemClock struct{} + +func (systemClock) Now() time.Time { return time.Now() } + // Runtime owns the runnable Notification Service process plus the cleanup // functions that release runtime resources after shutdown. type Runtime struct { @@ -25,16 +35,24 @@ type Runtime struct { app *App - probeServer *internalhttp.Server - telemetry *telemetry.Runtime - intentConsumer *worker.IntentConsumer - pushPublisher *worker.PushPublisher - emailPublisher *worker.EmailPublisher + probeServer *internalhttp.Server + telemetry *telemetry.Runtime + intentConsumer *worker.IntentConsumer + pushPublisher *worker.PushPublisher + emailPublisher *worker.EmailPublisher + retentionWorker *worker.SQLRetentionWorker cleanupFns []func() error } // NewRuntime constructs the runnable Notification Service process from cfg. +// +// PostgreSQL migrations apply strictly before any HTTP listener becomes +// ready. The runtime opens one shared `*redis.Client` consumed by the intent +// consumer (XREAD), the publishers (outbound XADDs), the route lease store, +// and the persisted stream offset store. Per PG_PLAN.md §5 the durable +// notification state lives in PostgreSQL while the lease key, the consumer +// offset, and the streams themselves remain on Redis. 
func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*Runtime, error) { if ctx == nil { return nil, fmt.Errorf("new notification runtime: nil context") @@ -91,17 +109,42 @@ func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*R return cleanupOnError(fmt.Errorf("new notification runtime: %w", err)) } - acceptanceStore, err := redisstate.NewAcceptanceStore(redisClient, redisstate.AcceptanceConfig{ - RecordTTL: cfg.Retry.RecordTTL, - DeadLetterTTL: cfg.Retry.DeadLetterTTL, - IdempotencyTTL: cfg.Retry.IdempotencyTTL, + pgPool, err := postgres.OpenPrimary(ctx, cfg.Postgres.Conn, + postgres.WithTracerProvider(telemetryRuntime.TracerProvider()), + postgres.WithMeterProvider(telemetryRuntime.MeterProvider()), + ) + if err != nil { + return cleanupOnError(fmt.Errorf("new notification runtime: open postgres: %w", err)) + } + runtime.cleanupFns = append(runtime.cleanupFns, pgPool.Close) + unregisterPGStats, err := postgres.InstrumentDBStats(pgPool, + postgres.WithMeterProvider(telemetryRuntime.MeterProvider()), + ) + if err != nil { + return cleanupOnError(fmt.Errorf("new notification runtime: instrument postgres: %w", err)) + } + runtime.cleanupFns = append(runtime.cleanupFns, func() error { + unregisterPGStats() + return nil + }) + if err := postgres.Ping(ctx, pgPool, cfg.Postgres.Conn.OperationTimeout); err != nil { + return cleanupOnError(fmt.Errorf("new notification runtime: ping postgres: %w", err)) + } + if err := postgres.RunMigrations(ctx, pgPool, migrations.FS(), "."); err != nil { + return cleanupOnError(fmt.Errorf("new notification runtime: run postgres migrations: %w", err)) + } + + notificationStore, err := notificationstore.New(notificationstore.Config{ + DB: pgPool, + OperationTimeout: cfg.Postgres.Conn.OperationTimeout, }) if err != nil { - return cleanupOnError(fmt.Errorf("new notification runtime: acceptance store: %w", err)) + return cleanupOnError(fmt.Errorf("new notification runtime: notification store: 
%w", err)) } - malformedIntentStore, err := redisstate.NewMalformedIntentStore(redisClient, cfg.Retry.DeadLetterTTL) + + leaseStore, err := redisstate.NewLeaseStore(redisClient) if err != nil { - return cleanupOnError(fmt.Errorf("new notification runtime: malformed intent store: %w", err)) + return cleanupOnError(fmt.Errorf("new notification runtime: lease store: %w", err)) } streamOffsetStore, err := redisstate.NewStreamOffsetStore(redisClient) if err != nil { @@ -111,8 +154,14 @@ func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*R if err != nil { return cleanupOnError(fmt.Errorf("new notification runtime: intent stream lag reader: %w", err)) } - telemetryRuntime.SetRouteScheduleSnapshotReader(acceptanceStore) + publisherStore, err := routepublisher.New(notificationStore, leaseStore) + if err != nil { + return cleanupOnError(fmt.Errorf("new notification runtime: route publisher store: %w", err)) + } + + telemetryRuntime.SetRouteScheduleSnapshotReader(notificationStore) telemetryRuntime.SetIntentStreamLagSnapshotReader(intentStreamLagReader) + userDirectory, err := userserviceadapter.NewClient(userserviceadapter.Config{ BaseURL: cfg.UserService.BaseURL, RequestTimeout: cfg.UserService.Timeout, @@ -121,8 +170,9 @@ func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*R return cleanupOnError(fmt.Errorf("new notification runtime: user service client: %w", err)) } runtime.cleanupFns = append(runtime.cleanupFns, userDirectory.Close) + acceptIntentService, err := acceptintent.New(acceptintent.Config{ - Store: acceptanceStore, + Store: notificationStore, UserDirectory: userDirectory, Clock: nil, Logger: logger, @@ -140,7 +190,7 @@ func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*R Stream: cfg.Streams.Intents, BlockTimeout: cfg.IntentsReadBlockTimeout, Acceptor: acceptIntentService, - MalformedRecorder: malformedIntentStore, + MalformedRecorder: notificationStore, OffsetStore: 
streamOffsetStore, Telemetry: telemetryRuntime, }, logger) @@ -149,7 +199,7 @@ func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*R } runtime.intentConsumer = intentConsumer pushPublisher, err := worker.NewPushPublisher(worker.PushPublisherConfig{ - Store: acceptanceStore, + Store: publisherStore, GatewayStream: cfg.Streams.GatewayClientEvents, GatewayStreamMaxLen: cfg.Streams.GatewayClientEventsStreamMaxLen, RouteLeaseTTL: cfg.Retry.RouteLeaseTTL, @@ -158,13 +208,14 @@ func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*R Encoder: nil, Telemetry: telemetryRuntime, Clock: nil, + StreamPublisher: redisClient, }, logger) if err != nil { return cleanupOnError(fmt.Errorf("new notification runtime: push publisher: %w", err)) } runtime.pushPublisher = pushPublisher emailPublisher, err := worker.NewEmailPublisher(worker.EmailPublisherConfig{ - Store: acceptanceStore, + Store: publisherStore, MailDeliveryCommandsStream: cfg.Streams.MailDeliveryCommands, RouteLeaseTTL: cfg.Retry.RouteLeaseTTL, RouteBackoffMin: cfg.Retry.RouteBackoffMin, @@ -172,12 +223,25 @@ func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*R Encoder: nil, Telemetry: telemetryRuntime, Clock: nil, + StreamPublisher: redisClient, }, logger) if err != nil { return cleanupOnError(fmt.Errorf("new notification runtime: email publisher: %w", err)) } runtime.emailPublisher = emailPublisher + retentionWorker, err := worker.NewSQLRetentionWorker(worker.SQLRetentionConfig{ + Store: notificationStore, + RecordRetention: cfg.Retention.RecordRetention, + MalformedIntentRetention: cfg.Retention.MalformedIntentRetention, + CleanupInterval: cfg.Retention.CleanupInterval, + Clock: systemClock{}, + }, logger) + if err != nil { + return cleanupOnError(fmt.Errorf("new notification runtime: sql retention worker: %w", err)) + } + runtime.retentionWorker = retentionWorker + probeServer, err := internalhttp.NewServer(internalhttp.Config{ Addr: 
cfg.InternalHTTP.Addr, ReadHeaderTimeout: cfg.InternalHTTP.ReadHeaderTimeout, @@ -191,7 +255,7 @@ func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*R return cleanupOnError(fmt.Errorf("new notification runtime: internal HTTP server: %w", err)) } runtime.probeServer = probeServer - runtime.app = New(cfg, probeServer, intentConsumer, pushPublisher, emailPublisher) + runtime.app = New(cfg, probeServer, intentConsumer, pushPublisher, emailPublisher, retentionWorker) return runtime, nil } diff --git a/notification/internal/app/runtime_smoke_test.go b/notification/internal/app/runtime_smoke_test.go deleted file mode 100644 index fb102ee..0000000 --- a/notification/internal/app/runtime_smoke_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package app - -import ( - "context" - "net/http" - "os" - "testing" - "time" - - "galaxy/notification/internal/config" - - "github.com/stretchr/testify/require" - testcontainers "github.com/testcontainers/testcontainers-go" - rediscontainer "github.com/testcontainers/testcontainers-go/modules/redis" -) - -const ( - realRuntimeSmokeEnv = "NOTIFICATION_REAL_RUNTIME_SMOKE" - realRuntimeRedisImage = "redis:7" -) - -func TestRealRuntimeCompatibility(t *testing.T) { - if os.Getenv(realRuntimeSmokeEnv) != "1" { - t.Skipf("set %s=1 to run the real runtime smoke suite", realRuntimeSmokeEnv) - } - - ctx := context.Background() - - redisContainer, err := rediscontainer.Run(ctx, realRuntimeRedisImage) - require.NoError(t, err) - testcontainers.CleanupContainer(t, redisContainer) - - redisAddr, err := redisContainer.Endpoint(ctx, "") - require.NoError(t, err) - - cfg := config.DefaultConfig() - cfg.Redis.Addr = redisAddr - cfg.UserService.BaseURL = "http://user-service.internal" - cfg.InternalHTTP.Addr = mustFreeAddr(t) - cfg.ShutdownTimeout = 2 * time.Second - cfg.Telemetry.TracesExporter = "none" - cfg.Telemetry.MetricsExporter = "none" - - runtime, err := NewRuntime(context.Background(), cfg, testLogger()) - require.NoError(t, 
err) - defer func() { - require.NoError(t, runtime.Close()) - }() - - runCtx, cancel := context.WithCancel(context.Background()) - defer cancel() - - runErrCh := make(chan error, 1) - go func() { - runErrCh <- runtime.Run(runCtx) - }() - - client := &http.Client{ - Timeout: 500 * time.Millisecond, - Transport: &http.Transport{ - DisableKeepAlives: true, - }, - } - t.Cleanup(client.CloseIdleConnections) - - waitForRuntimeReady(t, client, cfg.InternalHTTP.Addr) - assertHTTPStatus(t, client, "http://"+cfg.InternalHTTP.Addr+"/healthz", http.StatusOK) - assertHTTPStatus(t, client, "http://"+cfg.InternalHTTP.Addr+"/readyz", http.StatusOK) - - cancel() - waitForRunResult(t, runErrCh, cfg.ShutdownTimeout+2*time.Second) -} diff --git a/notification/internal/app/runtime_test.go b/notification/internal/app/runtime_test.go deleted file mode 100644 index 157f962..0000000 --- a/notification/internal/app/runtime_test.go +++ /dev/null @@ -1,581 +0,0 @@ -package app - -import ( - "context" - "encoding/json" - "io" - "log/slog" - "net" - "net/http" - "net/http/httptest" - "strconv" - "testing" - "time" - - redisstate "galaxy/notification/internal/adapters/redisstate" - "galaxy/notification/internal/config" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewRuntimeStartsProbeListenerAndStopsCleanly(t *testing.T) { - t.Parallel() - - redisServer := miniredis.RunT(t) - userService := newUserLookupServer(t, func(http.ResponseWriter, *http.Request) {}) - defer userService.Close() - - cfg := config.DefaultConfig() - cfg.Redis.Addr = redisServer.Addr() - cfg.UserService.BaseURL = userService.URL - cfg.InternalHTTP.Addr = mustFreeAddr(t) - cfg.ShutdownTimeout = 10 * time.Second - cfg.IntentsReadBlockTimeout = 25 * time.Millisecond - cfg.Telemetry.TracesExporter = "none" - cfg.Telemetry.MetricsExporter = "none" - - runtime, err := NewRuntime(context.Background(), cfg, 
testLogger()) - require.NoError(t, err) - defer func() { - require.NoError(t, runtime.Close()) - }() - - runCtx, cancel := context.WithCancel(context.Background()) - defer cancel() - - runErrCh := make(chan error, 1) - go func() { - runErrCh <- runtime.Run(runCtx) - }() - - client := newTestHTTPClient(t) - waitForRuntimeReady(t, client, cfg.InternalHTTP.Addr) - assertHTTPStatus(t, client, "http://"+cfg.InternalHTTP.Addr+"/healthz", http.StatusOK) - assertHTTPStatus(t, client, "http://"+cfg.InternalHTTP.Addr+"/readyz", http.StatusOK) - assertHTTPStatus(t, client, "http://"+cfg.InternalHTTP.Addr+"/metrics", http.StatusNotFound) - - cancel() - waitForRunResult(t, runErrCh, cfg.ShutdownTimeout+2*time.Second) -} - -func TestNewRuntimeFailsFastWhenRedisPingCheckFails(t *testing.T) { - t.Parallel() - - cfg := config.DefaultConfig() - cfg.Redis.Addr = mustFreeAddr(t) - cfg.UserService.BaseURL = "http://127.0.0.1:18080" - cfg.IntentsReadBlockTimeout = 25 * time.Millisecond - cfg.Telemetry.TracesExporter = "none" - cfg.Telemetry.MetricsExporter = "none" - - runtime, err := NewRuntime(context.Background(), cfg, testLogger()) - require.Nil(t, runtime) - require.Error(t, err) - assert.ErrorContains(t, err, "ping redis") -} - -func TestNewRuntimeAcceptsIntentThroughConsumer(t *testing.T) { - t.Parallel() - - redisServer := miniredis.RunT(t) - redisClient := redis.NewClient(&redis.Options{ - Addr: redisServer.Addr(), - Protocol: 2, - DisableIdentity: true, - }) - t.Cleanup(func() { - assert.NoError(t, redisClient.Close()) - }) - userService := newUserLookupServer(t, func(writer http.ResponseWriter, request *http.Request) { - writeJSON(t, writer, http.StatusOK, map[string]any{ - "user": map[string]any{ - "email": "pilot@example.com", - "preferred_language": "en-US", - }, - }) - }) - defer userService.Close() - - cfg := config.DefaultConfig() - cfg.Redis.Addr = redisServer.Addr() - cfg.UserService.BaseURL = userService.URL - cfg.InternalHTTP.Addr = mustFreeAddr(t) - 
cfg.ShutdownTimeout = 10 * time.Second - cfg.IntentsReadBlockTimeout = 25 * time.Millisecond - cfg.Telemetry.TracesExporter = "none" - cfg.Telemetry.MetricsExporter = "none" - - runtime, err := NewRuntime(context.Background(), cfg, testLogger()) - require.NoError(t, err) - defer func() { - require.NoError(t, runtime.Close()) - }() - - runCtx, cancel := context.WithCancel(context.Background()) - defer cancel() - - runErrCh := make(chan error, 1) - go func() { - runErrCh <- runtime.Run(runCtx) - }() - - client := newTestHTTPClient(t) - waitForRuntimeReady(t, client, cfg.InternalHTTP.Addr) - - messageID, err := redisClient.XAdd(context.Background(), &redis.XAddArgs{ - Stream: cfg.Streams.Intents, - Values: map[string]any{ - "notification_type": "game.turn.ready", - "producer": "game_master", - "audience_kind": "user", - "recipient_user_ids_json": `["user-1"]`, - "idempotency_key": "game-123:turn-ready", - "occurred_at_ms": "1775121700000", - "payload_json": `{"game_id":"game-123","game_name":"Nebula Clash","turn_number":54}`, - }, - }).Result() - require.NoError(t, err) - - require.Eventually(t, func() bool { - payload, err := redisClient.Get(context.Background(), redisstate.Keyspace{}.Route(messageID, "email:user:user-1")).Bytes() - if err != nil { - return false - } - route, err := redisstate.UnmarshalRoute(payload) - if err != nil { - return false - } - return route.ResolvedEmail == "pilot@example.com" && route.ResolvedLocale == "en" - }, time.Second, 10*time.Millisecond) - - cancel() - waitForRunResult(t, runErrCh, cfg.ShutdownTimeout+2*time.Second) -} - -func TestNewRuntimePublishesAcceptedPushAndEmailRoutes(t *testing.T) { - t.Parallel() - - redisServer := miniredis.RunT(t) - redisClient := redis.NewClient(&redis.Options{ - Addr: redisServer.Addr(), - Protocol: 2, - DisableIdentity: true, - }) - t.Cleanup(func() { - assert.NoError(t, redisClient.Close()) - }) - userService := newUserLookupServer(t, func(writer http.ResponseWriter, request *http.Request) { - 
writeJSON(t, writer, http.StatusOK, map[string]any{ - "user": map[string]any{ - "email": "pilot@example.com", - "preferred_language": "en-US", - }, - }) - }) - defer userService.Close() - - cfg := config.DefaultConfig() - cfg.Redis.Addr = redisServer.Addr() - cfg.UserService.BaseURL = userService.URL - cfg.InternalHTTP.Addr = mustFreeAddr(t) - cfg.ShutdownTimeout = 10 * time.Second - cfg.IntentsReadBlockTimeout = 25 * time.Millisecond - cfg.Telemetry.TracesExporter = "none" - cfg.Telemetry.MetricsExporter = "none" - - runtime, err := NewRuntime(context.Background(), cfg, testLogger()) - require.NoError(t, err) - defer func() { - require.NoError(t, runtime.Close()) - }() - - runCtx, cancel := context.WithCancel(context.Background()) - defer cancel() - - runErrCh := make(chan error, 1) - go func() { - runErrCh <- runtime.Run(runCtx) - }() - - client := newTestHTTPClient(t) - waitForRuntimeReady(t, client, cfg.InternalHTTP.Addr) - - messageID, err := redisClient.XAdd(context.Background(), &redis.XAddArgs{ - Stream: cfg.Streams.Intents, - Values: map[string]any{ - "notification_type": "game.turn.ready", - "producer": "game_master", - "audience_kind": "user", - "recipient_user_ids_json": `["user-1"]`, - "idempotency_key": "game-123:turn-ready", - "occurred_at_ms": "1775121700000", - "payload_json": `{"game_id":"game-123","game_name":"Nebula Clash","turn_number":54}`, - "request_id": "request-1", - "trace_id": "trace-1", - }, - }).Result() - require.NoError(t, err) - - require.Eventually(t, func() bool { - pushPayload, err := redisClient.Get(context.Background(), redisstate.Keyspace{}.Route(messageID, "push:user:user-1")).Bytes() - if err != nil { - return false - } - pushRoute, err := redisstate.UnmarshalRoute(pushPayload) - if err != nil { - return false - } - - emailPayload, err := redisClient.Get(context.Background(), redisstate.Keyspace{}.Route(messageID, "email:user:user-1")).Bytes() - if err != nil { - return false - } - emailRoute, err := 
redisstate.UnmarshalRoute(emailPayload) - if err != nil { - return false - } - - return pushRoute.Status == "published" && pushRoute.AttemptCount == 1 && - emailRoute.Status == "published" && emailRoute.AttemptCount == 1 - }, 2*time.Second, 10*time.Millisecond) - - pushRoutePayload, err := redisClient.Get(context.Background(), redisstate.Keyspace{}.Route(messageID, "push:user:user-1")).Bytes() - require.NoError(t, err) - pushRoute, err := redisstate.UnmarshalRoute(pushRoutePayload) - require.NoError(t, err) - require.Equal(t, "published", string(pushRoute.Status)) - - notificationPayload, err := redisClient.Get(context.Background(), redisstate.Keyspace{}.Notification(messageID)).Bytes() - require.NoError(t, err) - notificationRecord, err := redisstate.UnmarshalNotification(notificationPayload) - require.NoError(t, err) - - emailRoutePayload, err := redisClient.Get(context.Background(), redisstate.Keyspace{}.Route(messageID, "email:user:user-1")).Bytes() - require.NoError(t, err) - emailRoute, err := redisstate.UnmarshalRoute(emailRoutePayload) - require.NoError(t, err) - require.Equal(t, "published", string(emailRoute.Status)) - - messages, err := redisClient.XRange(context.Background(), cfg.Streams.GatewayClientEvents, "-", "+").Result() - require.NoError(t, err) - require.Len(t, messages, 1) - require.Equal(t, "user-1", messages[0].Values["user_id"]) - require.Equal(t, "game.turn.ready", messages[0].Values["event_type"]) - require.Equal(t, messageID+"/push:user:user-1", messages[0].Values["event_id"]) - require.Equal(t, "request-1", messages[0].Values["request_id"]) - require.Equal(t, "trace-1", messages[0].Values["trace_id"]) - require.NotContains(t, messages[0].Values, "device_session_id") - switch payload := messages[0].Values["payload_bytes"].(type) { - case string: - require.NotEmpty(t, payload) - case []byte: - require.NotEmpty(t, payload) - default: - require.Failf(t, "unexpected payload type", "payload_bytes has type %T", payload) - } - - mailCommands, 
err := redisClient.XRange(context.Background(), cfg.Streams.MailDeliveryCommands, "-", "+").Result() - require.NoError(t, err) - require.Len(t, mailCommands, 1) - require.Equal(t, messageID+"/email:user:user-1", mailCommands[0].Values["delivery_id"]) - require.Equal(t, "notification", mailCommands[0].Values["source"]) - require.Equal(t, "template", mailCommands[0].Values["payload_mode"]) - require.Equal(t, "notification:"+messageID+"/email:user:user-1", mailCommands[0].Values["idempotency_key"]) - require.Equal(t, strconv.FormatInt(notificationRecord.AcceptedAt.UnixMilli(), 10), mailCommands[0].Values["requested_at_ms"]) - require.Equal(t, "request-1", mailCommands[0].Values["request_id"]) - require.Equal(t, "trace-1", mailCommands[0].Values["trace_id"]) - require.JSONEq(t, - `{"to":["pilot@example.com"],"cc":[],"bcc":[],"reply_to":[],"template_id":"game.turn.ready","locale":"en","variables":{"game_id":"game-123","game_name":"Nebula Clash","turn_number":54},"attachments":[]}`, - mailCommands[0].Values["payload_json"].(string), - ) - - cancel() - waitForRunResult(t, runErrCh, cfg.ShutdownTimeout+2*time.Second) -} - -func TestNewRuntimePublishesAdminEmailRouteOnlyToMailService(t *testing.T) { - t.Parallel() - - redisServer := miniredis.RunT(t) - redisClient := redis.NewClient(&redis.Options{ - Addr: redisServer.Addr(), - Protocol: 2, - DisableIdentity: true, - }) - t.Cleanup(func() { - assert.NoError(t, redisClient.Close()) - }) - userService := newUserLookupServer(t, func(http.ResponseWriter, *http.Request) {}) - defer userService.Close() - - cfg := config.DefaultConfig() - cfg.Redis.Addr = redisServer.Addr() - cfg.UserService.BaseURL = userService.URL - cfg.AdminRouting.LobbyApplicationSubmitted = []string{"owner@example.com"} - cfg.InternalHTTP.Addr = mustFreeAddr(t) - cfg.ShutdownTimeout = 10 * time.Second - cfg.IntentsReadBlockTimeout = 25 * time.Millisecond - cfg.Telemetry.TracesExporter = "none" - cfg.Telemetry.MetricsExporter = "none" - - runtime, err := 
NewRuntime(context.Background(), cfg, testLogger()) - require.NoError(t, err) - defer func() { - require.NoError(t, runtime.Close()) - }() - - runCtx, cancel := context.WithCancel(context.Background()) - defer cancel() - - runErrCh := make(chan error, 1) - go func() { - runErrCh <- runtime.Run(runCtx) - }() - - client := newTestHTTPClient(t) - waitForRuntimeReady(t, client, cfg.InternalHTTP.Addr) - - messageID, err := redisClient.XAdd(context.Background(), &redis.XAddArgs{ - Stream: cfg.Streams.Intents, - Values: map[string]any{ - "notification_type": "lobby.application.submitted", - "producer": "game_lobby", - "audience_kind": "admin_email", - "idempotency_key": "game-123:application-submitted:user-42", - "occurred_at_ms": "1775121700000", - "payload_json": `{"applicant_name":"Nova Pilot","applicant_user_id":"user-42","game_id":"game-123","game_name":"Nebula Clash"}`, - }, - }).Result() - require.NoError(t, err) - - require.Eventually(t, func() bool { - payload, err := redisClient.Get(context.Background(), redisstate.Keyspace{}.Route(messageID, "email:email:owner@example.com")).Bytes() - if err != nil { - return false - } - route, err := redisstate.UnmarshalRoute(payload) - if err != nil { - return false - } - - return route.Status == "published" && route.AttemptCount == 1 - }, 2*time.Second, 10*time.Millisecond) - - pushRoutePayload, err := redisClient.Get(context.Background(), redisstate.Keyspace{}.Route(messageID, "push:email:owner@example.com")).Bytes() - require.NoError(t, err) - pushRoute, err := redisstate.UnmarshalRoute(pushRoutePayload) - require.NoError(t, err) - require.Equal(t, "skipped", string(pushRoute.Status)) - - mailCommands, err := redisClient.XRange(context.Background(), cfg.Streams.MailDeliveryCommands, "-", "+").Result() - require.NoError(t, err) - require.Len(t, mailCommands, 1) - require.Equal(t, messageID+"/email:email:owner@example.com", mailCommands[0].Values["delivery_id"]) - require.JSONEq(t, - 
`{"to":["owner@example.com"],"cc":[],"bcc":[],"reply_to":[],"template_id":"lobby.application.submitted","locale":"en","variables":{"applicant_name":"Nova Pilot","applicant_user_id":"user-42","game_id":"game-123","game_name":"Nebula Clash"},"attachments":[]}`, - mailCommands[0].Values["payload_json"].(string), - ) - - gatewayMessages, err := redisClient.XRange(context.Background(), cfg.Streams.GatewayClientEvents, "-", "+").Result() - require.NoError(t, err) - require.Empty(t, gatewayMessages) - - cancel() - waitForRunResult(t, runErrCh, cfg.ShutdownTimeout+2*time.Second) -} - -func TestNewRuntimeUsesConfiguredUserServiceTimeout(t *testing.T) { - t.Parallel() - - redisServer := miniredis.RunT(t) - redisClient := redis.NewClient(&redis.Options{ - Addr: redisServer.Addr(), - Protocol: 2, - DisableIdentity: true, - }) - t.Cleanup(func() { - assert.NoError(t, redisClient.Close()) - }) - userService := newUserLookupServer(t, func(_ http.ResponseWriter, request *http.Request) { - <-request.Context().Done() - }) - defer userService.Close() - - cfg := config.DefaultConfig() - cfg.Redis.Addr = redisServer.Addr() - cfg.UserService.BaseURL = userService.URL - cfg.UserService.Timeout = 20 * time.Millisecond - cfg.InternalHTTP.Addr = mustFreeAddr(t) - cfg.ShutdownTimeout = 10 * time.Second - cfg.IntentsReadBlockTimeout = 25 * time.Millisecond - cfg.Telemetry.TracesExporter = "none" - cfg.Telemetry.MetricsExporter = "none" - - runtime, err := NewRuntime(context.Background(), cfg, testLogger()) - require.NoError(t, err) - defer func() { - require.NoError(t, runtime.Close()) - }() - - runCtx, cancel := context.WithCancel(context.Background()) - defer cancel() - - runErrCh := make(chan error, 1) - go func() { - runErrCh <- runtime.Run(runCtx) - }() - - client := newTestHTTPClient(t) - waitForRuntimeReady(t, client, cfg.InternalHTTP.Addr) - - messageID, err := redisClient.XAdd(context.Background(), &redis.XAddArgs{ - Stream: cfg.Streams.Intents, - Values: map[string]any{ - 
"notification_type": "game.turn.ready", - "producer": "game_master", - "audience_kind": "user", - "recipient_user_ids_json": `["user-1"]`, - "idempotency_key": "game-123:turn-ready", - "occurred_at_ms": "1775121700000", - "payload_json": `{"game_id":"game-123","game_name":"Nebula Clash","turn_number":54}`, - }, - }).Result() - require.NoError(t, err) - - var runErr error - require.Eventually(t, func() bool { - select { - case runErr = <-runErrCh: - return true - default: - return false - } - }, time.Second, 10*time.Millisecond) - - require.Error(t, runErr) - require.ErrorContains(t, runErr, "context deadline exceeded") - - offsetStore, err := redisstate.NewStreamOffsetStore(redisClient) - require.NoError(t, err) - offset, found, err := offsetStore.Load(context.Background(), cfg.Streams.Intents) - require.NoError(t, err) - require.False(t, found) - require.Empty(t, offset) - - _, err = redisClient.Get(context.Background(), redisstate.Keyspace{}.Notification(messageID)).Bytes() - require.Error(t, err) -} - -func testLogger() *slog.Logger { - return slog.New(slog.NewTextHandler(io.Discard, nil)) -} - -func newTestHTTPClient(t *testing.T) *http.Client { - t.Helper() - - transport := &http.Transport{DisableKeepAlives: true} - t.Cleanup(transport.CloseIdleConnections) - - return &http.Client{ - Timeout: 500 * time.Millisecond, - Transport: transport, - } -} - -func waitForRuntimeReady(t *testing.T, client *http.Client, addr string) { - t.Helper() - - require.Eventually(t, func() bool { - request, err := http.NewRequest(http.MethodGet, "http://"+addr+"/readyz", nil) - if err != nil { - return false - } - - response, err := client.Do(request) - if err != nil { - return false - } - defer response.Body.Close() - _, _ = io.Copy(io.Discard, response.Body) - - return response.StatusCode == http.StatusOK - }, 5*time.Second, 25*time.Millisecond, "notification runtime did not become reachable") -} - -func waitForRunResult(t *testing.T, runErrCh <-chan error, waitTimeout 
time.Duration) { - t.Helper() - - var err error - require.Eventually(t, func() bool { - select { - case err = <-runErrCh: - return true - default: - return false - } - }, waitTimeout, 10*time.Millisecond, "notification runtime did not stop") - require.NoError(t, err) -} - -func assertHTTPStatus(t *testing.T, client *http.Client, target string, want int) { - t.Helper() - - request, err := http.NewRequest(http.MethodGet, target, nil) - require.NoError(t, err) - - response, err := client.Do(request) - require.NoError(t, err) - defer response.Body.Close() - _, _ = io.Copy(io.Discard, response.Body) - - require.Equal(t, want, response.StatusCode) -} - -func mustFreeAddr(t *testing.T) string { - t.Helper() - - listener, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - defer func() { - assert.NoError(t, listener.Close()) - }() - - return listener.Addr().String() -} - -func newUserLookupServer(t *testing.T, handler func(http.ResponseWriter, *http.Request)) *httptest.Server { - t.Helper() - - return httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { - if request.Method != http.MethodGet { - http.NotFound(writer, request) - return - } - if request.URL.Path != "/api/v1/internal/users/user-1" { - writeJSON(t, writer, http.StatusNotFound, map[string]any{ - "error": map[string]any{ - "code": "subject_not_found", - "message": "subject not found", - }, - }) - return - } - - handler(writer, request) - })) -} - -func writeJSON(t *testing.T, writer http.ResponseWriter, statusCode int, payload any) { - t.Helper() - - body, err := json.Marshal(payload) - require.NoError(t, err) - - writer.Header().Set("Content-Type", "application/json") - writer.WriteHeader(statusCode) - _, err = writer.Write(body) - require.NoError(t, err) -} diff --git a/notification/internal/config/config.go b/notification/internal/config/config.go index 33292c0..9e5d89c 100644 --- a/notification/internal/config/config.go +++ 
b/notification/internal/config/config.go @@ -3,21 +3,21 @@ package config import ( - "crypto/tls" "fmt" - "log/slog" "net" netmail "net/mail" "net/url" - "os" - "strconv" "strings" "time" "galaxy/notification/internal/telemetry" + "galaxy/postgres" + "galaxy/redisconn" ) const ( + envPrefix = "NOTIFICATION" + shutdownTimeoutEnvVar = "NOTIFICATION_SHUTDOWN_TIMEOUT" logLevelEnvVar = "NOTIFICATION_LOG_LEVEL" @@ -26,28 +26,23 @@ const ( internalHTTPReadTimeoutEnvVar = "NOTIFICATION_INTERNAL_HTTP_READ_TIMEOUT" internalHTTPIdleTimeoutEnvVar = "NOTIFICATION_INTERNAL_HTTP_IDLE_TIMEOUT" - redisAddrEnvVar = "NOTIFICATION_REDIS_ADDR" - redisUsernameEnvVar = "NOTIFICATION_REDIS_USERNAME" - redisPasswordEnvVar = "NOTIFICATION_REDIS_PASSWORD" - redisDBEnvVar = "NOTIFICATION_REDIS_DB" - redisTLSEnabledEnvVar = "NOTIFICATION_REDIS_TLS_ENABLED" - redisOperationTimeoutEnvVar = "NOTIFICATION_REDIS_OPERATION_TIMEOUT" - - intentsStreamEnvVar = "NOTIFICATION_INTENTS_STREAM" - intentsReadBlockTimeoutEnvVar = "NOTIFICATION_INTENTS_READ_BLOCK_TIMEOUT" - gatewayClientEventsStreamEnvVar = "NOTIFICATION_GATEWAY_CLIENT_EVENTS_STREAM" + intentsStreamEnvVar = "NOTIFICATION_INTENTS_STREAM" + intentsReadBlockTimeoutEnvVar = "NOTIFICATION_INTENTS_READ_BLOCK_TIMEOUT" + gatewayClientEventsStreamEnvVar = "NOTIFICATION_GATEWAY_CLIENT_EVENTS_STREAM" gatewayClientEventsStreamMaxEnvVar = "NOTIFICATION_GATEWAY_CLIENT_EVENTS_STREAM_MAX_LEN" - mailDeliveryCommandsStreamEnvVar = "NOTIFICATION_MAIL_DELIVERY_COMMANDS_STREAM" + mailDeliveryCommandsStreamEnvVar = "NOTIFICATION_MAIL_DELIVERY_COMMANDS_STREAM" pushRetryMaxAttemptsEnvVar = "NOTIFICATION_PUSH_RETRY_MAX_ATTEMPTS" emailRetryMaxAttemptsEnvVar = "NOTIFICATION_EMAIL_RETRY_MAX_ATTEMPTS" routeLeaseTTLEnvVar = "NOTIFICATION_ROUTE_LEASE_TTL" routeBackoffMinEnvVar = "NOTIFICATION_ROUTE_BACKOFF_MIN" routeBackoffMaxEnvVar = "NOTIFICATION_ROUTE_BACKOFF_MAX" - deadLetterTTLEnvVar = "NOTIFICATION_DEAD_LETTER_TTL" - recordTTLEnvVar = "NOTIFICATION_RECORD_TTL" 
idempotencyTTLEnvVar = "NOTIFICATION_IDEMPOTENCY_TTL" + recordRetentionEnvVar = "NOTIFICATION_RECORD_RETENTION" + malformedIntentRetentionEnvVar = "NOTIFICATION_MALFORMED_INTENT_RETENTION" + cleanupIntervalEnvVar = "NOTIFICATION_CLEANUP_INTERVAL" + userServiceBaseURLEnvVar = "NOTIFICATION_USER_SERVICE_BASE_URL" userServiceTimeoutEnvVar = "NOTIFICATION_USER_SERVICE_TIMEOUT" @@ -71,24 +66,24 @@ const ( defaultReadHeaderTimeout = 2 * time.Second defaultReadTimeout = 10 * time.Second defaultIdleTimeout = time.Minute - defaultRedisDB = 0 - defaultRedisOperationTimeout = 250 * time.Millisecond - defaultIntentsStream = "notification:intents" - defaultIntentsReadBlockTimeout = 2 * time.Second - defaultGatewayClientEventsStream = "gateway:client-events" + defaultIntentsStream = "notification:intents" + defaultIntentsReadBlockTimeout = 2 * time.Second + defaultGatewayClientEventsStream = "gateway:client-events" defaultGatewayClientEventsStreamMaxLen int64 = 1024 - defaultMailDeliveryCommandsStream = "mail:delivery_commands" + defaultMailDeliveryCommandsStream = "mail:delivery_commands" defaultPushRetryMaxAttempts = 3 defaultEmailRetryMaxAttempts = 7 defaultRouteLeaseTTL = 5 * time.Second defaultRouteBackoffMin = time.Second defaultRouteBackoffMax = 5 * time.Minute - defaultDeadLetterTTL = 720 * time.Hour - defaultRecordTTL = 720 * time.Hour defaultIdempotencyTTL = 168 * time.Hour + defaultRecordRetention = 30 * 24 * time.Hour + defaultMalformedIntentRetention = 90 * 24 * time.Hour + defaultCleanupInterval = time.Hour + defaultUserServiceTimeout = time.Second defaultOTelServiceName = "galaxy-notification" @@ -109,20 +104,29 @@ type Config struct { // InternalHTTP configures the private probe HTTP listener. InternalHTTP InternalHTTPConfig - // Redis configures the shared Redis client used by the process. + // Redis configures the shared Redis connection topology and the inbound + // `notification:intents` stream plus the outbound stream names. 
Durable + // notification state lives in PostgreSQL after Stage 5 of `PG_PLAN.md`. Redis RedisConfig - // Streams stores the stable stream names reserved for notification ingress - // and downstream publication. + // Postgres configures the PostgreSQL-backed durable store consumed via + // `pkg/postgres`. + Postgres PostgresConfig + + // Streams stores the stable Redis Stream names reserved for ingress and + // downstream publication. Streams StreamsConfig // IntentsReadBlockTimeout stores the maximum Redis Streams blocking read // window used by the intent consumer. IntentsReadBlockTimeout time.Duration - // Retry stores the frozen retry and retention settings. + // Retry stores the frozen retry settings used by the route publishers. Retry RetryConfig + // Retention stores the periodic SQL retention worker configuration. + Retention RetentionConfig + // UserService configures the trusted user-enrichment dependency. UserService UserServiceConfig @@ -174,51 +178,29 @@ func (cfg InternalHTTPConfig) Validate() error { } } -// RedisConfig configures the shared Redis client and its connection settings. +// RedisConfig configures the Notification Service Redis connection topology. +// Per-call timeouts live in `Conn.OperationTimeout`. type RedisConfig struct { - // Addr stores the Redis network address. - Addr string - - // Username stores the optional Redis ACL username. - Username string - - // Password stores the optional Redis ACL password. - Password string - - // DB stores the Redis logical database index. - DB int - - // TLSEnabled reports whether TLS must be used for Redis connections. - TLSEnabled bool - - // OperationTimeout bounds one Redis round trip including the startup PING. - OperationTimeout time.Duration -} - -// TLSConfig returns the conservative TLS configuration used by the Redis -// client when TLSEnabled is true. 
-func (cfg RedisConfig) TLSConfig() *tls.Config { - if !cfg.TLSEnabled { - return nil - } - - return &tls.Config{MinVersion: tls.VersionTLS12} + // Conn carries the connection topology (master, replicas, password, db, + // per-call timeout). Loaded via redisconn.LoadFromEnv("NOTIFICATION"). + Conn redisconn.Config } // Validate reports whether cfg stores a usable Redis configuration. func (cfg RedisConfig) Validate() error { - switch { - case strings.TrimSpace(cfg.Addr) == "": - return fmt.Errorf("redis addr must not be empty") - case !isTCPAddr(cfg.Addr): - return fmt.Errorf("redis addr %q must use host:port form", cfg.Addr) - case cfg.DB < 0: - return fmt.Errorf("redis db must not be negative") - case cfg.OperationTimeout <= 0: - return fmt.Errorf("redis operation timeout must be positive") - default: - return nil - } + return cfg.Conn.Validate() +} + +// PostgresConfig configures the PostgreSQL-backed durable store. +type PostgresConfig struct { + // Conn stores the primary plus replica DSN topology and pool tuning. + // Loaded via postgres.LoadFromEnv("NOTIFICATION"). + Conn postgres.Config +} + +// Validate reports whether cfg stores a usable PostgreSQL configuration. +func (cfg PostgresConfig) Validate() error { + return cfg.Conn.Validate() } // StreamsConfig stores the stable Redis Stream names used by Notification @@ -254,8 +236,8 @@ func (cfg StreamsConfig) Validate() error { } } -// RetryConfig stores the frozen retry budgets, backoff settings, and retention -// periods used by the service. +// RetryConfig stores the frozen retry budgets, backoff settings, and the +// per-acceptance idempotency window. type RetryConfig struct { // PushMaxAttempts stores the route retry budget for the `push` channel. PushMaxAttempts int @@ -273,18 +255,13 @@ type RetryConfig struct { // RouteBackoffMax stores the maximum retry backoff. RouteBackoffMax time.Duration - // DeadLetterTTL stores the retention period for dead-letter and malformed - // intent records. 
- DeadLetterTTL time.Duration - - // RecordTTL stores the retention period for notification and route records. - RecordTTL time.Duration - - // IdempotencyTTL stores the retention period for idempotency records. + // IdempotencyTTL stores the per-acceptance idempotency window the service + // layer applies to the durable `idempotency_expires_at` column on the + // `records` table. IdempotencyTTL time.Duration } -// Validate reports whether cfg stores usable retry and retention settings. +// Validate reports whether cfg stores usable retry settings. func (cfg RetryConfig) Validate() error { switch { case cfg.PushMaxAttempts <= 0: @@ -299,10 +276,6 @@ func (cfg RetryConfig) Validate() error { return fmt.Errorf("route backoff max must be positive") case cfg.RouteBackoffMin > cfg.RouteBackoffMax: return fmt.Errorf("route backoff min must not exceed route backoff max") - case cfg.DeadLetterTTL <= 0: - return fmt.Errorf("dead-letter ttl must be positive") - case cfg.RecordTTL <= 0: - return fmt.Errorf("record ttl must be positive") case cfg.IdempotencyTTL <= 0: return fmt.Errorf("idempotency ttl must be positive") default: @@ -310,6 +283,36 @@ func (cfg RetryConfig) Validate() error { } } +// RetentionConfig stores the durable retention windows applied by the +// periodic SQL retention worker. +type RetentionConfig struct { + // RecordRetention bounds how long records (and their cascaded routes and + // dead_letters) survive after acceptance. + RecordRetention time.Duration + + // MalformedIntentRetention bounds how long malformed-intent rows survive + // after their original `recorded_at`. + MalformedIntentRetention time.Duration + + // CleanupInterval stores the wall-clock period between two retention + // passes. + CleanupInterval time.Duration +} + +// Validate reports whether cfg stores a usable retention configuration. 
+func (cfg RetentionConfig) Validate() error { + switch { + case cfg.RecordRetention <= 0: + return fmt.Errorf("%s must be positive", recordRetentionEnvVar) + case cfg.MalformedIntentRetention <= 0: + return fmt.Errorf("%s must be positive", malformedIntentRetentionEnvVar) + case cfg.CleanupInterval <= 0: + return fmt.Errorf("%s must be positive", cleanupIntervalEnvVar) + default: + return nil + } +} + // UserServiceConfig configures the trusted user-enrichment dependency. type UserServiceConfig struct { // BaseURL stores the absolute base URL of the trusted User Service. @@ -336,12 +339,10 @@ func (cfg UserServiceConfig) Validate() error { // AdminRoutingConfig stores the type-specific configured administrator email // lists. type AdminRoutingConfig struct { - // GeoReviewRecommended stores recipients for - // `geo.review_recommended`. + // GeoReviewRecommended stores recipients for `geo.review_recommended`. GeoReviewRecommended []string - // GameGenerationFailed stores recipients for - // `game.generation_failed`. + // GameGenerationFailed stores recipients for `game.generation_failed`. 
GameGenerationFailed []string // LobbyRuntimePausedAfterStart stores recipients for @@ -431,14 +432,16 @@ func DefaultConfig() Config { IdleTimeout: defaultIdleTimeout, }, Redis: RedisConfig{ - DB: defaultRedisDB, - OperationTimeout: defaultRedisOperationTimeout, + Conn: redisconn.DefaultConfig(), + }, + Postgres: PostgresConfig{ + Conn: postgres.DefaultConfig(), }, Streams: StreamsConfig{ - Intents: defaultIntentsStream, - GatewayClientEvents: defaultGatewayClientEventsStream, + Intents: defaultIntentsStream, + GatewayClientEvents: defaultGatewayClientEventsStream, GatewayClientEventsStreamMaxLen: defaultGatewayClientEventsStreamMaxLen, - MailDeliveryCommands: defaultMailDeliveryCommandsStream, + MailDeliveryCommands: defaultMailDeliveryCommandsStream, }, IntentsReadBlockTimeout: defaultIntentsReadBlockTimeout, Retry: RetryConfig{ @@ -447,10 +450,13 @@ func DefaultConfig() Config { RouteLeaseTTL: defaultRouteLeaseTTL, RouteBackoffMin: defaultRouteBackoffMin, RouteBackoffMax: defaultRouteBackoffMax, - DeadLetterTTL: defaultDeadLetterTTL, - RecordTTL: defaultRecordTTL, IdempotencyTTL: defaultIdempotencyTTL, }, + Retention: RetentionConfig{ + RecordRetention: defaultRecordRetention, + MalformedIntentRetention: defaultMalformedIntentRetention, + CleanupInterval: defaultCleanupInterval, + }, UserService: UserServiceConfig{ Timeout: defaultUserServiceTimeout, }, @@ -462,167 +468,21 @@ func DefaultConfig() Config { } } -// LoadFromEnv loads the Notification Service process configuration from -// environment variables, applying documented defaults where appropriate. 
-func LoadFromEnv() (Config, error) { - cfg := DefaultConfig() - - var err error - - cfg.ShutdownTimeout, err = loadDurationEnvWithDefault(shutdownTimeoutEnvVar, cfg.ShutdownTimeout) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - - cfg.Logging.Level = loadStringEnvWithDefault(logLevelEnvVar, cfg.Logging.Level) - if err := validateLogLevel(cfg.Logging.Level); err != nil { - return Config{}, fmt.Errorf("load notification config: %s: %w", logLevelEnvVar, err) - } - - cfg.InternalHTTP.Addr = loadStringEnvWithDefault(internalHTTPAddrEnvVar, cfg.InternalHTTP.Addr) - cfg.InternalHTTP.ReadHeaderTimeout, err = loadDurationEnvWithDefault(internalHTTPReadHeaderTimeoutEnvVar, cfg.InternalHTTP.ReadHeaderTimeout) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - cfg.InternalHTTP.ReadTimeout, err = loadDurationEnvWithDefault(internalHTTPReadTimeoutEnvVar, cfg.InternalHTTP.ReadTimeout) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - cfg.InternalHTTP.IdleTimeout, err = loadDurationEnvWithDefault(internalHTTPIdleTimeoutEnvVar, cfg.InternalHTTP.IdleTimeout) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - - cfg.Redis.Addr = loadStringEnvWithDefault(redisAddrEnvVar, cfg.Redis.Addr) - cfg.Redis.Username = os.Getenv(redisUsernameEnvVar) - cfg.Redis.Password = os.Getenv(redisPasswordEnvVar) - cfg.Redis.DB, err = loadIntEnvWithDefault(redisDBEnvVar, cfg.Redis.DB) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - cfg.Redis.TLSEnabled, err = loadBoolEnvWithDefault(redisTLSEnabledEnvVar, cfg.Redis.TLSEnabled) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - cfg.Redis.OperationTimeout, err = loadDurationEnvWithDefault(redisOperationTimeoutEnvVar, cfg.Redis.OperationTimeout) - if err != nil { - return Config{}, fmt.Errorf("load notification 
config: %w", err) - } - - cfg.Streams.Intents = loadStringEnvWithDefault(intentsStreamEnvVar, cfg.Streams.Intents) - cfg.Streams.GatewayClientEvents = loadStringEnvWithDefault(gatewayClientEventsStreamEnvVar, cfg.Streams.GatewayClientEvents) - cfg.Streams.GatewayClientEventsStreamMaxLen, err = loadInt64EnvWithDefault(gatewayClientEventsStreamMaxEnvVar, cfg.Streams.GatewayClientEventsStreamMaxLen) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - cfg.Streams.MailDeliveryCommands = loadStringEnvWithDefault(mailDeliveryCommandsStreamEnvVar, cfg.Streams.MailDeliveryCommands) - cfg.IntentsReadBlockTimeout, err = loadDurationEnvWithDefault(intentsReadBlockTimeoutEnvVar, cfg.IntentsReadBlockTimeout) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - - cfg.Retry.PushMaxAttempts, err = loadIntEnvWithDefault(pushRetryMaxAttemptsEnvVar, cfg.Retry.PushMaxAttempts) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - cfg.Retry.EmailMaxAttempts, err = loadIntEnvWithDefault(emailRetryMaxAttemptsEnvVar, cfg.Retry.EmailMaxAttempts) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - cfg.Retry.RouteLeaseTTL, err = loadDurationEnvWithDefault(routeLeaseTTLEnvVar, cfg.Retry.RouteLeaseTTL) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - cfg.Retry.RouteBackoffMin, err = loadDurationEnvWithDefault(routeBackoffMinEnvVar, cfg.Retry.RouteBackoffMin) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - cfg.Retry.RouteBackoffMax, err = loadDurationEnvWithDefault(routeBackoffMaxEnvVar, cfg.Retry.RouteBackoffMax) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - cfg.Retry.DeadLetterTTL, err = loadDurationEnvWithDefault(deadLetterTTLEnvVar, cfg.Retry.DeadLetterTTL) - if err != nil { - return Config{}, fmt.Errorf("load 
notification config: %w", err) - } - cfg.Retry.RecordTTL, err = loadDurationEnvWithDefault(recordTTLEnvVar, cfg.Retry.RecordTTL) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - cfg.Retry.IdempotencyTTL, err = loadDurationEnvWithDefault(idempotencyTTLEnvVar, cfg.Retry.IdempotencyTTL) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - - cfg.UserService.BaseURL = normalizeBaseURL(loadStringEnvWithDefault(userServiceBaseURLEnvVar, cfg.UserService.BaseURL)) - cfg.UserService.Timeout, err = loadDurationEnvWithDefault(userServiceTimeoutEnvVar, cfg.UserService.Timeout) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - - cfg.AdminRouting.GeoReviewRecommended, err = loadEmailListEnv(adminEmailsGeoReviewRecommendedEnvVar, cfg.AdminRouting.GeoReviewRecommended) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - cfg.AdminRouting.GameGenerationFailed, err = loadEmailListEnv(adminEmailsGameGenerationFailedEnvVar, cfg.AdminRouting.GameGenerationFailed) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - cfg.AdminRouting.LobbyRuntimePausedAfterStart, err = loadEmailListEnv(adminEmailsLobbyRuntimePausedAfterEnvVar, cfg.AdminRouting.LobbyRuntimePausedAfterStart) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - cfg.AdminRouting.LobbyApplicationSubmitted, err = loadEmailListEnv(adminEmailsLobbyApplicationSubmittedEnvVar, cfg.AdminRouting.LobbyApplicationSubmitted) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - - cfg.Telemetry.ServiceName = loadStringEnvWithDefault(otelServiceNameEnvVar, cfg.Telemetry.ServiceName) - cfg.Telemetry.TracesExporter = normalizeExporterValue(loadStringEnvWithDefault(otelTracesExporterEnvVar, cfg.Telemetry.TracesExporter)) - cfg.Telemetry.MetricsExporter = 
normalizeExporterValue(loadStringEnvWithDefault(otelMetricsExporterEnvVar, cfg.Telemetry.MetricsExporter)) - cfg.Telemetry.TracesProtocol = loadOTLPProtocol( - os.Getenv(otelExporterOTLPTracesProtocolEnvVar), - os.Getenv(otelExporterOTLPProtocolEnvVar), - cfg.Telemetry.TracesExporter, - ) - cfg.Telemetry.MetricsProtocol = loadOTLPProtocol( - os.Getenv(otelExporterOTLPMetricsProtocolEnvVar), - os.Getenv(otelExporterOTLPProtocolEnvVar), - cfg.Telemetry.MetricsExporter, - ) - cfg.Telemetry.StdoutTracesEnabled, err = loadBoolEnvWithDefault(otelStdoutTracesEnabledEnvVar, cfg.Telemetry.StdoutTracesEnabled) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - cfg.Telemetry.StdoutMetricsEnabled, err = loadBoolEnvWithDefault(otelStdoutMetricsEnabledEnvVar, cfg.Telemetry.StdoutMetricsEnabled) - if err != nil { - return Config{}, fmt.Errorf("load notification config: %w", err) - } - - if err := cfg.Validate(); err != nil { - return Config{}, err - } - - return cfg, nil -} - // Validate reports whether cfg contains a consistent Notification Service // process configuration. 
func (cfg Config) Validate() error { - switch { - case cfg.ShutdownTimeout <= 0: + if cfg.ShutdownTimeout <= 0 { return fmt.Errorf("load notification config: %s must be positive", shutdownTimeoutEnvVar) - case strings.TrimSpace(cfg.Redis.Addr) == "": - return fmt.Errorf("load notification config: %s must not be empty", redisAddrEnvVar) - case strings.TrimSpace(cfg.UserService.BaseURL) == "": - return fmt.Errorf("load notification config: %s must not be empty", userServiceBaseURLEnvVar) } if err := cfg.InternalHTTP.Validate(); err != nil { return fmt.Errorf("load notification config: %s", err) } if err := cfg.Redis.Validate(); err != nil { - return fmt.Errorf("load notification config: %s", err) + return fmt.Errorf("load notification config: %w", err) + } + if err := cfg.Postgres.Validate(); err != nil { + return fmt.Errorf("load notification config: %w", err) } if err := cfg.Streams.Validate(); err != nil { return fmt.Errorf("load notification config: %s", err) @@ -633,6 +493,9 @@ func (cfg Config) Validate() error { if err := cfg.Retry.Validate(); err != nil { return fmt.Errorf("load notification config: %s", err) } + if err := cfg.Retention.Validate(); err != nil { + return fmt.Errorf("load notification config: %s", err) + } if err := cfg.UserService.Validate(); err != nil { return fmt.Errorf("load notification config: %s", err) } @@ -646,77 +509,35 @@ func (cfg Config) Validate() error { return nil } -func loadStringEnvWithDefault(name string, value string) string { - if raw, ok := os.LookupEnv(name); ok { - return strings.TrimSpace(raw) +func validateNormalizedEmailList(name string, values []string) error { + for index, value := range values { + normalized, err := normalizeMailboxAddress(value) + if err != nil { + return fmt.Errorf("%s[%d]: %w", name, index, err) + } + if normalized != value { + return fmt.Errorf("%s[%d]: email address must already be normalized", name, index) + } } - return value + return nil } -func loadDurationEnvWithDefault(name string, 
value time.Duration) (time.Duration, error) { - raw, ok := os.LookupEnv(name) - if !ok { - return value, nil +func normalizeMailboxAddress(value string) (string, error) { + trimmed := strings.TrimSpace(value) + if trimmed == "" { + return "", fmt.Errorf("email address must not be empty") } - parsed, err := time.ParseDuration(strings.TrimSpace(raw)) + parsed, err := netmail.ParseAddress(trimmed) if err != nil { - return 0, fmt.Errorf("%s: %w", name, err) + return "", fmt.Errorf("invalid email address %q: %w", trimmed, err) + } + if parsed.Name != "" { + return "", fmt.Errorf("email address %q must not include a display name", trimmed) } - return parsed, nil -} - -func loadIntEnvWithDefault(name string, value int) (int, error) { - raw, ok := os.LookupEnv(name) - if !ok { - return value, nil - } - - parsed, err := strconv.Atoi(strings.TrimSpace(raw)) - if err != nil { - return 0, fmt.Errorf("%s: %w", name, err) - } - - return parsed, nil -} - -func loadInt64EnvWithDefault(name string, value int64) (int64, error) { - raw, ok := os.LookupEnv(name) - if !ok { - return value, nil - } - - parsed, err := strconv.ParseInt(strings.TrimSpace(raw), 10, 64) - if err != nil { - return 0, fmt.Errorf("%s: %w", name, err) - } - - return parsed, nil -} - -func loadBoolEnvWithDefault(name string, value bool) (bool, error) { - raw, ok := os.LookupEnv(name) - if !ok { - return value, nil - } - - parsed, err := strconv.ParseBool(strings.TrimSpace(raw)) - if err != nil { - return false, fmt.Errorf("%s: %w", name, err) - } - - return parsed, nil -} - -func loadEmailListEnv(name string, value []string) ([]string, error) { - raw, ok := os.LookupEnv(name) - if !ok { - return append([]string(nil), value...), nil - } - - return parseEmailList(name, raw) + return strings.ToLower(parsed.Address), nil } func parseEmailList(name string, raw string) ([]string, error) { @@ -743,63 +564,6 @@ func parseEmailList(name string, raw string) ([]string, error) { return addresses, nil } -func 
normalizeMailboxAddress(value string) (string, error) { - trimmed := strings.TrimSpace(value) - if trimmed == "" { - return "", fmt.Errorf("email address must not be empty") - } - - parsed, err := netmail.ParseAddress(trimmed) - if err != nil { - return "", fmt.Errorf("invalid email address %q: %w", trimmed, err) - } - if parsed.Name != "" { - return "", fmt.Errorf("email address %q must not include a display name", trimmed) - } - - return strings.ToLower(parsed.Address), nil -} - -func validateNormalizedEmailList(name string, values []string) error { - for index, value := range values { - normalized, err := normalizeMailboxAddress(value) - if err != nil { - return fmt.Errorf("%s[%d]: %w", name, index, err) - } - if normalized != value { - return fmt.Errorf("%s[%d]: email address must already be normalized", name, index) - } - } - - return nil -} - -func validateLogLevel(value string) error { - var level slog.Level - return level.UnmarshalText([]byte(strings.TrimSpace(value))) -} - -func normalizeExporterValue(value string) string { - switch strings.TrimSpace(value) { - case "", otelExporterNone: - return otelExporterNone - default: - return strings.TrimSpace(value) - } -} - -func loadOTLPProtocol(primary string, fallback string, exporter string) string { - protocol := strings.TrimSpace(primary) - if protocol == "" { - protocol = strings.TrimSpace(fallback) - } - if protocol == "" && exporter == otelExporterOTLP { - return otelProtocolHTTPProtobuf - } - - return protocol -} - func normalizeBaseURL(value string) string { trimmed := strings.TrimSpace(value) if trimmed == "" { diff --git a/notification/internal/config/config_test.go b/notification/internal/config/config_test.go index 269927e..9385111 100644 --- a/notification/internal/config/config_test.go +++ b/notification/internal/config/config_test.go @@ -4,12 +4,42 @@ import ( "testing" "time" + "galaxy/postgres" + "galaxy/redisconn" + "github.com/stretchr/testify/require" ) -func TestLoadFromEnvUsesDefaults(t 
*testing.T) { - t.Setenv(redisAddrEnvVar, "127.0.0.1:6379") +const ( + envRedisMasterAddr = "NOTIFICATION_REDIS_MASTER_ADDR" + envRedisReplicaAddrs = "NOTIFICATION_REDIS_REPLICA_ADDRS" + envRedisPassword = "NOTIFICATION_REDIS_PASSWORD" + envRedisDB = "NOTIFICATION_REDIS_DB" + envRedisOpTimeout = "NOTIFICATION_REDIS_OPERATION_TIMEOUT" + envRedisTLSEnabled = "NOTIFICATION_REDIS_TLS_ENABLED" + envRedisUsername = "NOTIFICATION_REDIS_USERNAME" + + envPostgresPrimaryDSN = "NOTIFICATION_POSTGRES_PRIMARY_DSN" + envPostgresOpTimeout = "NOTIFICATION_POSTGRES_OPERATION_TIMEOUT" + envPostgresMaxOpenConns = "NOTIFICATION_POSTGRES_MAX_OPEN_CONNS" + envPostgresMaxIdleConns = "NOTIFICATION_POSTGRES_MAX_IDLE_CONNS" + envPostgresConnMaxLife = "NOTIFICATION_POSTGRES_CONN_MAX_LIFETIME" +) + +const ( + defaultPrimaryDSN = "postgres://notificationservice:notificationservice@127.0.0.1:5432/galaxy?search_path=notification&sslmode=disable" +) + +func setRequiredConnEnv(t *testing.T) { + t.Helper() + t.Setenv(envRedisMasterAddr, "127.0.0.1:6379") + t.Setenv(envRedisPassword, "secret") + t.Setenv(envPostgresPrimaryDSN, defaultPrimaryDSN) t.Setenv(userServiceBaseURLEnvVar, "http://user-service.internal") +} + +func TestLoadFromEnvUsesDefaults(t *testing.T) { + setRequiredConnEnv(t) cfg, err := LoadFromEnv() require.NoError(t, err) @@ -18,11 +48,14 @@ func TestLoadFromEnvUsesDefaults(t *testing.T) { require.Equal(t, defaults.ShutdownTimeout, cfg.ShutdownTimeout) require.Equal(t, defaults.Logging, cfg.Logging) require.Equal(t, defaults.InternalHTTP, cfg.InternalHTTP) - require.Equal(t, "127.0.0.1:6379", cfg.Redis.Addr) - require.Equal(t, defaults.Redis.DB, cfg.Redis.DB) - require.Equal(t, defaults.Redis.OperationTimeout, cfg.Redis.OperationTimeout) + require.Equal(t, "127.0.0.1:6379", cfg.Redis.Conn.MasterAddr) + require.Equal(t, "secret", cfg.Redis.Conn.Password) + require.Equal(t, defaults.Redis.Conn.DB, cfg.Redis.Conn.DB) + require.Equal(t, defaults.Redis.Conn.OperationTimeout, 
cfg.Redis.Conn.OperationTimeout) + require.Equal(t, defaultPrimaryDSN, cfg.Postgres.Conn.PrimaryDSN) require.Equal(t, defaults.Streams, cfg.Streams) require.Equal(t, defaults.Retry, cfg.Retry) + require.Equal(t, defaults.Retention, cfg.Retention) require.Equal(t, UserServiceConfig{ BaseURL: "http://user-service.internal", Timeout: defaults.UserService.Timeout, @@ -38,12 +71,19 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) { t.Setenv(internalHTTPReadHeaderTimeoutEnvVar, "3s") t.Setenv(internalHTTPReadTimeoutEnvVar, "11s") t.Setenv(internalHTTPIdleTimeoutEnvVar, "61s") - t.Setenv(redisAddrEnvVar, "127.0.0.1:6380") - t.Setenv(redisUsernameEnvVar, "alice") - t.Setenv(redisPasswordEnvVar, "secret") - t.Setenv(redisDBEnvVar, "3") - t.Setenv(redisTLSEnabledEnvVar, "true") - t.Setenv(redisOperationTimeoutEnvVar, "750ms") + + t.Setenv(envRedisMasterAddr, "127.0.0.1:6380") + t.Setenv(envRedisReplicaAddrs, "127.0.0.1:6381,127.0.0.1:6382") + t.Setenv(envRedisPassword, "topsecret") + t.Setenv(envRedisDB, "3") + t.Setenv(envRedisOpTimeout, "750ms") + + t.Setenv(envPostgresPrimaryDSN, defaultPrimaryDSN) + t.Setenv(envPostgresOpTimeout, "1500ms") + t.Setenv(envPostgresMaxOpenConns, "32") + t.Setenv(envPostgresMaxIdleConns, "8") + t.Setenv(envPostgresConnMaxLife, "45m") + t.Setenv(intentsStreamEnvVar, "notification:test_intents") t.Setenv(intentsReadBlockTimeoutEnvVar, "3500ms") t.Setenv(gatewayClientEventsStreamEnvVar, "gateway:test_client-events") @@ -54,9 +94,10 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) { t.Setenv(routeLeaseTTLEnvVar, "7s") t.Setenv(routeBackoffMinEnvVar, "2s") t.Setenv(routeBackoffMaxEnvVar, "7m") - t.Setenv(deadLetterTTLEnvVar, "120h") - t.Setenv(recordTTLEnvVar, "240h") t.Setenv(idempotencyTTLEnvVar, "48h") + t.Setenv(recordRetentionEnvVar, "21d") + t.Setenv(malformedIntentRetentionEnvVar, "168h") + t.Setenv(cleanupIntervalEnvVar, "30m") t.Setenv(userServiceBaseURLEnvVar, "https://user-service.internal/api/") 
t.Setenv(userServiceTimeoutEnvVar, "1500ms") t.Setenv(adminEmailsGeoReviewRecommendedEnvVar, "First@example.com, second@example.com, first@example.com") @@ -70,6 +111,9 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) { t.Setenv(otelStdoutTracesEnabledEnvVar, "true") t.Setenv(otelStdoutMetricsEnabledEnvVar, "true") + // Time package does not support `21d`; use 504h directly. + t.Setenv(recordRetentionEnvVar, "504h") + cfg, err := LoadFromEnv() require.NoError(t, err) @@ -82,18 +126,28 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) { IdleTimeout: 61 * time.Second, }, cfg.InternalHTTP) require.Equal(t, RedisConfig{ - Addr: "127.0.0.1:6380", - Username: "alice", - Password: "secret", - DB: 3, - TLSEnabled: true, - OperationTimeout: 750 * time.Millisecond, + Conn: redisconn.Config{ + MasterAddr: "127.0.0.1:6380", + ReplicaAddrs: []string{"127.0.0.1:6381", "127.0.0.1:6382"}, + Password: "topsecret", + DB: 3, + OperationTimeout: 750 * time.Millisecond, + }, }, cfg.Redis) + require.Equal(t, PostgresConfig{ + Conn: postgres.Config{ + PrimaryDSN: defaultPrimaryDSN, + OperationTimeout: 1500 * time.Millisecond, + MaxOpenConns: 32, + MaxIdleConns: 8, + ConnMaxLifetime: 45 * time.Minute, + }, + }, cfg.Postgres) require.Equal(t, StreamsConfig{ - Intents: "notification:test_intents", - GatewayClientEvents: "gateway:test_client-events", + Intents: "notification:test_intents", + GatewayClientEvents: "gateway:test_client-events", GatewayClientEventsStreamMaxLen: 2048, - MailDeliveryCommands: "mail:test_delivery_commands", + MailDeliveryCommands: "mail:test_delivery_commands", }, cfg.Streams) require.Equal(t, 3500*time.Millisecond, cfg.IntentsReadBlockTimeout) require.Equal(t, RetryConfig{ @@ -102,10 +156,13 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) { RouteLeaseTTL: 7 * time.Second, RouteBackoffMin: 2 * time.Second, RouteBackoffMax: 7 * time.Minute, - DeadLetterTTL: 120 * time.Hour, - RecordTTL: 240 * time.Hour, IdempotencyTTL: 48 * time.Hour, }, cfg.Retry) + 
require.Equal(t, RetentionConfig{ + RecordRetention: 504 * time.Hour, + MalformedIntentRetention: 168 * time.Hour, + CleanupInterval: 30 * time.Minute, + }, cfg.Retention) require.Equal(t, UserServiceConfig{ BaseURL: "https://user-service.internal/api", Timeout: 1500 * time.Millisecond, @@ -127,6 +184,27 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) { }, cfg.Telemetry) } +func TestLoadFromEnvRejectsDeprecatedRedisVars(t *testing.T) { + tests := []struct { + name string + envName string + }{ + {name: "tls enabled rejected", envName: envRedisTLSEnabled}, + {name: "username rejected", envName: envRedisUsername}, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + setRequiredConnEnv(t) + t.Setenv(tt.envName, "true") + + _, err := LoadFromEnv() + require.Error(t, err) + require.Contains(t, err.Error(), tt.envName) + }) + } +} + func TestLoadFromEnvRejectsInvalidValues(t *testing.T) { tests := []struct { name string @@ -135,14 +213,16 @@ func TestLoadFromEnvRejectsInvalidValues(t *testing.T) { }{ {name: "invalid duration", envName: shutdownTimeoutEnvVar, envVal: "later"}, {name: "invalid log level", envName: logLevelEnvVar, envVal: "verbose"}, - {name: "invalid redis db", envName: redisDBEnvVar, envVal: "db-three"}, - {name: "invalid redis tls", envName: redisTLSEnabledEnvVar, envVal: "sometimes"}, + {name: "invalid redis db", envName: envRedisDB, envVal: "db-three"}, {name: "invalid push retries", envName: pushRetryMaxAttemptsEnvVar, envVal: "many"}, {name: "invalid email retries", envName: emailRetryMaxAttemptsEnvVar, envVal: "several"}, {name: "invalid gateway client events stream max len", envName: gatewayClientEventsStreamMaxEnvVar, envVal: "many"}, {name: "invalid user service timeout", envName: userServiceTimeoutEnvVar, envVal: "soon"}, {name: "invalid intents read block timeout", envName: intentsReadBlockTimeoutEnvVar, envVal: "later"}, {name: "invalid route lease ttl", envName: routeLeaseTTLEnvVar, envVal: "eventually"}, + 
{name: "invalid record retention", envName: recordRetentionEnvVar, envVal: "later"}, + {name: "invalid malformed intent retention", envName: malformedIntentRetentionEnvVar, envVal: "later"}, + {name: "invalid cleanup interval", envName: cleanupIntervalEnvVar, envVal: "later"}, {name: "invalid traces exporter", envName: otelTracesExporterEnvVar, envVal: "stdout"}, {name: "invalid metrics protocol", envName: otelExporterOTLPMetricsProtocolEnvVar, envVal: "udp"}, {name: "invalid stdout traces", envName: otelStdoutTracesEnabledEnvVar, envVal: "sometimes"}, @@ -152,8 +232,7 @@ func TestLoadFromEnvRejectsInvalidValues(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { - t.Setenv(redisAddrEnvVar, "127.0.0.1:6379") - t.Setenv(userServiceBaseURLEnvVar, "http://user-service.internal") + setRequiredConnEnv(t) t.Setenv(tt.envName, tt.envVal) _, err := LoadFromEnv() @@ -163,20 +242,44 @@ func TestLoadFromEnvRejectsInvalidValues(t *testing.T) { } func TestLoadFromEnvRejectsMissingRequiredValues(t *testing.T) { - t.Run("missing redis addr", func(t *testing.T) { + t.Run("missing redis master addr", func(t *testing.T) { + t.Setenv(envRedisPassword, "secret") + t.Setenv(envPostgresPrimaryDSN, defaultPrimaryDSN) t.Setenv(userServiceBaseURLEnvVar, "http://user-service.internal") _, err := LoadFromEnv() require.Error(t, err) - require.Contains(t, err.Error(), redisAddrEnvVar) + require.Contains(t, err.Error(), envRedisMasterAddr) }) - t.Run("missing user service base url", func(t *testing.T) { - t.Setenv(redisAddrEnvVar, "127.0.0.1:6379") + t.Run("missing redis password", func(t *testing.T) { + t.Setenv(envRedisMasterAddr, "127.0.0.1:6379") + t.Setenv(envPostgresPrimaryDSN, defaultPrimaryDSN) + t.Setenv(userServiceBaseURLEnvVar, "http://user-service.internal") _, err := LoadFromEnv() require.Error(t, err) - require.Contains(t, err.Error(), userServiceBaseURLEnvVar) + require.Contains(t, err.Error(), envRedisPassword) + }) + + t.Run("missing postgres primary dsn", func(t 
*testing.T) { + t.Setenv(envRedisMasterAddr, "127.0.0.1:6379") + t.Setenv(envRedisPassword, "secret") + t.Setenv(userServiceBaseURLEnvVar, "http://user-service.internal") + + _, err := LoadFromEnv() + require.Error(t, err) + require.Contains(t, err.Error(), envPostgresPrimaryDSN) + }) + + t.Run("missing user service base url", func(t *testing.T) { + t.Setenv(envRedisMasterAddr, "127.0.0.1:6379") + t.Setenv(envRedisPassword, "secret") + t.Setenv(envPostgresPrimaryDSN, defaultPrimaryDSN) + + _, err := LoadFromEnv() + require.Error(t, err) + require.Contains(t, err.Error(), "user service base URL") }) } @@ -188,7 +291,6 @@ func TestLoadFromEnvRejectsInvalidConfiguration(t *testing.T) { want string }{ {name: "invalid internal http addr", envName: internalHTTPAddrEnvVar, envVal: "127.0.0.1", want: "internal HTTP addr"}, - {name: "invalid redis addr", envName: redisAddrEnvVar, envVal: "127.0.0.1", want: "redis addr"}, {name: "relative user service url", envName: userServiceBaseURLEnvVar, envVal: "/internal/users", want: "absolute http(s) URL"}, {name: "invalid admin email", envName: adminEmailsGeoReviewRecommendedEnvVar, envVal: "broken-email", want: "invalid email address"}, {name: "blank admin email slot", envName: adminEmailsGameGenerationFailedEnvVar, envVal: "ops@example.com, , second@example.com", want: "must not be empty"}, @@ -201,8 +303,7 @@ func TestLoadFromEnvRejectsInvalidConfiguration(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { - t.Setenv(redisAddrEnvVar, "127.0.0.1:6379") - t.Setenv(userServiceBaseURLEnvVar, "http://user-service.internal") + setRequiredConnEnv(t) t.Setenv(routeBackoffMaxEnvVar, "5m") t.Setenv(tt.envName, tt.envVal) @@ -223,7 +324,7 @@ func TestLoadFromEnvRejectsNonPositiveValues(t *testing.T) { {name: "read header timeout", envName: internalHTTPReadHeaderTimeoutEnvVar, envVal: "0s"}, {name: "read timeout", envName: internalHTTPReadTimeoutEnvVar, envVal: "0s"}, {name: "idle timeout", envName: internalHTTPIdleTimeoutEnvVar, 
envVal: "0s"}, - {name: "redis timeout", envName: redisOperationTimeoutEnvVar, envVal: "0s"}, + {name: "redis timeout", envName: envRedisOpTimeout, envVal: "0s"}, {name: "intents read block timeout", envName: intentsReadBlockTimeoutEnvVar, envVal: "0s"}, {name: "push retries", envName: pushRetryMaxAttemptsEnvVar, envVal: "0"}, {name: "email retries", envName: emailRetryMaxAttemptsEnvVar, envVal: "0"}, @@ -231,9 +332,10 @@ func TestLoadFromEnvRejectsNonPositiveValues(t *testing.T) { {name: "route lease ttl", envName: routeLeaseTTLEnvVar, envVal: "0s"}, {name: "route backoff min", envName: routeBackoffMinEnvVar, envVal: "0s"}, {name: "route backoff max", envName: routeBackoffMaxEnvVar, envVal: "0s"}, - {name: "dead letter ttl", envName: deadLetterTTLEnvVar, envVal: "0s"}, - {name: "record ttl", envName: recordTTLEnvVar, envVal: "0s"}, {name: "idempotency ttl", envName: idempotencyTTLEnvVar, envVal: "0s"}, + {name: "record retention", envName: recordRetentionEnvVar, envVal: "0s"}, + {name: "malformed intent retention", envName: malformedIntentRetentionEnvVar, envVal: "0s"}, + {name: "cleanup interval", envName: cleanupIntervalEnvVar, envVal: "0s"}, {name: "user service timeout", envName: userServiceTimeoutEnvVar, envVal: "0s"}, } @@ -241,8 +343,7 @@ func TestLoadFromEnvRejectsNonPositiveValues(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { - t.Setenv(redisAddrEnvVar, "127.0.0.1:6379") - t.Setenv(userServiceBaseURLEnvVar, "http://user-service.internal") + setRequiredConnEnv(t) t.Setenv(tt.envName, tt.envVal) _, err := LoadFromEnv() diff --git a/notification/internal/config/env.go b/notification/internal/config/env.go new file mode 100644 index 0000000..e4c120c --- /dev/null +++ b/notification/internal/config/env.go @@ -0,0 +1,262 @@ +package config + +import ( + "fmt" + "log/slog" + "os" + "strconv" + "strings" + "time" + + "galaxy/postgres" + "galaxy/redisconn" +) + +// LoadFromEnv builds Config from environment variables and validates the +// resulting 
configuration. Connection topology for Redis and PostgreSQL is +// delegated to the shared `pkg/redisconn` and `pkg/postgres` LoadFromEnv +// helpers — the Redis loader hard-fails on the deprecated +// `NOTIFICATION_REDIS_TLS_ENABLED` / `NOTIFICATION_REDIS_USERNAME` env vars; +// the Postgres loader requires a primary DSN. +func LoadFromEnv() (Config, error) { + cfg := DefaultConfig() + + var err error + + cfg.ShutdownTimeout, err = durationEnv(shutdownTimeoutEnvVar, cfg.ShutdownTimeout) + if err != nil { + return Config{}, err + } + + cfg.Logging.Level = stringEnv(logLevelEnvVar, cfg.Logging.Level) + + cfg.InternalHTTP.Addr = stringEnv(internalHTTPAddrEnvVar, cfg.InternalHTTP.Addr) + cfg.InternalHTTP.ReadHeaderTimeout, err = durationEnv(internalHTTPReadHeaderTimeoutEnvVar, cfg.InternalHTTP.ReadHeaderTimeout) + if err != nil { + return Config{}, err + } + cfg.InternalHTTP.ReadTimeout, err = durationEnv(internalHTTPReadTimeoutEnvVar, cfg.InternalHTTP.ReadTimeout) + if err != nil { + return Config{}, err + } + cfg.InternalHTTP.IdleTimeout, err = durationEnv(internalHTTPIdleTimeoutEnvVar, cfg.InternalHTTP.IdleTimeout) + if err != nil { + return Config{}, err + } + + redisConn, err := redisconn.LoadFromEnv(envPrefix) + if err != nil { + return Config{}, err + } + cfg.Redis.Conn = redisConn + + pgConn, err := postgres.LoadFromEnv(envPrefix) + if err != nil { + return Config{}, err + } + cfg.Postgres.Conn = pgConn + + cfg.Streams.Intents = stringEnv(intentsStreamEnvVar, cfg.Streams.Intents) + cfg.Streams.GatewayClientEvents = stringEnv(gatewayClientEventsStreamEnvVar, cfg.Streams.GatewayClientEvents) + cfg.Streams.GatewayClientEventsStreamMaxLen, err = int64Env(gatewayClientEventsStreamMaxEnvVar, cfg.Streams.GatewayClientEventsStreamMaxLen) + if err != nil { + return Config{}, err + } + cfg.Streams.MailDeliveryCommands = stringEnv(mailDeliveryCommandsStreamEnvVar, cfg.Streams.MailDeliveryCommands) + cfg.IntentsReadBlockTimeout, err = 
durationEnv(intentsReadBlockTimeoutEnvVar, cfg.IntentsReadBlockTimeout) + if err != nil { + return Config{}, err + } + + cfg.Retry.PushMaxAttempts, err = intEnv(pushRetryMaxAttemptsEnvVar, cfg.Retry.PushMaxAttempts) + if err != nil { + return Config{}, err + } + cfg.Retry.EmailMaxAttempts, err = intEnv(emailRetryMaxAttemptsEnvVar, cfg.Retry.EmailMaxAttempts) + if err != nil { + return Config{}, err + } + cfg.Retry.RouteLeaseTTL, err = durationEnv(routeLeaseTTLEnvVar, cfg.Retry.RouteLeaseTTL) + if err != nil { + return Config{}, err + } + cfg.Retry.RouteBackoffMin, err = durationEnv(routeBackoffMinEnvVar, cfg.Retry.RouteBackoffMin) + if err != nil { + return Config{}, err + } + cfg.Retry.RouteBackoffMax, err = durationEnv(routeBackoffMaxEnvVar, cfg.Retry.RouteBackoffMax) + if err != nil { + return Config{}, err + } + cfg.Retry.IdempotencyTTL, err = durationEnv(idempotencyTTLEnvVar, cfg.Retry.IdempotencyTTL) + if err != nil { + return Config{}, err + } + + cfg.Retention.RecordRetention, err = durationEnv(recordRetentionEnvVar, cfg.Retention.RecordRetention) + if err != nil { + return Config{}, err + } + cfg.Retention.MalformedIntentRetention, err = durationEnv(malformedIntentRetentionEnvVar, cfg.Retention.MalformedIntentRetention) + if err != nil { + return Config{}, err + } + cfg.Retention.CleanupInterval, err = durationEnv(cleanupIntervalEnvVar, cfg.Retention.CleanupInterval) + if err != nil { + return Config{}, err + } + + cfg.UserService.BaseURL = normalizeBaseURL(stringEnv(userServiceBaseURLEnvVar, cfg.UserService.BaseURL)) + cfg.UserService.Timeout, err = durationEnv(userServiceTimeoutEnvVar, cfg.UserService.Timeout) + if err != nil { + return Config{}, err + } + + cfg.AdminRouting.GeoReviewRecommended, err = emailListEnv(adminEmailsGeoReviewRecommendedEnvVar, cfg.AdminRouting.GeoReviewRecommended) + if err != nil { + return Config{}, err + } + cfg.AdminRouting.GameGenerationFailed, err = emailListEnv(adminEmailsGameGenerationFailedEnvVar, 
cfg.AdminRouting.GameGenerationFailed) + if err != nil { + return Config{}, err + } + cfg.AdminRouting.LobbyRuntimePausedAfterStart, err = emailListEnv(adminEmailsLobbyRuntimePausedAfterEnvVar, cfg.AdminRouting.LobbyRuntimePausedAfterStart) + if err != nil { + return Config{}, err + } + cfg.AdminRouting.LobbyApplicationSubmitted, err = emailListEnv(adminEmailsLobbyApplicationSubmittedEnvVar, cfg.AdminRouting.LobbyApplicationSubmitted) + if err != nil { + return Config{}, err + } + + cfg.Telemetry.ServiceName = stringEnv(otelServiceNameEnvVar, cfg.Telemetry.ServiceName) + cfg.Telemetry.TracesExporter = normalizeExporterValue(stringEnv(otelTracesExporterEnvVar, cfg.Telemetry.TracesExporter)) + cfg.Telemetry.MetricsExporter = normalizeExporterValue(stringEnv(otelMetricsExporterEnvVar, cfg.Telemetry.MetricsExporter)) + cfg.Telemetry.TracesProtocol = loadOTLPProtocol( + os.Getenv(otelExporterOTLPTracesProtocolEnvVar), + os.Getenv(otelExporterOTLPProtocolEnvVar), + cfg.Telemetry.TracesExporter, + ) + cfg.Telemetry.MetricsProtocol = loadOTLPProtocol( + os.Getenv(otelExporterOTLPMetricsProtocolEnvVar), + os.Getenv(otelExporterOTLPProtocolEnvVar), + cfg.Telemetry.MetricsExporter, + ) + cfg.Telemetry.StdoutTracesEnabled, err = boolEnv(otelStdoutTracesEnabledEnvVar, cfg.Telemetry.StdoutTracesEnabled) + if err != nil { + return Config{}, err + } + cfg.Telemetry.StdoutMetricsEnabled, err = boolEnv(otelStdoutMetricsEnabledEnvVar, cfg.Telemetry.StdoutMetricsEnabled) + if err != nil { + return Config{}, err + } + + if err := validateLogLevel(cfg.Logging.Level); err != nil { + return Config{}, fmt.Errorf("load notification config: %s: %w", logLevelEnvVar, err) + } + if err := cfg.Validate(); err != nil { + return Config{}, err + } + + return cfg, nil +} + +func stringEnv(name string, fallback string) string { + value, ok := os.LookupEnv(name) + if !ok { + return fallback + } + + return strings.TrimSpace(value) +} + +func durationEnv(name string, fallback time.Duration) 
(time.Duration, error) { + value, ok := os.LookupEnv(name) + if !ok { + return fallback, nil + } + + parsed, err := time.ParseDuration(strings.TrimSpace(value)) + if err != nil { + return 0, fmt.Errorf("%s: %w", name, err) + } + + return parsed, nil +} + +func intEnv(name string, fallback int) (int, error) { + value, ok := os.LookupEnv(name) + if !ok { + return fallback, nil + } + + parsed, err := strconv.Atoi(strings.TrimSpace(value)) + if err != nil { + return 0, fmt.Errorf("%s: %w", name, err) + } + + return parsed, nil +} + +func int64Env(name string, fallback int64) (int64, error) { + value, ok := os.LookupEnv(name) + if !ok { + return fallback, nil + } + + parsed, err := strconv.ParseInt(strings.TrimSpace(value), 10, 64) + if err != nil { + return 0, fmt.Errorf("%s: %w", name, err) + } + + return parsed, nil +} + +func boolEnv(name string, fallback bool) (bool, error) { + value, ok := os.LookupEnv(name) + if !ok { + return fallback, nil + } + + parsed, err := strconv.ParseBool(strings.TrimSpace(value)) + if err != nil { + return false, fmt.Errorf("%s: %w", name, err) + } + + return parsed, nil +} + +func emailListEnv(name string, fallback []string) ([]string, error) { + raw, ok := os.LookupEnv(name) + if !ok { + return append([]string(nil), fallback...), nil + } + + return parseEmailList(name, raw) +} + +func validateLogLevel(value string) error { + var level slog.Level + return level.UnmarshalText([]byte(strings.TrimSpace(value))) +} + +func normalizeExporterValue(value string) string { + switch strings.TrimSpace(value) { + case "", otelExporterNone: + return otelExporterNone + default: + return strings.TrimSpace(value) + } +} + +func loadOTLPProtocol(primary string, fallback string, exporter string) string { + protocol := strings.TrimSpace(primary) + if protocol == "" { + protocol = strings.TrimSpace(fallback) + } + if protocol == "" && exporter == otelExporterOTLP { + return otelProtocolHTTPProtobuf + } + + return protocol +} diff --git 
a/notification/internal/service/routestate/types.go b/notification/internal/service/routestate/types.go new file mode 100644 index 0000000..84353cd --- /dev/null +++ b/notification/internal/service/routestate/types.go @@ -0,0 +1,254 @@ +// Package routestate carries the value types and inputs used by the route +// publishers to drive notification-route lifecycle transitions. The types +// are storage-agnostic: they were originally defined inside the Redis +// adapter package but were lifted here as part of the Stage 5 PostgreSQL +// migration so the publisher contracts can be satisfied by either a +// Redis-backed or a PostgreSQL-backed adapter (or a composite that splits +// state and lease storage between the two backends). +package routestate + +import ( + "errors" + "fmt" + "time" + + "galaxy/notification/internal/service/acceptintent" +) + +// ErrConflict reports that a route-state mutation lost an optimistic +// concurrency check (the row, the lease, or both no longer match the value +// the caller observed when it claimed the work). Publishers treat this as a +// no-op: the work was either already finished by another replica or has been +// rescheduled. +var ErrConflict = errors.New("route state conflict") + +// ScheduledRoute carries one due route reference returned by a route-state +// store that exposes the schedule. +type ScheduledRoute struct { + // RouteKey stores the implementation-specific scheduling key. Redis + // adapters set this to the full sorted-set member; SQL adapters set it to + // a synthetic "<notification_id>/<route_id>" string. Tests only require it + // to be non-empty and stable. + RouteKey string + + // NotificationID stores the owning notification identifier. + NotificationID string + + // RouteID stores the scheduled route identifier. + RouteID string +} + +// Validate reports whether route contains a complete due-route reference.
+func (route ScheduledRoute) Validate() error { + if route.RouteKey == "" { + return fmt.Errorf("scheduled route key must not be empty") + } + if route.NotificationID == "" { + return fmt.Errorf("scheduled route notification id must not be empty") + } + if route.RouteID == "" { + return fmt.Errorf("scheduled route route id must not be empty") + } + + return nil +} + +// CompleteRoutePublishedInput carries the data required to mark one route as +// published while atomically appending one outbound stream entry. +type CompleteRoutePublishedInput struct { + // ExpectedRoute stores the current route state previously loaded by the + // caller. The store uses it as the optimistic-concurrency token. + ExpectedRoute acceptintent.NotificationRoute + + // LeaseToken stores the route-lease owner token that must still be held. + LeaseToken string + + // PublishedAt stores when the publication attempt succeeded. + PublishedAt time.Time + + // Stream stores the outbound Redis Stream name. + Stream string + + // StreamMaxLen bounds Stream with approximate trimming when positive. Zero + // disables trimming. + StreamMaxLen int64 + + // StreamValues stores the exact Redis Stream fields appended to Stream. + StreamValues map[string]any +} + +// Validate reports whether input contains a complete published-route +// transition. 
+func (input CompleteRoutePublishedInput) Validate() error { + if err := validateCompletionRoute(input.ExpectedRoute); err != nil { + return err + } + if input.LeaseToken == "" { + return fmt.Errorf("lease token must not be empty") + } + if err := validateRouteStateTimestamp("published at", input.PublishedAt); err != nil { + return err + } + if input.Stream == "" { + return fmt.Errorf("stream must not be empty") + } + if input.StreamMaxLen < 0 { + return fmt.Errorf("stream max len must not be negative") + } + if err := validateStreamValues(input.StreamValues); err != nil { + return err + } + + return nil +} + +// CompleteRouteFailedInput carries the data required to record one retryable +// publication failure. +type CompleteRouteFailedInput struct { + // ExpectedRoute stores the current route state previously loaded by the + // caller. + ExpectedRoute acceptintent.NotificationRoute + + // LeaseToken stores the route-lease owner token that must still be held. + LeaseToken string + + // FailedAt stores when the publication attempt failed. + FailedAt time.Time + + // NextAttemptAt stores the next scheduled retry time. + NextAttemptAt time.Time + + // FailureClassification stores the classified publication failure kind. + FailureClassification string + + // FailureMessage stores the detailed publication failure text. + FailureMessage string +} + +// Validate reports whether input contains a complete retryable failure +// transition. 
+func (input CompleteRouteFailedInput) Validate() error { + if err := validateCompletionRoute(input.ExpectedRoute); err != nil { + return err + } + if input.LeaseToken == "" { + return fmt.Errorf("lease token must not be empty") + } + if err := validateRouteStateTimestamp("failed at", input.FailedAt); err != nil { + return err + } + if err := validateRouteStateTimestamp("next attempt at", input.NextAttemptAt); err != nil { + return err + } + if input.FailureClassification == "" { + return fmt.Errorf("failure classification must not be empty") + } + if input.FailureMessage == "" { + return fmt.Errorf("failure message must not be empty") + } + + return nil +} + +// CompleteRouteDeadLetterInput carries the data required to record one +// exhausted publication failure. +type CompleteRouteDeadLetterInput struct { + // ExpectedRoute stores the current route state previously loaded by the + // caller. + ExpectedRoute acceptintent.NotificationRoute + + // LeaseToken stores the route-lease owner token that must still be held. + LeaseToken string + + // DeadLetteredAt stores when the route exhausted its retry budget. + DeadLetteredAt time.Time + + // FailureClassification stores the classified terminal failure kind. + FailureClassification string + + // FailureMessage stores the detailed terminal failure text. + FailureMessage string + + // RecoveryHint stores the optional operator-facing recovery guidance. + RecoveryHint string +} + +// Validate reports whether input contains a complete dead-letter transition. 
+func (input CompleteRouteDeadLetterInput) Validate() error { + if err := validateCompletionRoute(input.ExpectedRoute); err != nil { + return err + } + if input.LeaseToken == "" { + return fmt.Errorf("lease token must not be empty") + } + if err := validateRouteStateTimestamp("dead lettered at", input.DeadLetteredAt); err != nil { + return err + } + if input.FailureClassification == "" { + return fmt.Errorf("failure classification must not be empty") + } + if input.FailureMessage == "" { + return fmt.Errorf("failure message must not be empty") + } + + return nil +} + +// ValidateUTCMillisecondTimestamp reports whether value is a non-zero UTC +// time truncated to millisecond precision. Exposed for callers that need the +// same boundary check the routestate inputs apply. +func ValidateUTCMillisecondTimestamp(name string, value time.Time) error { + return validateRouteStateTimestamp(name, value) +} + +func validateRouteStateTimestamp(name string, value time.Time) error { + if value.IsZero() { + return fmt.Errorf("%s must not be zero", name) + } + if !value.Equal(value.UTC()) { + return fmt.Errorf("%s must be UTC", name) + } + if !value.Equal(value.Truncate(time.Millisecond)) { + return fmt.Errorf("%s must use millisecond precision", name) + } + + return nil +} + +func validateCompletionRoute(route acceptintent.NotificationRoute) error { + if err := route.Validate(); err != nil { + return err + } + switch route.Status { + case acceptintent.RouteStatusPending, acceptintent.RouteStatusFailed: + return nil + default: + return fmt.Errorf("route status %q is not completable", route.Status) + } +} + +func validateStreamValues(values map[string]any) error { + if len(values) == 0 { + return fmt.Errorf("stream values must not be empty") + } + + for key, raw := range values { + if key == "" { + return fmt.Errorf("stream values key must not be empty") + } + switch typed := raw.(type) { + case string: + if typed == "" { + return fmt.Errorf("stream values %q must not be empty", 
key) + } + case []byte: + if len(typed) == 0 { + return fmt.Errorf("stream values %q must not be empty", key) + } + default: + return fmt.Errorf("stream values %q must be string or []byte", key) + } + } + + return nil +} diff --git a/notification/internal/worker/email_publisher.go b/notification/internal/worker/email_publisher.go index 95abb46..2482004 100644 --- a/notification/internal/worker/email_publisher.go +++ b/notification/internal/worker/email_publisher.go @@ -8,11 +8,13 @@ import ( "strings" "time" - "galaxy/notification/internal/adapters/redisstate" "galaxy/notification/internal/api/intentstream" "galaxy/notification/internal/logging" "galaxy/notification/internal/service/acceptintent" "galaxy/notification/internal/service/publishmail" + "galaxy/notification/internal/service/routestate" + + "github.com/redis/go-redis/v9" ) const ( @@ -24,7 +26,7 @@ const ( // by EmailPublisher. type EmailRouteStateStore interface { // ListDueRoutes loads due scheduled routes. - ListDueRoutes(context.Context, time.Time, int64) ([]redisstate.ScheduledRoute, error) + ListDueRoutes(context.Context, time.Time, int64) ([]routestate.ScheduledRoute, error) // TryAcquireRouteLease attempts to acquire one temporary route lease. TryAcquireRouteLease(context.Context, string, string, string, time.Duration) (bool, error) @@ -39,13 +41,13 @@ type EmailRouteStateStore interface { GetRoute(context.Context, string, string) (acceptintent.NotificationRoute, bool, error) // CompleteRoutePublished records one successful publication. - CompleteRoutePublished(context.Context, redisstate.CompleteRoutePublishedInput) error + CompleteRoutePublished(context.Context, routestate.CompleteRoutePublishedInput) error // CompleteRouteFailed records one retryable publication failure. 
- CompleteRouteFailed(context.Context, redisstate.CompleteRouteFailedInput) error + CompleteRouteFailed(context.Context, routestate.CompleteRouteFailedInput) error // CompleteRouteDeadLetter records one exhausted publication failure. - CompleteRouteDeadLetter(context.Context, redisstate.CompleteRouteDeadLetterInput) error + CompleteRouteDeadLetter(context.Context, routestate.CompleteRouteDeadLetterInput) error } // EmailCommandEncoder encodes one email-capable notification route into a @@ -90,6 +92,10 @@ type EmailPublisherConfig struct { // Clock provides wall-clock timestamps. Clock Clock + + // StreamPublisher emits the outbound mail-delivery command before the + // route's PostgreSQL state transition is committed. + StreamPublisher StreamPublisher } // EmailPublisher publishes due email routes into the Mail Service command @@ -105,6 +111,7 @@ type EmailPublisher struct { encoder EmailCommandEncoder telemetry RoutePublisherTelemetry clock Clock + streamPublisher StreamPublisher workerToken string logger *slog.Logger } @@ -114,6 +121,8 @@ func NewEmailPublisher(cfg EmailPublisherConfig, logger *slog.Logger) (*EmailPub switch { case cfg.Store == nil: return nil, errors.New("new email publisher: nil store") + case cfg.StreamPublisher == nil: + return nil, errors.New("new email publisher: nil stream publisher") case strings.TrimSpace(cfg.MailDeliveryCommandsStream) == "": return nil, errors.New("new email publisher: mail delivery-commands stream must not be empty") case cfg.RouteLeaseTTL <= 0: @@ -157,6 +166,7 @@ func NewEmailPublisher(cfg EmailPublisherConfig, logger *slog.Logger) (*EmailPub encoder: cfg.Encoder, telemetry: cfg.Telemetry, clock: cfg.Clock, + streamPublisher: cfg.StreamPublisher, workerToken: workerToken, logger: logger.With("component", "email_publisher", "stream", cfg.MailDeliveryCommandsStream), }, nil @@ -237,7 +247,7 @@ func (publisher *EmailPublisher) publishDueRoutes(ctx context.Context) (bool, er return progress, nil } -func (publisher 
*EmailPublisher) publishRoute(ctx context.Context, now time.Time, dueRoute redisstate.ScheduledRoute) (bool, error) { +func (publisher *EmailPublisher) publishRoute(ctx context.Context, now time.Time, dueRoute routestate.ScheduledRoute) (bool, error) { acquired, err := publisher.store.TryAcquireRouteLease(ctx, dueRoute.NotificationID, dueRoute.RouteID, publisher.workerToken, publisher.routeLeaseTTL) if err != nil { return false, fmt.Errorf("acquire route lease %q: %w", dueRoute.RouteID, err) @@ -283,7 +293,14 @@ func (publisher *EmailPublisher) publishRoute(ctx context.Context, now time.Time return publisher.recordFailure(ctx, notification, route, emailFailureClassificationPayloadEncoding, err.Error()) } - err = publisher.store.CompleteRoutePublished(ctx, redisstate.CompleteRoutePublishedInput{ + if err := publisher.streamPublisher.XAdd(ctx, &redis.XAddArgs{ + Stream: publisher.mailDeliveryCommandsStream, + Values: command.Values(), + }).Err(); err != nil { + return publisher.recordFailure(ctx, notification, route, emailFailureClassificationMailStreamWrite, err.Error()) + } + + err = publisher.store.CompleteRoutePublished(ctx, routestate.CompleteRoutePublishedInput{ ExpectedRoute: route, LeaseToken: publisher.workerToken, PublishedAt: publisher.now(), @@ -312,7 +329,7 @@ func (publisher *EmailPublisher) publishRoute(ctx context.Context, now time.Time logArgs = append(logArgs, logging.TraceAttrsFromContext(ctx)...) publisher.logger.Info("email route published", logArgs...) return true, nil - case errors.Is(err, redisstate.ErrConflict): + case errors.Is(err, routestate.ErrConflict): return false, nil default: return publisher.recordFailure(ctx, notification, route, emailFailureClassificationMailStreamWrite, err.Error()) @@ -349,7 +366,7 @@ func (publisher *EmailPublisher) recordFailure( logArgs = append(logArgs, logging.TraceAttrsFromContext(ctx)...) 
if attemptNumber >= route.MaxAttempts { - err := publisher.store.CompleteRouteDeadLetter(ctx, redisstate.CompleteRouteDeadLetterInput{ + err := publisher.store.CompleteRouteDeadLetter(ctx, routestate.CompleteRouteDeadLetterInput{ ExpectedRoute: route, LeaseToken: publisher.workerToken, DeadLetteredAt: failureAt, @@ -362,7 +379,7 @@ func (publisher *EmailPublisher) recordFailure( publisher.recordRouteDeadLetter(ctx, notification, route, classification) publisher.logger.Warn("email route dead-lettered", logArgs...) return true, nil - case errors.Is(err, redisstate.ErrConflict): + case errors.Is(err, routestate.ErrConflict): return false, nil default: return false, fmt.Errorf("dead-letter route %q: %w", route.RouteID, err) @@ -370,7 +387,7 @@ func (publisher *EmailPublisher) recordFailure( } nextAttemptAt := failureAt.Add(routeBackoffDelay(attemptNumber, publisher.routeBackoffMin, publisher.routeBackoffMax)).UTC().Truncate(time.Millisecond) - err := publisher.store.CompleteRouteFailed(ctx, redisstate.CompleteRouteFailedInput{ + err := publisher.store.CompleteRouteFailed(ctx, routestate.CompleteRouteFailedInput{ ExpectedRoute: route, LeaseToken: publisher.workerToken, FailedAt: failureAt, @@ -385,7 +402,7 @@ func (publisher *EmailPublisher) recordFailure( logArgs = append(logArgs, "next_attempt_at", nextAttemptAt) publisher.logger.Warn("email route failed and was rescheduled", logArgs...) 
return true, nil - case errors.Is(err, redisstate.ErrConflict): + case errors.Is(err, routestate.ErrConflict): return false, nil default: return false, fmt.Errorf("reschedule route %q: %w", route.RouteID, err) diff --git a/notification/internal/worker/email_publisher_test.go b/notification/internal/worker/email_publisher_test.go deleted file mode 100644 index a6a278c..0000000 --- a/notification/internal/worker/email_publisher_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package worker - -import ( - "context" - "testing" - "time" - - redisstate "galaxy/notification/internal/adapters/redisstate" - "galaxy/notification/internal/service/acceptintent" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/require" -) - -func TestEmailPublisherPublishesDueEmailRouteAndLeavesPushRoutePending(t *testing.T) { - t.Parallel() - - fixture := newEmailPublisherFixture(t) - require.NoError(t, fixture.store.CreateAcceptance(context.Background(), validEmailAcceptanceInput(fixture.now, 0))) - - running := runEmailPublisher(t, fixture.publisher) - defer running.stop(t) - - require.Eventually(t, func() bool { - route, found, err := fixture.store.GetRoute(context.Background(), "1775121700000-0", "email:user:user-1") - return err == nil && found && route.Status == acceptintent.RouteStatusPublished - }, time.Second, 10*time.Millisecond) - - pushRoute, found, err := fixture.store.GetRoute(context.Background(), "1775121700000-0", "push:user:user-1") - require.NoError(t, err) - require.True(t, found) - require.Equal(t, acceptintent.RouteStatusPending, pushRoute.Status) - - messages, err := fixture.client.XRange(context.Background(), fixture.mailStream, "-", "+").Result() - require.NoError(t, err) - require.Len(t, messages, 1) - require.Equal(t, "1775121700000-0/email:user:user-1", messages[0].Values["delivery_id"]) - require.Equal(t, "notification", messages[0].Values["source"]) - require.Equal(t, "template", messages[0].Values["payload_mode"]) - 
require.True(t, fixture.telemetry.hasRoutePublishAttempt("email", "published", "")) -} - -func TestEmailPublisherRetriesMailStreamPublicationFailures(t *testing.T) { - t.Parallel() - - fixture := newEmailPublisherFixture(t) - require.NoError(t, fixture.store.CreateAcceptance(context.Background(), validEmailAcceptanceInput(fixture.now, 0))) - require.NoError(t, fixture.client.Set(context.Background(), fixture.mailStream, "wrong-type", 0).Err()) - - running := runEmailPublisher(t, fixture.publisher) - defer running.stop(t) - - require.Eventually(t, func() bool { - route, found, err := fixture.store.GetRoute(context.Background(), "1775121700000-0", "email:user:user-1") - return err == nil && found && route.Status == acceptintent.RouteStatusFailed && route.AttemptCount == 1 - }, time.Second, 10*time.Millisecond) - require.True(t, fixture.telemetry.hasRoutePublishAttempt("email", "retry", emailFailureClassificationMailStreamWrite)) - require.True(t, fixture.telemetry.hasRouteRetry("email")) - - require.NoError(t, fixture.client.Del(context.Background(), fixture.mailStream).Err()) - - require.Eventually(t, func() bool { - route, found, err := fixture.store.GetRoute(context.Background(), "1775121700000-0", "email:user:user-1") - return err == nil && found && route.Status == acceptintent.RouteStatusPublished && route.AttemptCount == 2 - }, 2*time.Second, 10*time.Millisecond) - - messages, err := fixture.client.XRange(context.Background(), fixture.mailStream, "-", "+").Result() - require.NoError(t, err) - require.Len(t, messages, 1) - require.True(t, fixture.telemetry.hasRoutePublishAttempt("email", "published", "")) -} - -func TestEmailPublisherLeasePreventsDuplicatePublicationAcrossReplicas(t *testing.T) { - t.Parallel() - - fixture := newEmailPublisherFixture(t) - require.NoError(t, fixture.store.CreateAcceptance(context.Background(), validEmailAcceptanceInput(fixture.now, 0))) - - otherPublisher, err := NewEmailPublisher(EmailPublisherConfig{ - Store: fixture.store, - 
MailDeliveryCommandsStream: fixture.mailStream, - RouteLeaseTTL: 200 * time.Millisecond, - RouteBackoffMin: 20 * time.Millisecond, - RouteBackoffMax: 20 * time.Millisecond, - PollInterval: 10 * time.Millisecond, - BatchSize: 16, - Clock: newSteppingClock(fixture.now, time.Millisecond), - }, testWorkerLogger()) - require.NoError(t, err) - - first := runEmailPublisher(t, fixture.publisher) - defer first.stop(t) - second := runEmailPublisher(t, otherPublisher) - defer second.stop(t) - - require.Eventually(t, func() bool { - route, found, err := fixture.store.GetRoute(context.Background(), "1775121700000-0", "email:user:user-1") - return err == nil && found && route.Status == acceptintent.RouteStatusPublished - }, time.Second, 10*time.Millisecond) - - messages, err := fixture.client.XRange(context.Background(), fixture.mailStream, "-", "+").Result() - require.NoError(t, err) - require.Len(t, messages, 1) -} - -func TestEmailPublisherDeadLettersExhaustedRoute(t *testing.T) { - t.Parallel() - - fixture := newEmailPublisherFixture(t) - require.NoError(t, fixture.store.CreateAcceptance(context.Background(), validEmailAcceptanceInput(fixture.now, 6))) - require.NoError(t, fixture.client.Set(context.Background(), fixture.mailStream, "wrong-type", 0).Err()) - - running := runEmailPublisher(t, fixture.publisher) - defer running.stop(t) - - require.Eventually(t, func() bool { - route, found, err := fixture.store.GetRoute(context.Background(), "1775121700000-0", "email:user:user-1") - return err == nil && found && route.Status == acceptintent.RouteStatusDeadLetter && route.AttemptCount == 7 - }, time.Second, 10*time.Millisecond) - - deadLetterPayload, err := fixture.client.Get(context.Background(), redisstate.Keyspace{}.DeadLetter("1775121700000-0", "email:user:user-1")).Bytes() - require.NoError(t, err) - deadLetter, err := redisstate.UnmarshalDeadLetter(deadLetterPayload) - require.NoError(t, err) - require.Equal(t, emailFailureClassificationMailStreamWrite, 
deadLetter.FailureClassification) - require.True(t, fixture.telemetry.hasRoutePublishAttempt("email", "dead_letter", emailFailureClassificationMailStreamWrite)) - require.True(t, fixture.telemetry.hasRouteDeadLetter("email", emailFailureClassificationMailStreamWrite)) -} - -type emailPublisherFixture struct { - client *redis.Client - store *redisstate.AcceptanceStore - publisher *EmailPublisher - mailStream string - now time.Time - clock *steppingClock - telemetry *recordingWorkerTelemetry -} - -func newEmailPublisherFixture(t *testing.T) emailPublisherFixture { - t.Helper() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{ - Addr: server.Addr(), - Protocol: 2, - DisableIdentity: true, - }) - t.Cleanup(func() { - require.NoError(t, client.Close()) - }) - - store, err := redisstate.NewAcceptanceStore(client, redisstate.AcceptanceConfig{ - RecordTTL: 24 * time.Hour, - DeadLetterTTL: 72 * time.Hour, - IdempotencyTTL: 7 * 24 * time.Hour, - }) - require.NoError(t, err) - - now := time.UnixMilli(1775121700000).UTC() - clock := newSteppingClock(now, time.Millisecond) - telemetry := &recordingWorkerTelemetry{} - publisher, err := NewEmailPublisher(EmailPublisherConfig{ - Store: store, - MailDeliveryCommandsStream: "mail:delivery_commands", - RouteLeaseTTL: 200 * time.Millisecond, - RouteBackoffMin: 20 * time.Millisecond, - RouteBackoffMax: 20 * time.Millisecond, - PollInterval: 10 * time.Millisecond, - BatchSize: 16, - Telemetry: telemetry, - Clock: clock, - }, testWorkerLogger()) - require.NoError(t, err) - - return emailPublisherFixture{ - client: client, - store: store, - publisher: publisher, - mailStream: "mail:delivery_commands", - now: now, - clock: clock, - telemetry: telemetry, - } -} - -func validEmailAcceptanceInput(now time.Time, emailAttemptCount int) acceptintent.CreateAcceptanceInput { - input := validPushAcceptanceInput(now) - for index := range input.Routes { - if input.Routes[index].RouteID != "email:user:user-1" { - continue - } 
- input.Routes[index].AttemptCount = emailAttemptCount - input.Routes[index].MaxAttempts = 7 - } - - return input -} - -type runningEmailPublisher struct { - cancel context.CancelFunc - resultCh chan error -} - -func runEmailPublisher(t *testing.T, publisher *EmailPublisher) runningEmailPublisher { - t.Helper() - - ctx, cancel := context.WithCancel(context.Background()) - resultCh := make(chan error, 1) - go func() { - resultCh <- publisher.Run(ctx) - }() - - return runningEmailPublisher{ - cancel: cancel, - resultCh: resultCh, - } -} - -func (r runningEmailPublisher) stop(t *testing.T) { - t.Helper() - - r.cancel() - - select { - case err := <-r.resultCh: - require.ErrorIs(t, err, context.Canceled) - case <-time.After(time.Second): - require.FailNow(t, "email publisher did not stop") - } -} diff --git a/notification/internal/worker/intent_consumer_test.go b/notification/internal/worker/intent_consumer_test.go deleted file mode 100644 index a77ff97..0000000 --- a/notification/internal/worker/intent_consumer_test.go +++ /dev/null @@ -1,422 +0,0 @@ -package worker - -import ( - "context" - "errors" - "io" - "log/slog" - "testing" - "time" - - redisstate "galaxy/notification/internal/adapters/redisstate" - "galaxy/notification/internal/config" - "galaxy/notification/internal/service/acceptintent" - "galaxy/notification/internal/service/malformedintent" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestIntentConsumerStartsFromZeroOffsetWhenNoStoredOffsetExists(t *testing.T) { - t.Parallel() - - fixture := newIntentConsumerFixture(t, stubUserDirectory{ - records: map[string]acceptintent.UserRecord{ - "user-1": {Email: "pilot@example.com", PreferredLanguage: "en"}, - }, - }) - messageID := addValidIntent(t, fixture.client, fixture.stream, `{"turn_number":54,"game_name":"Nebula Clash","game_id":"game-123"}`) - - running := runIntentConsumer(t, 
fixture.consumer) - defer running.stop(t) - - require.Eventually(t, func() bool { - _, found, err := fixture.acceptanceStore.GetNotification(context.Background(), messageID) - return err == nil && found - }, time.Second, 10*time.Millisecond) -} - -func TestIntentConsumerContinuesFromSavedOffsetAfterRestart(t *testing.T) { - t.Parallel() - - fixture := newIntentConsumerFixture(t, stubUserDirectory{ - records: map[string]acceptintent.UserRecord{ - "user-1": {Email: "pilot@example.com", PreferredLanguage: "en"}, - }, - }) - firstID := addValidIntent(t, fixture.client, fixture.stream, `{"turn_number":54,"game_name":"Nebula Clash","game_id":"game-123"}`) - require.NoError(t, fixture.offsetStore.Save(context.Background(), fixture.stream, firstID)) - secondID := addValidIntent(t, fixture.client, fixture.stream, `{"turn_number":55,"game_name":"Nebula Clash","game_id":"game-123"}`) - - running := runIntentConsumer(t, fixture.consumer) - defer running.stop(t) - - require.Eventually(t, func() bool { - _, found, err := fixture.acceptanceStore.GetNotification(context.Background(), secondID) - return err == nil && found - }, time.Second, 10*time.Millisecond) - - _, found, err := fixture.acceptanceStore.GetNotification(context.Background(), firstID) - require.NoError(t, err) - require.False(t, found) -} - -func TestIntentConsumerRecordsIdempotencyConflictsAndAdvancesOffset(t *testing.T) { - t.Parallel() - - fixture := newIntentConsumerFixture(t, stubUserDirectory{ - records: map[string]acceptintent.UserRecord{ - "user-1": {Email: "pilot@example.com", PreferredLanguage: "en"}, - }, - }) - firstID := addValidIntent(t, fixture.client, fixture.stream, `{"turn_number":54,"game_name":"Nebula Clash","game_id":"game-123"}`) - secondID := addValidIntent(t, fixture.client, fixture.stream, `{"turn_number":55,"game_name":"Nebula Clash","game_id":"game-123"}`) - - running := runIntentConsumer(t, fixture.consumer) - defer running.stop(t) - - require.Eventually(t, func() bool { - payload, err 
:= fixture.client.Get(context.Background(), redisstate.Keyspace{}.MalformedIntent(secondID)).Bytes() - if err != nil { - return false - } - entry, err := redisstate.UnmarshalMalformedIntent(payload) - if err != nil { - return false - } - return entry.FailureCode == "idempotency_conflict" - }, time.Second, 10*time.Millisecond) - - offset, found, err := fixture.offsetStore.Load(context.Background(), fixture.stream) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, secondID, offset) - - _, found, err = fixture.acceptanceStore.GetNotification(context.Background(), firstID) - require.NoError(t, err) - require.True(t, found) - - _, found, err = fixture.acceptanceStore.GetNotification(context.Background(), secondID) - require.NoError(t, err) - require.False(t, found) -} - -func TestIntentConsumerShutdownInterruptsBlockingRead(t *testing.T) { - t.Parallel() - - fixture := newIntentConsumerFixture(t, stubUserDirectory{}) - - ctx, cancel := context.WithCancel(context.Background()) - resultCh := make(chan error, 1) - go func() { - resultCh <- fixture.consumer.Run(ctx) - }() - - time.Sleep(50 * time.Millisecond) - cancel() - - select { - case err := <-resultCh: - require.ErrorIs(t, err, context.Canceled) - case <-time.After(time.Second): - require.FailNow(t, "intent consumer did not stop after shutdown") - } -} - -func TestIntentConsumerRecordsRecipientNotFoundAndAdvancesOffset(t *testing.T) { - t.Parallel() - - fixture := newIntentConsumerFixture(t, stubUserDirectory{}) - messageID := addValidIntent(t, fixture.client, fixture.stream, `{"turn_number":54,"game_name":"Nebula Clash","game_id":"game-123"}`) - - running := runIntentConsumer(t, fixture.consumer) - defer running.stop(t) - - require.Eventually(t, func() bool { - payload, err := fixture.client.Get(context.Background(), redisstate.Keyspace{}.MalformedIntent(messageID)).Bytes() - if err != nil { - return false - } - entry, err := redisstate.UnmarshalMalformedIntent(payload) - if err != nil { - return 
false - } - return entry.FailureCode == malformedintent.FailureCodeRecipientNotFound - }, time.Second, 10*time.Millisecond) - - offset, found, err := fixture.offsetStore.Load(context.Background(), fixture.stream) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, messageID, offset) - - _, found, err = fixture.acceptanceStore.GetNotification(context.Background(), messageID) - require.NoError(t, err) - require.False(t, found) -} - -func TestIntentConsumerRecordsMalformedIntentAndAdvancesOffset(t *testing.T) { - t.Parallel() - - fixture := newIntentConsumerFixture(t, stubUserDirectory{ - records: map[string]acceptintent.UserRecord{ - "user-1": {Email: "pilot@example.com", PreferredLanguage: "en"}, - }, - }) - messageID, err := fixture.client.XAdd(context.Background(), &redis.XAddArgs{ - Stream: fixture.stream, - Values: map[string]any{ - "notification_type": "game.turn.ready", - "producer": "game_master", - "audience_kind": "user", - "recipient_user_ids_json": `["user-1"]`, - "idempotency_key": "game-123:turn-ready", - "occurred_at_ms": "1775121700000", - }, - }).Result() - require.NoError(t, err) - - running := runIntentConsumer(t, fixture.consumer) - defer running.stop(t) - - require.Eventually(t, func() bool { - payload, err := fixture.client.Get(context.Background(), redisstate.Keyspace{}.MalformedIntent(messageID)).Bytes() - if err != nil { - return false - } - entry, err := redisstate.UnmarshalMalformedIntent(payload) - if err != nil { - return false - } - return entry.FailureCode == malformedintent.FailureCodeInvalidPayload && - entry.StreamEntryID == messageID - }, time.Second, 10*time.Millisecond) - - offset, found, err := fixture.offsetStore.Load(context.Background(), fixture.stream) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, messageID, offset) - - _, found, err = fixture.acceptanceStore.GetNotification(context.Background(), messageID) - require.NoError(t, err) - require.False(t, found) -} - -func 
TestIntentConsumerRecordsTelemetryForOutcomesAndMalformedIntents(t *testing.T) { - t.Parallel() - - fixture := newIntentConsumerFixture(t, stubUserDirectory{ - records: map[string]acceptintent.UserRecord{ - "user-1": {Email: "pilot@example.com", PreferredLanguage: "en"}, - }, - }) - addValidIntent(t, fixture.client, fixture.stream, `{"turn_number":54,"game_name":"Nebula Clash","game_id":"game-123"}`) - addValidIntent(t, fixture.client, fixture.stream, `{"turn_number":54,"game_name":"Nebula Clash","game_id":"game-123"}`) - conflictID := addValidIntent(t, fixture.client, fixture.stream, `{"turn_number":55,"game_name":"Nebula Clash","game_id":"game-123"}`) - - running := runIntentConsumer(t, fixture.consumer) - defer running.stop(t) - - require.Eventually(t, func() bool { - payload, err := fixture.client.Get(context.Background(), redisstate.Keyspace{}.MalformedIntent(conflictID)).Bytes() - if err != nil { - return false - } - entry, err := redisstate.UnmarshalMalformedIntent(payload) - if err != nil { - return false - } - return entry.FailureCode == malformedintent.FailureCodeIdempotencyConflict - }, time.Second, 10*time.Millisecond) - - require.Eventually(t, func() bool { - return fixture.telemetry.hasIntentOutcome("accepted") && - fixture.telemetry.hasIntentOutcome("duplicate") && - fixture.telemetry.hasMalformedIntent("idempotency_conflict") - }, time.Second, 10*time.Millisecond) -} - -func TestIntentConsumerStopsWithoutAdvancingOffsetWhenUserDirectoryIsUnavailable(t *testing.T) { - t.Parallel() - - fixture := newIntentConsumerFixture(t, stubUserDirectory{ - err: errors.New("user service unavailable"), - }) - messageID := addValidIntent(t, fixture.client, fixture.stream, `{"turn_number":54,"game_name":"Nebula Clash","game_id":"game-123"}`) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - resultCh := make(chan error, 1) - go func() { - resultCh <- fixture.consumer.Run(ctx) - }() - - var runErr error - require.Eventually(t, func() bool 
{ - select { - case runErr = <-resultCh: - return true - default: - return false - } - }, time.Second, 10*time.Millisecond) - - require.Error(t, runErr) - require.ErrorContains(t, runErr, "user service unavailable") - - _, found, err := fixture.offsetStore.Load(context.Background(), fixture.stream) - require.NoError(t, err) - require.False(t, found) - - _, found, err = fixture.acceptanceStore.GetNotification(context.Background(), messageID) - require.NoError(t, err) - require.False(t, found) -} - -type intentConsumerFixture struct { - client *redis.Client - stream string - acceptanceStore *redisstate.AcceptanceStore - offsetStore *redisstate.StreamOffsetStore - consumer *IntentConsumer - telemetry *recordingWorkerTelemetry -} - -func newIntentConsumerFixture(t *testing.T, userDirectory acceptintent.UserDirectory) intentConsumerFixture { - t.Helper() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{ - Addr: server.Addr(), - Protocol: 2, - DisableIdentity: true, - }) - t.Cleanup(func() { - assert.NoError(t, client.Close()) - }) - - acceptanceStore, err := redisstate.NewAcceptanceStore(client, redisstate.AcceptanceConfig{ - RecordTTL: 24 * time.Hour, - DeadLetterTTL: 72 * time.Hour, - IdempotencyTTL: 7 * 24 * time.Hour, - }) - require.NoError(t, err) - malformedStore, err := redisstate.NewMalformedIntentStore(client, 72*time.Hour) - require.NoError(t, err) - offsetStore, err := redisstate.NewStreamOffsetStore(client) - require.NoError(t, err) - telemetry := &recordingWorkerTelemetry{} - service, err := acceptintent.New(acceptintent.Config{ - Store: acceptanceStore, - UserDirectory: userDirectory, - Clock: fixedClock{now: time.UnixMilli(1775121700000).UTC()}, - Logger: slog.New(slog.NewTextHandler(io.Discard, nil)), - Telemetry: telemetry, - PushMaxAttempts: 3, - EmailMaxAttempts: 7, - IdempotencyTTL: 7 * 24 * time.Hour, - AdminRouting: config.AdminRoutingConfig{}, - }) - require.NoError(t, err) - consumer, err := 
NewIntentConsumer(IntentConsumerConfig{ - Client: client, - Stream: "notification:intents", - BlockTimeout: 25 * time.Millisecond, - Acceptor: service, - MalformedRecorder: malformedStore, - OffsetStore: offsetStore, - Telemetry: telemetry, - Clock: fixedClock{now: time.UnixMilli(1775121700001).UTC()}, - }, slog.New(slog.NewTextHandler(io.Discard, nil))) - require.NoError(t, err) - - return intentConsumerFixture{ - client: client, - stream: "notification:intents", - acceptanceStore: acceptanceStore, - offsetStore: offsetStore, - consumer: consumer, - telemetry: telemetry, - } -} - -func addValidIntent(t *testing.T, client *redis.Client, stream string, payloadJSON string) string { - t.Helper() - - messageID, err := client.XAdd(context.Background(), &redis.XAddArgs{ - Stream: stream, - Values: map[string]any{ - "notification_type": "game.turn.ready", - "producer": "game_master", - "audience_kind": "user", - "recipient_user_ids_json": `["user-1"]`, - "idempotency_key": "game-123:turn-ready", - "occurred_at_ms": "1775121700000", - "payload_json": payloadJSON, - }, - }).Result() - require.NoError(t, err) - - return messageID -} - -type runningIntentConsumer struct { - cancel context.CancelFunc - resultCh chan error -} - -func runIntentConsumer(t *testing.T, consumer *IntentConsumer) runningIntentConsumer { - t.Helper() - - ctx, cancel := context.WithCancel(context.Background()) - resultCh := make(chan error, 1) - go func() { - resultCh <- consumer.Run(ctx) - }() - - time.Sleep(50 * time.Millisecond) - - return runningIntentConsumer{ - cancel: cancel, - resultCh: resultCh, - } -} - -func (r runningIntentConsumer) stop(t *testing.T) { - t.Helper() - - r.cancel() - - select { - case err := <-r.resultCh: - require.ErrorIs(t, err, context.Canceled) - case <-time.After(time.Second): - require.FailNow(t, "intent consumer did not stop") - } -} - -type fixedClock struct { - now time.Time -} - -func (clock fixedClock) Now() time.Time { - return clock.now -} - -type 
stubUserDirectory struct { - records map[string]acceptintent.UserRecord - err error -} - -func (directory stubUserDirectory) GetUserByID(_ context.Context, userID string) (acceptintent.UserRecord, error) { - if directory.err != nil { - return acceptintent.UserRecord{}, directory.err - } - if record, ok := directory.records[userID]; ok { - return record, nil - } - - return acceptintent.UserRecord{}, acceptintent.ErrRecipientNotFound -} diff --git a/notification/internal/worker/push_publisher.go b/notification/internal/worker/push_publisher.go index e6fec58..c96e7c5 100644 --- a/notification/internal/worker/push_publisher.go +++ b/notification/internal/worker/push_publisher.go @@ -10,11 +10,13 @@ import ( "strings" "time" - "galaxy/notification/internal/adapters/redisstate" "galaxy/notification/internal/api/intentstream" "galaxy/notification/internal/logging" "galaxy/notification/internal/service/acceptintent" "galaxy/notification/internal/service/publishpush" + "galaxy/notification/internal/service/routestate" + + "github.com/redis/go-redis/v9" ) const ( @@ -29,7 +31,7 @@ const ( // PushPublisher. type PushRouteStateStore interface { // ListDueRoutes loads due scheduled routes. - ListDueRoutes(context.Context, time.Time, int64) ([]redisstate.ScheduledRoute, error) + ListDueRoutes(context.Context, time.Time, int64) ([]routestate.ScheduledRoute, error) // TryAcquireRouteLease attempts to acquire one temporary route lease. TryAcquireRouteLease(context.Context, string, string, string, time.Duration) (bool, error) @@ -44,13 +46,13 @@ type PushRouteStateStore interface { GetRoute(context.Context, string, string) (acceptintent.NotificationRoute, bool, error) // CompleteRoutePublished records one successful publication. - CompleteRoutePublished(context.Context, redisstate.CompleteRoutePublishedInput) error + CompleteRoutePublished(context.Context, routestate.CompleteRoutePublishedInput) error // CompleteRouteFailed records one retryable publication failure. 
- CompleteRouteFailed(context.Context, redisstate.CompleteRouteFailedInput) error + CompleteRouteFailed(context.Context, routestate.CompleteRouteFailedInput) error // CompleteRouteDeadLetter records one exhausted publication failure. - CompleteRouteDeadLetter(context.Context, redisstate.CompleteRouteDeadLetterInput) error + CompleteRouteDeadLetter(context.Context, routestate.CompleteRouteDeadLetterInput) error } // PushEventEncoder encodes one push-capable notification route into a @@ -109,6 +111,10 @@ type PushPublisherConfig struct { // Clock provides wall-clock timestamps. Clock Clock + + // StreamPublisher emits the outbound Gateway client-event before the + // route's PostgreSQL state transition is committed. + StreamPublisher StreamPublisher } // PushPublisher publishes due push routes into the Gateway client-events @@ -125,6 +131,7 @@ type PushPublisher struct { encoder PushEventEncoder telemetry RoutePublisherTelemetry clock Clock + streamPublisher StreamPublisher workerToken string logger *slog.Logger } @@ -134,6 +141,8 @@ func NewPushPublisher(cfg PushPublisherConfig, logger *slog.Logger) (*PushPublis switch { case cfg.Store == nil: return nil, errors.New("new push publisher: nil store") + case cfg.StreamPublisher == nil: + return nil, errors.New("new push publisher: nil stream publisher") case strings.TrimSpace(cfg.GatewayStream) == "": return nil, errors.New("new push publisher: gateway stream must not be empty") case cfg.GatewayStreamMaxLen <= 0: @@ -180,6 +189,7 @@ func NewPushPublisher(cfg PushPublisherConfig, logger *slog.Logger) (*PushPublis encoder: cfg.Encoder, telemetry: cfg.Telemetry, clock: cfg.Clock, + streamPublisher: cfg.StreamPublisher, workerToken: workerToken, logger: logger.With("component", "push_publisher", "stream", cfg.GatewayStream), }, nil @@ -260,7 +270,7 @@ func (publisher *PushPublisher) publishDueRoutes(ctx context.Context) (bool, err return progress, nil } -func (publisher *PushPublisher) publishRoute(ctx context.Context, now 
time.Time, dueRoute redisstate.ScheduledRoute) (bool, error) { +func (publisher *PushPublisher) publishRoute(ctx context.Context, now time.Time, dueRoute routestate.ScheduledRoute) (bool, error) { acquired, err := publisher.store.TryAcquireRouteLease(ctx, dueRoute.NotificationID, dueRoute.RouteID, publisher.workerToken, publisher.routeLeaseTTL) if err != nil { return false, fmt.Errorf("acquire route lease %q: %w", dueRoute.RouteID, err) @@ -306,7 +316,19 @@ func (publisher *PushPublisher) publishRoute(ctx context.Context, now time.Time, return publisher.recordFailure(ctx, notification, route, pushFailureClassificationPayloadEncoding, err.Error()) } - err = publisher.store.CompleteRoutePublished(ctx, redisstate.CompleteRoutePublishedInput{ + xaddArgs := &redis.XAddArgs{ + Stream: publisher.gatewayStream, + Values: eventValues(event), + } + if publisher.gatewayStreamMaxLen > 0 { + xaddArgs.MaxLen = publisher.gatewayStreamMaxLen + xaddArgs.Approx = true + } + if err := publisher.streamPublisher.XAdd(ctx, xaddArgs).Err(); err != nil { + return publisher.recordFailure(ctx, notification, route, pushFailureClassificationGatewayStreamWrite, err.Error()) + } + + err = publisher.store.CompleteRoutePublished(ctx, routestate.CompleteRoutePublishedInput{ ExpectedRoute: route, LeaseToken: publisher.workerToken, PublishedAt: publisher.now(), @@ -335,7 +357,7 @@ func (publisher *PushPublisher) publishRoute(ctx context.Context, now time.Time, logArgs = append(logArgs, logging.TraceAttrsFromContext(ctx)...) publisher.logger.Info("push route published", logArgs...) return true, nil - case errors.Is(err, redisstate.ErrConflict): + case errors.Is(err, routestate.ErrConflict): return false, nil default: return publisher.recordFailure(ctx, notification, route, pushFailureClassificationGatewayStreamWrite, err.Error()) @@ -371,7 +393,7 @@ func (publisher *PushPublisher) recordFailure( logArgs = append(logArgs, logging.TraceAttrsFromContext(ctx)...) 
if attemptNumber >= route.MaxAttempts { - err := publisher.store.CompleteRouteDeadLetter(ctx, redisstate.CompleteRouteDeadLetterInput{ + err := publisher.store.CompleteRouteDeadLetter(ctx, routestate.CompleteRouteDeadLetterInput{ ExpectedRoute: route, LeaseToken: publisher.workerToken, DeadLetteredAt: failureAt, @@ -384,7 +406,7 @@ func (publisher *PushPublisher) recordFailure( publisher.recordRouteDeadLetter(ctx, notification, route, classification) publisher.logger.Warn("push route dead-lettered", logArgs...) return true, nil - case errors.Is(err, redisstate.ErrConflict): + case errors.Is(err, routestate.ErrConflict): return false, nil default: return false, fmt.Errorf("dead-letter route %q: %w", route.RouteID, err) @@ -392,7 +414,7 @@ func (publisher *PushPublisher) recordFailure( } nextAttemptAt := failureAt.Add(routeBackoffDelay(attemptNumber, publisher.routeBackoffMin, publisher.routeBackoffMax)).UTC().Truncate(time.Millisecond) - err := publisher.store.CompleteRouteFailed(ctx, redisstate.CompleteRouteFailedInput{ + err := publisher.store.CompleteRouteFailed(ctx, routestate.CompleteRouteFailedInput{ ExpectedRoute: route, LeaseToken: publisher.workerToken, FailedAt: failureAt, @@ -407,7 +429,7 @@ func (publisher *PushPublisher) recordFailure( logArgs = append(logArgs, "next_attempt_at", nextAttemptAt) publisher.logger.Warn("push route failed and was rescheduled", logArgs...) 
return true, nil - case errors.Is(err, redisstate.ErrConflict): + case errors.Is(err, routestate.ErrConflict): return false, nil default: return false, fmt.Errorf("reschedule route %q: %w", route.RouteID, err) diff --git a/notification/internal/worker/push_publisher_test.go b/notification/internal/worker/push_publisher_test.go deleted file mode 100644 index b0db75b..0000000 --- a/notification/internal/worker/push_publisher_test.go +++ /dev/null @@ -1,318 +0,0 @@ -package worker - -import ( - "context" - "io" - "log/slog" - "sync" - "testing" - "time" - - redisstate "galaxy/notification/internal/adapters/redisstate" - "galaxy/notification/internal/api/intentstream" - "galaxy/notification/internal/service/acceptintent" - - "github.com/alicebob/miniredis/v2" - "github.com/redis/go-redis/v9" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestPushPublisherPublishesDuePushRouteAndLeavesEmailRoutePending(t *testing.T) { - t.Parallel() - - fixture := newPushPublisherFixture(t) - require.NoError(t, fixture.store.CreateAcceptance(context.Background(), validPushAcceptanceInput(fixture.now))) - - running := runPushPublisher(t, fixture.publisher) - defer running.stop(t) - - require.Eventually(t, func() bool { - route, found, err := fixture.store.GetRoute(context.Background(), "1775121700000-0", "push:user:user-1") - return err == nil && found && route.Status == acceptintent.RouteStatusPublished - }, time.Second, 10*time.Millisecond) - - emailRoute, found, err := fixture.store.GetRoute(context.Background(), "1775121700000-0", "email:user:user-1") - require.NoError(t, err) - require.True(t, found) - require.Equal(t, acceptintent.RouteStatusPending, emailRoute.Status) - - messages, err := fixture.client.XRange(context.Background(), fixture.gatewayStream, "-", "+").Result() - require.NoError(t, err) - require.Len(t, messages, 1) - require.Equal(t, "user-1", messages[0].Values["user_id"]) - require.Equal(t, "game.turn.ready", 
messages[0].Values["event_type"]) - require.Equal(t, "1775121700000-0/push:user:user-1", messages[0].Values["event_id"]) - require.True(t, fixture.telemetry.hasRoutePublishAttempt("push", "published", "")) -} - -func TestPushPublisherRetriesGatewayStreamPublicationFailures(t *testing.T) { - t.Parallel() - - fixture := newPushPublisherFixture(t) - require.NoError(t, fixture.store.CreateAcceptance(context.Background(), validPushAcceptanceInput(fixture.now))) - require.NoError(t, fixture.client.Set(context.Background(), fixture.gatewayStream, "wrong-type", 0).Err()) - - running := runPushPublisher(t, fixture.publisher) - defer running.stop(t) - - require.Eventually(t, func() bool { - route, found, err := fixture.store.GetRoute(context.Background(), "1775121700000-0", "push:user:user-1") - return err == nil && found && route.Status == acceptintent.RouteStatusFailed && route.AttemptCount == 1 - }, time.Second, 10*time.Millisecond) - require.True(t, fixture.telemetry.hasRoutePublishAttempt("push", "retry", pushFailureClassificationGatewayStreamWrite)) - require.True(t, fixture.telemetry.hasRouteRetry("push")) - - require.NoError(t, fixture.client.Del(context.Background(), fixture.gatewayStream).Err()) - - require.Eventually(t, func() bool { - route, found, err := fixture.store.GetRoute(context.Background(), "1775121700000-0", "push:user:user-1") - return err == nil && found && route.Status == acceptintent.RouteStatusPublished && route.AttemptCount == 2 - }, 2*time.Second, 10*time.Millisecond) - - messages, err := fixture.client.XRange(context.Background(), fixture.gatewayStream, "-", "+").Result() - require.NoError(t, err) - require.Len(t, messages, 1) - require.True(t, fixture.telemetry.hasRoutePublishAttempt("push", "published", "")) -} - -func TestPushPublisherDeadLettersExhaustedRoute(t *testing.T) { - t.Parallel() - - fixture := newPushPublisherFixture(t) - input := validPushAcceptanceInput(fixture.now) - for index := range input.Routes { - if 
input.Routes[index].RouteID == "push:user:user-1" { - input.Routes[index].AttemptCount = 2 - input.Routes[index].MaxAttempts = 3 - } - } - require.NoError(t, fixture.store.CreateAcceptance(context.Background(), input)) - require.NoError(t, fixture.client.Set(context.Background(), fixture.gatewayStream, "wrong-type", 0).Err()) - - running := runPushPublisher(t, fixture.publisher) - defer running.stop(t) - - require.Eventually(t, func() bool { - route, found, err := fixture.store.GetRoute(context.Background(), "1775121700000-0", "push:user:user-1") - return err == nil && found && route.Status == acceptintent.RouteStatusDeadLetter && route.AttemptCount == 3 - }, time.Second, 10*time.Millisecond) - - deadLetterPayload, err := fixture.client.Get(context.Background(), redisstate.Keyspace{}.DeadLetter("1775121700000-0", "push:user:user-1")).Bytes() - require.NoError(t, err) - deadLetter, err := redisstate.UnmarshalDeadLetter(deadLetterPayload) - require.NoError(t, err) - require.Equal(t, pushFailureClassificationGatewayStreamWrite, deadLetter.FailureClassification) - require.True(t, fixture.telemetry.hasRoutePublishAttempt("push", "dead_letter", pushFailureClassificationGatewayStreamWrite)) - require.True(t, fixture.telemetry.hasRouteDeadLetter("push", pushFailureClassificationGatewayStreamWrite)) -} - -func TestPushPublisherLeasePreventsDuplicatePublicationAcrossReplicas(t *testing.T) { - t.Parallel() - - fixture := newPushPublisherFixture(t) - require.NoError(t, fixture.store.CreateAcceptance(context.Background(), validPushAcceptanceInput(fixture.now))) - - otherPublisher, err := NewPushPublisher(PushPublisherConfig{ - Store: fixture.store, - GatewayStream: fixture.gatewayStream, - GatewayStreamMaxLen: 1024, - RouteLeaseTTL: 200 * time.Millisecond, - RouteBackoffMin: 20 * time.Millisecond, - RouteBackoffMax: 20 * time.Millisecond, - PollInterval: 10 * time.Millisecond, - BatchSize: 16, - Clock: newSteppingClock(fixture.now, time.Millisecond), - }, testWorkerLogger()) - 
require.NoError(t, err) - - first := runPushPublisher(t, fixture.publisher) - defer first.stop(t) - second := runPushPublisher(t, otherPublisher) - defer second.stop(t) - - require.Eventually(t, func() bool { - route, found, err := fixture.store.GetRoute(context.Background(), "1775121700000-0", "push:user:user-1") - return err == nil && found && route.Status == acceptintent.RouteStatusPublished - }, time.Second, 10*time.Millisecond) - - messages, err := fixture.client.XRange(context.Background(), fixture.gatewayStream, "-", "+").Result() - require.NoError(t, err) - require.Len(t, messages, 1) -} - -type pushPublisherFixture struct { - client *redis.Client - store *redisstate.AcceptanceStore - publisher *PushPublisher - gatewayStream string - now time.Time - clock *steppingClock - telemetry *recordingWorkerTelemetry -} - -func newPushPublisherFixture(t *testing.T) pushPublisherFixture { - t.Helper() - - server := miniredis.RunT(t) - client := redis.NewClient(&redis.Options{ - Addr: server.Addr(), - Protocol: 2, - DisableIdentity: true, - }) - t.Cleanup(func() { - assert.NoError(t, client.Close()) - }) - - store, err := redisstate.NewAcceptanceStore(client, redisstate.AcceptanceConfig{ - RecordTTL: 24 * time.Hour, - DeadLetterTTL: 72 * time.Hour, - IdempotencyTTL: 7 * 24 * time.Hour, - }) - require.NoError(t, err) - - now := time.UnixMilli(1775121700000).UTC() - clock := newSteppingClock(now, time.Millisecond) - telemetry := &recordingWorkerTelemetry{} - publisher, err := NewPushPublisher(PushPublisherConfig{ - Store: store, - GatewayStream: "gateway:client-events", - GatewayStreamMaxLen: 1024, - RouteLeaseTTL: 200 * time.Millisecond, - RouteBackoffMin: 20 * time.Millisecond, - RouteBackoffMax: 20 * time.Millisecond, - PollInterval: 10 * time.Millisecond, - BatchSize: 16, - Telemetry: telemetry, - Clock: clock, - }, testWorkerLogger()) - require.NoError(t, err) - - return pushPublisherFixture{ - client: client, - store: store, - publisher: publisher, - gatewayStream: 
"gateway:client-events", - now: now, - clock: clock, - telemetry: telemetry, - } -} - -func validPushAcceptanceInput(now time.Time) acceptintent.CreateAcceptanceInput { - return acceptintent.CreateAcceptanceInput{ - Notification: acceptintent.NotificationRecord{ - NotificationID: "1775121700000-0", - NotificationType: intentstream.NotificationTypeGameTurnReady, - Producer: intentstream.ProducerGameMaster, - AudienceKind: intentstream.AudienceKindUser, - RecipientUserIDs: []string{"user-1"}, - PayloadJSON: `{"game_id":"game-123","game_name":"Nebula Clash","turn_number":54}`, - IdempotencyKey: "game-123:turn-54", - RequestFingerprint: "sha256:deadbeef", - RequestID: "request-1", - TraceID: "trace-1", - OccurredAt: now, - AcceptedAt: now, - UpdatedAt: now, - }, - Routes: []acceptintent.NotificationRoute{ - { - NotificationID: "1775121700000-0", - RouteID: "push:user:user-1", - Channel: intentstream.ChannelPush, - RecipientRef: "user:user-1", - Status: acceptintent.RouteStatusPending, - AttemptCount: 0, - MaxAttempts: 3, - NextAttemptAt: now, - ResolvedEmail: "pilot@example.com", - ResolvedLocale: "en", - CreatedAt: now, - UpdatedAt: now, - }, - { - NotificationID: "1775121700000-0", - RouteID: "email:user:user-1", - Channel: intentstream.ChannelEmail, - RecipientRef: "user:user-1", - Status: acceptintent.RouteStatusPending, - AttemptCount: 0, - MaxAttempts: 7, - NextAttemptAt: now, - ResolvedEmail: "pilot@example.com", - ResolvedLocale: "en", - CreatedAt: now, - UpdatedAt: now, - }, - }, - Idempotency: acceptintent.IdempotencyRecord{ - Producer: intentstream.ProducerGameMaster, - IdempotencyKey: "game-123:turn-54", - NotificationID: "1775121700000-0", - RequestFingerprint: "sha256:deadbeef", - CreatedAt: now, - ExpiresAt: now.Add(7 * 24 * time.Hour), - }, - } -} - -type runningPushPublisher struct { - cancel context.CancelFunc - resultCh chan error -} - -func runPushPublisher(t *testing.T, publisher *PushPublisher) runningPushPublisher { - t.Helper() - - ctx, cancel 
:= context.WithCancel(context.Background()) - resultCh := make(chan error, 1) - go func() { - resultCh <- publisher.Run(ctx) - }() - - return runningPushPublisher{ - cancel: cancel, - resultCh: resultCh, - } -} - -func (r runningPushPublisher) stop(t *testing.T) { - t.Helper() - - r.cancel() - - select { - case err := <-r.resultCh: - require.ErrorIs(t, err, context.Canceled) - case <-time.After(time.Second): - require.FailNow(t, "push publisher did not stop") - } -} - -type steppingClock struct { - mu sync.Mutex - current time.Time - step time.Duration -} - -func newSteppingClock(start time.Time, step time.Duration) *steppingClock { - return &steppingClock{ - current: start.UTC().Truncate(time.Millisecond), - step: step, - } -} - -func (clock *steppingClock) Now() time.Time { - clock.mu.Lock() - defer clock.mu.Unlock() - - now := clock.current - clock.current = clock.current.Add(clock.step).UTC().Truncate(time.Millisecond) - - return now -} - -func testWorkerLogger() *slog.Logger { - return slog.New(slog.NewTextHandler(io.Discard, nil)) -} diff --git a/notification/internal/worker/sqlretention.go b/notification/internal/worker/sqlretention.go new file mode 100644 index 0000000..466c057 --- /dev/null +++ b/notification/internal/worker/sqlretention.go @@ -0,0 +1,161 @@ +package worker + +import ( + "context" + "errors" + "fmt" + "log/slog" + "time" +) + +// SQLRetentionStore performs the durable DELETE statements applied by the +// retention worker. Implementations are typically the umbrella PostgreSQL +// notification store; the interface keeps the worker decoupled from the +// store package. +type SQLRetentionStore interface { + // DeleteRecordsOlderThan removes records rows whose accepted_at predates + // cutoff. Cascading FKs drop routes and dead_letters owned by the deleted + // rows. 
+ DeleteRecordsOlderThan(ctx context.Context, cutoff time.Time) (int64, error) + + // DeleteMalformedIntentsOlderThan removes malformed-intent rows whose + // recorded_at predates cutoff. + DeleteMalformedIntentsOlderThan(ctx context.Context, cutoff time.Time) (int64, error) +} + +// SQLRetentionConfig stores the dependencies and policy used by +// SQLRetentionWorker. +type SQLRetentionConfig struct { + // Store applies the durable DELETE statements. + Store SQLRetentionStore + + // RecordRetention bounds how long records (and their cascaded routes and + // dead_letters) survive after acceptance. + RecordRetention time.Duration + + // MalformedIntentRetention bounds how long malformed-intent rows survive + // after recorded_at. + MalformedIntentRetention time.Duration + + // CleanupInterval stores the wall-clock period between two retention + // passes. + CleanupInterval time.Duration + + // Clock provides the wall-clock used to compute cutoff timestamps. + Clock Clock +} + +// SQLRetentionWorker periodically deletes records and malformed-intent rows +// whose retention window has expired. The worker replaces the per-key +// Redis EXPIRE eviction that maintained TTLs on the previous Redis-backed +// notification keyspace. +type SQLRetentionWorker struct { + store SQLRetentionStore + recordRetention time.Duration + malformedIntentRetention time.Duration + cleanupInterval time.Duration + clock Clock + logger *slog.Logger +} + +// NewSQLRetentionWorker constructs the periodic retention worker. 
+func NewSQLRetentionWorker(cfg SQLRetentionConfig, logger *slog.Logger) (*SQLRetentionWorker, error) { + switch { + case cfg.Store == nil: + return nil, errors.New("new sql retention worker: nil store") + case cfg.RecordRetention <= 0: + return nil, errors.New("new sql retention worker: non-positive record retention") + case cfg.MalformedIntentRetention <= 0: + return nil, errors.New("new sql retention worker: non-positive malformed intent retention") + case cfg.CleanupInterval <= 0: + return nil, errors.New("new sql retention worker: non-positive cleanup interval") + case cfg.Clock == nil: + return nil, errors.New("new sql retention worker: nil clock") + } + if logger == nil { + logger = slog.Default() + } + + return &SQLRetentionWorker{ + store: cfg.Store, + recordRetention: cfg.RecordRetention, + malformedIntentRetention: cfg.MalformedIntentRetention, + cleanupInterval: cfg.CleanupInterval, + clock: cfg.Clock, + logger: logger.With("component", "sql_retention_worker"), + }, nil +} + +// Run starts the retention loop and blocks until ctx is canceled. +func (worker *SQLRetentionWorker) Run(ctx context.Context) error { + if ctx == nil { + return errors.New("run sql retention worker: nil context") + } + if err := ctx.Err(); err != nil { + return err + } + if worker == nil { + return errors.New("run sql retention worker: nil worker") + } + + worker.logger.Info("sql retention worker started", + "record_retention", worker.recordRetention.String(), + "malformed_intent_retention", worker.malformedIntentRetention.String(), + "cleanup_interval", worker.cleanupInterval.String(), + ) + defer worker.logger.Info("sql retention worker stopped") + + // First pass runs immediately so a freshly started service does not wait + // one full interval before evicting stale rows. 
+ worker.runOnce(ctx) + + ticker := time.NewTicker(worker.cleanupInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + worker.runOnce(ctx) + } + } +} + +// Shutdown stops the retention worker within ctx. +func (worker *SQLRetentionWorker) Shutdown(ctx context.Context) error { + if ctx == nil { + return errors.New("shutdown sql retention worker: nil context") + } + return nil +} + +func (worker *SQLRetentionWorker) runOnce(ctx context.Context) { + now := worker.clock.Now().UTC() + + recordCutoff := now.Add(-worker.recordRetention) + if deleted, err := worker.store.DeleteRecordsOlderThan(ctx, recordCutoff); err != nil { + worker.logger.Warn("delete expired records failed", + "cutoff", recordCutoff, + "error", fmt.Sprintf("%v", err), + ) + } else if deleted > 0 { + worker.logger.Info("expired records deleted", + "cutoff", recordCutoff, + "deleted", deleted, + ) + } + + malformedCutoff := now.Add(-worker.malformedIntentRetention) + if deleted, err := worker.store.DeleteMalformedIntentsOlderThan(ctx, malformedCutoff); err != nil { + worker.logger.Warn("delete expired malformed intents failed", + "cutoff", malformedCutoff, + "error", fmt.Sprintf("%v", err), + ) + } else if deleted > 0 { + worker.logger.Info("expired malformed intents deleted", + "cutoff", malformedCutoff, + "deleted", deleted, + ) + } +} diff --git a/notification/internal/worker/stream_publisher.go b/notification/internal/worker/stream_publisher.go new file mode 100644 index 0000000..7022391 --- /dev/null +++ b/notification/internal/worker/stream_publisher.go @@ -0,0 +1,18 @@ +package worker + +import ( + "context" + + "github.com/redis/go-redis/v9" +) + +// StreamPublisher abstracts the subset of the Redis Streams API used by the +// route publishers to emit one outbound stream entry. The default +// implementation in production wiring is `*redis.Client`. Tests substitute +// an in-memory fake. 
+type StreamPublisher interface { + // XAdd appends one entry to the configured stream. Implementations must + // honour `args.MaxLen` plus `args.Approx == true` for approximate trimming + // when the caller sets them. + XAdd(ctx context.Context, args *redis.XAddArgs) *redis.StringCmd +} diff --git a/notification/redis_state_contract_test.go b/notification/redis_state_contract_test.go index 6b8948f..7cbefbe 100644 --- a/notification/redis_state_contract_test.go +++ b/notification/redis_state_contract_test.go @@ -9,51 +9,42 @@ import ( const expectedNotificationRedisKeyTable = `| Logical artifact | Redis key | | --- | --- | -| ` + "`notification_record`" + ` | ` + "`notification:records:`" + ` | -| ` + "`notification_route`" + ` | ` + "`notification:routes::`" + ` | | temporary route lease | ` + "`notification:route_leases::`" + ` | -| ` + "`notification_idempotency_record`" + ` | ` + "`notification:idempotency::`" + ` | -| ` + "`notification_dead_letter_entry`" + ` | ` + "`notification:dead_letters::`" + ` | -| malformed intent record | ` + "`notification:malformed_intents:`" + ` | | stream offset record | ` + "`notification:stream_offsets:`" + ` | -| ingress stream | ` + "`notification:intents`" + ` | -| route schedule sorted set | ` + "`notification:route_schedule`" + ` |` +| ingress stream | ` + "`notification:intents`" + ` |` -const expectedNotificationRedisRecordFieldsTable = `| Record | Frozen fields | +const expectedNotificationPostgresTable = `| Table | Frozen columns | | --- | --- | -| ` + "`notification_record`" + ` | ` + "`notification_id`" + `, ` + "`notification_type`" + `, ` + "`producer`" + `, ` + "`audience_kind`" + `, normalized ` + "`recipient_user_ids`" + `, normalized ` + "`payload_json`" + `, ` + "`idempotency_key`" + `, ` + "`request_fingerprint`" + `, optional ` + "`request_id`" + `, optional ` + "`trace_id`" + `, ` + "`occurred_at_ms`" + `, ` + "`accepted_at_ms`" + `, ` + "`updated_at_ms`" + ` | -| ` + "`notification_route`" + ` | ` + 
"`notification_id`" + `, ` + "`route_id`" + `, ` + "`channel`" + `, ` + "`recipient_ref`" + `, ` + "`status`" + `, ` + "`attempt_count`" + `, ` + "`max_attempts`" + `, ` + "`next_attempt_at_ms`" + `, optional ` + "`resolved_email`" + `, optional ` + "`resolved_locale`" + `, optional ` + "`last_error_classification`" + `, optional ` + "`last_error_message`" + `, optional ` + "`last_error_at_ms`" + `, ` + "`created_at_ms`" + `, ` + "`updated_at_ms`" + `, optional ` + "`published_at_ms`" + `, optional ` + "`dead_lettered_at_ms`" + `, optional ` + "`skipped_at_ms`" + ` | -| ` + "`notification_idempotency_record`" + ` | ` + "`producer`" + `, ` + "`idempotency_key`" + `, ` + "`notification_id`" + `, ` + "`request_fingerprint`" + `, ` + "`created_at_ms`" + `, ` + "`expires_at_ms`" + ` | -| ` + "`notification_dead_letter_entry`" + ` | ` + "`notification_id`" + `, ` + "`route_id`" + `, ` + "`channel`" + `, ` + "`recipient_ref`" + `, ` + "`final_attempt_count`" + `, ` + "`max_attempts`" + `, ` + "`failure_classification`" + `, ` + "`failure_message`" + `, ` + "`created_at_ms`" + `, optional ` + "`recovery_hint`" + ` | -| malformed intent record | ` + "`stream_entry_id`" + `, optional ` + "`notification_type`" + `, optional ` + "`producer`" + `, optional ` + "`idempotency_key`" + `, ` + "`failure_code`" + `, ` + "`failure_message`" + `, ` + "`raw_fields_json`" + `, ` + "`recorded_at_ms`" + ` | -| stream offset record | ` + "`stream`" + `, ` + "`last_processed_entry_id`" + `, ` + "`updated_at_ms`" + ` |` +| ` + "`records`" + ` | ` + "`notification_id`" + `, ` + "`notification_type`" + `, ` + "`producer`" + `, ` + "`audience_kind`" + `, ` + "`recipient_user_ids`" + ` (jsonb), ` + "`payload_json`" + `, ` + "`idempotency_key`" + `, ` + "`request_fingerprint`" + `, ` + "`request_id`" + `, ` + "`trace_id`" + `, ` + "`occurred_at`" + `, ` + "`accepted_at`" + `, ` + "`updated_at`" + `, ` + "`idempotency_expires_at`" + `; ` + "`UNIQUE (producer, idempotency_key)`" + ` | +| ` + 
"`routes`" + ` | ` + "`notification_id`" + `, ` + "`route_id`" + `, ` + "`channel`" + `, ` + "`recipient_ref`" + `, ` + "`status`" + `, ` + "`attempt_count`" + `, ` + "`max_attempts`" + `, ` + "`next_attempt_at`" + `, ` + "`resolved_email`" + `, ` + "`resolved_locale`" + `, ` + "`last_error_classification`" + `, ` + "`last_error_message`" + `, ` + "`last_error_at`" + `, ` + "`created_at`" + `, ` + "`updated_at`" + `, ` + "`published_at`" + `, ` + "`dead_lettered_at`" + `, ` + "`skipped_at`" + `; PRIMARY KEY ` + "`(notification_id, route_id)`" + ` | +| ` + "`dead_letters`" + ` | ` + "`notification_id`" + `, ` + "`route_id`" + `, ` + "`channel`" + `, ` + "`recipient_ref`" + `, ` + "`final_attempt_count`" + `, ` + "`max_attempts`" + `, ` + "`failure_classification`" + `, ` + "`failure_message`" + `, ` + "`recovery_hint`" + `, ` + "`created_at`" + `; PRIMARY KEY ` + "`(notification_id, route_id)`" + ` cascading from ` + "`routes`" + ` | +| ` + "`malformed_intents`" + ` | ` + "`stream_entry_id`" + `, ` + "`notification_type`" + `, ` + "`producer`" + `, ` + "`idempotency_key`" + `, ` + "`failure_code`" + `, ` + "`failure_message`" + `, ` + "`raw_fields`" + ` (jsonb), ` + "`recorded_at`" + ` |` -var expectedNotificationRedisDocumentationSnippets = []string{ - "Each route represents exactly one `(channel, recipient_ref)` pair.", - "every derived `recipient_ref` receives one `push` route slot and one `email` route slot, except that an empty administrator email list materializes one synthetic `config:` recipient slot with only a skipped `email` route", - "a route slot whose channel is outside the notification type channel matrix is materialized as `skipped`", - "`recipient_ref` is `user:` for user-targeted routes", - "`recipient_ref` is `email:` for configured administrator email routes", - "synthetic recipient slot `config:` with one skipped `email` route so the configuration gap remains durable and operator-visible", - "`route_id` is mandatory and equals `:`", - "durable 
records are stored as strict JSON blobs", - "timestamps are stored in Unix milliseconds", +var expectedNotificationPersistenceDocumentationSnippets = []string{ + "the durable `records` row IS the idempotency reservation", + "`next_attempt_at` is non-NULL only while the route is a scheduling candidate", + "`payload_json` stores the canonical normalized JSON string used for idempotency fingerprinting", + "`recipient_user_ids` is JSONB and omitted for `audience_kind=admin_email`", + "record-level retention deletes cascade to `routes` and `dead_letters` via `ON DELETE CASCADE`", "dynamic Redis key segments are base64url-encoded", - "`notification:route_schedule` is one shared sorted set for both `push` and `email`", - "`notification_record.payload_json` stores the canonical normalized JSON string used for idempotency fingerprinting", - "temporary route lease keys store one opaque worker token and use `NOTIFICATION_ROUTE_LEASE_TTL`; they are service-local coordination state rather than durable records", - "score = `next_attempt_at_ms` and member = full Redis route key with encoded dynamic segments", - "`status=pending` and `next_attempt_at_ms = accepted_at_ms`", - "`failed` routes remain scheduled for retry", - "`published`, `dead_letter`, and `skipped` are absent from the schedule", - "only the current lease holder may finalize one due publication attempt", + "temporary route lease keys store one opaque worker token and use `NOTIFICATION_ROUTE_LEASE_TTL`", + "retained on Redis as a per-replica exclusivity hint atop the SQL claim", + "the outbound streams `gateway:client-events` and `mail:delivery_commands` remain Redis Streams", + "Notification Service emits one entry through `XADD` before committing the route's PostgreSQL state transition", + "`routes_due_idx` (the partial index on `next_attempt_at`) replaces the former `notification:route_schedule` ZSET", + "`push` publishers filter for `route_id` prefix `push:`", + "`email` publishers filter for prefix `email:`", + 
"only the current lease holder finalises one due publication attempt", + "the durable transition is a `Complete*` SQL transaction with optimistic concurrency on `routes.updated_at`", + "newly accepted publishable routes enter the partial index immediately", "after failed attempt `N`, the next delay is `clamp(NOTIFICATION_ROUTE_BACKOFF_MIN * 2^(N-1), NOTIFICATION_ROUTE_BACKOFF_MIN, NOTIFICATION_ROUTE_BACKOFF_MAX)`", "no jitter is added to the retry delay", - "creates `notification_dead_letter_entry`, and is removed from `notification:route_schedule`", - "`notification_record` and `notification_route` use `NOTIFICATION_RECORD_TTL`", - "`notification_idempotency_record` uses `NOTIFICATION_IDEMPOTENCY_TTL`", - "`notification_dead_letter_entry` and malformed intent records use `NOTIFICATION_DEAD_LETTER_TTL`", - "stream offset records do not use TTL", + "creates `notification_dead_letter_entry`", + "`records` and their cascaded `routes` / `dead_letters` use `NOTIFICATION_RECORD_RETENTION`", + "the per-record idempotency window (`records.idempotency_expires_at`) uses `NOTIFICATION_IDEMPOTENCY_TTL`", + "`malformed_intents` use `NOTIFICATION_MALFORMED_INTENT_RETENTION`", + "the retention worker runs once per `NOTIFICATION_CLEANUP_INTERVAL`", + "stream offset records do not expire", } func TestNotificationRedisDocsStayInSync(t *testing.T) { @@ -74,9 +65,9 @@ func TestNotificationRedisDocsStayInSync(t *testing.T) { require.Contains(t, docsIndex, "- [Operator runbook](runbook.md)") require.Contains(t, readme, expectedNotificationRedisKeyTable) - require.Contains(t, readme, expectedNotificationRedisRecordFieldsTable) + require.Contains(t, readme, expectedNotificationPostgresTable) - for _, snippet := range expectedNotificationRedisDocumentationSnippets { + for _, snippet := range expectedNotificationPersistenceDocumentationSnippets { normalizedSnippet := normalizeWhitespace(snippet) require.Contains(t, normalizedReadme, normalizedSnippet) } diff --git a/pkg/geoip/go.mod 
b/pkg/geoip/go.mod index 5bc36ce..73a4f20 100644 --- a/pkg/geoip/go.mod +++ b/pkg/geoip/go.mod @@ -13,7 +13,7 @@ require ( github.com/oschwald/maxminddb-golang/v2 v2.1.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/rogpeppe/go-internal v1.14.1 // indirect - golang.org/x/sys v0.42.0 // indirect + golang.org/x/sys v0.43.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/geoip/go.sum b/pkg/geoip/go.sum index 1a20929..26eaca3 100644 --- a/pkg/geoip/go.sum +++ b/pkg/geoip/go.sum @@ -11,8 +11,7 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= -golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/pkg/notificationintent/go.mod b/pkg/notificationintent/go.mod index 1661aae..c80186b 100644 --- a/pkg/notificationintent/go.mod +++ b/pkg/notificationintent/go.mod @@ -18,7 +18,7 @@ require ( github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/sys v0.42.0 // indirect + golang.org/x/sys v0.43.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 
// indirect ) diff --git a/pkg/notificationintent/go.sum b/pkg/notificationintent/go.sum index 11f0086..fb38598 100644 --- a/pkg/notificationintent/go.sum +++ b/pkg/notificationintent/go.sum @@ -24,7 +24,7 @@ github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/pkg/postgres/config.go b/pkg/postgres/config.go new file mode 100644 index 0000000..3c2e367 --- /dev/null +++ b/pkg/postgres/config.go @@ -0,0 +1,196 @@ +// Package postgres provides shared helpers for opening, instrumenting and +// migrating PostgreSQL backends used by Galaxy services. +// +// The package codifies the steady-state rules captured in `ARCHITECTURE.md` +// `§Persistence Backends`: services connect through `database/sql` driven by +// the pgx driver, apply embedded goose migrations at startup, and expose +// statement spans plus pool metrics via OpenTelemetry. +package postgres + +import ( + "errors" + "fmt" + "os" + "strconv" + "strings" + "time" +) + +// Default configuration values applied by DefaultConfig and LoadFromEnv when +// the corresponding environment variable is absent. 
// Fallback tuning used by DefaultConfig and by LoadFromEnv whenever the
// matching environment variable is not set.
const (
	DefaultOperationTimeout = 1 * time.Second
	DefaultMaxOpenConns     = 25
	DefaultMaxIdleConns     = 5
	DefaultConnMaxLifetime  = 30 * time.Minute
)

// Config describes how to open a primary plus zero-or-more replica `*sql.DB`
// pools. Only the primary is wired today; keeping the replica list in the
// type means introducing read-routing later is a non-breaking change.
type Config struct {
	// PrimaryDSN is the DSN of the primary connection and is mandatory.
	PrimaryDSN string

	// ReplicaDSNs lists optional read-only replica DSNs.
	ReplicaDSNs []string

	// OperationTimeout bounds startup work such as Ping and individual pgx
	// connect attempts.
	OperationTimeout time.Duration

	// MaxOpenConns caps open connections per pool.
	MaxOpenConns int

	// MaxIdleConns caps idle connections per pool.
	MaxIdleConns int

	// ConnMaxLifetime bounds how long any single connection may live.
	ConnMaxLifetime time.Duration
}

// DefaultConfig returns a Config carrying the default pool tuning. The DSN
// fields stay empty; callers (or LoadFromEnv) must fill them in.
func DefaultConfig() Config {
	return Config{
		OperationTimeout: DefaultOperationTimeout,
		MaxOpenConns:     DefaultMaxOpenConns,
		MaxIdleConns:     DefaultMaxIdleConns,
		ConnMaxLifetime:  DefaultConnMaxLifetime,
	}
}

// Validate reports whether cfg is usable. DSNs are only checked for being
// non-blank here; full pgx parsing is deferred to OpenPrimary/OpenReplicas so
// connection-string problems surface at a single point.
func (cfg Config) Validate() error {
	if strings.TrimSpace(cfg.PrimaryDSN) == "" {
		return errors.New("postgres primary DSN must not be empty")
	}
	for position, replica := range cfg.ReplicaDSNs {
		if strings.TrimSpace(replica) != "" {
			continue
		}
		return fmt.Errorf("postgres replica DSN at index %d must not be empty", position)
	}
	switch {
	case cfg.OperationTimeout <= 0:
		return errors.New("postgres operation timeout must be positive")
	case cfg.MaxOpenConns <= 0:
		return errors.New("postgres max open conns must be positive")
	case cfg.MaxIdleConns < 0:
		return errors.New("postgres max idle conns must not be negative")
	case cfg.MaxIdleConns > cfg.MaxOpenConns:
		return errors.New("postgres max idle conns must not exceed max open conns")
	case cfg.ConnMaxLifetime <= 0:
		return errors.New("postgres conn max lifetime must be positive")
	}
	return nil
}
+// USERSERVICE_POSTGRES_OPERATION_TIMEOUT=1s +// USERSERVICE_POSTGRES_MAX_OPEN_CONNS=25 +// USERSERVICE_POSTGRES_MAX_IDLE_CONNS=5 +// USERSERVICE_POSTGRES_CONN_MAX_LIFETIME=30m +func LoadFromEnv(prefix string) (Config, error) { + if strings.TrimSpace(prefix) == "" { + return Config{}, errors.New("postgres env prefix must not be empty") + } + + cfg := DefaultConfig() + + primaryName := envName(prefix, "PRIMARY_DSN") + primary, ok := os.LookupEnv(primaryName) + if !ok || strings.TrimSpace(primary) == "" { + return Config{}, fmt.Errorf("%s must be set", primaryName) + } + cfg.PrimaryDSN = strings.TrimSpace(primary) + + if raw, ok := os.LookupEnv(envName(prefix, "REPLICA_DSNS")); ok { + cfg.ReplicaDSNs = splitCSV(raw) + } + + timeout, err := loadDuration(envName(prefix, "OPERATION_TIMEOUT"), cfg.OperationTimeout) + if err != nil { + return Config{}, err + } + cfg.OperationTimeout = timeout + + maxOpen, err := loadInt(envName(prefix, "MAX_OPEN_CONNS"), cfg.MaxOpenConns) + if err != nil { + return Config{}, err + } + cfg.MaxOpenConns = maxOpen + + maxIdle, err := loadInt(envName(prefix, "MAX_IDLE_CONNS"), cfg.MaxIdleConns) + if err != nil { + return Config{}, err + } + cfg.MaxIdleConns = maxIdle + + connLifetime, err := loadDuration(envName(prefix, "CONN_MAX_LIFETIME"), cfg.ConnMaxLifetime) + if err != nil { + return Config{}, err + } + cfg.ConnMaxLifetime = connLifetime + + if err := cfg.Validate(); err != nil { + return Config{}, err + } + return cfg, nil +} + +func envName(prefix, suffix string) string { + return strings.ToUpper(strings.TrimSpace(prefix)) + "_POSTGRES_" + suffix +} + +func splitCSV(raw string) []string { + parts := strings.Split(raw, ",") + out := make([]string, 0, len(parts)) + for _, part := range parts { + trimmed := strings.TrimSpace(part) + if trimmed == "" { + continue + } + out = append(out, trimmed) + } + if len(out) == 0 { + return nil + } + return out +} + +func loadDuration(name string, fallback time.Duration) (time.Duration, error) { + 
raw, ok := os.LookupEnv(name) + if !ok { + return fallback, nil + } + parsed, err := time.ParseDuration(strings.TrimSpace(raw)) + if err != nil { + return 0, fmt.Errorf("%s: %w", name, err) + } + return parsed, nil +} + +func loadInt(name string, fallback int) (int, error) { + raw, ok := os.LookupEnv(name) + if !ok { + return fallback, nil + } + parsed, err := strconv.Atoi(strings.TrimSpace(raw)) + if err != nil { + return 0, fmt.Errorf("%s: %w", name, err) + } + return parsed, nil +} diff --git a/pkg/postgres/config_test.go b/pkg/postgres/config_test.go new file mode 100644 index 0000000..565999d --- /dev/null +++ b/pkg/postgres/config_test.go @@ -0,0 +1,198 @@ +package postgres + +import ( + "strings" + "testing" + "time" +) + +func TestDefaultConfigReturnsExpectedTuning(t *testing.T) { + t.Parallel() + + cfg := DefaultConfig() + if cfg.OperationTimeout != DefaultOperationTimeout { + t.Fatalf("operation timeout = %v, want %v", cfg.OperationTimeout, DefaultOperationTimeout) + } + if cfg.MaxOpenConns != DefaultMaxOpenConns { + t.Fatalf("max open conns = %d, want %d", cfg.MaxOpenConns, DefaultMaxOpenConns) + } + if cfg.MaxIdleConns != DefaultMaxIdleConns { + t.Fatalf("max idle conns = %d, want %d", cfg.MaxIdleConns, DefaultMaxIdleConns) + } + if cfg.ConnMaxLifetime != DefaultConnMaxLifetime { + t.Fatalf("conn max lifetime = %v, want %v", cfg.ConnMaxLifetime, DefaultConnMaxLifetime) + } +} + +func TestConfigValidateAcceptsHappyPath(t *testing.T) { + t.Parallel() + + cfg := DefaultConfig() + cfg.PrimaryDSN = "postgres://localhost:5432/galaxy?sslmode=disable" + if err := cfg.Validate(); err != nil { + t.Fatalf("validate happy path: %v", err) + } +} + +func TestConfigValidateRejectsInvalidValues(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + mutate func(*Config) + wantSub string + }{ + { + name: "missing primary", + mutate: func(c *Config) { + c.PrimaryDSN = "" + }, + wantSub: "primary DSN", + }, + { + name: "blank replica entry", + mutate: 
func(c *Config) { + c.ReplicaDSNs = []string{"postgres://a", " "} + }, + wantSub: "replica DSN", + }, + { + name: "non-positive timeout", + mutate: func(c *Config) { + c.OperationTimeout = 0 + }, + wantSub: "operation timeout", + }, + { + name: "non-positive max open", + mutate: func(c *Config) { + c.MaxOpenConns = 0 + }, + wantSub: "max open conns", + }, + { + name: "negative max idle", + mutate: func(c *Config) { + c.MaxIdleConns = -1 + }, + wantSub: "max idle conns must not be negative", + }, + { + name: "max idle exceeds open", + mutate: func(c *Config) { + c.MaxOpenConns = 4 + c.MaxIdleConns = 5 + }, + wantSub: "must not exceed", + }, + { + name: "non-positive lifetime", + mutate: func(c *Config) { + c.ConnMaxLifetime = 0 + }, + wantSub: "conn max lifetime", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + cfg := DefaultConfig() + cfg.PrimaryDSN = "postgres://localhost" + tt.mutate(&cfg) + + err := cfg.Validate() + if err == nil { + t.Fatalf("expected validate error, got nil") + } + if !strings.Contains(err.Error(), tt.wantSub) { + t.Fatalf("error %q does not contain %q", err, tt.wantSub) + } + }) + } +} + +func TestLoadFromEnvUsesDefaultsWhenOnlyPrimarySet(t *testing.T) { + const prefix = "TESTSVC" + t.Setenv(prefix+"_POSTGRES_PRIMARY_DSN", "postgres://example/galaxy?sslmode=disable") + + cfg, err := LoadFromEnv(prefix) + if err != nil { + t.Fatalf("load from env: %v", err) + } + if cfg.PrimaryDSN != "postgres://example/galaxy?sslmode=disable" { + t.Fatalf("primary DSN = %q", cfg.PrimaryDSN) + } + if len(cfg.ReplicaDSNs) != 0 { + t.Fatalf("replica DSNs = %v, want empty", cfg.ReplicaDSNs) + } + if cfg.OperationTimeout != DefaultOperationTimeout { + t.Fatalf("operation timeout = %v", cfg.OperationTimeout) + } + if cfg.MaxOpenConns != DefaultMaxOpenConns { + t.Fatalf("max open conns = %d", cfg.MaxOpenConns) + } +} + +func TestLoadFromEnvParsesAllOverrides(t *testing.T) { + const prefix = "TESTSVC" + 
t.Setenv(prefix+"_POSTGRES_PRIMARY_DSN", "postgres://example/galaxy?sslmode=disable") + t.Setenv(prefix+"_POSTGRES_REPLICA_DSNS", "postgres://r1, postgres://r2 ,") + t.Setenv(prefix+"_POSTGRES_OPERATION_TIMEOUT", "750ms") + t.Setenv(prefix+"_POSTGRES_MAX_OPEN_CONNS", "40") + t.Setenv(prefix+"_POSTGRES_MAX_IDLE_CONNS", "10") + t.Setenv(prefix+"_POSTGRES_CONN_MAX_LIFETIME", "15m") + + cfg, err := LoadFromEnv(prefix) + if err != nil { + t.Fatalf("load from env: %v", err) + } + if got, want := cfg.OperationTimeout, 750*time.Millisecond; got != want { + t.Fatalf("operation timeout = %v, want %v", got, want) + } + if got, want := cfg.MaxOpenConns, 40; got != want { + t.Fatalf("max open conns = %d, want %d", got, want) + } + if got, want := cfg.MaxIdleConns, 10; got != want { + t.Fatalf("max idle conns = %d, want %d", got, want) + } + if got, want := cfg.ConnMaxLifetime, 15*time.Minute; got != want { + t.Fatalf("conn max lifetime = %v, want %v", got, want) + } + if got, want := len(cfg.ReplicaDSNs), 2; got != want { + t.Fatalf("replica DSN count = %d, want %d", got, want) + } + if cfg.ReplicaDSNs[0] != "postgres://r1" || cfg.ReplicaDSNs[1] != "postgres://r2" { + t.Fatalf("replica DSNs = %v", cfg.ReplicaDSNs) + } +} + +func TestLoadFromEnvFailsWhenPrimaryMissing(t *testing.T) { + const prefix = "TESTSVC" + t.Setenv(prefix+"_POSTGRES_PRIMARY_DSN", "") + + if _, err := LoadFromEnv(prefix); err == nil { + t.Fatal("expected error when primary DSN missing") + } +} + +func TestLoadFromEnvRejectsEmptyPrefix(t *testing.T) { + t.Parallel() + + if _, err := LoadFromEnv(" "); err == nil { + t.Fatal("expected error on empty prefix") + } +} + +func TestLoadFromEnvSurfacesDurationParseErrors(t *testing.T) { + const prefix = "TESTSVC" + t.Setenv(prefix+"_POSTGRES_PRIMARY_DSN", "postgres://example/galaxy") + t.Setenv(prefix+"_POSTGRES_OPERATION_TIMEOUT", "not-a-duration") + + if _, err := LoadFromEnv(prefix); err == nil { + t.Fatal("expected parse error") + } else if 
!strings.Contains(err.Error(), "OPERATION_TIMEOUT") { + t.Fatalf("error %q should name the env var", err) + } +} diff --git a/pkg/postgres/go.mod b/pkg/postgres/go.mod new file mode 100644 index 0000000..8557d35 --- /dev/null +++ b/pkg/postgres/go.mod @@ -0,0 +1,72 @@ +module galaxy/postgres + +go 1.26.1 + +require ( + github.com/XSAM/otelsql v0.42.0 + github.com/jackc/pgx/v5 v5.9.2 + github.com/pressly/goose/v3 v3.27.1 + github.com/testcontainers/testcontainers-go v0.42.0 + github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0 + go.opentelemetry.io/otel v1.43.0 + go.opentelemetry.io/otel/metric v1.43.0 + go.opentelemetry.io/otel/trace v1.43.0 +) + +require ( + dario.cat/mergo v1.0.2 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/cpuguy83/dockercfg v0.3.2 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/go-connections v0.7.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/ebitengine/purego v0.10.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect + github.com/klauspost/compress v1.18.5 // indirect + github.com/lufia/plan9stats 
v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.10 // indirect + github.com/mfridman/interpolate v0.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/go-archive v0.2.0 // indirect + github.com/moby/moby/api v1.54.2 // indirect + github.com/moby/moby/client v0.4.1 // indirect + github.com/moby/patternmatcher v0.6.1 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect + github.com/moby/sys/user v0.4.0 // indirect + github.com/moby/sys/userns v0.1.0 // indirect + github.com/moby/term v0.5.2 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/sethvargo/go-retry v0.3.0 // indirect + github.com/shirou/gopsutil/v4 v4.26.3 // indirect + github.com/sirupsen/logrus v1.9.4 // indirect + github.com/stretchr/testify v1.11.1 // indirect + github.com/tklauser/go-sysconf v0.3.16 // indirect + github.com/tklauser/numcpus v0.11.0 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.50.0 // indirect + golang.org/x/sync v0.20.0 // indirect + golang.org/x/sys v0.43.0 // indirect + golang.org/x/text v0.36.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/pkg/postgres/go.sum b/pkg/postgres/go.sum new file mode 100644 index 0000000..10c3178 --- /dev/null +++ b/pkg/postgres/go.sum @@ -0,0 +1,185 @@ +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 
h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/XSAM/otelsql v0.42.0 h1:Li0xF4eJUxG2e0x3D4rvRlys1f27yJKvjTh7ljkUP5o= +github.com/XSAM/otelsql v0.42.0/go.mod h1:4mOrEv+cS1KmKzrvTktvJnstr5GtKSAK+QHvFR9OcpI= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= +github.com/cpuguy83/dockercfg v0.3.2/go.mod 
h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= +github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/go-connections v0.7.0 h1:6SsRfJddP22WMrCkj19x9WKjEDTB+ahsdiGYf0mN39c= +github.com/docker/go-connections v0.7.0/go.mod h1:no1qkHdjq7kLMGUXYAduOhYPSJxxvgWBh7ogVvptn3Q= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU= +github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod 
h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.9.2 h1:3ZhOzMWnR4yJ+RW1XImIPsD1aNSz4T4fyP7zlQb56hw= +github.com/jackc/pgx/v5 v5.9.2/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE= +github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod 
h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= +github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-isatty v0.0.21 h1:xYae+lCNBP7QuW4PUnNG61ffM4hVIfm+zUzDuSzYLGs= +github.com/mattn/go-isatty v0.0.21/go.mod h1:ZXfXG4SQHsB/w3ZeOYbR0PrPwLy+n6xiMrJlRFqopa4= +github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI= +github.com/mdelapenya/tlscert v0.2.0/go.mod h1:O4njj3ELLnJjGdkN7M/vIVCpZ+Cf0L6muqOG4tLSl8o= +github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY= +github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/go-archive v0.2.0 h1:zg5QDUM2mi0JIM9fdQZWC7U8+2ZfixfTYoHL7rWUcP8= +github.com/moby/go-archive v0.2.0/go.mod h1:mNeivT14o8xU+5q1YnNrkQVpK+dnNe/K6fHqnTg4qPU= +github.com/moby/moby/api v1.54.2 h1:wiat9QAhnDQjA7wk1kh/TqHz2I1uUA7M7t9SAl/JNXg= +github.com/moby/moby/api v1.54.2/go.mod h1:+RQ6wluLwtYaTd1WnPLykIDPekkuyD/ROWQClE83pzs= +github.com/moby/moby/client v0.4.1 h1:DMQgisVoMkmMs7fp3ROSdiBnoAu8+vo3GggFl06M/wY= +github.com/moby/moby/client v0.4.1/go.mod h1:z52C9O2POPOsnxZAy//WtKcQ32P+jT/NGeXu/7nfjGQ= +github.com/moby/patternmatcher v0.6.1 h1:qlhtafmr6kgMIJjKJMDmMWq7WLkKIo23hsrpR3x084U= +github.com/moby/patternmatcher v0.6.1/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= 
+github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= +github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= +github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= +github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pressly/goose/v3 v3.27.1 h1:6uEvcprBybDmW4hcz3gYujhARhye+GoWKhEWyzD5sh4= +github.com/pressly/goose/v3 v3.27.1/go.mod h1:maruOxsPnIG2yHHyo8UqKWXYKFcH7Q76csUV7+7KYoM= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec 
h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE= +github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas= +github.com/shirou/gopsutil/v4 v4.26.3 h1:2ESdQt90yU3oXF/CdOlRCJxrP+Am1aBYubTMTfxJ1qc= +github.com/shirou/gopsutil/v4 v4.26.3/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ= +github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= +github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.3 h1:jmXUvGomnU1o3W/V5h2VEradbpJDwGrzugQQvL0POH4= +github.com/stretchr/objx v0.5.3/go.mod h1:rDQraq+vQZU7Fde9LOZLr8Tax6zZvy4kuNKF+QYS+U0= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/testcontainers/testcontainers-go v0.42.0 h1:He3IhTzTZOygSXLJPMX7n44XtK+qhjat1nI9cneBbUY= +github.com/testcontainers/testcontainers-go v0.42.0/go.mod h1:vZjdY1YmUA1qEForxOIOazfsrdyORJAbhi0bp8plN30= +github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0 h1:GCbb1ndrF7OTDiIvxXyItaDab4qkzTFJ48LKFdM7EIo= +github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0/go.mod h1:IRPBaI8jXdrNfD0e4Zm7Fbcgaz5shKxOQv4axiL09xs= +github.com/tklauser/go-sysconf v0.3.16 
h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA= +github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI= +github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw= +github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 h1:CqXxU8VOmDefoh0+ztfGaymYbhdB/tT3zs79QaZTNGY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0/go.mod h1:BuhAPThV8PBHBvg8ZzZ/Ok3idOdhWIodywz2xEcRbJo= +go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= +go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= +go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM= +go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY= +go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg= +go.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg= +go.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw= +go.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A= +go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A= +go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= 
+golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY= +golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY= +golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= +golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +modernc.org/libc v1.72.1 
h1:db1xwJ6u1kE3KHTFTTbe2GCrczHPKzlURP0aDC4NGD0= +modernc.org/libc v1.72.1/go.mod h1:HRMiC/PhPGLIPM7GzAFCbI+oSgE3dhZ8FWftmRrHVlY= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= +modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/sqlite v1.49.1 h1:dYGHTKcX1sJ+EQDnUzvz4TJ5GbuvhNJa8Fg6ElGx73U= +modernc.org/sqlite v1.49.1/go.mod h1:m0w8xhwYUVY3H6pSDwc3gkJ/irZT/0YEXwBlhaxQEew= +pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= +pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= diff --git a/pkg/postgres/health.go b/pkg/postgres/health.go new file mode 100644 index 0000000..c484740 --- /dev/null +++ b/pkg/postgres/health.go @@ -0,0 +1,30 @@ +package postgres + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" +) + +// Ping bounds db.PingContext under timeout and returns a wrapped error so +// startup failures are easy to spot in service logs. +// +// timeout is typically taken from Config.OperationTimeout. 
+func Ping(ctx context.Context, db *sql.DB, timeout time.Duration) error { + if db == nil { + return errors.New("ping postgres: nil db") + } + if timeout <= 0 { + return errors.New("ping postgres: timeout must be positive") + } + + pingCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + if err := db.PingContext(pingCtx); err != nil { + return fmt.Errorf("ping postgres: %w", err) + } + return nil +} diff --git a/pkg/postgres/migrate.go b/pkg/postgres/migrate.go new file mode 100644 index 0000000..a898e59 --- /dev/null +++ b/pkg/postgres/migrate.go @@ -0,0 +1,53 @@ +package postgres + +import ( + "context" + "database/sql" + "errors" + "fmt" + "io/fs" + "strings" + "sync" + + "github.com/pressly/goose/v3" +) + +// gooseMu serialises access to goose's package-level filesystem state so +// concurrent calls to RunMigrations from independent services in the same +// process do not race on goose.SetBaseFS. +var gooseMu sync.Mutex + +// RunMigrations applies every pending Up migration found under dir inside fsys +// against db. The PostgreSQL dialect is forced; goose's package-level base FS +// is restored to the OS filesystem on the way out so a second caller in the +// same process is safe. +// +// dir is the path within fsys (use "." when the migration files sit at the +// embed root). The function does not handle Down migrations or partial +// targets — services apply the full forward sequence at startup. 
+func RunMigrations(ctx context.Context, db *sql.DB, fsys fs.FS, dir string) error { + if db == nil { + return errors.New("run migrations: nil db") + } + if fsys == nil { + return errors.New("run migrations: nil fs") + } + if strings.TrimSpace(dir) == "" { + return errors.New("run migrations: dir must not be empty") + } + + gooseMu.Lock() + defer gooseMu.Unlock() + + goose.SetBaseFS(fsys) + defer goose.SetBaseFS(nil) + + if err := goose.SetDialect("postgres"); err != nil { + return fmt.Errorf("run migrations: set dialect: %w", err) + } + + if err := goose.UpContext(ctx, db, dir); err != nil { + return fmt.Errorf("run migrations: %w", err) + } + return nil +} diff --git a/pkg/postgres/open.go b/pkg/postgres/open.go new file mode 100644 index 0000000..102d252 --- /dev/null +++ b/pkg/postgres/open.go @@ -0,0 +1,136 @@ +package postgres + +import ( + "context" + "database/sql" + "errors" + "fmt" + + "github.com/XSAM/otelsql" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/stdlib" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" +) + +// dbSystemAttribute identifies the wrapped backend in OpenTelemetry spans +// without locking the package to a specific semconv release. +var dbSystemAttribute = attribute.String("db.system", "postgresql") + +// Option configures the OpenTelemetry providers attached to a connection by +// OpenPrimary, OpenReplicas, and InstrumentDBStats. Unset providers fall back +// to the OpenTelemetry global providers. +type Option func(*options) + +type options struct { + tracerProvider trace.TracerProvider + meterProvider metric.MeterProvider +} + +// WithTracerProvider sets the tracer provider used for SQL statement spans. +func WithTracerProvider(tp trace.TracerProvider) Option { + return func(o *options) { + o.tracerProvider = tp + } +} + +// WithMeterProvider sets the meter provider used for connection-pool stats. 
+func WithMeterProvider(mp metric.MeterProvider) Option { + return func(o *options) { + o.meterProvider = mp + } +} + +func evalOptions(opts []Option) options { + var resolved options + for _, opt := range opts { + if opt == nil { + continue + } + opt(&resolved) + } + return resolved +} + +func (o options) otelsqlOpenOptions() []otelsql.Option { + out := []otelsql.Option{otelsql.WithAttributes(dbSystemAttribute)} + if o.tracerProvider != nil { + out = append(out, otelsql.WithTracerProvider(o.tracerProvider)) + } + if o.meterProvider != nil { + out = append(out, otelsql.WithMeterProvider(o.meterProvider)) + } + return out +} + +// OpenPrimary opens the primary `*sql.DB` from cfg. ctx bounds individual +// pgx connect attempts via the parsed pgx config's ConnectTimeout (set to +// cfg.OperationTimeout). The returned pool has SetMaxOpenConns, +// SetMaxIdleConns and SetConnMaxLifetime applied. +func OpenPrimary(ctx context.Context, cfg Config, opts ...Option) (*sql.DB, error) { + if err := cfg.Validate(); err != nil { + return nil, fmt.Errorf("open postgres primary: %w", err) + } + db, err := openDB(ctx, cfg, cfg.PrimaryDSN, evalOptions(opts)) + if err != nil { + return nil, fmt.Errorf("open postgres primary: %w", err) + } + return db, nil +} + +// OpenReplicas opens one `*sql.DB` per replica DSN. It returns nil when no +// replicas are configured. When opening a replica fails mid-way, every +// already-opened replica is closed before returning the error. 
+func OpenReplicas(ctx context.Context, cfg Config, opts ...Option) ([]*sql.DB, error) { + if err := cfg.Validate(); err != nil { + return nil, fmt.Errorf("open postgres replicas: %w", err) + } + if len(cfg.ReplicaDSNs) == 0 { + return nil, nil + } + + resolved := evalOptions(opts) + pools := make([]*sql.DB, 0, len(cfg.ReplicaDSNs)) + for index, dsn := range cfg.ReplicaDSNs { + db, err := openDB(ctx, cfg, dsn, resolved) + if err != nil { + for _, opened := range pools { + _ = opened.Close() + } + return nil, fmt.Errorf("open postgres replica at index %d: %w", index, err) + } + pools = append(pools, db) + } + return pools, nil +} + +func openDB(ctx context.Context, cfg Config, dsn string, opts options) (*sql.DB, error) { + if ctx.Err() != nil { + return nil, ctx.Err() + } + + pgxCfg, err := pgx.ParseConfig(dsn) + if err != nil { + return nil, fmt.Errorf("parse dsn: %w", err) + } + pgxCfg.ConnectTimeout = cfg.OperationTimeout + + registeredName := stdlib.RegisterConnConfig(pgxCfg) + + db, err := otelsql.Open("pgx", registeredName, opts.otelsqlOpenOptions()...) + if err != nil { + stdlib.UnregisterConnConfig(registeredName) + return nil, fmt.Errorf("otelsql open: %w", err) + } + if db == nil { + stdlib.UnregisterConnConfig(registeredName) + return nil, errors.New("otelsql open returned nil db") + } + + db.SetMaxOpenConns(cfg.MaxOpenConns) + db.SetMaxIdleConns(cfg.MaxIdleConns) + db.SetConnMaxLifetime(cfg.ConnMaxLifetime) + + return db, nil +} diff --git a/pkg/postgres/otel.go b/pkg/postgres/otel.go new file mode 100644 index 0000000..d018c6a --- /dev/null +++ b/pkg/postgres/otel.go @@ -0,0 +1,38 @@ +package postgres + +import ( + "database/sql" + "errors" + "fmt" + + "github.com/XSAM/otelsql" +) + +// Unregister releases an instrumentation registration. Returned by +// InstrumentDBStats so callers can detach metrics during shutdown. 
+type Unregister func() error + +// InstrumentDBStats registers `database/sql` connection-pool metrics +// (`db.sql.connection.*`) for db. Statement spans are already attached at open +// time inside OpenPrimary/OpenReplicas — this function adds the meter-side +// instrumentation. +// +// The returned Unregister detaches the metric callbacks; callers usually +// invoke it during shutdown after closing db. +func InstrumentDBStats(db *sql.DB, opts ...Option) (Unregister, error) { + if db == nil { + return nil, errors.New("instrument postgres db stats: nil db") + } + + resolved := evalOptions(opts) + otelOpts := []otelsql.Option{otelsql.WithAttributes(dbSystemAttribute)} + if resolved.meterProvider != nil { + otelOpts = append(otelOpts, otelsql.WithMeterProvider(resolved.meterProvider)) + } + + reg, err := otelsql.RegisterDBStatsMetrics(db, otelOpts...) + if err != nil { + return nil, fmt.Errorf("instrument postgres db stats: %w", err) + } + return reg.Unregister, nil +} diff --git a/pkg/postgres/postgres_test.go b/pkg/postgres/postgres_test.go new file mode 100644 index 0000000..89c5eea --- /dev/null +++ b/pkg/postgres/postgres_test.go @@ -0,0 +1,115 @@ +package postgres_test + +import ( + "context" + "embed" + "io/fs" + "testing" + "time" + + "galaxy/postgres" + + testcontainers "github.com/testcontainers/testcontainers-go" + tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres" + "github.com/testcontainers/testcontainers-go/wait" +) + +const smokeImage = "postgres:16-alpine" + +//go:embed testdata/migrations/*.sql +var smokeMigrationsFS embed.FS + +func TestPostgresPackageRoundTrip(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + t.Cleanup(cancel) + + pgContainer, err := tcpostgres.Run(ctx, + smokeImage, + tcpostgres.WithDatabase("galaxy_smoke"), + tcpostgres.WithUsername("galaxy_smoke"), + tcpostgres.WithPassword("galaxy_smoke"), + // The Postgres image emits "ready to accept connections" twice during 
+ // startup: once for the temporary bootstrap instance, once for the real + // listener on the mapped port. Waiting for the second occurrence + // prevents racing the bootstrap. + testcontainers.WithWaitStrategy( + wait.ForLog("database system is ready to accept connections"). + WithOccurrence(2). + WithStartupTimeout(60*time.Second), + ), + ) + if err != nil { + t.Fatalf("start postgres container: %v", err) + } + t.Cleanup(func() { + if err := testcontainers.TerminateContainer(pgContainer); err != nil { + t.Errorf("terminate postgres container: %v", err) + } + }) + + dsn, err := pgContainer.ConnectionString(ctx, "sslmode=disable") + if err != nil { + t.Fatalf("postgres connection string: %v", err) + } + + cfg := postgres.DefaultConfig() + cfg.PrimaryDSN = dsn + cfg.OperationTimeout = 5 * time.Second + + db, err := postgres.OpenPrimary(ctx, cfg) + if err != nil { + t.Fatalf("open primary: %v", err) + } + t.Cleanup(func() { + if err := db.Close(); err != nil { + t.Errorf("close db: %v", err) + } + }) + + if err := postgres.Ping(ctx, db, cfg.OperationTimeout); err != nil { + t.Fatalf("ping: %v", err) + } + + migrationsDir, err := fs.Sub(smokeMigrationsFS, "testdata/migrations") + if err != nil { + t.Fatalf("sub migrations FS: %v", err) + } + if err := postgres.RunMigrations(ctx, db, migrationsDir, "."); err != nil { + t.Fatalf("run migrations: %v", err) + } + + var insertedID int64 + if err := db.QueryRowContext(ctx, + "INSERT INTO smoke (note) VALUES ($1) RETURNING id", "hello", + ).Scan(&insertedID); err != nil { + t.Fatalf("insert returning id: %v", err) + } + if insertedID <= 0 { + t.Fatalf("inserted id = %d, want > 0", insertedID) + } + + var note string + if err := db.QueryRowContext(ctx, + "SELECT note FROM smoke WHERE id = $1", insertedID, + ).Scan(¬e); err != nil { + t.Fatalf("select note: %v", err) + } + if note != "hello" { + t.Fatalf("note = %q, want %q", note, "hello") + } +} + +func TestOpenReplicasReturnsNilWhenUnconfigured(t *testing.T) { + 
t.Parallel() + + cfg := postgres.DefaultConfig() + cfg.PrimaryDSN = "postgres://localhost:5432/galaxy?sslmode=disable" + + dbs, err := postgres.OpenReplicas(context.Background(), cfg) + if err != nil { + t.Fatalf("open replicas: %v", err) + } + if dbs != nil { + t.Fatalf("replicas = %v, want nil", dbs) + } +} diff --git a/pkg/postgres/testdata/migrations/00001_smoke.sql b/pkg/postgres/testdata/migrations/00001_smoke.sql new file mode 100644 index 0000000..9a3a9ff --- /dev/null +++ b/pkg/postgres/testdata/migrations/00001_smoke.sql @@ -0,0 +1,8 @@ +-- +goose Up +CREATE TABLE smoke ( + id BIGSERIAL PRIMARY KEY, + note TEXT NOT NULL +); + +-- +goose Down +DROP TABLE smoke; diff --git a/pkg/redisconn/client.go b/pkg/redisconn/client.go new file mode 100644 index 0000000..bf44ba6 --- /dev/null +++ b/pkg/redisconn/client.go @@ -0,0 +1,43 @@ +package redisconn + +import ( + "github.com/redis/go-redis/v9" +) + +// NewMasterClient builds the master Redis client from cfg using the +// conservative options shared by the existing service publishers +// (`Protocol: 2`, `DisableIdentity: true`, dial/read/write timeouts bound to +// cfg.OperationTimeout). +// +// NewMasterClient does not validate cfg; callers are expected to call +// Config.Validate (or LoadFromEnv which already does) before invoking this. +func NewMasterClient(cfg Config) *redis.Client { + return redis.NewClient(buildOptions(cfg.MasterAddr, cfg)) +} + +// NewReplicaClients builds one Redis client per replica address. It returns +// nil when no replicas are configured; callers treat that as "no replicas +// wired". 
+func NewReplicaClients(cfg Config) []*redis.Client { + if len(cfg.ReplicaAddrs) == 0 { + return nil + } + clients := make([]*redis.Client, 0, len(cfg.ReplicaAddrs)) + for _, addr := range cfg.ReplicaAddrs { + clients = append(clients, redis.NewClient(buildOptions(addr, cfg))) + } + return clients +} + +func buildOptions(addr string, cfg Config) *redis.Options { + return &redis.Options{ + Addr: addr, + Password: cfg.Password, + DB: cfg.DB, + Protocol: 2, + DisableIdentity: true, + DialTimeout: cfg.OperationTimeout, + ReadTimeout: cfg.OperationTimeout, + WriteTimeout: cfg.OperationTimeout, + } +} diff --git a/pkg/redisconn/config.go b/pkg/redisconn/config.go new file mode 100644 index 0000000..8cd1f85 --- /dev/null +++ b/pkg/redisconn/config.go @@ -0,0 +1,187 @@ +// Package redisconn provides shared helpers for opening, instrumenting and +// pinging Redis connections used by Galaxy services. +// +// The package codifies the steady-state rules captured in `ARCHITECTURE.md` +// `§Persistence Backends`: each service connects to one master plus +// zero-or-more replicas with a mandatory password, no TLS, and no +// `USERNAME`/ACL. The deprecated env vars `*_REDIS_TLS_ENABLED` and +// `*_REDIS_USERNAME` are rejected by LoadFromEnv with a clear startup error. +package redisconn + +import ( + "errors" + "fmt" + "os" + "strconv" + "strings" + "time" +) + +// Default configuration values applied by DefaultConfig and LoadFromEnv when +// the corresponding environment variable is absent. +const ( + DefaultDB = 0 + DefaultOperationTimeout = 250 * time.Millisecond +) + +// Config stores the connection settings for one master plus zero-or-more +// replica Redis instances. Stage 1 wires only the master; the replica list is +// preserved so future read-routing is a non-breaking change. +type Config struct { + // MasterAddr stores the Redis network address in host:port form. Required. + MasterAddr string + + // ReplicaAddrs stores zero-or-more read-only replica addresses. 
+ ReplicaAddrs []string + + // Password is the mandatory connection password. Empty values are rejected + // by Validate to enforce the architectural rule that Redis traffic is + // password-protected even on the trusted segment. + Password string + + // DB selects the logical Redis database index. + DB int + + // OperationTimeout bounds individual Redis round trips. + OperationTimeout time.Duration +} + +// DefaultConfig returns the default tuning. MasterAddr and Password remain +// zero-valued and must be supplied by callers (or by LoadFromEnv). +func DefaultConfig() Config { + return Config{ + DB: DefaultDB, + OperationTimeout: DefaultOperationTimeout, + } +} + +// Validate reports whether cfg is usable. +func (cfg Config) Validate() error { + if strings.TrimSpace(cfg.MasterAddr) == "" { + return errors.New("redis master addr must not be empty") + } + if strings.TrimSpace(cfg.Password) == "" { + return errors.New("redis password must not be empty") + } + for index, addr := range cfg.ReplicaAddrs { + if strings.TrimSpace(addr) == "" { + return fmt.Errorf("redis replica addr at index %d must not be empty", index) + } + } + if cfg.DB < 0 { + return errors.New("redis db must not be negative") + } + if cfg.OperationTimeout <= 0 { + return errors.New("redis operation timeout must be positive") + } + return nil +} + +// LoadFromEnv populates Config from environment variables prefixed with +// `<PREFIX>_REDIS_`. The required variables are +// `<PREFIX>_REDIS_MASTER_ADDR` and `<PREFIX>_REDIS_PASSWORD`; every other +// variable falls back to DefaultConfig values. +// +// LoadFromEnv hard-fails when either of the deprecated variables +// `<PREFIX>_REDIS_TLS_ENABLED` or `<PREFIX>_REDIS_USERNAME` is set in the +// environment, with an error pointing to ARCHITECTURE.md. 
+func LoadFromEnv(prefix string) (Config, error) { + if strings.TrimSpace(prefix) == "" { + return Config{}, errors.New("redis env prefix must not be empty") + } + + if err := rejectDeprecatedEnv(prefix); err != nil { + return Config{}, err + } + + cfg := DefaultConfig() + + masterName := envName(prefix, "MASTER_ADDR") + master, ok := os.LookupEnv(masterName) + if !ok || strings.TrimSpace(master) == "" { + return Config{}, fmt.Errorf("%s must be set", masterName) + } + cfg.MasterAddr = strings.TrimSpace(master) + + passwordName := envName(prefix, "PASSWORD") + password, ok := os.LookupEnv(passwordName) + if !ok || strings.TrimSpace(password) == "" { + return Config{}, fmt.Errorf("%s must be set", passwordName) + } + cfg.Password = strings.TrimSpace(password) + + if raw, ok := os.LookupEnv(envName(prefix, "REPLICA_ADDRS")); ok { + cfg.ReplicaAddrs = splitCSV(raw) + } + + db, err := loadInt(envName(prefix, "DB"), cfg.DB) + if err != nil { + return Config{}, err + } + cfg.DB = db + + timeout, err := loadDuration(envName(prefix, "OPERATION_TIMEOUT"), cfg.OperationTimeout) + if err != nil { + return Config{}, err + } + cfg.OperationTimeout = timeout + + if err := cfg.Validate(); err != nil { + return Config{}, err + } + return cfg, nil +} + +func rejectDeprecatedEnv(prefix string) error { + for _, suffix := range []string{"TLS_ENABLED", "USERNAME"} { + name := envName(prefix, suffix) + if _, ok := os.LookupEnv(name); ok { + return fmt.Errorf("%s is no longer supported (see ARCHITECTURE.md §Persistence Backends); unset it before starting the service", name) + } + } + return nil +} + +func envName(prefix, suffix string) string { + return strings.ToUpper(strings.TrimSpace(prefix)) + "_REDIS_" + suffix +} + +func splitCSV(raw string) []string { + parts := strings.Split(raw, ",") + out := make([]string, 0, len(parts)) + for _, part := range parts { + trimmed := strings.TrimSpace(part) + if trimmed == "" { + continue + } + out = append(out, trimmed) + } + if len(out) == 0 { + 
return nil + } + return out +} + +func loadDuration(name string, fallback time.Duration) (time.Duration, error) { + raw, ok := os.LookupEnv(name) + if !ok { + return fallback, nil + } + parsed, err := time.ParseDuration(strings.TrimSpace(raw)) + if err != nil { + return 0, fmt.Errorf("%s: %w", name, err) + } + return parsed, nil +} + +func loadInt(name string, fallback int) (int, error) { + raw, ok := os.LookupEnv(name) + if !ok { + return fallback, nil + } + parsed, err := strconv.Atoi(strings.TrimSpace(raw)) + if err != nil { + return 0, fmt.Errorf("%s: %w", name, err) + } + return parsed, nil +} diff --git a/pkg/redisconn/go.mod b/pkg/redisconn/go.mod new file mode 100644 index 0000000..4ff8e45 --- /dev/null +++ b/pkg/redisconn/go.mod @@ -0,0 +1,28 @@ +module galaxy/redisconn + +go 1.26.1 + +require ( + github.com/alicebob/miniredis/v2 v2.37.0 + github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 + github.com/redis/go-redis/v9 v9.18.0 + go.opentelemetry.io/otel/metric v1.43.0 + go.opentelemetry.io/otel/trace v1.43.0 +) + +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 // indirect + github.com/yuin/gopher-lua v1.1.1 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/otel v1.43.0 // indirect + go.opentelemetry.io/otel/sdk v1.43.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + golang.org/x/sys v0.43.0 // indirect +) diff --git a/pkg/redisconn/go.sum b/pkg/redisconn/go.sum new file mode 100644 index 0000000..0d0e35d --- /dev/null +++ b/pkg/redisconn/go.sum @@ -0,0 +1,47 @@ 
+github.com/alicebob/miniredis/v2 v2.37.0 h1:RheObYW32G1aiJIj81XVt78ZHJpHonHLHW7OLIshq68= +github.com/alicebob/miniredis/v2 v2.37.0/go.mod h1:TcL7YfarKPGDAthEtl5NBeHZfeUQj6OXMm/+iu5cLMM= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 
h1:QY4nmPHLFAJjtT5O4OMUEOxP8WVaRNOFpcbmxT2NLZU= +github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0/go.mod h1:WH8cY/0fT41Bsf341qzo8v4nx0GCE8FykAA23IVbVmo= +github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 h1:2dKdoEYBJ0CZCLPiCdvvc7luz3DPwY6hKdzjL6m1eHE= +github.com/redis/go-redis/extra/redisotel/v9 v9.18.0/go.mod h1:WzkrVG9ro9BwCQD0eJOWn6AGL4Z1CleGflM45w1hu10= +github.com/redis/go-redis/v9 v9.18.0 h1:pMkxYPkEbMPwRdenAzUNyFNrDgHx9U+DrBabWNfSRQs= +github.com/redis/go-redis/v9 v9.18.0/go.mod h1:k3ufPphLU5YXwNTUcCRXGxUoF1fqxnhFQmscfkCoDA0= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= +github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= +go.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0= +go.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM= +go.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY= +go.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg= +go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A= +go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod 
h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/redisconn/health.go b/pkg/redisconn/health.go new file mode 100644 index 0000000..a7a2075 --- /dev/null +++ b/pkg/redisconn/health.go @@ -0,0 +1,31 @@ +package redisconn + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/redis/go-redis/v9" +) + +// Ping bounds client.Ping under timeout and returns a wrapped error so +// startup failures are easy to spot in service logs. +// +// timeout is typically taken from Config.OperationTimeout. +func Ping(ctx context.Context, client *redis.Client, timeout time.Duration) error { + if client == nil { + return errors.New("ping redis: nil client") + } + if timeout <= 0 { + return errors.New("ping redis: timeout must be positive") + } + + pingCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + if err := client.Ping(pingCtx).Err(); err != nil { + return fmt.Errorf("ping redis: %w", err) + } + return nil +} diff --git a/pkg/redisconn/otel.go b/pkg/redisconn/otel.go new file mode 100644 index 0000000..b2108f5 --- /dev/null +++ b/pkg/redisconn/otel.go @@ -0,0 +1,75 @@ +package redisconn + +import ( + "errors" + "fmt" + + "github.com/redis/go-redis/extra/redisotel/v9" + "github.com/redis/go-redis/v9" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" +) + +// Option configures the OpenTelemetry providers attached to a client by +// Instrument. Unset providers fall back to the OpenTelemetry global +// providers (matching `redisotel` defaults). +type Option func(*options) + +type options struct { + tracerProvider trace.TracerProvider + meterProvider metric.MeterProvider +} + +// WithTracerProvider sets the tracer provider used for Redis command spans. 
+func WithTracerProvider(tp trace.TracerProvider) Option { + return func(o *options) { + o.tracerProvider = tp + } +} + +// WithMeterProvider sets the meter provider used for Redis client metrics. +func WithMeterProvider(mp metric.MeterProvider) Option { + return func(o *options) { + o.meterProvider = mp + } +} + +func evalOptions(opts []Option) options { + var resolved options + for _, opt := range opts { + if opt == nil { + continue + } + opt(&resolved) + } + return resolved +} + +// Instrument attaches Redis tracing and metrics to client. Tracing is +// configured with `WithDBStatement(false)` so that only the command name is +// captured, matching the existing instrumentation in the user, lobby, and +// notification services. +func Instrument(client *redis.Client, opts ...Option) error { + if client == nil { + return errors.New("instrument redis client: nil client") + } + + resolved := evalOptions(opts) + + traceOpts := []redisotel.TracingOption{redisotel.WithDBStatement(false)} + if resolved.tracerProvider != nil { + traceOpts = append(traceOpts, redisotel.WithTracerProvider(resolved.tracerProvider)) + } + if err := redisotel.InstrumentTracing(client, traceOpts...); err != nil { + return fmt.Errorf("instrument redis client tracing: %w", err) + } + + metricOpts := []redisotel.MetricsOption{} + if resolved.meterProvider != nil { + metricOpts = append(metricOpts, redisotel.WithMeterProvider(resolved.meterProvider)) + } + if err := redisotel.InstrumentMetrics(client, metricOpts...); err != nil { + return fmt.Errorf("instrument redis client metrics: %w", err) + } + return nil +} diff --git a/pkg/redisconn/redisconn_test.go b/pkg/redisconn/redisconn_test.go new file mode 100644 index 0000000..44137dd --- /dev/null +++ b/pkg/redisconn/redisconn_test.go @@ -0,0 +1,258 @@ +package redisconn_test + +import ( + "context" + "strings" + "testing" + "time" + + "galaxy/redisconn" + + "github.com/alicebob/miniredis/v2" + "go.opentelemetry.io/otel/metric/noop" + tracenoop 
"go.opentelemetry.io/otel/trace/noop" +) + +func TestDefaultConfigReturnsExpectedTuning(t *testing.T) { + t.Parallel() + + cfg := redisconn.DefaultConfig() + if cfg.OperationTimeout != redisconn.DefaultOperationTimeout { + t.Fatalf("operation timeout = %v, want %v", cfg.OperationTimeout, redisconn.DefaultOperationTimeout) + } + if cfg.DB != redisconn.DefaultDB { + t.Fatalf("db = %d, want %d", cfg.DB, redisconn.DefaultDB) + } +} + +func TestConfigValidateRejectsInvalidValues(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + mutate func(*redisconn.Config) + wantSub string + }{ + { + name: "missing master", + mutate: func(c *redisconn.Config) { + c.MasterAddr = "" + }, + wantSub: "master addr", + }, + { + name: "missing password", + mutate: func(c *redisconn.Config) { + c.Password = "" + }, + wantSub: "password", + }, + { + name: "blank replica entry", + mutate: func(c *redisconn.Config) { + c.ReplicaAddrs = []string{" "} + }, + wantSub: "replica addr", + }, + { + name: "negative db", + mutate: func(c *redisconn.Config) { + c.DB = -1 + }, + wantSub: "db must not be negative", + }, + { + name: "non-positive timeout", + mutate: func(c *redisconn.Config) { + c.OperationTimeout = 0 + }, + wantSub: "operation timeout", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + cfg := redisconn.DefaultConfig() + cfg.MasterAddr = "127.0.0.1:6379" + cfg.Password = "secret" + tt.mutate(&cfg) + + err := cfg.Validate() + if err == nil { + t.Fatalf("expected validate error, got nil") + } + if !strings.Contains(err.Error(), tt.wantSub) { + t.Fatalf("error %q does not contain %q", err, tt.wantSub) + } + }) + } +} + +func TestLoadFromEnvHappyPath(t *testing.T) { + const prefix = "TESTSVC" + t.Setenv(prefix+"_REDIS_MASTER_ADDR", "127.0.0.1:6379") + t.Setenv(prefix+"_REDIS_REPLICA_ADDRS", "127.0.0.1:6380, 127.0.0.1:6381 ,") + t.Setenv(prefix+"_REDIS_PASSWORD", "secret") + t.Setenv(prefix+"_REDIS_DB", "3") + 
t.Setenv(prefix+"_REDIS_OPERATION_TIMEOUT", "500ms") + + cfg, err := redisconn.LoadFromEnv(prefix) + if err != nil { + t.Fatalf("load from env: %v", err) + } + if cfg.MasterAddr != "127.0.0.1:6379" { + t.Fatalf("master addr = %q", cfg.MasterAddr) + } + if cfg.Password != "secret" { + t.Fatalf("password = %q", cfg.Password) + } + if got, want := cfg.DB, 3; got != want { + t.Fatalf("db = %d, want %d", got, want) + } + if got, want := cfg.OperationTimeout, 500*time.Millisecond; got != want { + t.Fatalf("operation timeout = %v, want %v", got, want) + } + if got, want := len(cfg.ReplicaAddrs), 2; got != want { + t.Fatalf("replica count = %d, want %d", got, want) + } +} + +func TestLoadFromEnvRejectsDeprecatedTLSEnabled(t *testing.T) { + const prefix = "TESTSVC" + t.Setenv(prefix+"_REDIS_MASTER_ADDR", "127.0.0.1:6379") + t.Setenv(prefix+"_REDIS_PASSWORD", "secret") + t.Setenv(prefix+"_REDIS_TLS_ENABLED", "true") + + _, err := redisconn.LoadFromEnv(prefix) + if err == nil { + t.Fatal("expected error when TLS_ENABLED is set") + } + if !strings.Contains(err.Error(), "TLS_ENABLED") { + t.Fatalf("error %q should name TLS_ENABLED", err) + } + if !strings.Contains(err.Error(), "ARCHITECTURE.md") { + t.Fatalf("error %q should reference ARCHITECTURE.md", err) + } +} + +func TestLoadFromEnvRejectsDeprecatedUsername(t *testing.T) { + const prefix = "TESTSVC" + t.Setenv(prefix+"_REDIS_MASTER_ADDR", "127.0.0.1:6379") + t.Setenv(prefix+"_REDIS_PASSWORD", "secret") + t.Setenv(prefix+"_REDIS_USERNAME", "anything") + + _, err := redisconn.LoadFromEnv(prefix) + if err == nil { + t.Fatal("expected error when USERNAME is set") + } + if !strings.Contains(err.Error(), "USERNAME") { + t.Fatalf("error %q should name USERNAME", err) + } +} + +func TestLoadFromEnvRequiresPassword(t *testing.T) { + const prefix = "TESTSVC" + t.Setenv(prefix+"_REDIS_MASTER_ADDR", "127.0.0.1:6379") + t.Setenv(prefix+"_REDIS_PASSWORD", "") + + if _, err := redisconn.LoadFromEnv(prefix); err == nil { + 
t.Fatal("expected error when password is empty") + } +} + +func TestNewMasterClientPingsMiniredis(t *testing.T) { + t.Parallel() + + server := miniredis.RunT(t) + server.RequireAuth("secret") + + cfg := redisconn.DefaultConfig() + cfg.MasterAddr = server.Addr() + cfg.Password = "secret" + if err := cfg.Validate(); err != nil { + t.Fatalf("validate: %v", err) + } + + client := redisconn.NewMasterClient(cfg) + t.Cleanup(func() { + _ = client.Close() + }) + + if err := redisconn.Ping(context.Background(), client, cfg.OperationTimeout); err != nil { + t.Fatalf("ping miniredis: %v", err) + } +} + +func TestNewReplicaClientsReturnsExpectedLength(t *testing.T) { + t.Parallel() + + server1 := miniredis.RunT(t) + server2 := miniredis.RunT(t) + + cfg := redisconn.DefaultConfig() + cfg.MasterAddr = "ignored:6379" + cfg.Password = "secret" + cfg.ReplicaAddrs = []string{server1.Addr(), server2.Addr()} + + clients := redisconn.NewReplicaClients(cfg) + t.Cleanup(func() { + for _, client := range clients { + _ = client.Close() + } + }) + + if got, want := len(clients), 2; got != want { + t.Fatalf("client count = %d, want %d", got, want) + } +} + +func TestNewReplicaClientsReturnsNilWhenUnconfigured(t *testing.T) { + t.Parallel() + + cfg := redisconn.DefaultConfig() + cfg.MasterAddr = "ignored:6379" + cfg.Password = "secret" + + if clients := redisconn.NewReplicaClients(cfg); clients != nil { + t.Fatalf("clients = %v, want nil", clients) + } +} + +func TestInstrumentAcceptsNoopProviders(t *testing.T) { + t.Parallel() + + server := miniredis.RunT(t) + server.RequireAuth("secret") + + cfg := redisconn.DefaultConfig() + cfg.MasterAddr = server.Addr() + cfg.Password = "secret" + + client := redisconn.NewMasterClient(cfg) + t.Cleanup(func() { + _ = client.Close() + }) + + err := redisconn.Instrument( + client, + redisconn.WithTracerProvider(tracenoop.NewTracerProvider()), + redisconn.WithMeterProvider(noop.NewMeterProvider()), + ) + if err != nil { + t.Fatalf("instrument: %v", err) + } 
+ + if err := redisconn.Ping(context.Background(), client, cfg.OperationTimeout); err != nil { + t.Fatalf("ping after instrument: %v", err) + } +} + +func TestInstrumentRejectsNilClient(t *testing.T) { + t.Parallel() + + if err := redisconn.Instrument(nil); err == nil { + t.Fatal("expected error for nil client") + } +} diff --git a/pkg/util/go.mod b/pkg/util/go.mod index bf8d6b0..fa66726 100644 --- a/pkg/util/go.mod +++ b/pkg/util/go.mod @@ -5,7 +5,7 @@ go 1.26.0 require ( github.com/google/uuid v1.6.0 github.com/stretchr/testify v1.11.1 - golang.org/x/sys v0.42.0 + golang.org/x/sys v0.43.0 ) require ( diff --git a/pkg/util/go.sum b/pkg/util/go.sum index 9645866..bc472af 100644 --- a/pkg/util/go.sum +++ b/pkg/util/go.sum @@ -5,6 +5,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= -golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/user/Makefile b/user/Makefile new file mode 100644 index 0000000..285c9b1 --- /dev/null +++ b/user/Makefile @@ -0,0 +1,10 @@ +# Makefile for galaxy/user. +# +# The `jet` target regenerates the go-jet/v2 query-builder code under +# internal/adapters/postgres/jet/ against a transient PostgreSQL container +# brought up by cmd/jetgen. Generated code is committed. 
+ +.PHONY: jet + +jet: + go run ./cmd/jetgen diff --git a/user/README.md b/user/README.md index cf8494c..d430c98 100644 --- a/user/README.md +++ b/user/README.md @@ -445,10 +445,66 @@ as: Transport failures, timeouts, and upstream `503` remain transport-level gateway `UNAVAILABLE`, not business results. +## Storage + +`User Service` is split between two backends per +[`../ARCHITECTURE.md §Persistence Backends`](../ARCHITECTURE.md): + +- PostgreSQL is the source of truth for table-shaped business state. The + `user` schema (provisioned externally) holds `accounts`, + `blocked_emails`, `entitlement_records`, `entitlement_snapshots`, + `sanction_records`, `sanction_active`, `limit_records`, `limit_active`. + Embedded migrations in + [`internal/adapters/postgres/migrations`](internal/adapters/postgres/migrations) + apply at process start; a non-zero exit is fatal. +- Redis hosts the two stream publishers — the auxiliary domain-events + stream and the trusted user-lifecycle stream described below. No + durable user state lives on Redis after Stage 3 of `PG_PLAN.md`. + +Schema decisions and the reasoning behind keeping `entitlement_snapshots` +denormalised, expressing eligibility flags as SQL predicates instead of +materialised columns, and sharing one `*redis.Client` between the two +publishers are recorded in +[`docs/postgres-migration.md`](docs/postgres-migration.md). 
+ +### Configuration + +PostgreSQL knobs (consumed via `pkg/postgres`): + +- `USERSERVICE_POSTGRES_PRIMARY_DSN` (required) +- `USERSERVICE_POSTGRES_REPLICA_DSNS` (optional; comma-separated) +- `USERSERVICE_POSTGRES_OPERATION_TIMEOUT` (default `1s`) +- `USERSERVICE_POSTGRES_MAX_OPEN_CONNS` (default `25`) +- `USERSERVICE_POSTGRES_MAX_IDLE_CONNS` (default `5`) +- `USERSERVICE_POSTGRES_CONN_MAX_LIFETIME` (default `30m`) + +Redis knobs (consumed via `pkg/redisconn`): + +- `USERSERVICE_REDIS_MASTER_ADDR` (required) +- `USERSERVICE_REDIS_REPLICA_ADDRS` (optional; comma-separated) +- `USERSERVICE_REDIS_PASSWORD` (required; mandatory by architectural rule) +- `USERSERVICE_REDIS_DB` (default `0`) +- `USERSERVICE_REDIS_OPERATION_TIMEOUT` (default `250ms`) + +Stream-shape knobs: + +- `USERSERVICE_REDIS_DOMAIN_EVENTS_STREAM` (default `user:domain_events`) +- `USERSERVICE_REDIS_DOMAIN_EVENTS_STREAM_MAX_LEN` (default `1024`) +- `USERSERVICE_REDIS_LIFECYCLE_EVENTS_STREAM` (default + `user:lifecycle_events`) +- `USERSERVICE_REDIS_LIFECYCLE_EVENTS_STREAM_MAX_LEN` (default `1024`) + +The deprecated variables `USERSERVICE_REDIS_ADDR`, +`USERSERVICE_REDIS_USERNAME`, `USERSERVICE_REDIS_TLS_ENABLED`, and +`USERSERVICE_REDIS_KEYSPACE_PREFIX` are retired; setting any of them now +fails service start with a clear error message pointing back to +`ARCHITECTURE.md §Persistence Backends`. 
+ ## References - [Internal REST contract](openapi.yaml) - [Service docs index](docs/README.md) +- [PostgreSQL migration decisions](docs/postgres-migration.md) - [Stage 21 decisions](docs/stage21-user-name-display-name.md) - [Stage 22 decisions](docs/stage22-permanent-block-delete-user.md) - [System architecture](../ARCHITECTURE.md) diff --git a/user/cmd/jetgen/main.go b/user/cmd/jetgen/main.go new file mode 100644 index 0000000..6f26eb1 --- /dev/null +++ b/user/cmd/jetgen/main.go @@ -0,0 +1,236 @@ +// Command jetgen regenerates the go-jet/v2 query-builder code under +// galaxy/user/internal/adapters/postgres/jet/ against a transient PostgreSQL +// instance. +// +// The program is intended to be invoked as `go run ./cmd/jetgen` (or via the +// `make jet` Makefile target) from within `galaxy/user`. It is not part of +// the runtime binary. +// +// Steps: +// +// 1. start a postgres:16-alpine container via testcontainers-go +// 2. open it through pkg/postgres as the superuser +// 3. CREATE ROLE userservice and CREATE SCHEMA "user" AUTHORIZATION +// userservice +// 4. open a second pool as userservice with search_path=user and apply the +// embedded goose migrations +// 5. 
run jet's PostgreSQL generator against schema=user, writing into +// ../internal/adapters/postgres/jet +package main + +import ( + "context" + "errors" + "fmt" + "log" + "net/url" + "os" + "path/filepath" + "runtime" + "time" + + "galaxy/postgres" + "galaxy/user/internal/adapters/postgres/migrations" + + jetpostgres "github.com/go-jet/jet/v2/generator/postgres" + testcontainers "github.com/testcontainers/testcontainers-go" + tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres" + "github.com/testcontainers/testcontainers-go/wait" +) + +const ( + postgresImage = "postgres:16-alpine" + superuserName = "galaxy" + superuserPassword = "galaxy" + superuserDatabase = "galaxy_user" + serviceRole = "userservice" + servicePassword = "userservice" + serviceSchema = "user" + containerStartup = 90 * time.Second + defaultOpTimeout = 10 * time.Second + jetOutputDirSuffix = "internal/adapters/postgres/jet" +) + +func main() { + if err := run(context.Background()); err != nil { + log.Fatalf("jetgen: %v", err) + } +} + +func run(ctx context.Context) error { + outputDir, err := jetOutputDir() + if err != nil { + return err + } + + container, err := tcpostgres.Run(ctx, postgresImage, + tcpostgres.WithDatabase(superuserDatabase), + tcpostgres.WithUsername(superuserName), + tcpostgres.WithPassword(superuserPassword), + testcontainers.WithWaitStrategy( + wait.ForLog("database system is ready to accept connections"). + WithOccurrence(2). 
+ WithStartupTimeout(containerStartup), + ), + ) + if err != nil { + return fmt.Errorf("start postgres container: %w", err) + } + defer func() { + if termErr := testcontainers.TerminateContainer(container); termErr != nil { + log.Printf("jetgen: terminate container: %v", termErr) + } + }() + + baseDSN, err := container.ConnectionString(ctx, "sslmode=disable") + if err != nil { + return fmt.Errorf("resolve container dsn: %w", err) + } + + if err := provisionRoleAndSchema(ctx, baseDSN); err != nil { + return err + } + + scopedDSN, err := dsnForServiceRole(baseDSN) + if err != nil { + return err + } + if err := applyMigrations(ctx, scopedDSN); err != nil { + return err + } + + if err := os.RemoveAll(outputDir); err != nil { + return fmt.Errorf("remove existing jet output %q: %w", outputDir, err) + } + if err := os.MkdirAll(filepath.Dir(outputDir), 0o755); err != nil { + return fmt.Errorf("ensure jet output parent: %w", err) + } + + jetCfg := postgres.DefaultConfig() + jetCfg.PrimaryDSN = scopedDSN + jetCfg.OperationTimeout = defaultOpTimeout + jetDB, err := postgres.OpenPrimary(ctx, jetCfg) + if err != nil { + return fmt.Errorf("open scoped pool for jet generation: %w", err) + } + defer func() { _ = jetDB.Close() }() + + if err := jetpostgres.GenerateDB(jetDB, serviceSchema, outputDir); err != nil { + return fmt.Errorf("jet generate: %w", err) + } + + log.Printf("jetgen: generated jet code into %s (schema=%s)", outputDir, serviceSchema) + return nil +} + +func provisionRoleAndSchema(ctx context.Context, baseDSN string) error { + cfg := postgres.DefaultConfig() + cfg.PrimaryDSN = baseDSN + cfg.OperationTimeout = defaultOpTimeout + db, err := postgres.OpenPrimary(ctx, cfg) + if err != nil { + return fmt.Errorf("open admin pool: %w", err) + } + defer func() { _ = db.Close() }() + + statements := []string{ + fmt.Sprintf(`DO $$ BEGIN + IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = %s) THEN + CREATE ROLE %s LOGIN PASSWORD %s; + END IF; + END $$;`, 
sqlLiteral(serviceRole), sqlIdentifier(serviceRole), sqlLiteral(servicePassword)), + fmt.Sprintf(`CREATE SCHEMA IF NOT EXISTS %s AUTHORIZATION %s;`, + sqlIdentifier(serviceSchema), sqlIdentifier(serviceRole)), + fmt.Sprintf(`GRANT USAGE ON SCHEMA %s TO %s;`, + sqlIdentifier(serviceSchema), sqlIdentifier(serviceRole)), + } + for _, statement := range statements { + if _, err := db.ExecContext(ctx, statement); err != nil { + return fmt.Errorf("provision %q/%q: %w", serviceSchema, serviceRole, err) + } + } + return nil +} + +func dsnForServiceRole(baseDSN string) (string, error) { + parsed, err := url.Parse(baseDSN) + if err != nil { + return "", fmt.Errorf("parse base dsn: %w", err) + } + values := url.Values{} + values.Set("search_path", serviceSchema) + values.Set("sslmode", "disable") + scoped := url.URL{ + Scheme: parsed.Scheme, + User: url.UserPassword(serviceRole, servicePassword), + Host: parsed.Host, + Path: parsed.Path, + RawQuery: values.Encode(), + } + return scoped.String(), nil +} + +func applyMigrations(ctx context.Context, dsn string) error { + cfg := postgres.DefaultConfig() + cfg.PrimaryDSN = dsn + cfg.OperationTimeout = defaultOpTimeout + db, err := postgres.OpenPrimary(ctx, cfg) + if err != nil { + return fmt.Errorf("open scoped pool: %w", err) + } + defer func() { _ = db.Close() }() + + if err := postgres.Ping(ctx, db, defaultOpTimeout); err != nil { + return err + } + if err := postgres.RunMigrations(ctx, db, migrations.FS(), "."); err != nil { + return fmt.Errorf("run migrations: %w", err) + } + return nil +} + +// jetOutputDir returns the absolute path that jet should write into. We rely +// on the runtime caller info to anchor it to galaxy/user regardless of the +// invoking working directory. 
+func jetOutputDir() (string, error) { + _, file, _, ok := runtime.Caller(0) + if !ok { + return "", errors.New("resolve runtime caller for jet output path") + } + dir := filepath.Dir(file) + // dir = .../galaxy/user/cmd/jetgen + moduleRoot := filepath.Clean(filepath.Join(dir, "..", "..")) + return filepath.Join(moduleRoot, jetOutputDirSuffix), nil +} + +func sqlIdentifier(name string) string { + return `"` + escapeDoubleQuotes(name) + `"` +} + +func sqlLiteral(value string) string { + return "'" + escapeSingleQuotes(value) + "'" +} + +func escapeDoubleQuotes(value string) string { + out := make([]byte, 0, len(value)) + for index := 0; index < len(value); index++ { + if value[index] == '"' { + out = append(out, '"', '"') + continue + } + out = append(out, value[index]) + } + return string(out) +} + +func escapeSingleQuotes(value string) string { + out := make([]byte, 0, len(value)) + for index := 0; index < len(value); index++ { + if value[index] == '\'' { + out = append(out, '\'', '\'') + continue + } + out = append(out, value[index]) + } + return string(out) +} diff --git a/user/docs/README.md b/user/docs/README.md index 47d017e..56d222a 100644 --- a/user/docs/README.md +++ b/user/docs/README.md @@ -10,6 +10,13 @@ Sections: - [Operator runbook](runbook.md) - [Contract examples](examples.md) +Decision records: + +- [PostgreSQL migration](postgres-migration.md) — schema and storage + decisions landed by `PG_PLAN.md §3` +- [Stage 21 — `user_name` + `display_name` refactor](stage21-user-name-display-name.md) +- [Stage 22 — `permanent_block` + `DeleteUser` soft-delete](stage22-permanent-block-delete-user.md) + Primary references: - [`../README.md`](../README.md) for stable service scope and business rules diff --git a/user/docs/postgres-migration.md b/user/docs/postgres-migration.md new file mode 100644 index 0000000..ff2ab06 --- /dev/null +++ b/user/docs/postgres-migration.md @@ -0,0 +1,206 @@ +# PostgreSQL Migration + +PG_PLAN.md §3 migrated `galaxy/user` from a 
Redis-only durable store to the +steady-state split codified in `ARCHITECTURE.md §Persistence Backends`: +PostgreSQL is the source of truth for table-shaped business state, and Redis +keeps only the two streams that publish auxiliary domain events +(`user:domain_events`) and trusted user-lifecycle events +(`user:lifecycle_events`). + +This document records the schema decisions and the non-obvious agreements +behind them. Use it together with the migration script +(`internal/adapters/postgres/migrations/00001_init.sql`) and the runtime +wiring (`internal/app/runtime.go`). + +## Outcomes + +- Schema `user` (provisioned externally) holds the durable state: `accounts`, + `blocked_emails`, `entitlement_records`, `entitlement_snapshots`, + `sanction_records`, `sanction_active`, `limit_records`, `limit_active`. +- The runtime opens one PostgreSQL pool via `pkg/postgres.OpenPrimary`, + applies embedded goose migrations strictly before any HTTP listener + becomes ready, and exits non-zero when migration or ping fails. +- The runtime opens one shared `*redis.Client` via + `pkg/redisconn.NewMasterClient` and passes it to both stream publishers + (`internal/adapters/redis/domainevents`, + `internal/adapters/redis/lifecycleevents`); the publishers no longer hold + their own connection topology fields. +- `internal/adapters/redis/userstore/` and the entire + `internal/adapters/redisstate/` package are removed. The Redis Lua scripts, + Watch/Multi optimistic-concurrency loops, and ZSET indexes are gone. +- Configuration drops `USERSERVICE_REDIS_USERNAME`, + `USERSERVICE_REDIS_TLS_ENABLED`, and `USERSERVICE_REDIS_KEYSPACE_PREFIX`. + `USERSERVICE_REDIS_ADDR` is replaced by + `USERSERVICE_REDIS_MASTER_ADDR` + optional + `USERSERVICE_REDIS_REPLICA_ADDRS`. Postgres-specific knobs live under + `USERSERVICE_POSTGRES_*` per the architectural rule. + +## Decisions + +### 1. 
One schema, externally-provisioned role + +**Decision.** The `user` schema and the matching `userservice` role are +created outside the migration sequence (in tests, by +`integration/internal/harness/postgres_container.go::EnsureRoleAndSchema`; +in production, by an ops init script not in scope for this stage). The +embedded migration `00001_init.sql` only contains DDL for tables and +indexes and assumes it runs as the schema owner with `search_path=user`. + +**Why.** Mixing role creation, schema creation, and table DDL into one +script forces every consumer of the migration to run as a superuser. The +schema-per-service architectural rule +(`ARCHITECTURE.md §Persistence Backends`) lines up neatly with the +operational split: ops provisions roles and schemas, the service applies +schema-scoped migrations. + +### 2. `entitlement_snapshots` stays denormalised + +**Decision.** A dedicated `entitlement_snapshots` table holds exactly one +row per `user_id` mirroring the current effective fields (`plan_code`, +`is_paid`, `starts_at`, `ends_at`, `source`, `actor_*`, `reason_code`, +`updated_at`). Lifecycle operations (`Grant`, `Extend`, `Revoke`, +`RepairExpired`) write the history row and the snapshot row inside one +transaction. + +**Why.** The lobby-eligibility hot-path reads exactly one row per user; a +JOIN over `entitlement_records` to compute the current segment would add +latency and wire-format complexity. Keeping the snapshot denormalised +matches the previous Redis shape where the hot read returned a +pre-materialised JSON blob, which preserves the existing service-layer +contract and the public REST envelope. + +### 3. `sanction_active` / `limit_active` are the source of truth for "active" + +**Decision.** The active state of a sanction or a user-specific limit is +expressed by a small dedicated table (`sanction_active`, `limit_active`) +whose primary key is `(user_id, code)`. Each row references the matching +history record by `record_id`. 
Lifecycle operations maintain both tables +inside one transaction. + +**Why.** The lobby-eligibility hot path needs to enumerate active +sanctions/limits without scanning the full history. Encoding "active" +as a partial index on `removed_at IS NULL` would still require dedup +because a user can apply, remove, and re-apply the same code. Two narrow +tables let the same predicates that the Redis adapter encoded as +`active` keys remain index-only. + +### 4. Eligibility flags are computed predicates, not stored columns + +**Decision.** No `can_login`, `can_create_private_game`, `can_join_game` +columns or indexes exist. The admin listing surface (and the lobby +eligibility snapshot) compute these from `entitlement_snapshots` and +`sanction_active` at read time. + +**Why.** Stage 21 expanded the eligibility marker catalogue and Stage 22 +added `permanent_block`. Each addition would have required schema work +plus a backfill if eligibility flags were materialised columns. Computed +predicates push that complexity into one place — the SQL query — and +keep the schema small. + +### 5. Atomic flows use explicit `BEGIN … COMMIT` with per-row `FOR UPDATE` + +**Decision.** Composite operations (`AuthDirectoryStore.{Resolve, +Ensure, Block*}`, `EntitlementLifecycleStore.{Grant, Extend, Revoke, +RepairExpired}`, `PolicyLifecycleStore.{ApplySanction, RemoveSanction, +SetLimit, RemoveLimit}`) execute inside `store.withTx` and acquire row +locks with `SELECT … FOR UPDATE` on the rows they intend to mutate. +Optimistic-replacement guards (`Expected*Record`, `Expected*Snapshot`) +are validated against the locked rows before the write goes through; +mismatches surface as `ports.ErrConflict`. + +**Why.** PostgreSQL's default `READ COMMITTED` isolation plus row-level +locks gives us the serialisation property the previous Redis +WATCH/MULTI loops achieved without needing the application to retry on +optimistic-failure errors. 
The explicit `FOR UPDATE` keeps intent +visible; ad-hoc CTE patterns would obscure the locking shape. + +### 6. Query layer is `go-jet/jet/v2` + +**Decision.** All `userstore` packages build SQL through the jet +builder API (`pgtable.
.INSERT/SELECT/UPDATE/DELETE` plus the +`pg.AND/OR/SET/...` DSL). `cmd/jetgen` (invoked via `make jet`) brings +up a transient PostgreSQL container, applies the embedded migrations, +and runs `github.com/go-jet/jet/v2/generator/postgres.GenerateDB` +against the provisioned schema; the generated table/model code lives +under `internal/adapters/postgres/jet/user/{model,table}/*.go` and is +committed to the repo, so build consumers do not need Docker. +Statements are run through the `database/sql` API +(`stmt.Sql() → db.Exec/Query/QueryRow`); manual `rowScanner` helpers +preserve domain-type marshalling. + +**Why.** Aligns with `PG_PLAN.md` §Library stack ("Query layer: +`github.com/go-jet/jet/v2` (PostgreSQL dialect). Generated code lives +under each service `internal/adapters/postgres/jet/`, regenerated via +a `make jet` target and committed to the repo"). Constructs the jet +builder does not cover natively (`FOR UPDATE`, keyset-pagination +row-comparison, partial UNIQUE WHERE in `CREATE INDEX`) are expressed +through the per-DSL helpers (`.FOR(pg.UPDATE())`, `OR/AND` expansion +of `(created_at, user_id) < (…)`). The ports contract and the schema +do not change. + +### 7. Redis publishers share one `*redis.Client` + +**Decision.** `internal/app/runtime.go` constructs one +`redisconn.NewMasterClient(cfg.Redis.Conn)` and passes it to both +`domainevents.New(client, cfg)` and `lifecycleevents.New(client, +cfg)`. The publishers no longer carry connection-topology fields and +no longer close the client; the runtime owns it. + +**Why.** Each subsequent PG_PLAN stage (Mail, Notification, Lobby) +ships a similar duo of stream publishers; sharing one client is the +shape we want all stages to converge on. Per-publisher clients +multiplied TCP connections, ping points, and OpenTelemetry +instrumentation hooks for no functional benefit. + +### 8. 
Mandatory Redis password in tests as well + +**Decision.** Unit tests for the publishers configure +`miniredis.RequireAuth("integration")` and pass a matching password +through their direct `redis.NewClient(...)` construction. The runtime +contract test +(`runtime_contract_test.go::newRuntimeContractHarness`) does the same +plus boots a Postgres container. + +**Why.** The architectural rule forbids password-less Redis +connections; carrying the constraint into tests prevents the rule +from drifting. + +### 9. Listing surface keeps storage-thin pagination + +**Decision.** `UserListStore.ListUserIDs` paginates only on +`(created_at DESC, user_id DESC)` with keyset cursors carried by the +opaque page token. Filter matrix evaluation (paid_state, +declared_country, sanction_code, limit_code, can_*) is performed by +the service-layer `adminusers.Lister`, which loads each candidate +through the per-user loader. This mirrors the previous Redis +behaviour exactly. + +**Why.** Pushing the filter matrix into SQL is desirable — it eliminates +candidate over-fetching — but doing it without changing the public +`UserListStore.ListUserIDs` contract (which returns a page of +`UserID`, not full records) requires a JOIN-driven query. That work +is a non-breaking optimisation and is intentionally deferred so this +stage focuses on the storage cut-over rather than throughput +improvements. The page-token wire format is preserved bit-for-bit so +already-issued tokens keep working. + +## Cross-References + +- `PG_PLAN.md §3` (Stage 3 — User Service migration / pilot). +- `ARCHITECTURE.md §Persistence Backends`. +- `internal/adapters/postgres/migrations/00001_init.sql` and + `internal/adapters/postgres/migrations/migrations.go`. +- `internal/adapters/postgres/userstore/{store,accounts,blocked_emails, + auth_directory,entitlement_store,policy_store,list_store,page_token, + helpers}.go` plus the testcontainers-backed unit suite under + `userstore/{harness,store}_test.go`. 
+- `internal/adapters/postgres/jet/user/{model,table}/*.go` (committed + generated code) plus `cmd/jetgen/main.go` and the `make jet` + Makefile target that regenerate it. +- `internal/config/config.go` (`PostgresConfig`, `RedisConfig` reshape). +- `internal/app/runtime.go` (PG pool open + migration + shared Redis + client wiring). +- `internal/adapters/redis/{domainevents,lifecycleevents}/publisher.go` + (refactored to accept the shared `*redis.Client`). +- `runtime_contract_test.go::startPostgresForContractTest` (shows the + inline Postgres bootstrap used by the existing runtime contract). diff --git a/user/docs/runbook.md b/user/docs/runbook.md index d4588e4..53eed83 100644 --- a/user/docs/runbook.md +++ b/user/docs/runbook.md @@ -32,20 +32,46 @@ additional process-level operational endpoint. ## Common Failure Modes +### PostgreSQL unavailable + +Symptoms: + +- process fails during startup with `ping postgres` or `run postgres + migrations` in the error chain +- readiness probe never reports healthy, internal API never opens +- internal API returns `503 service_unavailable` if connectivity is lost + after start + +Checks: + +- DSN reachable from the service host: `psql "$USERSERVICE_POSTGRES_PRIMARY_DSN" -c "select 1"` +- `userservice` role exists with `LOGIN` and the configured password +- Schema `user` exists and is owned (or grant-accessible) by the + `userservice` role: `\dn user` +- Embedded migrations applied: query `goose_db_version` (the schema-qualified + goose bookkeeping table) and confirm the latest version matches the + binary's expectation +- Pool tuning sane: + `USERSERVICE_POSTGRES_MAX_OPEN_CONNS` ≥ peak request fan-out + ### Redis unavailable Symptoms: -- process fails during startup -- internal API returns `503 service_unavailable` -- domain events stop being published +- process fails during startup with `ping redis master` in the error chain +- domain events / lifecycle events stop being published +- internal API still serves reads/writes 
(PostgreSQL is the source of truth); + publishers degrade gracefully but operators must investigate Checks: -- connectivity to `USERSERVICE_REDIS_ADDR` -- Redis ACL credentials -- Redis DB number -- TLS setting mismatch +- connectivity to `USERSERVICE_REDIS_MASTER_ADDR` +- `USERSERVICE_REDIS_PASSWORD` matches the Redis configuration +- Redis DB number is reachable and unblocked +- The retired variables `USERSERVICE_REDIS_ADDR`, + `USERSERVICE_REDIS_USERNAME`, `USERSERVICE_REDIS_TLS_ENABLED`, + `USERSERVICE_REDIS_KEYSPACE_PREFIX` are not set in the deployment + (`pkg/redisconn.LoadFromEnv` rejects them with a clear error) ### Invalid registration context diff --git a/user/docs/runtime.md b/user/docs/runtime.md index d3240b7..5b6efe0 100644 --- a/user/docs/runtime.md +++ b/user/docs/runtime.md @@ -63,38 +63,67 @@ Intentional omissions: `cmd/userservice` loads config, constructs logging and telemetry, and then creates the runtime through `internal/app.NewRuntime`. -The runtime wires: +The runtime wires, in order: -- Redis-backed stores for accounts, entitlement snapshots, sanctions, limits, - and listing indexes +- one shared `*redis.Client` opened through `pkg/redisconn` plus a Ping +- one PostgreSQL pool opened through `pkg/postgres`, instrumented with + `db.sql.connection.*` metrics, pinged, and migrated forward via the + embedded `internal/adapters/postgres/migrations` filesystem +- the PostgreSQL-backed user store from + `internal/adapters/postgres/userstore` (accounts, blocked-emails, + entitlement snapshot/history/lifecycle, sanction history/lifecycle, + limit history/lifecycle, listing index) +- two Redis Stream publishers + (`internal/adapters/redis/domainevents` for auxiliary domain events, + `internal/adapters/redis/lifecycleevents` for trusted user-lifecycle + events) sharing the same `*redis.Client` - the trusted internal HTTP router - the optional admin metrics listener -- the optional Redis-backed domain-event publishers - service-local helpers for 
clock, IDs, and validation/policy adapters -Startup fails fast when Redis connectivity is unavailable or configuration is -invalid. +Startup fails fast when Redis or PostgreSQL connectivity is unavailable, the +mandatory connection-topology environment variables are missing, the +embedded migration sequence cannot be applied, or configuration is otherwise +invalid. The HTTP listeners do not open until every dependency check passes. -## Redis Namespaces +## Storage Backends -The service uses one Redis keyspace prefix plus one auxiliary domain-events -stream. +The service is split between two backends per +[`../../ARCHITECTURE.md §Persistence Backends`](../../ARCHITECTURE.md): -Configuration: +PostgreSQL holds source-of-truth durable state in the `user` schema: -- `USERSERVICE_REDIS_KEYSPACE_PREFIX` -- `USERSERVICE_REDIS_DOMAIN_EVENTS_STREAM` -- `USERSERVICE_REDIS_DOMAIN_EVENTS_STREAM_MAX_LEN` +- `accounts` (with `email` and `user_name` UNIQUE; `deleted_at` records the + Stage 22 soft-delete state) +- `blocked_emails` (one row per blocked address) +- `entitlement_records` plus the denormalised `entitlement_snapshots` + one-row-per-user current view +- `sanction_records` plus `sanction_active(user_id, sanction_code)` +- `limit_records` plus `limit_active(user_id, limit_code)` -The keyspace stores source-of-truth business state. The stream carries -post-commit auxiliary domain events and must not be treated as the source of -truth. +Indexes carry the listing surface (`accounts(created_at DESC, user_id +DESC)`), reverse-lookup filters (`accounts(declared_country)`, +`entitlement_snapshots(plan_code, is_paid)`, +`entitlement_snapshots(ends_at) WHERE is_paid AND ends_at IS NOT NULL`, +`sanction_active(sanction_code)`, `limit_active(limit_code)`), and the +per-user history scans. + +Redis hosts only the two Stream publishers +(`USERSERVICE_REDIS_DOMAIN_EVENTS_STREAM`, +`USERSERVICE_REDIS_LIFECYCLE_EVENTS_STREAM`). 
It does not store any +durable user state after Stage 3 of `PG_PLAN.md`. + +Decision records: +[`postgres-migration.md`](postgres-migration.md) for the schema and +storage decisions. ## Configuration Groups Required for all process starts: -- `USERSERVICE_REDIS_ADDR` +- `USERSERVICE_REDIS_MASTER_ADDR` +- `USERSERVICE_REDIS_PASSWORD` +- `USERSERVICE_POSTGRES_PRIMARY_DSN` Core process config: @@ -116,16 +145,31 @@ Admin HTTP config: - `USERSERVICE_ADMIN_HTTP_READ_TIMEOUT` - `USERSERVICE_ADMIN_HTTP_IDLE_TIMEOUT` -Redis connectivity and namespace config: +Redis connectivity (consumed by `pkg/redisconn`): -- `USERSERVICE_REDIS_USERNAME` -- `USERSERVICE_REDIS_PASSWORD` +- `USERSERVICE_REDIS_REPLICA_ADDRS` (optional, comma-separated) - `USERSERVICE_REDIS_DB` -- `USERSERVICE_REDIS_TLS_ENABLED` - `USERSERVICE_REDIS_OPERATION_TIMEOUT` -- `USERSERVICE_REDIS_KEYSPACE_PREFIX` + +Stream-shape (kept service-local): + - `USERSERVICE_REDIS_DOMAIN_EVENTS_STREAM` - `USERSERVICE_REDIS_DOMAIN_EVENTS_STREAM_MAX_LEN` +- `USERSERVICE_REDIS_LIFECYCLE_EVENTS_STREAM` +- `USERSERVICE_REDIS_LIFECYCLE_EVENTS_STREAM_MAX_LEN` + +PostgreSQL connectivity (consumed by `pkg/postgres`): + +- `USERSERVICE_POSTGRES_REPLICA_DSNS` (optional, comma-separated) +- `USERSERVICE_POSTGRES_OPERATION_TIMEOUT` +- `USERSERVICE_POSTGRES_MAX_OPEN_CONNS` +- `USERSERVICE_POSTGRES_MAX_IDLE_CONNS` +- `USERSERVICE_POSTGRES_CONN_MAX_LIFETIME` + +The retired Redis variables `USERSERVICE_REDIS_ADDR`, +`USERSERVICE_REDIS_USERNAME`, `USERSERVICE_REDIS_TLS_ENABLED`, +`USERSERVICE_REDIS_KEYSPACE_PREFIX` produce a startup error from +`pkg/redisconn` if set; unset them before starting the service. 
Telemetry: diff --git a/user/go.mod b/user/go.mod index 2ff646c..166e1e6 100644 --- a/user/go.mod +++ b/user/go.mod @@ -3,13 +3,18 @@ module galaxy/user go 1.26.1 require ( + galaxy/postgres v0.0.0-00010101000000-000000000000 + galaxy/redisconn v0.0.0-00010101000000-000000000000 github.com/alicebob/miniredis/v2 v2.37.0 - github.com/disciplinedware/go-confusables v0.1.1 github.com/getkin/kin-openapi v0.135.0 github.com/gin-gonic/gin v1.12.0 + github.com/go-jet/jet/v2 v2.14.1 + github.com/jackc/pgx/v5 v5.9.2 github.com/prometheus/client_golang v1.23.2 github.com/redis/go-redis/v9 v9.18.0 github.com/stretchr/testify v1.11.1 + github.com/testcontainers/testcontainers-go v0.42.0 + github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0 go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.68.0 go.opentelemetry.io/otel v1.43.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.43.0 @@ -27,19 +32,35 @@ require ( ) require ( + dario.cat/mergo v1.0.2 // indirect + github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/XSAM/otelsql v0.42.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bytedance/gopkg v0.1.4 // indirect github.com/bytedance/sonic v1.15.0 // indirect github.com/bytedance/sonic/loader v0.5.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudwego/base64x v0.1.6 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/cpuguy83/dockercfg v0.3.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 
+ github.com/distribution/reference v0.6.0 // indirect + github.com/docker/go-connections v0.7.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/ebitengine/purego v0.10.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/gabriel-vasile/mimetype v1.4.13 // indirect github.com/gin-contrib/sse v1.1.1 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-playground/locales v0.14.1 // indirect @@ -49,45 +70,85 @@ require ( github.com/goccy/go-yaml v1.19.2 // indirect github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.14.3 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.3 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/pgtype v1.14.4 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.18.5 // indirect github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.10 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-isatty v0.0.21 // indirect + github.com/mfridman/interpolate v0.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/go-archive v0.2.0 // indirect + github.com/moby/moby/api v1.54.2 // indirect + 
github.com/moby/moby/client v0.4.1 // indirect + github.com/moby/patternmatcher v0.6.1 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect + github.com/moby/sys/user v0.4.0 // indirect + github.com/moby/sys/userns v0.1.0 // indirect + github.com/moby/term v0.5.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/oasdiff/yaml v0.0.9 // indirect github.com/oasdiff/yaml3 v0.0.9 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect github.com/pelletier/go-toml/v2 v2.3.0 // indirect github.com/perimeterx/marshmallow v1.1.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/pressly/goose/v3 v3.27.1 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.67.5 // indirect github.com/prometheus/otlptranslator v1.0.0 // indirect github.com/prometheus/procfs v0.20.1 // indirect github.com/quic-go/qpack v0.6.0 // indirect github.com/quic-go/quic-go v0.59.0 // indirect + github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 // indirect + github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 // indirect + github.com/sethvargo/go-retry v0.3.0 // indirect + github.com/shirou/gopsutil/v4 v4.26.3 // indirect + github.com/sirupsen/logrus v1.9.4 // indirect + github.com/tklauser/go-sysconf v0.3.16 // indirect + github.com/tklauser/numcpus v0.11.0 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.3.1 // indirect github.com/woodsbury/decimal128 v1.3.0 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect + github.com/yusufpapurcu/wmi 
v1.2.4 // indirect go.mongodb.org/mongo-driver/v2 v2.5.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 // indirect go.opentelemetry.io/proto/otlp v1.10.0 // indirect go.uber.org/atomic v1.11.0 // indirect + go.uber.org/multierr v1.11.0 // indirect go.yaml.in/yaml/v2 v2.4.4 // indirect golang.org/x/arch v0.25.0 // indirect - golang.org/x/crypto v0.49.0 // indirect - golang.org/x/net v0.52.0 // indirect - golang.org/x/sys v0.42.0 // indirect + golang.org/x/crypto v0.50.0 // indirect + golang.org/x/net v0.53.0 // indirect + golang.org/x/sync v0.20.0 // indirect + golang.org/x/sys v0.43.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 // indirect google.golang.org/grpc v1.80.0 // indirect google.golang.org/protobuf v1.36.11 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) + +replace galaxy/postgres => ../pkg/postgres + +replace galaxy/redisconn => ../pkg/redisconn diff --git a/user/go.sum b/user/go.sum index bc5e821..933ab7f 100644 --- a/user/go.sum +++ b/user/go.sum @@ -1,3 +1,15 @@ +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= +github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod 
h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/XSAM/otelsql v0.42.0 h1:Li0xF4eJUxG2e0x3D4rvRlys1f27yJKvjTh7ljkUP5o= +github.com/XSAM/otelsql v0.42.0/go.mod h1:4mOrEv+cS1KmKzrvTktvJnstr5GtKSAK+QHvFR9OcpI= github.com/alicebob/miniredis/v2 v2.37.0 h1:RheObYW32G1aiJIj81XVt78ZHJpHonHLHW7OLIshq68= github.com/alicebob/miniredis/v2 v2.37.0/go.mod h1:TcL7YfarKPGDAthEtl5NBeHZfeUQj6OXMm/+iu5cLMM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -12,20 +24,48 @@ github.com/bytedance/sonic v1.15.0 h1:/PXeWFaR5ElNcVE84U0dOHjiMHQOwNIx3K4ymzh/uS github.com/bytedance/sonic v1.15.0/go.mod h1:tFkWrPz0/CUCLEF4ri4UkHekCIcdnkqXw9VduqpJh0k= github.com/bytedance/sonic/loader v0.5.1 h1:Ygpfa9zwRCCKSlrp5bBP/b/Xzc3VxsAW+5NIYXrOOpI= github.com/bytedance/sonic/loader v0.5.1/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M= github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU= +github.com/cockroachdb/apd v1.1.0/go.mod 
h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= +github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= +github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous 
v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/disciplinedware/go-confusables v0.1.1 h1:l/JVOsdrEDHo7nvL+tQfRO1F14UyuuDm1Uvv3Nqmq9Q= -github.com/disciplinedware/go-confusables v0.1.1/go.mod h1:2hAXIAtpSqx+tMKdCzgRNv4J/kmz/oGfSHTBGJjVgfc= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/go-connections v0.7.0 h1:6SsRfJddP22WMrCkj19x9WKjEDTB+ahsdiGYf0mN39c= +github.com/docker/go-connections v0.7.0/go.mod h1:no1qkHdjq7kLMGUXYAduOhYPSJxxvgWBh7ogVvptn3Q= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU= +github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/gabriel-vasile/mimetype v1.4.13 h1:46nXokslUBsAJE/wMsp5gtO500a4F3Nkz9Ufpk2AcUM= github.com/gabriel-vasile/mimetype v1.4.13/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= github.com/getkin/kin-openapi v0.135.0 h1:751SjYfbiwqukYuVjwYEIKNfrSwS5YpA7DZnKSwQgtg= @@ -34,11 +74,17 @@ github.com/gin-contrib/sse v1.1.1 h1:uGYpNwTacv5R68bSGMapo62iLTRa9l5zxGCps4hK6ko github.com/gin-contrib/sse v1.1.1/go.mod h1:QXzuVkA0YO7o/gun03UI1Q+FTI8ZV/n5t03kIQAI89s= github.com/gin-gonic/gin v1.12.0 h1:b3YAbrZtnf8N//yjKeU2+MQsh2mY5htkZidOM7O0wG8= github.com/gin-gonic/gin v1.12.0/go.mod 
h1:VxccKfsSllpKshkBWgVgRniFFAzFb9csfngsqANjnLc= +github.com/go-jet/jet/v2 v2.14.1 h1:wsfD9e7CGP9h46+IFNlftfncBcmVnKddikbTtapQM3M= +github.com/go-jet/jet/v2 v2.14.1/go.mod h1:dqTAECV2Mo3S2NFjbm4vJ1aDruZjhaJ1RAAR8rGUkkc= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= @@ -51,40 +97,136 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.30.2 h1:JiFIMtSSHb2/XBUbWM4i/MpeQm9ZK2xqPNk8vgvu5JQ= github.com/go-playground/validator/v10 v10.30.2/go.mod h1:mAf2pIOVXjTEBrwUMGKkCWKKPs9NheYGabeB04txQSc= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/goccy/go-json v0.10.6 h1:p8HrPJzOakx/mn/bQtjgNjdTcN+/S6FcG2CTtQOrHVU= 
github.com/goccy/go-json v0.10.6/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/goccy/go-yaml v1.19.2 h1:PmFC1S6h8ljIz6gMRBopkjP1TVT7xuwrButHID66PoM= github.com/goccy/go-yaml v1.19.2/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs= github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod 
h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= 
+github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.14.4 h1:fKuNiCumbKTAIxQwXfB/nsrnkEI6bPJrrSiMKgbJ2j8= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= +github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA= +github.com/jackc/pgx/v5 v5.9.2 h1:3ZhOzMWnR4yJ+RW1XImIPsD1aNSz4T4fyP7zlQb56hw= +github.com/jackc/pgx/v5 v5.9.2/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod 
h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE= +github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ= github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= 
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= +github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.21 
h1:xYae+lCNBP7QuW4PUnNG61ffM4hVIfm+zUzDuSzYLGs= +github.com/mattn/go-isatty v0.0.21/go.mod h1:ZXfXG4SQHsB/w3ZeOYbR0PrPwLy+n6xiMrJlRFqopa4= +github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI= +github.com/mdelapenya/tlscert v0.2.0/go.mod h1:O4njj3ELLnJjGdkN7M/vIVCpZ+Cf0L6muqOG4tLSl8o= +github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY= +github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/go-archive v0.2.0 h1:zg5QDUM2mi0JIM9fdQZWC7U8+2ZfixfTYoHL7rWUcP8= +github.com/moby/go-archive v0.2.0/go.mod h1:mNeivT14o8xU+5q1YnNrkQVpK+dnNe/K6fHqnTg4qPU= +github.com/moby/moby/api v1.54.2 h1:wiat9QAhnDQjA7wk1kh/TqHz2I1uUA7M7t9SAl/JNXg= +github.com/moby/moby/api v1.54.2/go.mod h1:+RQ6wluLwtYaTd1WnPLykIDPekkuyD/ROWQClE83pzs= +github.com/moby/moby/client v0.4.1 h1:DMQgisVoMkmMs7fp3ROSdiBnoAu8+vo3GggFl06M/wY= +github.com/moby/moby/client v0.4.1/go.mod h1:z52C9O2POPOsnxZAy//WtKcQ32P+jT/NGeXu/7nfjGQ= +github.com/moby/patternmatcher v0.6.1 h1:qlhtafmr6kgMIJjKJMDmMWq7WLkKIo23hsrpR3x084U= +github.com/moby/patternmatcher v0.6.1/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= 
+github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -94,17 +236,28 @@ github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= +github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/oasdiff/yaml v0.0.9 h1:zQOvd2UKoozsSsAknnWoDJlSK4lC0mpmjfDsfqNwX48= github.com/oasdiff/yaml v0.0.9/go.mod h1:8lvhgJG4xiKPj3HN5lDow4jZHPlx1i7dIwzkdAo6oAM= github.com/oasdiff/yaml3 v0.0.9 h1:rWPrKccrdUm8J0F3sGuU+fuh9+1K/RdJlWF7O/9yw2g= github.com/oasdiff/yaml3 v0.0.9/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/pelletier/go-toml/v2 v2.3.0 h1:k59bC/lIZREW0/iVaQR8nDHxVq8OVlIzYCOJf421CaM= github.com/pelletier/go-toml/v2 v2.3.0/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= 
github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pressly/goose/v3 v3.27.1 h1:6uEvcprBybDmW4hcz3gYujhARhye+GoWKhEWyzD5sh4= +github.com/pressly/goose/v3 v3.27.1/go.mod h1:maruOxsPnIG2yHHyo8UqKWXYKFcH7Q76csUV7+7KYoM= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= @@ -119,21 +272,59 @@ github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8= github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII= github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SAw= github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU= +github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0 h1:QY4nmPHLFAJjtT5O4OMUEOxP8WVaRNOFpcbmxT2NLZU= +github.com/redis/go-redis/extra/rediscmd/v9 v9.18.0/go.mod h1:WH8cY/0fT41Bsf341qzo8v4nx0GCE8FykAA23IVbVmo= +github.com/redis/go-redis/extra/redisotel/v9 v9.18.0 h1:2dKdoEYBJ0CZCLPiCdvvc7luz3DPwY6hKdzjL6m1eHE= 
+github.com/redis/go-redis/extra/redisotel/v9 v9.18.0/go.mod h1:WzkrVG9ro9BwCQD0eJOWn6AGL4Z1CleGflM45w1hu10= github.com/redis/go-redis/v9 v9.18.0 h1:pMkxYPkEbMPwRdenAzUNyFNrDgHx9U+DrBabWNfSRQs= github.com/redis/go-redis/v9 v9.18.0/go.mod h1:k3ufPphLU5YXwNTUcCRXGxUoF1fqxnhFQmscfkCoDA0= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE= +github.com/sethvargo/go-retry v0.3.0/go.mod h1:mNX17F0C/HguQMyMyJxcnU471gOZGxCLyYaFyAZraas= +github.com/shirou/gopsutil/v4 v4.26.3 h1:2ESdQt90yU3oXF/CdOlRCJxrP+Am1aBYubTMTfxJ1qc= +github.com/shirou/gopsutil/v4 v4.26.3/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w= 
+github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/objx v0.5.3 h1:jmXUvGomnU1o3W/V5h2VEradbpJDwGrzugQQvL0POH4= +github.com/stretchr/objx v0.5.3/go.mod h1:rDQraq+vQZU7Fde9LOZLr8Tax6zZvy4kuNKF+QYS+U0= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/testcontainers/testcontainers-go v0.42.0 h1:He3IhTzTZOygSXLJPMX7n44XtK+qhjat1nI9cneBbUY= 
+github.com/testcontainers/testcontainers-go v0.42.0/go.mod h1:vZjdY1YmUA1qEForxOIOazfsrdyORJAbhi0bp8plN30= +github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0 h1:GCbb1ndrF7OTDiIvxXyItaDab4qkzTFJ48LKFdM7EIo= +github.com/testcontainers/testcontainers-go/modules/postgres v0.42.0/go.mod h1:IRPBaI8jXdrNfD0e4Zm7Fbcgaz5shKxOQv4axiL09xs= +github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA= +github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI= +github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw= +github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/ugorji/go/codec v1.3.1 h1:waO7eEiFDwidsBN6agj1vJQ4AG7lh2yqXyOXqhgQuyY= @@ -142,14 +333,19 @@ github.com/woodsbury/decimal128 v1.3.0 h1:8pffMNWIlC0O5vbyHWFZAt5yWvWcrHA+3ovIIj github.com/woodsbury/decimal128 v1.3.0/go.mod h1:C5UTmyTjW3JftjUFzOVhC20BEQa2a4ZKOB5I6Zjb+ds= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.mongodb.org/mongo-driver/v2 v2.5.0 h1:yXUhImUjjAInNcpTcAlPHiT7bIXhshCTL3jVBkF3xaE= go.mongodb.org/mongo-driver/v2 v2.5.0/go.mod h1:yOI9kBsufol30iFsl1slpdq1I0eHPzybRWdyYUs8K/0= go.opentelemetry.io/auto/sdk v1.2.1 
h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.68.0 h1:5FXSL2s6afUC1bzNzl1iedZZ8yqR7GOhbCoEXtyeK6Q= go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.68.0/go.mod h1:MdHW7tLtkeGJnR4TyOrnd5D0zUGZQB1l84uHCe8hRpE= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0 h1:CqXxU8VOmDefoh0+ztfGaymYbhdB/tT3zs79QaZTNGY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.68.0/go.mod h1:BuhAPThV8PBHBvg8ZzZ/Ok3idOdhWIodywz2xEcRbJo= go.opentelemetry.io/contrib/propagators/b3 v1.43.0 h1:CETqV3QLLPTy5yNrqyMr41VnAOOD4lsRved7n4QG00A= go.opentelemetry.io/contrib/propagators/b3 v1.43.0/go.mod h1:Q4mCiCdziYzpNR0g+6UqVotAlCDZdzz6L8jwY4knOrw= go.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I= @@ -180,37 +376,114 @@ go.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09 go.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0= go.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g= go.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.6.0 
h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.yaml.in/yaml/v2 v2.4.4 h1:tuyd0P+2Ont/d6e2rl3be67goVK4R6deVxCUX5vyPaQ= go.yaml.in/yaml/v2 v2.4.4/go.mod h1:gMZqIpDtDqOfM0uNfy0SkpRhvUryYH0Z6wdMYcacYXQ= golang.org/x/arch v0.25.0 h1:qnk6Ksugpi5Bz32947rkUgDt9/s5qvqDPl/gBKdMJLE= golang.org/x/arch v0.25.0/go.mod h1:0X+GdSIP+kL5wPmpK7sdkEVTt2XoYP0cSjQSbZBwOi8= -golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4= -golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA= -golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0= -golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo= -golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= +golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI= +golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA= +golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI= +golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY= +golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg= +golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164= 
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4= gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E= google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA= google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 h1:m8qni9SQFH0tJc1X0vmnpw/0t+AImlSvp30sEupozUg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9/go.mod 
h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 h1:XF8+t6QQiS0o9ArVan/HW8Q7cycNPGsJf6GA2nXxYAg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= google.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM= google.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +modernc.org/libc v1.72.1 h1:db1xwJ6u1kE3KHTFTTbe2GCrczHPKzlURP0aDC4NGD0= +modernc.org/libc v1.72.1/go.mod 
h1:HRMiC/PhPGLIPM7GzAFCbI+oSgE3dhZ8FWftmRrHVlY= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= +modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/sqlite v1.49.1 h1:dYGHTKcX1sJ+EQDnUzvz4TJ5GbuvhNJa8Fg6ElGx73U= +modernc.org/sqlite v1.49.1/go.mod h1:m0w8xhwYUVY3H6pSDwc3gkJ/irZT/0YEXwBlhaxQEew= +pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk= +pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= diff --git a/user/internal/adapters/postgres/jet/user/model/accounts.go b/user/internal/adapters/postgres/jet/user/model/accounts.go new file mode 100644 index 0000000..d2766a9 --- /dev/null +++ b/user/internal/adapters/postgres/jet/user/model/accounts.go @@ -0,0 +1,25 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type Accounts struct { + UserID string `sql:"primary_key"` + Email string + UserName string + DisplayName string + PreferredLanguage string + TimeZone string + DeclaredCountry *string + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt *time.Time +} diff --git a/user/internal/adapters/postgres/jet/user/model/blocked_emails.go b/user/internal/adapters/postgres/jet/user/model/blocked_emails.go new file mode 100644 index 0000000..5652651 --- /dev/null +++ b/user/internal/adapters/postgres/jet/user/model/blocked_emails.go @@ -0,0 +1,21 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type BlockedEmails struct { + Email string `sql:"primary_key"` + ReasonCode string + BlockedAt time.Time + ActorType *string + ActorID *string + ResolvedUserID *string +} diff --git a/user/internal/adapters/postgres/jet/user/model/entitlement_records.go b/user/internal/adapters/postgres/jet/user/model/entitlement_records.go new file mode 100644 index 0000000..65280ea --- /dev/null +++ b/user/internal/adapters/postgres/jet/user/model/entitlement_records.go @@ -0,0 +1,29 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type EntitlementRecords struct { + RecordID string `sql:"primary_key"` + UserID string + PlanCode string + Source string + ActorType string + ActorID *string + ReasonCode string + StartsAt time.Time + EndsAt *time.Time + CreatedAt time.Time + ClosedAt *time.Time + ClosedByType *string + ClosedByID *string + ClosedReasonCode *string +} diff --git a/user/internal/adapters/postgres/jet/user/model/entitlement_snapshots.go b/user/internal/adapters/postgres/jet/user/model/entitlement_snapshots.go new file mode 100644 index 0000000..910ffd7 --- /dev/null +++ b/user/internal/adapters/postgres/jet/user/model/entitlement_snapshots.go @@ -0,0 +1,25 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type EntitlementSnapshots struct { + UserID string `sql:"primary_key"` + PlanCode string + IsPaid bool + StartsAt time.Time + EndsAt *time.Time + Source string + ActorType string + ActorID *string + ReasonCode string + UpdatedAt time.Time +} diff --git a/user/internal/adapters/postgres/jet/user/model/goose_db_version.go b/user/internal/adapters/postgres/jet/user/model/goose_db_version.go new file mode 100644 index 0000000..c7f68e8 --- /dev/null +++ b/user/internal/adapters/postgres/jet/user/model/goose_db_version.go @@ -0,0 +1,19 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type GooseDbVersion struct { + ID int32 `sql:"primary_key"` + VersionID int64 + IsApplied bool + Tstamp time.Time +} diff --git a/user/internal/adapters/postgres/jet/user/model/limit_active.go b/user/internal/adapters/postgres/jet/user/model/limit_active.go new file mode 100644 index 0000000..e72730d --- /dev/null +++ b/user/internal/adapters/postgres/jet/user/model/limit_active.go @@ -0,0 +1,15 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +type LimitActive struct { + UserID string `sql:"primary_key"` + LimitCode string `sql:"primary_key"` + RecordID string + Value int32 +} diff --git a/user/internal/adapters/postgres/jet/user/model/limit_records.go b/user/internal/adapters/postgres/jet/user/model/limit_records.go new file mode 100644 index 0000000..772ad27 --- /dev/null +++ b/user/internal/adapters/postgres/jet/user/model/limit_records.go @@ -0,0 +1,28 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type LimitRecords struct { + RecordID string `sql:"primary_key"` + UserID string + LimitCode string + Value int32 + ReasonCode string + ActorType string + ActorID *string + AppliedAt time.Time + ExpiresAt *time.Time + RemovedAt *time.Time + RemovedByType *string + RemovedByID *string + RemovedReasonCode *string +} diff --git a/user/internal/adapters/postgres/jet/user/model/sanction_active.go b/user/internal/adapters/postgres/jet/user/model/sanction_active.go new file mode 100644 index 0000000..4f2da1f --- /dev/null +++ b/user/internal/adapters/postgres/jet/user/model/sanction_active.go @@ -0,0 +1,14 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +type SanctionActive struct { + UserID string `sql:"primary_key"` + SanctionCode string `sql:"primary_key"` + RecordID string +} diff --git a/user/internal/adapters/postgres/jet/user/model/sanction_records.go b/user/internal/adapters/postgres/jet/user/model/sanction_records.go new file mode 100644 index 0000000..b1f5fb2 --- /dev/null +++ b/user/internal/adapters/postgres/jet/user/model/sanction_records.go @@ -0,0 +1,28 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package model + +import ( + "time" +) + +type SanctionRecords struct { + RecordID string `sql:"primary_key"` + UserID string + SanctionCode string + Scope string + ReasonCode string + ActorType string + ActorID *string + AppliedAt time.Time + ExpiresAt *time.Time + RemovedAt *time.Time + RemovedByType *string + RemovedByID *string + RemovedReasonCode *string +} diff --git a/user/internal/adapters/postgres/jet/user/table/accounts.go b/user/internal/adapters/postgres/jet/user/table/accounts.go new file mode 100644 index 0000000..98921f7 --- /dev/null +++ b/user/internal/adapters/postgres/jet/user/table/accounts.go @@ -0,0 +1,105 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var Accounts = newAccountsTable("user", "accounts", "") + +type accountsTable struct { + postgres.Table + + // Columns + UserID postgres.ColumnString + Email postgres.ColumnString + UserName postgres.ColumnString + DisplayName postgres.ColumnString + PreferredLanguage postgres.ColumnString + TimeZone postgres.ColumnString + DeclaredCountry postgres.ColumnString + CreatedAt postgres.ColumnTimestampz + UpdatedAt postgres.ColumnTimestampz + DeletedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type AccountsTable struct { + accountsTable + + EXCLUDED accountsTable +} + +// AS creates new AccountsTable with assigned alias +func (a AccountsTable) AS(alias string) *AccountsTable { + return newAccountsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new AccountsTable with assigned schema name +func (a AccountsTable) FromSchema(schemaName string) *AccountsTable { + return 
newAccountsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new AccountsTable with assigned table prefix +func (a AccountsTable) WithPrefix(prefix string) *AccountsTable { + return newAccountsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new AccountsTable with assigned table suffix +func (a AccountsTable) WithSuffix(suffix string) *AccountsTable { + return newAccountsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newAccountsTable(schemaName, tableName, alias string) *AccountsTable { + return &AccountsTable{ + accountsTable: newAccountsTableImpl(schemaName, tableName, alias), + EXCLUDED: newAccountsTableImpl("", "excluded", ""), + } +} + +func newAccountsTableImpl(schemaName, tableName, alias string) accountsTable { + var ( + UserIDColumn = postgres.StringColumn("user_id") + EmailColumn = postgres.StringColumn("email") + UserNameColumn = postgres.StringColumn("user_name") + DisplayNameColumn = postgres.StringColumn("display_name") + PreferredLanguageColumn = postgres.StringColumn("preferred_language") + TimeZoneColumn = postgres.StringColumn("time_zone") + DeclaredCountryColumn = postgres.StringColumn("declared_country") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + DeletedAtColumn = postgres.TimestampzColumn("deleted_at") + allColumns = postgres.ColumnList{UserIDColumn, EmailColumn, UserNameColumn, DisplayNameColumn, PreferredLanguageColumn, TimeZoneColumn, DeclaredCountryColumn, CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn} + mutableColumns = postgres.ColumnList{EmailColumn, UserNameColumn, DisplayNameColumn, PreferredLanguageColumn, TimeZoneColumn, DeclaredCountryColumn, CreatedAtColumn, UpdatedAtColumn, DeletedAtColumn} + defaultColumns = postgres.ColumnList{DisplayNameColumn} + ) + + return accountsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + 
UserID: UserIDColumn, + Email: EmailColumn, + UserName: UserNameColumn, + DisplayName: DisplayNameColumn, + PreferredLanguage: PreferredLanguageColumn, + TimeZone: TimeZoneColumn, + DeclaredCountry: DeclaredCountryColumn, + CreatedAt: CreatedAtColumn, + UpdatedAt: UpdatedAtColumn, + DeletedAt: DeletedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/user/internal/adapters/postgres/jet/user/table/blocked_emails.go b/user/internal/adapters/postgres/jet/user/table/blocked_emails.go new file mode 100644 index 0000000..084fca0 --- /dev/null +++ b/user/internal/adapters/postgres/jet/user/table/blocked_emails.go @@ -0,0 +1,93 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var BlockedEmails = newBlockedEmailsTable("user", "blocked_emails", "") + +type blockedEmailsTable struct { + postgres.Table + + // Columns + Email postgres.ColumnString + ReasonCode postgres.ColumnString + BlockedAt postgres.ColumnTimestampz + ActorType postgres.ColumnString + ActorID postgres.ColumnString + ResolvedUserID postgres.ColumnString + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type BlockedEmailsTable struct { + blockedEmailsTable + + EXCLUDED blockedEmailsTable +} + +// AS creates new BlockedEmailsTable with assigned alias +func (a BlockedEmailsTable) AS(alias string) *BlockedEmailsTable { + return newBlockedEmailsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new BlockedEmailsTable with assigned schema name +func (a BlockedEmailsTable) FromSchema(schemaName string) *BlockedEmailsTable { + return newBlockedEmailsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new BlockedEmailsTable with assigned table prefix +func 
(a BlockedEmailsTable) WithPrefix(prefix string) *BlockedEmailsTable { + return newBlockedEmailsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new BlockedEmailsTable with assigned table suffix +func (a BlockedEmailsTable) WithSuffix(suffix string) *BlockedEmailsTable { + return newBlockedEmailsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newBlockedEmailsTable(schemaName, tableName, alias string) *BlockedEmailsTable { + return &BlockedEmailsTable{ + blockedEmailsTable: newBlockedEmailsTableImpl(schemaName, tableName, alias), + EXCLUDED: newBlockedEmailsTableImpl("", "excluded", ""), + } +} + +func newBlockedEmailsTableImpl(schemaName, tableName, alias string) blockedEmailsTable { + var ( + EmailColumn = postgres.StringColumn("email") + ReasonCodeColumn = postgres.StringColumn("reason_code") + BlockedAtColumn = postgres.TimestampzColumn("blocked_at") + ActorTypeColumn = postgres.StringColumn("actor_type") + ActorIDColumn = postgres.StringColumn("actor_id") + ResolvedUserIDColumn = postgres.StringColumn("resolved_user_id") + allColumns = postgres.ColumnList{EmailColumn, ReasonCodeColumn, BlockedAtColumn, ActorTypeColumn, ActorIDColumn, ResolvedUserIDColumn} + mutableColumns = postgres.ColumnList{ReasonCodeColumn, BlockedAtColumn, ActorTypeColumn, ActorIDColumn, ResolvedUserIDColumn} + defaultColumns = postgres.ColumnList{} + ) + + return blockedEmailsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + Email: EmailColumn, + ReasonCode: ReasonCodeColumn, + BlockedAt: BlockedAtColumn, + ActorType: ActorTypeColumn, + ActorID: ActorIDColumn, + ResolvedUserID: ResolvedUserIDColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/user/internal/adapters/postgres/jet/user/table/entitlement_records.go b/user/internal/adapters/postgres/jet/user/table/entitlement_records.go new file mode 100644 index 
0000000..f06da96 --- /dev/null +++ b/user/internal/adapters/postgres/jet/user/table/entitlement_records.go @@ -0,0 +1,117 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var EntitlementRecords = newEntitlementRecordsTable("user", "entitlement_records", "") + +type entitlementRecordsTable struct { + postgres.Table + + // Columns + RecordID postgres.ColumnString + UserID postgres.ColumnString + PlanCode postgres.ColumnString + Source postgres.ColumnString + ActorType postgres.ColumnString + ActorID postgres.ColumnString + ReasonCode postgres.ColumnString + StartsAt postgres.ColumnTimestampz + EndsAt postgres.ColumnTimestampz + CreatedAt postgres.ColumnTimestampz + ClosedAt postgres.ColumnTimestampz + ClosedByType postgres.ColumnString + ClosedByID postgres.ColumnString + ClosedReasonCode postgres.ColumnString + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type EntitlementRecordsTable struct { + entitlementRecordsTable + + EXCLUDED entitlementRecordsTable +} + +// AS creates new EntitlementRecordsTable with assigned alias +func (a EntitlementRecordsTable) AS(alias string) *EntitlementRecordsTable { + return newEntitlementRecordsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new EntitlementRecordsTable with assigned schema name +func (a EntitlementRecordsTable) FromSchema(schemaName string) *EntitlementRecordsTable { + return newEntitlementRecordsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new EntitlementRecordsTable with assigned table prefix +func (a EntitlementRecordsTable) WithPrefix(prefix string) *EntitlementRecordsTable { + return newEntitlementRecordsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new 
EntitlementRecordsTable with assigned table suffix +func (a EntitlementRecordsTable) WithSuffix(suffix string) *EntitlementRecordsTable { + return newEntitlementRecordsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newEntitlementRecordsTable(schemaName, tableName, alias string) *EntitlementRecordsTable { + return &EntitlementRecordsTable{ + entitlementRecordsTable: newEntitlementRecordsTableImpl(schemaName, tableName, alias), + EXCLUDED: newEntitlementRecordsTableImpl("", "excluded", ""), + } +} + +func newEntitlementRecordsTableImpl(schemaName, tableName, alias string) entitlementRecordsTable { + var ( + RecordIDColumn = postgres.StringColumn("record_id") + UserIDColumn = postgres.StringColumn("user_id") + PlanCodeColumn = postgres.StringColumn("plan_code") + SourceColumn = postgres.StringColumn("source") + ActorTypeColumn = postgres.StringColumn("actor_type") + ActorIDColumn = postgres.StringColumn("actor_id") + ReasonCodeColumn = postgres.StringColumn("reason_code") + StartsAtColumn = postgres.TimestampzColumn("starts_at") + EndsAtColumn = postgres.TimestampzColumn("ends_at") + CreatedAtColumn = postgres.TimestampzColumn("created_at") + ClosedAtColumn = postgres.TimestampzColumn("closed_at") + ClosedByTypeColumn = postgres.StringColumn("closed_by_type") + ClosedByIDColumn = postgres.StringColumn("closed_by_id") + ClosedReasonCodeColumn = postgres.StringColumn("closed_reason_code") + allColumns = postgres.ColumnList{RecordIDColumn, UserIDColumn, PlanCodeColumn, SourceColumn, ActorTypeColumn, ActorIDColumn, ReasonCodeColumn, StartsAtColumn, EndsAtColumn, CreatedAtColumn, ClosedAtColumn, ClosedByTypeColumn, ClosedByIDColumn, ClosedReasonCodeColumn} + mutableColumns = postgres.ColumnList{UserIDColumn, PlanCodeColumn, SourceColumn, ActorTypeColumn, ActorIDColumn, ReasonCodeColumn, StartsAtColumn, EndsAtColumn, CreatedAtColumn, ClosedAtColumn, ClosedByTypeColumn, ClosedByIDColumn, ClosedReasonCodeColumn} + defaultColumns = postgres.ColumnList{} 
+ ) + + return entitlementRecordsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + RecordID: RecordIDColumn, + UserID: UserIDColumn, + PlanCode: PlanCodeColumn, + Source: SourceColumn, + ActorType: ActorTypeColumn, + ActorID: ActorIDColumn, + ReasonCode: ReasonCodeColumn, + StartsAt: StartsAtColumn, + EndsAt: EndsAtColumn, + CreatedAt: CreatedAtColumn, + ClosedAt: ClosedAtColumn, + ClosedByType: ClosedByTypeColumn, + ClosedByID: ClosedByIDColumn, + ClosedReasonCode: ClosedReasonCodeColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/user/internal/adapters/postgres/jet/user/table/entitlement_snapshots.go b/user/internal/adapters/postgres/jet/user/table/entitlement_snapshots.go new file mode 100644 index 0000000..9fafc07 --- /dev/null +++ b/user/internal/adapters/postgres/jet/user/table/entitlement_snapshots.go @@ -0,0 +1,105 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var EntitlementSnapshots = newEntitlementSnapshotsTable("user", "entitlement_snapshots", "") + +type entitlementSnapshotsTable struct { + postgres.Table + + // Columns + UserID postgres.ColumnString + PlanCode postgres.ColumnString + IsPaid postgres.ColumnBool + StartsAt postgres.ColumnTimestampz + EndsAt postgres.ColumnTimestampz + Source postgres.ColumnString + ActorType postgres.ColumnString + ActorID postgres.ColumnString + ReasonCode postgres.ColumnString + UpdatedAt postgres.ColumnTimestampz + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type EntitlementSnapshotsTable struct { + entitlementSnapshotsTable + + EXCLUDED entitlementSnapshotsTable +} + +// AS creates new EntitlementSnapshotsTable with assigned alias +func (a EntitlementSnapshotsTable) AS(alias string) *EntitlementSnapshotsTable { + return newEntitlementSnapshotsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new EntitlementSnapshotsTable with assigned schema name +func (a EntitlementSnapshotsTable) FromSchema(schemaName string) *EntitlementSnapshotsTable { + return newEntitlementSnapshotsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new EntitlementSnapshotsTable with assigned table prefix +func (a EntitlementSnapshotsTable) WithPrefix(prefix string) *EntitlementSnapshotsTable { + return newEntitlementSnapshotsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new EntitlementSnapshotsTable with assigned table suffix +func (a EntitlementSnapshotsTable) WithSuffix(suffix string) *EntitlementSnapshotsTable { + return newEntitlementSnapshotsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newEntitlementSnapshotsTable(schemaName, tableName, alias 
string) *EntitlementSnapshotsTable { + return &EntitlementSnapshotsTable{ + entitlementSnapshotsTable: newEntitlementSnapshotsTableImpl(schemaName, tableName, alias), + EXCLUDED: newEntitlementSnapshotsTableImpl("", "excluded", ""), + } +} + +func newEntitlementSnapshotsTableImpl(schemaName, tableName, alias string) entitlementSnapshotsTable { + var ( + UserIDColumn = postgres.StringColumn("user_id") + PlanCodeColumn = postgres.StringColumn("plan_code") + IsPaidColumn = postgres.BoolColumn("is_paid") + StartsAtColumn = postgres.TimestampzColumn("starts_at") + EndsAtColumn = postgres.TimestampzColumn("ends_at") + SourceColumn = postgres.StringColumn("source") + ActorTypeColumn = postgres.StringColumn("actor_type") + ActorIDColumn = postgres.StringColumn("actor_id") + ReasonCodeColumn = postgres.StringColumn("reason_code") + UpdatedAtColumn = postgres.TimestampzColumn("updated_at") + allColumns = postgres.ColumnList{UserIDColumn, PlanCodeColumn, IsPaidColumn, StartsAtColumn, EndsAtColumn, SourceColumn, ActorTypeColumn, ActorIDColumn, ReasonCodeColumn, UpdatedAtColumn} + mutableColumns = postgres.ColumnList{PlanCodeColumn, IsPaidColumn, StartsAtColumn, EndsAtColumn, SourceColumn, ActorTypeColumn, ActorIDColumn, ReasonCodeColumn, UpdatedAtColumn} + defaultColumns = postgres.ColumnList{} + ) + + return entitlementSnapshotsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + UserID: UserIDColumn, + PlanCode: PlanCodeColumn, + IsPaid: IsPaidColumn, + StartsAt: StartsAtColumn, + EndsAt: EndsAtColumn, + Source: SourceColumn, + ActorType: ActorTypeColumn, + ActorID: ActorIDColumn, + ReasonCode: ReasonCodeColumn, + UpdatedAt: UpdatedAtColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/user/internal/adapters/postgres/jet/user/table/goose_db_version.go b/user/internal/adapters/postgres/jet/user/table/goose_db_version.go new file mode 100644 index 0000000..77bc7fb 
--- /dev/null +++ b/user/internal/adapters/postgres/jet/user/table/goose_db_version.go @@ -0,0 +1,87 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var GooseDbVersion = newGooseDbVersionTable("user", "goose_db_version", "") + +type gooseDbVersionTable struct { + postgres.Table + + // Columns + ID postgres.ColumnInteger + VersionID postgres.ColumnInteger + IsApplied postgres.ColumnBool + Tstamp postgres.ColumnTimestamp + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type GooseDbVersionTable struct { + gooseDbVersionTable + + EXCLUDED gooseDbVersionTable +} + +// AS creates new GooseDbVersionTable with assigned alias +func (a GooseDbVersionTable) AS(alias string) *GooseDbVersionTable { + return newGooseDbVersionTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new GooseDbVersionTable with assigned schema name +func (a GooseDbVersionTable) FromSchema(schemaName string) *GooseDbVersionTable { + return newGooseDbVersionTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new GooseDbVersionTable with assigned table prefix +func (a GooseDbVersionTable) WithPrefix(prefix string) *GooseDbVersionTable { + return newGooseDbVersionTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new GooseDbVersionTable with assigned table suffix +func (a GooseDbVersionTable) WithSuffix(suffix string) *GooseDbVersionTable { + return newGooseDbVersionTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newGooseDbVersionTable(schemaName, tableName, alias string) *GooseDbVersionTable { + return &GooseDbVersionTable{ + gooseDbVersionTable: newGooseDbVersionTableImpl(schemaName, tableName, alias), + EXCLUDED: newGooseDbVersionTableImpl("", "excluded", 
""), + } +} + +func newGooseDbVersionTableImpl(schemaName, tableName, alias string) gooseDbVersionTable { + var ( + IDColumn = postgres.IntegerColumn("id") + VersionIDColumn = postgres.IntegerColumn("version_id") + IsAppliedColumn = postgres.BoolColumn("is_applied") + TstampColumn = postgres.TimestampColumn("tstamp") + allColumns = postgres.ColumnList{IDColumn, VersionIDColumn, IsAppliedColumn, TstampColumn} + mutableColumns = postgres.ColumnList{VersionIDColumn, IsAppliedColumn, TstampColumn} + defaultColumns = postgres.ColumnList{TstampColumn} + ) + + return gooseDbVersionTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + ID: IDColumn, + VersionID: VersionIDColumn, + IsApplied: IsAppliedColumn, + Tstamp: TstampColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/user/internal/adapters/postgres/jet/user/table/limit_active.go b/user/internal/adapters/postgres/jet/user/table/limit_active.go new file mode 100644 index 0000000..8051e6f --- /dev/null +++ b/user/internal/adapters/postgres/jet/user/table/limit_active.go @@ -0,0 +1,87 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var LimitActive = newLimitActiveTable("user", "limit_active", "") + +type limitActiveTable struct { + postgres.Table + + // Columns + UserID postgres.ColumnString + LimitCode postgres.ColumnString + RecordID postgres.ColumnString + Value postgres.ColumnInteger + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type LimitActiveTable struct { + limitActiveTable + + EXCLUDED limitActiveTable +} + +// AS creates new LimitActiveTable with assigned alias +func (a LimitActiveTable) AS(alias string) *LimitActiveTable { + return newLimitActiveTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new LimitActiveTable with assigned schema name +func (a LimitActiveTable) FromSchema(schemaName string) *LimitActiveTable { + return newLimitActiveTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new LimitActiveTable with assigned table prefix +func (a LimitActiveTable) WithPrefix(prefix string) *LimitActiveTable { + return newLimitActiveTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new LimitActiveTable with assigned table suffix +func (a LimitActiveTable) WithSuffix(suffix string) *LimitActiveTable { + return newLimitActiveTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newLimitActiveTable(schemaName, tableName, alias string) *LimitActiveTable { + return &LimitActiveTable{ + limitActiveTable: newLimitActiveTableImpl(schemaName, tableName, alias), + EXCLUDED: newLimitActiveTableImpl("", "excluded", ""), + } +} + +func newLimitActiveTableImpl(schemaName, tableName, alias string) limitActiveTable { + var ( + UserIDColumn = postgres.StringColumn("user_id") + LimitCodeColumn = postgres.StringColumn("limit_code") + RecordIDColumn = 
postgres.StringColumn("record_id") + ValueColumn = postgres.IntegerColumn("value") + allColumns = postgres.ColumnList{UserIDColumn, LimitCodeColumn, RecordIDColumn, ValueColumn} + mutableColumns = postgres.ColumnList{RecordIDColumn, ValueColumn} + defaultColumns = postgres.ColumnList{} + ) + + return limitActiveTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + UserID: UserIDColumn, + LimitCode: LimitCodeColumn, + RecordID: RecordIDColumn, + Value: ValueColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/user/internal/adapters/postgres/jet/user/table/limit_records.go b/user/internal/adapters/postgres/jet/user/table/limit_records.go new file mode 100644 index 0000000..d9cccbc --- /dev/null +++ b/user/internal/adapters/postgres/jet/user/table/limit_records.go @@ -0,0 +1,114 @@ +// +// Code generated by go-jet DO NOT EDIT. +// +// WARNING: Changes to this file may cause incorrect behavior +// and will be lost if the code is regenerated +// + +package table + +import ( + "github.com/go-jet/jet/v2/postgres" +) + +var LimitRecords = newLimitRecordsTable("user", "limit_records", "") + +type limitRecordsTable struct { + postgres.Table + + // Columns + RecordID postgres.ColumnString + UserID postgres.ColumnString + LimitCode postgres.ColumnString + Value postgres.ColumnInteger + ReasonCode postgres.ColumnString + ActorType postgres.ColumnString + ActorID postgres.ColumnString + AppliedAt postgres.ColumnTimestampz + ExpiresAt postgres.ColumnTimestampz + RemovedAt postgres.ColumnTimestampz + RemovedByType postgres.ColumnString + RemovedByID postgres.ColumnString + RemovedReasonCode postgres.ColumnString + + AllColumns postgres.ColumnList + MutableColumns postgres.ColumnList + DefaultColumns postgres.ColumnList +} + +type LimitRecordsTable struct { + limitRecordsTable + + EXCLUDED limitRecordsTable +} + +// AS creates new LimitRecordsTable with assigned alias 
+func (a LimitRecordsTable) AS(alias string) *LimitRecordsTable { + return newLimitRecordsTable(a.SchemaName(), a.TableName(), alias) +} + +// Schema creates new LimitRecordsTable with assigned schema name +func (a LimitRecordsTable) FromSchema(schemaName string) *LimitRecordsTable { + return newLimitRecordsTable(schemaName, a.TableName(), a.Alias()) +} + +// WithPrefix creates new LimitRecordsTable with assigned table prefix +func (a LimitRecordsTable) WithPrefix(prefix string) *LimitRecordsTable { + return newLimitRecordsTable(a.SchemaName(), prefix+a.TableName(), a.TableName()) +} + +// WithSuffix creates new LimitRecordsTable with assigned table suffix +func (a LimitRecordsTable) WithSuffix(suffix string) *LimitRecordsTable { + return newLimitRecordsTable(a.SchemaName(), a.TableName()+suffix, a.TableName()) +} + +func newLimitRecordsTable(schemaName, tableName, alias string) *LimitRecordsTable { + return &LimitRecordsTable{ + limitRecordsTable: newLimitRecordsTableImpl(schemaName, tableName, alias), + EXCLUDED: newLimitRecordsTableImpl("", "excluded", ""), + } +} + +func newLimitRecordsTableImpl(schemaName, tableName, alias string) limitRecordsTable { + var ( + RecordIDColumn = postgres.StringColumn("record_id") + UserIDColumn = postgres.StringColumn("user_id") + LimitCodeColumn = postgres.StringColumn("limit_code") + ValueColumn = postgres.IntegerColumn("value") + ReasonCodeColumn = postgres.StringColumn("reason_code") + ActorTypeColumn = postgres.StringColumn("actor_type") + ActorIDColumn = postgres.StringColumn("actor_id") + AppliedAtColumn = postgres.TimestampzColumn("applied_at") + ExpiresAtColumn = postgres.TimestampzColumn("expires_at") + RemovedAtColumn = postgres.TimestampzColumn("removed_at") + RemovedByTypeColumn = postgres.StringColumn("removed_by_type") + RemovedByIDColumn = postgres.StringColumn("removed_by_id") + RemovedReasonCodeColumn = postgres.StringColumn("removed_reason_code") + allColumns = postgres.ColumnList{RecordIDColumn, UserIDColumn, 
LimitCodeColumn, ValueColumn, ReasonCodeColumn, ActorTypeColumn, ActorIDColumn, AppliedAtColumn, ExpiresAtColumn, RemovedAtColumn, RemovedByTypeColumn, RemovedByIDColumn, RemovedReasonCodeColumn} + mutableColumns = postgres.ColumnList{UserIDColumn, LimitCodeColumn, ValueColumn, ReasonCodeColumn, ActorTypeColumn, ActorIDColumn, AppliedAtColumn, ExpiresAtColumn, RemovedAtColumn, RemovedByTypeColumn, RemovedByIDColumn, RemovedReasonCodeColumn} + defaultColumns = postgres.ColumnList{} + ) + + return limitRecordsTable{ + Table: postgres.NewTable(schemaName, tableName, alias, allColumns...), + + //Columns + RecordID: RecordIDColumn, + UserID: UserIDColumn, + LimitCode: LimitCodeColumn, + Value: ValueColumn, + ReasonCode: ReasonCodeColumn, + ActorType: ActorTypeColumn, + ActorID: ActorIDColumn, + AppliedAt: AppliedAtColumn, + ExpiresAt: ExpiresAtColumn, + RemovedAt: RemovedAtColumn, + RemovedByType: RemovedByTypeColumn, + RemovedByID: RemovedByIDColumn, + RemovedReasonCode: RemovedReasonCodeColumn, + + AllColumns: allColumns, + MutableColumns: mutableColumns, + DefaultColumns: defaultColumns, + } +} diff --git a/user/internal/adapters/postgres/jet/user/table/sanction_active.go b/user/internal/adapters/postgres/jet/user/table/sanction_active.go new file mode 100644 index 0000000..041df9a --- /dev/null +++ b/user/internal/adapters/postgres/jet/user/table/sanction_active.go @@ -0,0 +1,84 @@ +// +// Code generated by go-jet DO NOT EDIT. 
+//
+// WARNING: Changes to this file may cause incorrect behavior
+// and will be lost if the code is regenerated
+//
+
+package table
+
+import (
+	"github.com/go-jet/jet/v2/postgres"
+)
+
+var SanctionActive = newSanctionActiveTable("user", "sanction_active", "")
+
+type sanctionActiveTable struct {
+	postgres.Table
+
+	// Columns
+	UserID       postgres.ColumnString
+	SanctionCode postgres.ColumnString
+	RecordID     postgres.ColumnString
+
+	AllColumns     postgres.ColumnList
+	MutableColumns postgres.ColumnList
+	DefaultColumns postgres.ColumnList
+}
+
+type SanctionActiveTable struct {
+	sanctionActiveTable
+
+	EXCLUDED sanctionActiveTable
+}
+
+// AS creates new SanctionActiveTable with assigned alias
+func (a SanctionActiveTable) AS(alias string) *SanctionActiveTable {
+	return newSanctionActiveTable(a.SchemaName(), a.TableName(), alias)
+}
+
+// FromSchema creates new SanctionActiveTable with assigned schema name
+func (a SanctionActiveTable) FromSchema(schemaName string) *SanctionActiveTable {
+	return newSanctionActiveTable(schemaName, a.TableName(), a.Alias())
+}
+
+// WithPrefix creates new SanctionActiveTable with assigned table prefix
+func (a SanctionActiveTable) WithPrefix(prefix string) *SanctionActiveTable {
+	return newSanctionActiveTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
+}
+
+// WithSuffix creates new SanctionActiveTable with assigned table suffix
+func (a SanctionActiveTable) WithSuffix(suffix string) *SanctionActiveTable {
+	return newSanctionActiveTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
+}
+
+func newSanctionActiveTable(schemaName, tableName, alias string) *SanctionActiveTable {
+	return &SanctionActiveTable{
+		sanctionActiveTable: newSanctionActiveTableImpl(schemaName, tableName, alias),
+		EXCLUDED:            newSanctionActiveTableImpl("", "excluded", ""),
+	}
+}
+
+func newSanctionActiveTableImpl(schemaName, tableName, alias string) sanctionActiveTable {
+	var (
+		UserIDColumn       = postgres.StringColumn("user_id")
+		SanctionCodeColumn = postgres.StringColumn("sanction_code")
+		RecordIDColumn     = postgres.StringColumn("record_id")
+		allColumns         = postgres.ColumnList{UserIDColumn, SanctionCodeColumn, RecordIDColumn}
+		mutableColumns     = postgres.ColumnList{RecordIDColumn}
+		defaultColumns     = postgres.ColumnList{}
+	)
+
+	return sanctionActiveTable{
+		Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
+
+		// Columns
+		UserID:       UserIDColumn,
+		SanctionCode: SanctionCodeColumn,
+		RecordID:     RecordIDColumn,
+
+		AllColumns:     allColumns,
+		MutableColumns: mutableColumns,
+		DefaultColumns: defaultColumns,
+	}
+}
diff --git a/user/internal/adapters/postgres/jet/user/table/sanction_records.go b/user/internal/adapters/postgres/jet/user/table/sanction_records.go
new file mode 100644
index 0000000..450fc35
--- /dev/null
+++ b/user/internal/adapters/postgres/jet/user/table/sanction_records.go
@@ -0,0 +1,114 @@
+//
+// Code generated by go-jet DO NOT EDIT.
+//
+// WARNING: Changes to this file may cause incorrect behavior
+// and will be lost if the code is regenerated
+//
+
+package table
+
+import (
+	"github.com/go-jet/jet/v2/postgres"
+)
+
+var SanctionRecords = newSanctionRecordsTable("user", "sanction_records", "")
+
+type sanctionRecordsTable struct {
+	postgres.Table
+
+	// Columns
+	RecordID          postgres.ColumnString
+	UserID            postgres.ColumnString
+	SanctionCode      postgres.ColumnString
+	Scope             postgres.ColumnString
+	ReasonCode        postgres.ColumnString
+	ActorType         postgres.ColumnString
+	ActorID           postgres.ColumnString
+	AppliedAt         postgres.ColumnTimestampz
+	ExpiresAt         postgres.ColumnTimestampz
+	RemovedAt         postgres.ColumnTimestampz
+	RemovedByType     postgres.ColumnString
+	RemovedByID       postgres.ColumnString
+	RemovedReasonCode postgres.ColumnString
+
+	AllColumns     postgres.ColumnList
+	MutableColumns postgres.ColumnList
+	DefaultColumns postgres.ColumnList
+}
+
+type SanctionRecordsTable struct {
+	sanctionRecordsTable
+
+	EXCLUDED sanctionRecordsTable
+}
+
+// AS creates new SanctionRecordsTable with assigned alias
+func (a SanctionRecordsTable) AS(alias string) *SanctionRecordsTable {
+	return newSanctionRecordsTable(a.SchemaName(), a.TableName(), alias)
+}
+
+// FromSchema creates new SanctionRecordsTable with assigned schema name
+func (a SanctionRecordsTable) FromSchema(schemaName string) *SanctionRecordsTable {
+	return newSanctionRecordsTable(schemaName, a.TableName(), a.Alias())
+}
+
+// WithPrefix creates new SanctionRecordsTable with assigned table prefix
+func (a SanctionRecordsTable) WithPrefix(prefix string) *SanctionRecordsTable {
+	return newSanctionRecordsTable(a.SchemaName(), prefix+a.TableName(), a.TableName())
+}
+
+// WithSuffix creates new SanctionRecordsTable with assigned table suffix
+func (a SanctionRecordsTable) WithSuffix(suffix string) *SanctionRecordsTable {
+	return newSanctionRecordsTable(a.SchemaName(), a.TableName()+suffix, a.TableName())
+}
+
+func newSanctionRecordsTable(schemaName, tableName, alias string) *SanctionRecordsTable {
+	return &SanctionRecordsTable{
+		sanctionRecordsTable: newSanctionRecordsTableImpl(schemaName, tableName, alias),
+		EXCLUDED:             newSanctionRecordsTableImpl("", "excluded", ""),
+	}
+}
+
+func newSanctionRecordsTableImpl(schemaName, tableName, alias string) sanctionRecordsTable {
+	var (
+		RecordIDColumn          = postgres.StringColumn("record_id")
+		UserIDColumn            = postgres.StringColumn("user_id")
+		SanctionCodeColumn      = postgres.StringColumn("sanction_code")
+		ScopeColumn             = postgres.StringColumn("scope")
+		ReasonCodeColumn        = postgres.StringColumn("reason_code")
+		ActorTypeColumn         = postgres.StringColumn("actor_type")
+		ActorIDColumn           = postgres.StringColumn("actor_id")
+		AppliedAtColumn         = postgres.TimestampzColumn("applied_at")
+		ExpiresAtColumn         = postgres.TimestampzColumn("expires_at")
+		RemovedAtColumn         = postgres.TimestampzColumn("removed_at")
+		RemovedByTypeColumn     = postgres.StringColumn("removed_by_type")
+		RemovedByIDColumn       = postgres.StringColumn("removed_by_id")
+		RemovedReasonCodeColumn = postgres.StringColumn("removed_reason_code")
+		allColumns              = postgres.ColumnList{RecordIDColumn, UserIDColumn, SanctionCodeColumn, ScopeColumn, ReasonCodeColumn, ActorTypeColumn, ActorIDColumn, AppliedAtColumn, ExpiresAtColumn, RemovedAtColumn, RemovedByTypeColumn, RemovedByIDColumn, RemovedReasonCodeColumn}
+		mutableColumns          = postgres.ColumnList{UserIDColumn, SanctionCodeColumn, ScopeColumn, ReasonCodeColumn, ActorTypeColumn, ActorIDColumn, AppliedAtColumn, ExpiresAtColumn, RemovedAtColumn, RemovedByTypeColumn, RemovedByIDColumn, RemovedReasonCodeColumn}
+		defaultColumns          = postgres.ColumnList{}
+	)
+
+	return sanctionRecordsTable{
+		Table: postgres.NewTable(schemaName, tableName, alias, allColumns...),
+
+		// Columns
+		RecordID:          RecordIDColumn,
+		UserID:            UserIDColumn,
+		SanctionCode:      SanctionCodeColumn,
+		Scope:             ScopeColumn,
+		ReasonCode:        ReasonCodeColumn,
+		ActorType:         ActorTypeColumn,
+		ActorID:           ActorIDColumn,
+		AppliedAt:         AppliedAtColumn,
+		ExpiresAt:         ExpiresAtColumn,
+		RemovedAt:         RemovedAtColumn,
+		RemovedByType:     RemovedByTypeColumn,
+		RemovedByID:       RemovedByIDColumn,
+		RemovedReasonCode: RemovedReasonCodeColumn,
+
+		AllColumns:     allColumns,
+		MutableColumns: mutableColumns,
+		DefaultColumns: defaultColumns,
+	}
+}
diff --git a/user/internal/adapters/postgres/jet/user/table/table_use_schema.go b/user/internal/adapters/postgres/jet/user/table/table_use_schema.go
new file mode 100644
index 0000000..355f3c9
--- /dev/null
+++ b/user/internal/adapters/postgres/jet/user/table/table_use_schema.go
@@ -0,0 +1,22 @@
+//
+// Code generated by go-jet DO NOT EDIT.
+//
+// WARNING: Changes to this file may cause incorrect behavior
+// and will be lost if the code is regenerated
+//
+
+package table
+
+// UseSchema sets a new schema name for all generated table SQL builder types. It is recommended to invoke
+// this method only once at the beginning of the program.
+func UseSchema(schema string) {
+	Accounts = Accounts.FromSchema(schema)
+	BlockedEmails = BlockedEmails.FromSchema(schema)
+	EntitlementRecords = EntitlementRecords.FromSchema(schema)
+	EntitlementSnapshots = EntitlementSnapshots.FromSchema(schema)
+	GooseDbVersion = GooseDbVersion.FromSchema(schema)
+	LimitActive = LimitActive.FromSchema(schema)
+	LimitRecords = LimitRecords.FromSchema(schema)
+	SanctionActive = SanctionActive.FromSchema(schema)
+	SanctionRecords = SanctionRecords.FromSchema(schema)
+}
diff --git a/user/internal/adapters/postgres/migrations/00001_init.sql b/user/internal/adapters/postgres/migrations/00001_init.sql
new file mode 100644
index 0000000..75d1a97
--- /dev/null
+++ b/user/internal/adapters/postgres/migrations/00001_init.sql
@@ -0,0 +1,169 @@
+-- +goose Up
+-- accounts holds the editable source-of-truth user-account state.
+-- email and user_name remain UNIQUE for both live and soft-deleted records:
+-- emails are never reassigned to a fresh user_id after DeleteUser, and
+-- user_name is immutable for the lifetime of the account.
+CREATE TABLE accounts (
+    user_id text PRIMARY KEY,
+    email text NOT NULL,
+    user_name text NOT NULL,
+    display_name text NOT NULL DEFAULT '',
+    preferred_language text NOT NULL,
+    time_zone text NOT NULL,
+    declared_country text,
+    created_at timestamptz NOT NULL,
+    updated_at timestamptz NOT NULL,
+    deleted_at timestamptz,
+    CONSTRAINT accounts_email_unique UNIQUE (email),
+    CONSTRAINT accounts_user_name_unique UNIQUE (user_name)
+);
+
+-- Newest-first listing index used by the trusted admin user-list surface; user_id breaks created_at ties so the ordering is deterministic.
+CREATE INDEX accounts_listing_idx
+    ON accounts (created_at DESC, user_id DESC);
+
+-- Reverse-lookup index for the optional declared-country filter; the partial
+-- predicate keeps the index small while declared_country is mostly NULL.
+CREATE INDEX accounts_declared_country_idx
+    ON accounts (declared_country)
+    WHERE declared_country IS NOT NULL;
+
+-- blocked_emails persists pre-user blocked-email subjects that may exist
+-- before any user account exists, plus the blocked subjects produced by
+-- BlockByUserID/BlockByEmail. resolved_user_id is populated when the block
+-- corresponds to an existing or formerly existing account.
+CREATE TABLE blocked_emails (
+    email text PRIMARY KEY,
+    reason_code text NOT NULL,
+    blocked_at timestamptz NOT NULL,
+    actor_type text,
+    actor_id text,
+    resolved_user_id text
+);
+
+-- entitlement_records stores the immutable history of entitlement periods.
+-- Each row represents one segment that was current at some point; closed
+-- segments carry closed_* metadata.
+CREATE TABLE entitlement_records (
+    record_id text PRIMARY KEY,
+    user_id text NOT NULL REFERENCES accounts(user_id),
+    plan_code text NOT NULL,
+    source text NOT NULL,
+    actor_type text NOT NULL,
+    actor_id text,
+    reason_code text NOT NULL,
+    starts_at timestamptz NOT NULL,
+    ends_at timestamptz,
+    created_at timestamptz NOT NULL,
+    closed_at timestamptz,
+    closed_by_type text,
+    closed_by_id text,
+    closed_reason_code text
+);
+
+CREATE INDEX entitlement_records_user_idx
+    ON entitlement_records (user_id, created_at DESC);
+
+-- entitlement_snapshots stores the read-optimized current entitlement state.
+-- Exactly one row per user_id; updated atomically together with history rows
+-- by EntitlementLifecycleStore operations.
+CREATE TABLE entitlement_snapshots (
+    user_id text PRIMARY KEY REFERENCES accounts(user_id),
+    plan_code text NOT NULL,
+    is_paid boolean NOT NULL,
+    starts_at timestamptz NOT NULL,
+    ends_at timestamptz,
+    source text NOT NULL,
+    actor_type text NOT NULL,
+    actor_id text,
+    reason_code text NOT NULL,
+    updated_at timestamptz NOT NULL
+);
+
+-- Coarse free-versus-paid filter used by the admin listing surface.
+CREATE INDEX entitlement_snapshots_paid_state_idx
+    ON entitlement_snapshots (is_paid, plan_code);
+
+-- Finite paid-expiry filter; partial predicate keeps the index limited to
+-- finite paid plans (paid_monthly, paid_yearly).
+CREATE INDEX entitlement_snapshots_paid_expiry_idx
+    ON entitlement_snapshots (ends_at)
+    WHERE is_paid AND ends_at IS NOT NULL;
+
+-- sanction_records stores the immutable history of sanction mutations.
+-- A row may carry removed_at + removed_* fields once the sanction is lifted.
+CREATE TABLE sanction_records (
+    record_id text PRIMARY KEY,
+    user_id text NOT NULL REFERENCES accounts(user_id),
+    sanction_code text NOT NULL,
+    scope text NOT NULL,
+    reason_code text NOT NULL,
+    actor_type text NOT NULL,
+    actor_id text,
+    applied_at timestamptz NOT NULL,
+    expires_at timestamptz,
+    removed_at timestamptz,
+    removed_by_type text,
+    removed_by_id text,
+    removed_reason_code text
+);
+
+CREATE INDEX sanction_records_user_idx
+    ON sanction_records (user_id, applied_at DESC);
+
+-- sanction_active stores the at-most-one active record per (user_id,
+-- sanction_code). It is maintained by PolicyLifecycleStore in the same
+-- transaction as the corresponding sanction_records mutation.
+CREATE TABLE sanction_active (
+    user_id text NOT NULL REFERENCES accounts(user_id),
+    sanction_code text NOT NULL,
+    record_id text NOT NULL REFERENCES sanction_records(record_id),
+    PRIMARY KEY (user_id, sanction_code)
+);
+
+CREATE INDEX sanction_active_code_idx
+    ON sanction_active (sanction_code);
+
+-- limit_records mirrors sanction_records for user-specific limit overrides.
+CREATE TABLE limit_records (
+    record_id text PRIMARY KEY,
+    user_id text NOT NULL REFERENCES accounts(user_id),
+    limit_code text NOT NULL,
+    value integer NOT NULL,
+    reason_code text NOT NULL,
+    actor_type text NOT NULL,
+    actor_id text,
+    applied_at timestamptz NOT NULL,
+    expires_at timestamptz,
+    removed_at timestamptz,
+    removed_by_type text,
+    removed_by_id text,
+    removed_reason_code text
+);
+
+CREATE INDEX limit_records_user_idx
+    ON limit_records (user_id, applied_at DESC);
+
+-- limit_active mirrors sanction_active for user-specific limits. value is
+-- denormalised so the admin listing predicate can read it without joining
+-- the full history.
+CREATE TABLE limit_active (
+    user_id text NOT NULL REFERENCES accounts(user_id),
+    limit_code text NOT NULL,
+    record_id text NOT NULL REFERENCES limit_records(record_id),
+    value integer NOT NULL,
+    PRIMARY KEY (user_id, limit_code)
+);
+
+CREATE INDEX limit_active_code_idx
+    ON limit_active (limit_code);
+
+-- +goose Down
+DROP TABLE IF EXISTS limit_active;
+DROP TABLE IF EXISTS limit_records;
+DROP TABLE IF EXISTS sanction_active;
+DROP TABLE IF EXISTS sanction_records;
+DROP TABLE IF EXISTS entitlement_snapshots;
+DROP TABLE IF EXISTS entitlement_records;
+DROP TABLE IF EXISTS blocked_emails;
+DROP TABLE IF EXISTS accounts;
diff --git a/user/internal/adapters/postgres/migrations/migrations.go b/user/internal/adapters/postgres/migrations/migrations.go
new file mode 100644
index 0000000..fd16855
--- /dev/null
+++ b/user/internal/adapters/postgres/migrations/migrations.go
@@ -0,0 +1,19 @@
+// Package migrations exposes the embedded goose migration files used by
+// User Service to provision its `user` schema in PostgreSQL.
+//
+// The embedded filesystem is consumed by `pkg/postgres.RunMigrations`
+// during user-service startup and by `cmd/jetgen` when regenerating the
+// `internal/adapters/postgres/jet/` code against a transient PostgreSQL
+// instance.
+package migrations + +import "embed" + +//go:embed *.sql +var fs embed.FS + +// FS returns the embedded filesystem containing every numbered goose +// migration shipped with User Service. +func FS() embed.FS { + return fs +} diff --git a/user/internal/adapters/postgres/userstore/accounts.go b/user/internal/adapters/postgres/userstore/accounts.go new file mode 100644 index 0000000..785e9ad --- /dev/null +++ b/user/internal/adapters/postgres/userstore/accounts.go @@ -0,0 +1,375 @@ +package userstore + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + pgtable "galaxy/user/internal/adapters/postgres/jet/user/table" + "galaxy/user/internal/domain/account" + "galaxy/user/internal/domain/common" + "galaxy/user/internal/ports" + + pg "github.com/go-jet/jet/v2/postgres" +) + +// SQL constraint names declared in 00001_init.sql; referenced from error +// translation so we can disambiguate UNIQUE violations on (email) versus +// (user_name). +const ( + accountsEmailUniqueConstraint = "accounts_email_unique" + accountsUserNameUniqueConstraint = "accounts_user_name_unique" +) + +// accountSelectColumns is the canonical SELECT list for accounts, matching +// scanAccountRow's column order. +var accountSelectColumns = pg.ColumnList{ + pgtable.Accounts.UserID, + pgtable.Accounts.Email, + pgtable.Accounts.UserName, + pgtable.Accounts.DisplayName, + pgtable.Accounts.PreferredLanguage, + pgtable.Accounts.TimeZone, + pgtable.Accounts.DeclaredCountry, + pgtable.Accounts.CreatedAt, + pgtable.Accounts.UpdatedAt, + pgtable.Accounts.DeletedAt, +} + +// Create stores one new account record. Email and user-name uniqueness are +// enforced by the schema; conflicts on those columns surface as +// ports.ErrConflict (with ports.ErrUserNameConflict for the dedicated +// user-name index). 
+func (store *Store) Create(ctx context.Context, input ports.CreateAccountInput) error {
+	if err := input.Validate(); err != nil {
+		return fmt.Errorf("create account in postgres: %w", err)
+	}
+
+	operationCtx, cancel, err := store.operationContext(ctx, "create account in postgres")
+	if err != nil {
+		return err
+	}
+	defer cancel()
+
+	if err := insertAccount(operationCtx, store.db, input.Account); err != nil {
+		return err
+	}
+	return nil
+}
+
+// insertAccount runs one INSERT against accounts using the supplied queryer
+// (a *sql.DB or a *sql.Tx). It centralises the column list and error
+// translation used by Create and EnsureByEmail.
+func insertAccount(ctx context.Context, q queryer, record account.UserAccount) error {
+	stmt := pgtable.Accounts.INSERT(
+		pgtable.Accounts.UserID,
+		pgtable.Accounts.Email,
+		pgtable.Accounts.UserName,
+		pgtable.Accounts.DisplayName,
+		pgtable.Accounts.PreferredLanguage,
+		pgtable.Accounts.TimeZone,
+		pgtable.Accounts.DeclaredCountry,
+		pgtable.Accounts.CreatedAt,
+		pgtable.Accounts.UpdatedAt,
+		pgtable.Accounts.DeletedAt,
+	).VALUES(
+		record.UserID.String(),
+		record.Email.String(),
+		record.UserName.String(),
+		record.DisplayName.String(),
+		record.PreferredLanguage.String(),
+		record.TimeZone.String(),
+		nullableCountry(record.DeclaredCountry),
+		record.CreatedAt.UTC(),
+		record.UpdatedAt.UTC(),
+		nullableTime(record.DeletedAt),
+	)
+
+	query, args := stmt.Sql()
+	_, err := q.ExecContext(ctx, query, args...)
+	if err == nil {
+		return nil
+	}
+	if mapped := classifyUniqueViolation(err, accountsUserNameUniqueConstraint, ports.ErrUserNameConflict); mapped != nil {
+		return fmt.Errorf("create account %q in postgres: %w", record.UserID, mapped)
+	}
+	if isUniqueViolation(err) {
+		return fmt.Errorf("create account %q in postgres: %w", record.UserID, ports.ErrConflict)
+	}
+	return fmt.Errorf("create account %q in postgres: %w", record.UserID, err)
+}
+
+// queryer is the subset of *sql.DB / *sql.Tx used by helpers that need to
+// run inside an existing transaction or against the bare pool.
+type queryer interface {
+	QueryRowContext(ctx context.Context, query string, args ...any) *sql.Row
+	QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error)
+	ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error)
+}
+
+// GetByUserID returns the stored account identified by userID (soft-deleted records included; callers inspect DeletedAt).
+func (store *Store) GetByUserID(ctx context.Context, userID common.UserID) (account.UserAccount, error) {
+	if err := userID.Validate(); err != nil {
+		return account.UserAccount{}, fmt.Errorf("get account by user id from postgres: %w", err)
+	}
+
+	operationCtx, cancel, err := store.operationContext(ctx, "get account by user id from postgres")
+	if err != nil {
+		return account.UserAccount{}, err
+	}
+	defer cancel()
+
+	record, err := scanAccountByUserID(operationCtx, store.db, userID)
+	switch {
+	case errors.Is(err, ports.ErrNotFound):
+		return account.UserAccount{}, fmt.Errorf("get account by user id %q from postgres: %w", userID, ports.ErrNotFound)
+	case err != nil:
+		return account.UserAccount{}, fmt.Errorf("get account by user id %q from postgres: %w", userID, err)
+	}
+	return record, nil
+}
+
+// GetByEmail returns the stored account identified by the normalized e-mail
+// address (soft-deleted records included).
+func (store *Store) GetByEmail(ctx context.Context, email common.Email) (account.UserAccount, error) {
+	if err := email.Validate(); err != nil {
+		return account.UserAccount{}, fmt.Errorf("get account by email from postgres: %w", err)
+	}
+
+	operationCtx, cancel, err := store.operationContext(ctx, "get account by email from postgres")
+	if err != nil {
+		return account.UserAccount{}, err
+	}
+	defer cancel()
+
+	record, err := scanAccountByEmail(operationCtx, store.db, email)
+	switch {
+	case errors.Is(err, ports.ErrNotFound):
+		return account.UserAccount{}, fmt.Errorf("get account by email %q from postgres: %w", email, ports.ErrNotFound)
+	case err != nil:
+		return account.UserAccount{}, fmt.Errorf("get account by email %q from postgres: %w", email, err)
+	}
+	return record, nil
+}
+
+// GetByUserName returns the stored account identified by the exact stored
+// user name (soft-deleted records included).
+func (store *Store) GetByUserName(ctx context.Context, userName common.UserName) (account.UserAccount, error) {
+	if err := userName.Validate(); err != nil {
+		return account.UserAccount{}, fmt.Errorf("get account by user name from postgres: %w", err)
+	}
+
+	operationCtx, cancel, err := store.operationContext(ctx, "get account by user name from postgres")
+	if err != nil {
+		return account.UserAccount{}, err
+	}
+	defer cancel()
+
+	record, err := scanAccountByUserName(operationCtx, store.db, userName)
+	switch {
+	case errors.Is(err, ports.ErrNotFound):
+		return account.UserAccount{}, fmt.Errorf("get account by user name %q from postgres: %w", userName, ports.ErrNotFound)
+	case err != nil:
+		return account.UserAccount{}, fmt.Errorf("get account by user name %q from postgres: %w", userName, err)
+	}
+	return record, nil
+}
+
+// ExistsByUserID reports whether userID currently identifies a stored account
+// that is not soft-deleted. Soft-deleted accounts are treated as non-existing
+// for external callers per Stage 22.
+func (store *Store) ExistsByUserID(ctx context.Context, userID common.UserID) (bool, error) {
+	if err := userID.Validate(); err != nil {
+		return false, fmt.Errorf("exists by user id from postgres: %w", err)
+	}
+
+	operationCtx, cancel, err := store.operationContext(ctx, "exists by user id from postgres")
+	if err != nil {
+		return false, err
+	}
+	defer cancel()
+
+	stmt := pg.SELECT(pgtable.Accounts.DeletedAt).
+		FROM(pgtable.Accounts).
+		WHERE(pgtable.Accounts.UserID.EQ(pg.String(userID.String())))
+
+	query, args := stmt.Sql()
+	var deletedAt *time.Time
+	err = store.db.QueryRowContext(operationCtx, query, args...).Scan(&deletedAt)
+	switch {
+	case errors.Is(err, sql.ErrNoRows):
+		return false, nil
+	case err != nil:
+		return false, fmt.Errorf("exists by user id %q from postgres: %w", userID, err)
+	}
+	return deletedAt == nil, nil
+}
+
+// Update replaces the stored account state for record.UserID. Email and
+// user_name are immutable; mutation attempts return ports.ErrConflict.
+// declared_country, display_name, preferred_language, time_zone, updated_at,
+// and deleted_at are the columns affected.
+func (store *Store) Update(ctx context.Context, record account.UserAccount) error {
+	if err := record.Validate(); err != nil {
+		return fmt.Errorf("update account in postgres: %w", err)
+	}
+
+	return store.withTx(ctx, "update account in postgres", func(ctx context.Context, tx *sql.Tx) error {
+		current, err := scanAccountForUpdate(ctx, tx, record.UserID)
+		if err != nil {
+			if errors.Is(err, ports.ErrNotFound) {
+				return fmt.Errorf("update account %q in postgres: %w", record.UserID, ports.ErrNotFound)
+			}
+			return fmt.Errorf("update account %q in postgres: %w", record.UserID, err)
+		}
+		if current.Email != record.Email || current.UserName != record.UserName {
+			return fmt.Errorf("update account %q in postgres: %w", record.UserID, ports.ErrConflict)
+		}
+
+		stmt := pgtable.Accounts.UPDATE(
+			pgtable.Accounts.DisplayName,
+			pgtable.Accounts.PreferredLanguage,
+			pgtable.Accounts.TimeZone,
+			pgtable.Accounts.DeclaredCountry,
+			pgtable.Accounts.UpdatedAt,
+			pgtable.Accounts.DeletedAt,
+		).SET(
+			record.DisplayName.String(),
+			record.PreferredLanguage.String(),
+			record.TimeZone.String(),
+			nullableCountry(record.DeclaredCountry),
+			record.UpdatedAt.UTC(),
+			nullableTime(record.DeletedAt),
+		).WHERE(pgtable.Accounts.UserID.EQ(pg.String(record.UserID.String())))
+
+		query, args := stmt.Sql()
+		if _, err := tx.ExecContext(ctx, query, args...); err != nil {
+			return fmt.Errorf("update account %q in postgres: %w", record.UserID, err)
+		}
+		return nil
+	})
+}
+
+// scanAccountByUserID and the sibling lookups below are thin wrappers around
+// scanAccountWhere, one per lookup column; only the ForUpdate variants take
+// FOR UPDATE row locks so atomic flows observe a stable row.
+func scanAccountByUserID(ctx context.Context, q queryer, userID common.UserID) (account.UserAccount, error) {
+	return scanAccountWhere(ctx, q, pgtable.Accounts.UserID.EQ(pg.String(userID.String())), false)
+}
+
+func scanAccountByEmail(ctx context.Context, q queryer, email common.Email) (account.UserAccount, error) {
+	return scanAccountWhere(ctx, q, pgtable.Accounts.Email.EQ(pg.String(email.String())), false)
+}
+
+func scanAccountByUserName(ctx context.Context, q queryer, userName common.UserName) (account.UserAccount, error) {
+	return scanAccountWhere(ctx, q, pgtable.Accounts.UserName.EQ(pg.String(userName.String())), false)
+}
+
+func scanAccountForUpdate(ctx context.Context, q queryer, userID common.UserID) (account.UserAccount, error) {
+	return scanAccountWhere(ctx, q, pgtable.Accounts.UserID.EQ(pg.String(userID.String())), true)
+}
+
+func scanAccountForUpdateByEmail(ctx context.Context, q queryer, email common.Email) (account.UserAccount, error) {
+	return scanAccountWhere(ctx, q, pgtable.Accounts.Email.EQ(pg.String(email.String())), true)
+}
+
+func scanAccountWhere(ctx context.Context, q queryer, condition pg.BoolExpression, forUpdate bool) (account.UserAccount, error) {
+	stmt := pg.SELECT(accountSelectColumns).
+		FROM(pgtable.Accounts).
+		WHERE(condition)
+	if forUpdate {
+		stmt = stmt.FOR(pg.UPDATE())
+	}
+	query, args := stmt.Sql()
+	row := q.QueryRowContext(ctx, query, args...)
+	return scanAccountRow(row)
+}
+
+func scanAccountRow(row *sql.Row) (account.UserAccount, error) {
+	var (
+		record          account.UserAccount
+		userID          string
+		email           string
+		userName        string
+		displayName     string
+		preferredLang   string
+		timeZone        string
+		declaredCountry *string
+		createdAt       time.Time
+		updatedAt       time.Time
+		deletedAt       *time.Time
+	)
+
+	if err := row.Scan(
+		&userID, &email, &userName, &displayName,
+		&preferredLang, &timeZone, &declaredCountry,
+		&createdAt, &updatedAt, &deletedAt,
+	); err != nil {
+		return account.UserAccount{}, mapNotFound(err)
+	}
+
+	record.UserID = common.UserID(userID)
+	record.Email = common.Email(email)
+	record.UserName = common.UserName(userName)
+	record.DisplayName = common.DisplayName(displayName)
+	record.PreferredLanguage = common.LanguageTag(preferredLang)
+	record.TimeZone = common.TimeZoneName(timeZone)
+	if declaredCountry != nil {
+		record.DeclaredCountry = common.CountryCode(*declaredCountry)
+	}
+	record.CreatedAt = createdAt.UTC()
+	record.UpdatedAt = updatedAt.UTC()
+	record.DeletedAt = timeFromNullable(deletedAt)
+	return record, nil
+}
+
+// AccountStore adapts Store to the UserAccountStore port. The wrapper is
+// returned by Store.Accounts() so callers that need only the narrow port
+// interface remain unaware of the broader Store surface.
+type AccountStore struct {
+	store *Store
+}
+
+// Accounts returns one adapter that exposes the user-account store port over
+// Store.
+func (store *Store) Accounts() *AccountStore {
+	if store == nil {
+		return nil
+	}
+	return &AccountStore{store: store}
+}
+
+// Create stores one new account record.
+func (adapter *AccountStore) Create(ctx context.Context, input ports.CreateAccountInput) error {
+	return adapter.store.Create(ctx, input)
+}
+
+// GetByUserID returns the stored account identified by userID.
+func (adapter *AccountStore) GetByUserID(ctx context.Context, userID common.UserID) (account.UserAccount, error) {
+	return adapter.store.GetByUserID(ctx, userID)
+}
+
+// GetByEmail returns the stored account identified by email.
+func (adapter *AccountStore) GetByEmail(ctx context.Context, email common.Email) (account.UserAccount, error) {
+	return adapter.store.GetByEmail(ctx, email)
+}
+
+// GetByUserName returns the stored account identified by userName.
+func (adapter *AccountStore) GetByUserName(ctx context.Context, userName common.UserName) (account.UserAccount, error) {
+	return adapter.store.GetByUserName(ctx, userName)
+}
+
+// ExistsByUserID reports whether userID currently identifies a stored
+// account.
+func (adapter *AccountStore) ExistsByUserID(ctx context.Context, userID common.UserID) (bool, error) {
+	return adapter.store.ExistsByUserID(ctx, userID)
+}
+
+// Update replaces the stored account state for record.UserID.
+func (adapter *AccountStore) Update(ctx context.Context, record account.UserAccount) error {
+	return adapter.store.Update(ctx, record)
+}
+
+var _ ports.UserAccountStore = (*AccountStore)(nil)
diff --git a/user/internal/adapters/postgres/userstore/auth_directory.go b/user/internal/adapters/postgres/userstore/auth_directory.go
new file mode 100644
index 0000000..e5b5b19
--- /dev/null
+++ b/user/internal/adapters/postgres/userstore/auth_directory.go
@@ -0,0 +1,280 @@
+package userstore
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"fmt"
+
+	"galaxy/user/internal/domain/account"
+	"galaxy/user/internal/domain/authblock"
+	"galaxy/user/internal/domain/common"
+	"galaxy/user/internal/ports"
+)
+
+// deletedAccountBlockReasonCode is returned to auth callers when the lookup
+// resolves to a soft-deleted account. Auth/Session treats this exactly like
+// a regular block: it refuses to mint a session for the subject. The code is
+// not a real sanction record; it lives only on the wire.
+const deletedAccountBlockReasonCode common.ReasonCode = "account_deleted"
+
+// ResolveByEmail returns the current coarse auth-facing resolution state for
+// email. The decision tree, in order:
+//
+//  1. blocked_emails has a row for this address → blocked.
+//  2. accounts has a non-soft-deleted row for this address → existing.
+//  3. accounts has a soft-deleted row for this address → blocked
+//     (account_deleted).
+//  4. otherwise → creatable.
+//
+// The whole sequence is a read-only path; no transaction is required (the two lookups are not snapshot-consistent — EnsureByEmail is the atomic path).
+func (store *Store) ResolveByEmail(ctx context.Context, email common.Email) (ports.ResolveByEmailResult, error) {
+	if err := email.Validate(); err != nil {
+		return ports.ResolveByEmailResult{}, fmt.Errorf("resolve by email in postgres: %w", err)
+	}
+
+	operationCtx, cancel, err := store.operationContext(ctx, "resolve by email in postgres")
+	if err != nil {
+		return ports.ResolveByEmailResult{}, err
+	}
+	defer cancel()
+
+	blocked, err := scanBlockedEmail(operationCtx, store.db, email, false)
+	switch {
+	case err == nil:
+		return ports.ResolveByEmailResult{
+			Kind:            ports.AuthResolutionKindBlocked,
+			BlockReasonCode: blocked.ReasonCode,
+		}, nil
+	case !errors.Is(err, ports.ErrNotFound):
+		return ports.ResolveByEmailResult{}, fmt.Errorf("resolve by email %q in postgres: %w", email, err)
+	}
+
+	record, err := scanAccountByEmail(operationCtx, store.db, email)
+	switch {
+	case errors.Is(err, ports.ErrNotFound):
+		return ports.ResolveByEmailResult{Kind: ports.AuthResolutionKindCreatable}, nil
+	case err != nil:
+		return ports.ResolveByEmailResult{}, fmt.Errorf("resolve by email %q in postgres: %w", email, err)
+	}
+	if record.IsDeleted() {
+		return ports.ResolveByEmailResult{
+			Kind:            ports.AuthResolutionKindBlocked,
+			BlockReasonCode: deletedAccountBlockReasonCode,
+		}, nil
+	}
+	return ports.ResolveByEmailResult{
+		Kind:   ports.AuthResolutionKindExisting,
+		UserID: record.UserID,
+	}, nil
+}
+
+// EnsureByEmail atomically returns an existing user, creates a new one, or
+// reports a blocked outcome. The whole flow runs in one transaction with
+// row-level locks on `blocked_emails(email)` and `accounts(email)` so we
+// observe a consistent snapshot of the auth-facing state.
+//
+// On the create branch the transaction also INSERTs the initial
+// entitlement_records row and the entitlement_snapshots row. UNIQUE
+// violations on user_id or user_name surface as ports.ErrConflict (with
+// ports.ErrUserNameConflict for the user-name constraint).
+func (store *Store) EnsureByEmail(ctx context.Context, input ports.EnsureByEmailInput) (ports.EnsureByEmailResult, error) {
+	if err := input.Validate(); err != nil {
+		return ports.EnsureByEmailResult{}, fmt.Errorf("ensure by email in postgres: %w", err)
+	}
+
+	var (
+		result  ports.EnsureByEmailResult
+		handled bool
+	)
+
+	if err := store.withTx(ctx, "ensure by email in postgres", func(ctx context.Context, tx *sql.Tx) error {
+		blocked, err := scanBlockedEmail(ctx, tx, input.Email, true)
+		switch {
+		case err == nil:
+			result = ports.EnsureByEmailResult{
+				Outcome:         ports.EnsureByEmailOutcomeBlocked,
+				BlockReasonCode: blocked.ReasonCode,
+			}
+			handled = true
+			return nil
+		case !errors.Is(err, ports.ErrNotFound):
+			return fmt.Errorf("ensure by email %q in postgres: %w", input.Email, err)
+		}
+
+		existing, err := scanAccountForUpdateByEmail(ctx, tx, input.Email)
+		switch {
+		case err == nil:
+			if existing.IsDeleted() {
+				result = ports.EnsureByEmailResult{
+					Outcome:         ports.EnsureByEmailOutcomeBlocked,
+					BlockReasonCode: deletedAccountBlockReasonCode,
+				}
+				handled = true
+				return nil
+			}
+			result = ports.EnsureByEmailResult{
+				Outcome: ports.EnsureByEmailOutcomeExisting,
+				UserID:  existing.UserID,
+			}
+			handled = true
+			return nil
+		case !errors.Is(err, ports.ErrNotFound):
+			return fmt.Errorf("ensure by email %q in postgres: %w", input.Email, err)
+		}
+
+		if err := insertAccount(ctx, tx, input.Account); err != nil {
+			return err
+		}
+		if err := insertEntitlementPeriod(ctx, tx, input.EntitlementRecord); err != nil {
+			return err
+		}
+		if err := upsertEntitlementSnapshot(ctx, tx, input.Entitlement); err != nil {
+			return err
+		}
+
+		result = ports.EnsureByEmailResult{
+			Outcome: ports.EnsureByEmailOutcomeCreated,
+			UserID:  input.Account.UserID,
+		}
+		handled = true
+		return nil
+	}); err != nil {
+		return ports.EnsureByEmailResult{}, err
+	}
+	if !handled {
+		return ports.EnsureByEmailResult{}, fmt.Errorf("ensure by email %q in postgres: unhandled transaction outcome", input.Email)
+	}
+	return result, nil
+}
+
+// BlockByUserID applies a block to the account identified by userID. The
+// block is stored as a row in blocked_emails keyed on the user's e-mail with
+// resolved_user_id pointing back to the account.
+func (store *Store) BlockByUserID(ctx context.Context, input ports.BlockByUserIDInput) (ports.BlockResult, error) {
+	if err := input.Validate(); err != nil {
+		return ports.BlockResult{}, fmt.Errorf("block by user id in postgres: %w", err)
+	}
+
+	var (
+		result  ports.BlockResult
+		handled bool
+	)
+
+	if err := store.withTx(ctx, "block by user id in postgres", func(ctx context.Context, tx *sql.Tx) error {
+		acc, err := scanAccountForUpdate(ctx, tx, input.UserID)
+		switch {
+		case errors.Is(err, ports.ErrNotFound):
+			return fmt.Errorf("block by user id %q in postgres: %w", input.UserID, ports.ErrNotFound)
+		case err != nil:
+			return fmt.Errorf("block by user id %q in postgres: %w", input.UserID, err)
+		}
+		if acc.IsDeleted() {
+			return fmt.Errorf("block by user id %q in postgres: %w", input.UserID, ports.ErrNotFound)
+		}
+
+		blocked, err := scanBlockedEmail(ctx, tx, acc.Email, true)
+		switch {
+		case err == nil:
+			result = ports.BlockResult{
+				Outcome: ports.AuthBlockOutcomeAlreadyBlocked,
+				UserID:  input.UserID,
+			}
+			if !blocked.ResolvedUserID.IsZero() {
+				result.UserID = blocked.ResolvedUserID
+			}
+			handled = true
+			return nil
+		case !errors.Is(err, ports.ErrNotFound):
+			return
fmt.Errorf("block by user id %q in postgres: %w", input.UserID, err) + } + + record := authblock.BlockedEmailSubject{ + Email: acc.Email, + ReasonCode: input.ReasonCode, + BlockedAt: input.BlockedAt.UTC(), + ResolvedUserID: input.UserID, + } + if err := upsertBlockedEmail(ctx, tx, record); err != nil { + return fmt.Errorf("block by user id %q in postgres: %w", input.UserID, err) + } + + result = ports.BlockResult{ + Outcome: ports.AuthBlockOutcomeBlocked, + UserID: input.UserID, + } + handled = true + return nil + }); err != nil { + return ports.BlockResult{}, err + } + if !handled { + return ports.BlockResult{}, fmt.Errorf("block by user id %q in postgres: unhandled transaction outcome", input.UserID) + } + return result, nil +} + +// BlockByEmail applies a block to email even when no account exists yet. If +// an account does exist for the e-mail, its user_id is recorded as +// resolved_user_id; soft-deleted accounts also count for this resolution. +func (store *Store) BlockByEmail(ctx context.Context, input ports.BlockByEmailInput) (ports.BlockResult, error) { + if err := input.Validate(); err != nil { + return ports.BlockResult{}, fmt.Errorf("block by email in postgres: %w", err) + } + + var ( + result ports.BlockResult + handled bool + ) + + if err := store.withTx(ctx, "block by email in postgres", func(ctx context.Context, tx *sql.Tx) error { + blocked, err := scanBlockedEmail(ctx, tx, input.Email, true) + switch { + case err == nil: + result = ports.BlockResult{ + Outcome: ports.AuthBlockOutcomeAlreadyBlocked, + UserID: blocked.ResolvedUserID, + } + handled = true + return nil + case !errors.Is(err, ports.ErrNotFound): + return fmt.Errorf("block by email %q in postgres: %w", input.Email, err) + } + + var resolvedUserID common.UserID + acc, err := scanAccountForUpdateByEmail(ctx, tx, input.Email) + switch { + case err == nil: + resolvedUserID = acc.UserID + case !errors.Is(err, ports.ErrNotFound): + return fmt.Errorf("block by email %q in postgres: %w", 
input.Email, err) + } + + record := authblock.BlockedEmailSubject{ + Email: input.Email, + ReasonCode: input.ReasonCode, + BlockedAt: input.BlockedAt.UTC(), + ResolvedUserID: resolvedUserID, + } + if err := upsertBlockedEmail(ctx, tx, record); err != nil { + return fmt.Errorf("block by email %q in postgres: %w", input.Email, err) + } + + result = ports.BlockResult{ + Outcome: ports.AuthBlockOutcomeBlocked, + UserID: resolvedUserID, + } + handled = true + return nil + }); err != nil { + return ports.BlockResult{}, err + } + if !handled { + return ports.BlockResult{}, fmt.Errorf("block by email %q in postgres: unhandled transaction outcome", input.Email) + } + return result, nil +} + +// guard so external callers cannot mistake this file's helpers for a public +// surface. +var _ account.UserAccount = account.UserAccount{} diff --git a/user/internal/adapters/postgres/userstore/blocked_emails.go b/user/internal/adapters/postgres/userstore/blocked_emails.go new file mode 100644 index 0000000..482d34c --- /dev/null +++ b/user/internal/adapters/postgres/userstore/blocked_emails.go @@ -0,0 +1,175 @@ +package userstore + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + pgtable "galaxy/user/internal/adapters/postgres/jet/user/table" + "galaxy/user/internal/domain/authblock" + "galaxy/user/internal/domain/common" + "galaxy/user/internal/ports" + + pg "github.com/go-jet/jet/v2/postgres" +) + +// blockedEmailSelectColumns is the canonical SELECT list for blocked_emails. +var blockedEmailSelectColumns = pg.ColumnList{ + pgtable.BlockedEmails.Email, + pgtable.BlockedEmails.ReasonCode, + pgtable.BlockedEmails.BlockedAt, + pgtable.BlockedEmails.ActorType, + pgtable.BlockedEmails.ActorID, + pgtable.BlockedEmails.ResolvedUserID, +} + +// GetBlockedEmail returns the blocked-email subject for email. 
+func (store *Store) GetBlockedEmail(ctx context.Context, email common.Email) (authblock.BlockedEmailSubject, error) { + if err := email.Validate(); err != nil { + return authblock.BlockedEmailSubject{}, fmt.Errorf("get blocked email subject from postgres: %w", err) + } + + operationCtx, cancel, err := store.operationContext(ctx, "get blocked email subject from postgres") + if err != nil { + return authblock.BlockedEmailSubject{}, err + } + defer cancel() + + record, err := scanBlockedEmail(operationCtx, store.db, email, false) + switch { + case errors.Is(err, ports.ErrNotFound): + return authblock.BlockedEmailSubject{}, fmt.Errorf("get blocked email subject %q from postgres: %w", email, ports.ErrNotFound) + case err != nil: + return authblock.BlockedEmailSubject{}, fmt.Errorf("get blocked email subject %q from postgres: %w", email, err) + } + return record, nil +} + +// PutBlockedEmail stores or replaces the blocked-email subject for +// record.Email. The schema's PRIMARY KEY on (email) makes this an UPSERT via +// `INSERT … ON CONFLICT (email) DO UPDATE`. +func (store *Store) PutBlockedEmail(ctx context.Context, record authblock.BlockedEmailSubject) error { + if err := record.Validate(); err != nil { + return fmt.Errorf("upsert blocked email subject in postgres: %w", err) + } + + operationCtx, cancel, err := store.operationContext(ctx, "upsert blocked email subject in postgres") + if err != nil { + return err + } + defer cancel() + + if err := upsertBlockedEmail(operationCtx, store.db, record); err != nil { + return err + } + return nil +} + +// upsertBlockedEmail centralises the UPSERT used by PutBlockedEmail and the +// composite block flows. q is a *sql.DB or *sql.Tx so it can run inside an +// auth-directory transaction. 
+func upsertBlockedEmail(ctx context.Context, q queryer, record authblock.BlockedEmailSubject) error { + stmt := pgtable.BlockedEmails.INSERT( + pgtable.BlockedEmails.Email, + pgtable.BlockedEmails.ReasonCode, + pgtable.BlockedEmails.BlockedAt, + pgtable.BlockedEmails.ActorType, + pgtable.BlockedEmails.ActorID, + pgtable.BlockedEmails.ResolvedUserID, + ).VALUES( + record.Email.String(), + record.ReasonCode.String(), + record.BlockedAt.UTC(), + nullableActorType(record.Actor.Type), + nullableActorID(record.Actor.ID), + nullableUserID(record.ResolvedUserID), + ).ON_CONFLICT(pgtable.BlockedEmails.Email).DO_UPDATE( + pg.SET( + pgtable.BlockedEmails.ReasonCode.SET(pgtable.BlockedEmails.EXCLUDED.ReasonCode), + pgtable.BlockedEmails.BlockedAt.SET(pgtable.BlockedEmails.EXCLUDED.BlockedAt), + pgtable.BlockedEmails.ActorType.SET(pgtable.BlockedEmails.EXCLUDED.ActorType), + pgtable.BlockedEmails.ActorID.SET(pgtable.BlockedEmails.EXCLUDED.ActorID), + pgtable.BlockedEmails.ResolvedUserID.SET(pgtable.BlockedEmails.EXCLUDED.ResolvedUserID), + ), + ) + + query, args := stmt.Sql() + if _, err := q.ExecContext(ctx, query, args...); err != nil { + return fmt.Errorf("upsert blocked email subject %q in postgres: %w", record.Email, err) + } + return nil +} + +// scanBlockedEmail loads one blocked-email row. forUpdate selects the +// `FOR UPDATE` lock variant used inside the auth-directory transaction. +func scanBlockedEmail(ctx context.Context, q queryer, email common.Email, forUpdate bool) (authblock.BlockedEmailSubject, error) { + stmt := pg.SELECT(blockedEmailSelectColumns). + FROM(pgtable.BlockedEmails). + WHERE(pgtable.BlockedEmails.Email.EQ(pg.String(email.String()))) + if forUpdate { + stmt = stmt.FOR(pg.UPDATE()) + } + query, args := stmt.Sql() + row := q.QueryRowContext(ctx, query, args...) 
+ return scanBlockedEmailRow(row) +} + +func scanBlockedEmailRow(row *sql.Row) (authblock.BlockedEmailSubject, error) { + var ( + record authblock.BlockedEmailSubject + emailValue string + reasonCode string + blockedAt time.Time + actorType *string + actorID *string + resolvedUserID *string + ) + if err := row.Scan( + &emailValue, &reasonCode, &blockedAt, + &actorType, &actorID, &resolvedUserID, + ); err != nil { + return authblock.BlockedEmailSubject{}, mapNotFound(err) + } + + record.Email = common.Email(emailValue) + record.ReasonCode = common.ReasonCode(reasonCode) + record.BlockedAt = blockedAt.UTC() + if actorType != nil { + record.Actor.Type = common.ActorType(*actorType) + } + if actorID != nil { + record.Actor.ID = common.ActorID(*actorID) + } + if resolvedUserID != nil { + record.ResolvedUserID = common.UserID(*resolvedUserID) + } + return record, nil +} + +// BlockedEmailStore adapts Store to the BlockedEmailStore port. +type BlockedEmailStore struct { + store *Store +} + +// BlockedEmails returns one adapter that exposes the blocked-email store +// port over Store. +func (store *Store) BlockedEmails() *BlockedEmailStore { + if store == nil { + return nil + } + return &BlockedEmailStore{store: store} +} + +// GetByEmail returns the blocked-email subject for email. +func (adapter *BlockedEmailStore) GetByEmail(ctx context.Context, email common.Email) (authblock.BlockedEmailSubject, error) { + return adapter.store.GetBlockedEmail(ctx, email) +} + +// Upsert stores or replaces the blocked-email subject for record.Email. 
+func (adapter *BlockedEmailStore) Upsert(ctx context.Context, record authblock.BlockedEmailSubject) error { + return adapter.store.PutBlockedEmail(ctx, record) +} + +var _ ports.BlockedEmailStore = (*BlockedEmailStore)(nil) diff --git a/user/internal/adapters/postgres/userstore/entitlement_store.go b/user/internal/adapters/postgres/userstore/entitlement_store.go new file mode 100644 index 0000000..a851dca --- /dev/null +++ b/user/internal/adapters/postgres/userstore/entitlement_store.go @@ -0,0 +1,729 @@ +package userstore + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + pgtable "galaxy/user/internal/adapters/postgres/jet/user/table" + "galaxy/user/internal/domain/common" + "galaxy/user/internal/domain/entitlement" + "galaxy/user/internal/ports" + + pg "github.com/go-jet/jet/v2/postgres" +) + +// entitlementPeriodSelectColumns is the canonical SELECT list for +// entitlement_records, matching scanEntitlementPeriod's column order. +var entitlementPeriodSelectColumns = pg.ColumnList{ + pgtable.EntitlementRecords.RecordID, + pgtable.EntitlementRecords.UserID, + pgtable.EntitlementRecords.PlanCode, + pgtable.EntitlementRecords.Source, + pgtable.EntitlementRecords.ActorType, + pgtable.EntitlementRecords.ActorID, + pgtable.EntitlementRecords.ReasonCode, + pgtable.EntitlementRecords.StartsAt, + pgtable.EntitlementRecords.EndsAt, + pgtable.EntitlementRecords.CreatedAt, + pgtable.EntitlementRecords.ClosedAt, + pgtable.EntitlementRecords.ClosedByType, + pgtable.EntitlementRecords.ClosedByID, + pgtable.EntitlementRecords.ClosedReasonCode, +} + +// entitlementSnapshotSelectColumns is the canonical SELECT list for +// entitlement_snapshots, matching scanEntitlementSnapshotRow's column order. 
+var entitlementSnapshotSelectColumns = pg.ColumnList{ + pgtable.EntitlementSnapshots.UserID, + pgtable.EntitlementSnapshots.PlanCode, + pgtable.EntitlementSnapshots.IsPaid, + pgtable.EntitlementSnapshots.StartsAt, + pgtable.EntitlementSnapshots.EndsAt, + pgtable.EntitlementSnapshots.Source, + pgtable.EntitlementSnapshots.ActorType, + pgtable.EntitlementSnapshots.ActorID, + pgtable.EntitlementSnapshots.ReasonCode, + pgtable.EntitlementSnapshots.UpdatedAt, +} + +// CreateEntitlementRecord stores one new entitlement period history record. +// The unique key is record_id; a duplicate record_id returns +// ports.ErrConflict. +func (store *Store) CreateEntitlementRecord(ctx context.Context, record entitlement.PeriodRecord) error { + if err := record.Validate(); err != nil { + return fmt.Errorf("create entitlement record in postgres: %w", err) + } + operationCtx, cancel, err := store.operationContext(ctx, "create entitlement record in postgres") + if err != nil { + return err + } + defer cancel() + return insertEntitlementPeriod(operationCtx, store.db, record) +} + +// GetEntitlementRecordByID returns the entitlement period record identified +// by recordID. +func (store *Store) GetEntitlementRecordByID(ctx context.Context, recordID entitlement.EntitlementRecordID) (entitlement.PeriodRecord, error) { + if err := recordID.Validate(); err != nil { + return entitlement.PeriodRecord{}, fmt.Errorf("get entitlement record from postgres: %w", err) + } + operationCtx, cancel, err := store.operationContext(ctx, "get entitlement record from postgres") + if err != nil { + return entitlement.PeriodRecord{}, err + } + defer cancel() + + stmt := pg.SELECT(entitlementPeriodSelectColumns). + FROM(pgtable.EntitlementRecords). + WHERE(pgtable.EntitlementRecords.RecordID.EQ(pg.String(recordID.String()))) + + query, args := stmt.Sql() + row := store.db.QueryRowContext(operationCtx, query, args...) 
+ record, err := scanEntitlementPeriodRow(row) + switch { + case errors.Is(err, ports.ErrNotFound): + return entitlement.PeriodRecord{}, fmt.Errorf("get entitlement record %q from postgres: %w", recordID, ports.ErrNotFound) + case err != nil: + return entitlement.PeriodRecord{}, fmt.Errorf("get entitlement record %q from postgres: %w", recordID, err) + } + return record, nil +} + +// ListEntitlementRecordsByUserID returns every entitlement period record +// owned by userID, ordered by created_at ascending so historical replay is +// deterministic. +func (store *Store) ListEntitlementRecordsByUserID(ctx context.Context, userID common.UserID) ([]entitlement.PeriodRecord, error) { + if err := userID.Validate(); err != nil { + return nil, fmt.Errorf("list entitlement records from postgres: %w", err) + } + operationCtx, cancel, err := store.operationContext(ctx, "list entitlement records from postgres") + if err != nil { + return nil, err + } + defer cancel() + + stmt := pg.SELECT(entitlementPeriodSelectColumns). + FROM(pgtable.EntitlementRecords). + WHERE(pgtable.EntitlementRecords.UserID.EQ(pg.String(userID.String()))). + ORDER_BY(pgtable.EntitlementRecords.CreatedAt.ASC(), pgtable.EntitlementRecords.RecordID.ASC()) + + query, args := stmt.Sql() + rows, err := store.db.QueryContext(operationCtx, query, args...) + if err != nil { + return nil, fmt.Errorf("list entitlement records for %q from postgres: %w", userID, err) + } + defer func() { _ = rows.Close() }() + + out := make([]entitlement.PeriodRecord, 0) + for rows.Next() { + record, err := scanEntitlementPeriodRows(rows) + if err != nil { + return nil, fmt.Errorf("list entitlement records for %q from postgres: %w", userID, err) + } + out = append(out, record) + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("list entitlement records for %q from postgres: %w", userID, err) + } + return out, nil +} + +// UpdateEntitlementRecord replaces one stored entitlement period record. 
The +// statement matches by record_id; ports.ErrNotFound is returned when the +// record does not exist. +func (store *Store) UpdateEntitlementRecord(ctx context.Context, record entitlement.PeriodRecord) error { + if err := record.Validate(); err != nil { + return fmt.Errorf("update entitlement record in postgres: %w", err) + } + operationCtx, cancel, err := store.operationContext(ctx, "update entitlement record in postgres") + if err != nil { + return err + } + defer cancel() + + rows, err := updateEntitlementPeriod(operationCtx, store.db, record) + if err != nil { + return fmt.Errorf("update entitlement record %q in postgres: %w", record.RecordID, err) + } + if rows == 0 { + return fmt.Errorf("update entitlement record %q in postgres: %w", record.RecordID, ports.ErrNotFound) + } + return nil +} + +func updateEntitlementPeriod(ctx context.Context, q queryer, record entitlement.PeriodRecord) (int64, error) { + stmt := pgtable.EntitlementRecords.UPDATE( + pgtable.EntitlementRecords.PlanCode, + pgtable.EntitlementRecords.Source, + pgtable.EntitlementRecords.ActorType, + pgtable.EntitlementRecords.ActorID, + pgtable.EntitlementRecords.ReasonCode, + pgtable.EntitlementRecords.StartsAt, + pgtable.EntitlementRecords.EndsAt, + pgtable.EntitlementRecords.CreatedAt, + pgtable.EntitlementRecords.ClosedAt, + pgtable.EntitlementRecords.ClosedByType, + pgtable.EntitlementRecords.ClosedByID, + pgtable.EntitlementRecords.ClosedReasonCode, + ).SET( + string(record.PlanCode), + record.Source.String(), + record.Actor.Type.String(), + nullableActorID(record.Actor.ID), + record.ReasonCode.String(), + record.StartsAt.UTC(), + nullableTime(record.EndsAt), + record.CreatedAt.UTC(), + nullableTime(record.ClosedAt), + nullableActorType(record.ClosedBy.Type), + nullableActorID(record.ClosedBy.ID), + nullableReasonCode(record.ClosedReasonCode), + ).WHERE(pgtable.EntitlementRecords.RecordID.EQ(pg.String(record.RecordID.String()))) + + query, args := stmt.Sql() + res, err := 
q.ExecContext(ctx, query, args...) + if err != nil { + return 0, err + } + return res.RowsAffected() +} + +func insertEntitlementPeriod(ctx context.Context, q queryer, record entitlement.PeriodRecord) error { + stmt := pgtable.EntitlementRecords.INSERT( + pgtable.EntitlementRecords.RecordID, + pgtable.EntitlementRecords.UserID, + pgtable.EntitlementRecords.PlanCode, + pgtable.EntitlementRecords.Source, + pgtable.EntitlementRecords.ActorType, + pgtable.EntitlementRecords.ActorID, + pgtable.EntitlementRecords.ReasonCode, + pgtable.EntitlementRecords.StartsAt, + pgtable.EntitlementRecords.EndsAt, + pgtable.EntitlementRecords.CreatedAt, + pgtable.EntitlementRecords.ClosedAt, + pgtable.EntitlementRecords.ClosedByType, + pgtable.EntitlementRecords.ClosedByID, + pgtable.EntitlementRecords.ClosedReasonCode, + ).VALUES( + record.RecordID.String(), + record.UserID.String(), + string(record.PlanCode), + record.Source.String(), + record.Actor.Type.String(), + nullableActorID(record.Actor.ID), + record.ReasonCode.String(), + record.StartsAt.UTC(), + nullableTime(record.EndsAt), + record.CreatedAt.UTC(), + nullableTime(record.ClosedAt), + nullableActorType(record.ClosedBy.Type), + nullableActorID(record.ClosedBy.ID), + nullableReasonCode(record.ClosedReasonCode), + ) + + query, args := stmt.Sql() + _, err := q.ExecContext(ctx, query, args...) + if err == nil { + return nil + } + if isUniqueViolation(err) { + return fmt.Errorf("create entitlement record %q in postgres: %w", record.RecordID, ports.ErrConflict) + } + return fmt.Errorf("create entitlement record %q in postgres: %w", record.RecordID, err) +} + +// scannableRow abstracts *sql.Row and *sql.Rows so the row-scanner can be +// shared by single-row and iterating callers. 
+type scannableRow interface { + Scan(dest ...any) error +} + +func scanEntitlementPeriodRow(row *sql.Row) (entitlement.PeriodRecord, error) { + record, err := scanEntitlementPeriod(row) + if errors.Is(err, sql.ErrNoRows) { + return entitlement.PeriodRecord{}, ports.ErrNotFound + } + return record, err +} + +func scanEntitlementPeriodRows(rows *sql.Rows) (entitlement.PeriodRecord, error) { + return scanEntitlementPeriod(rows) +} + +func scanEntitlementPeriod(row scannableRow) (entitlement.PeriodRecord, error) { + var ( + recordID string + userID string + planCode string + source string + actorType string + actorID *string + reasonCode string + startsAt time.Time + endsAt *time.Time + createdAt time.Time + closedAt *time.Time + closedByType *string + closedByID *string + closedReason *string + ) + if err := row.Scan( + &recordID, &userID, &planCode, &source, + &actorType, &actorID, &reasonCode, + &startsAt, &endsAt, &createdAt, + &closedAt, &closedByType, &closedByID, &closedReason, + ); err != nil { + return entitlement.PeriodRecord{}, err + } + record := entitlement.PeriodRecord{ + RecordID: entitlement.EntitlementRecordID(recordID), + UserID: common.UserID(userID), + PlanCode: entitlement.PlanCode(planCode), + Source: common.Source(source), + Actor: common.ActorRef{Type: common.ActorType(actorType)}, + ReasonCode: common.ReasonCode(reasonCode), + StartsAt: startsAt.UTC(), + EndsAt: timeFromNullable(endsAt), + CreatedAt: createdAt.UTC(), + ClosedAt: timeFromNullable(closedAt), + } + if actorID != nil { + record.Actor.ID = common.ActorID(*actorID) + } + if closedByType != nil { + record.ClosedBy.Type = common.ActorType(*closedByType) + } + if closedByID != nil { + record.ClosedBy.ID = common.ActorID(*closedByID) + } + if closedReason != nil { + record.ClosedReasonCode = common.ReasonCode(*closedReason) + } + return record, nil +} + +// GetEntitlementByUserID returns the current entitlement snapshot for userID. 
+func (store *Store) GetEntitlementByUserID(ctx context.Context, userID common.UserID) (entitlement.CurrentSnapshot, error) { + if err := userID.Validate(); err != nil { + return entitlement.CurrentSnapshot{}, fmt.Errorf("get entitlement snapshot from postgres: %w", err) + } + operationCtx, cancel, err := store.operationContext(ctx, "get entitlement snapshot from postgres") + if err != nil { + return entitlement.CurrentSnapshot{}, err + } + defer cancel() + + stmt := pg.SELECT(entitlementSnapshotSelectColumns). + FROM(pgtable.EntitlementSnapshots). + WHERE(pgtable.EntitlementSnapshots.UserID.EQ(pg.String(userID.String()))) + + query, args := stmt.Sql() + row := store.db.QueryRowContext(operationCtx, query, args...) + record, err := scanEntitlementSnapshotRow(row) + switch { + case errors.Is(err, ports.ErrNotFound): + return entitlement.CurrentSnapshot{}, fmt.Errorf("get entitlement snapshot for %q from postgres: %w", userID, ports.ErrNotFound) + case err != nil: + return entitlement.CurrentSnapshot{}, fmt.Errorf("get entitlement snapshot for %q from postgres: %w", userID, err) + } + return record, nil +} + +// PutEntitlement stores the current entitlement snapshot for record.UserID. +// It is an UPSERT so the runtime path can call it on creation and on +// replacement uniformly. 
+func (store *Store) PutEntitlement(ctx context.Context, record entitlement.CurrentSnapshot) error { + if err := record.Validate(); err != nil { + return fmt.Errorf("put entitlement snapshot in postgres: %w", err) + } + operationCtx, cancel, err := store.operationContext(ctx, "put entitlement snapshot in postgres") + if err != nil { + return err + } + defer cancel() + return upsertEntitlementSnapshot(operationCtx, store.db, record) +} + +func upsertEntitlementSnapshot(ctx context.Context, q queryer, record entitlement.CurrentSnapshot) error { + stmt := pgtable.EntitlementSnapshots.INSERT( + pgtable.EntitlementSnapshots.UserID, + pgtable.EntitlementSnapshots.PlanCode, + pgtable.EntitlementSnapshots.IsPaid, + pgtable.EntitlementSnapshots.StartsAt, + pgtable.EntitlementSnapshots.EndsAt, + pgtable.EntitlementSnapshots.Source, + pgtable.EntitlementSnapshots.ActorType, + pgtable.EntitlementSnapshots.ActorID, + pgtable.EntitlementSnapshots.ReasonCode, + pgtable.EntitlementSnapshots.UpdatedAt, + ).VALUES( + record.UserID.String(), + string(record.PlanCode), + record.IsPaid, + record.StartsAt.UTC(), + nullableTime(record.EndsAt), + record.Source.String(), + record.Actor.Type.String(), + nullableActorID(record.Actor.ID), + record.ReasonCode.String(), + record.UpdatedAt.UTC(), + ).ON_CONFLICT(pgtable.EntitlementSnapshots.UserID).DO_UPDATE( + pg.SET( + pgtable.EntitlementSnapshots.PlanCode.SET(pgtable.EntitlementSnapshots.EXCLUDED.PlanCode), + pgtable.EntitlementSnapshots.IsPaid.SET(pgtable.EntitlementSnapshots.EXCLUDED.IsPaid), + pgtable.EntitlementSnapshots.StartsAt.SET(pgtable.EntitlementSnapshots.EXCLUDED.StartsAt), + pgtable.EntitlementSnapshots.EndsAt.SET(pgtable.EntitlementSnapshots.EXCLUDED.EndsAt), + pgtable.EntitlementSnapshots.Source.SET(pgtable.EntitlementSnapshots.EXCLUDED.Source), + pgtable.EntitlementSnapshots.ActorType.SET(pgtable.EntitlementSnapshots.EXCLUDED.ActorType), + pgtable.EntitlementSnapshots.ActorID.SET(pgtable.EntitlementSnapshots.EXCLUDED.ActorID), 
+ pgtable.EntitlementSnapshots.ReasonCode.SET(pgtable.EntitlementSnapshots.EXCLUDED.ReasonCode), + pgtable.EntitlementSnapshots.UpdatedAt.SET(pgtable.EntitlementSnapshots.EXCLUDED.UpdatedAt), + ), + ) + + query, args := stmt.Sql() + if _, err := q.ExecContext(ctx, query, args...); err != nil { + return fmt.Errorf("upsert entitlement snapshot for %q in postgres: %w", record.UserID, err) + } + return nil +} + +func scanEntitlementSnapshotRow(row *sql.Row) (entitlement.CurrentSnapshot, error) { + var ( + userID string + planCode string + isPaid bool + startsAt time.Time + endsAt *time.Time + source string + actorType string + actorID *string + reasonCode string + updatedAt time.Time + ) + err := row.Scan( + &userID, &planCode, &isPaid, + &startsAt, &endsAt, + &source, &actorType, &actorID, &reasonCode, + &updatedAt, + ) + if errors.Is(err, sql.ErrNoRows) { + return entitlement.CurrentSnapshot{}, ports.ErrNotFound + } + if err != nil { + return entitlement.CurrentSnapshot{}, err + } + record := entitlement.CurrentSnapshot{ + UserID: common.UserID(userID), + PlanCode: entitlement.PlanCode(planCode), + IsPaid: isPaid, + StartsAt: startsAt.UTC(), + EndsAt: timeFromNullable(endsAt), + Source: common.Source(source), + Actor: common.ActorRef{Type: common.ActorType(actorType)}, + ReasonCode: common.ReasonCode(reasonCode), + UpdatedAt: updatedAt.UTC(), + } + if actorID != nil { + record.Actor.ID = common.ActorID(*actorID) + } + return record, nil +} + +// GrantEntitlement atomically closes the current free period, inserts the +// new paid period, and replaces the snapshot. 
+func (store *Store) GrantEntitlement(ctx context.Context, input ports.GrantEntitlementInput) error { + if err := input.Validate(); err != nil { + return fmt.Errorf("grant entitlement in postgres: %w", err) + } + return store.withTx(ctx, "grant entitlement in postgres", func(ctx context.Context, tx *sql.Tx) error { + if err := lockSnapshotMatching(ctx, tx, input.ExpectedCurrentSnapshot); err != nil { + return fmt.Errorf("grant entitlement for %q in postgres: %w", input.ExpectedCurrentSnapshot.UserID, err) + } + if err := lockPeriodMatching(ctx, tx, input.ExpectedCurrentRecord); err != nil { + return fmt.Errorf("grant entitlement for %q in postgres: %w", input.ExpectedCurrentRecord.RecordID, err) + } + if err := updateEntitlementPeriodTx(ctx, tx, input.UpdatedCurrentRecord); err != nil { + return fmt.Errorf("grant entitlement for %q in postgres: %w", input.UpdatedCurrentRecord.RecordID, err) + } + if err := insertEntitlementPeriod(ctx, tx, input.NewRecord); err != nil { + return err + } + if err := upsertEntitlementSnapshot(ctx, tx, input.NewSnapshot); err != nil { + return err + } + return nil + }) +} + +// ExtendEntitlement atomically appends a new paid history segment and +// replaces the snapshot. 
+func (store *Store) ExtendEntitlement(ctx context.Context, input ports.ExtendEntitlementInput) error { + if err := input.Validate(); err != nil { + return fmt.Errorf("extend entitlement in postgres: %w", err) + } + return store.withTx(ctx, "extend entitlement in postgres", func(ctx context.Context, tx *sql.Tx) error { + if err := lockSnapshotMatching(ctx, tx, input.ExpectedCurrentSnapshot); err != nil { + return fmt.Errorf("extend entitlement for %q in postgres: %w", input.ExpectedCurrentSnapshot.UserID, err) + } + if err := insertEntitlementPeriod(ctx, tx, input.NewRecord); err != nil { + return err + } + if err := upsertEntitlementSnapshot(ctx, tx, input.NewSnapshot); err != nil { + return err + } + return nil + }) +} + +// RevokeEntitlement atomically closes the current paid period, inserts a new +// free period, and replaces the snapshot. +func (store *Store) RevokeEntitlement(ctx context.Context, input ports.RevokeEntitlementInput) error { + if err := input.Validate(); err != nil { + return fmt.Errorf("revoke entitlement in postgres: %w", err) + } + return store.withTx(ctx, "revoke entitlement in postgres", func(ctx context.Context, tx *sql.Tx) error { + if err := lockSnapshotMatching(ctx, tx, input.ExpectedCurrentSnapshot); err != nil { + return fmt.Errorf("revoke entitlement for %q in postgres: %w", input.ExpectedCurrentSnapshot.UserID, err) + } + if err := lockPeriodMatching(ctx, tx, input.ExpectedCurrentRecord); err != nil { + return fmt.Errorf("revoke entitlement for %q in postgres: %w", input.ExpectedCurrentRecord.RecordID, err) + } + if err := updateEntitlementPeriodTx(ctx, tx, input.UpdatedCurrentRecord); err != nil { + return fmt.Errorf("revoke entitlement for %q in postgres: %w", input.UpdatedCurrentRecord.RecordID, err) + } + if err := insertEntitlementPeriod(ctx, tx, input.NewRecord); err != nil { + return err + } + if err := upsertEntitlementSnapshot(ctx, tx, input.NewSnapshot); err != nil { + return err + } + return nil + }) +} + +// 
RepairExpiredEntitlement atomically replaces an expired finite paid +// snapshot with a materialised free state. +func (store *Store) RepairExpiredEntitlement(ctx context.Context, input ports.RepairExpiredEntitlementInput) error { + if err := input.Validate(); err != nil { + return fmt.Errorf("repair expired entitlement in postgres: %w", err) + } + return store.withTx(ctx, "repair expired entitlement in postgres", func(ctx context.Context, tx *sql.Tx) error { + if err := lockSnapshotMatching(ctx, tx, input.ExpectedExpiredSnapshot); err != nil { + return fmt.Errorf("repair expired entitlement for %q in postgres: %w", input.ExpectedExpiredSnapshot.UserID, err) + } + if err := insertEntitlementPeriod(ctx, tx, input.NewRecord); err != nil { + return err + } + if err := upsertEntitlementSnapshot(ctx, tx, input.NewSnapshot); err != nil { + return err + } + return nil + }) +} + +// lockSnapshotMatching loads the current snapshot under FOR UPDATE and +// verifies it matches expected. Mismatches surface as ports.ErrConflict so +// optimistic-replacement callers can retry. +func lockSnapshotMatching(ctx context.Context, tx *sql.Tx, expected entitlement.CurrentSnapshot) error { + stmt := pg.SELECT(entitlementSnapshotSelectColumns). + FROM(pgtable.EntitlementSnapshots). + WHERE(pgtable.EntitlementSnapshots.UserID.EQ(pg.String(expected.UserID.String()))). + FOR(pg.UPDATE()) + + query, args := stmt.Sql() + row := tx.QueryRowContext(ctx, query, args...) + current, err := scanEntitlementSnapshotRow(row) + switch { + case errors.Is(err, ports.ErrNotFound): + return ports.ErrNotFound + case err != nil: + return err + } + if !snapshotsEqual(current, expected) { + return ports.ErrConflict + } + return nil +} + +func lockPeriodMatching(ctx context.Context, tx *sql.Tx, expected entitlement.PeriodRecord) error { + stmt := pg.SELECT(entitlementPeriodSelectColumns). + FROM(pgtable.EntitlementRecords). + WHERE(pgtable.EntitlementRecords.RecordID.EQ(pg.String(expected.RecordID.String()))). 
+ FOR(pg.UPDATE()) + + query, args := stmt.Sql() + row := tx.QueryRowContext(ctx, query, args...) + current, err := scanEntitlementPeriodRow(row) + switch { + case errors.Is(err, ports.ErrNotFound): + return ports.ErrNotFound + case err != nil: + return err + } + if !periodsEqual(current, expected) { + return ports.ErrConflict + } + return nil +} + +func updateEntitlementPeriodTx(ctx context.Context, tx *sql.Tx, record entitlement.PeriodRecord) error { + rows, err := updateEntitlementPeriod(ctx, tx, record) + if err != nil { + return err + } + if rows == 0 { + return ports.ErrNotFound + } + return nil +} + +func snapshotsEqual(left entitlement.CurrentSnapshot, right entitlement.CurrentSnapshot) bool { + if left.UserID != right.UserID || + left.PlanCode != right.PlanCode || + left.IsPaid != right.IsPaid || + left.Source != right.Source || + left.Actor != right.Actor || + left.ReasonCode != right.ReasonCode { + return false + } + if !left.StartsAt.Equal(right.StartsAt) || !left.UpdatedAt.Equal(right.UpdatedAt) { + return false + } + return optionalTimeEqual(left.EndsAt, right.EndsAt) +} + +func periodsEqual(left entitlement.PeriodRecord, right entitlement.PeriodRecord) bool { + if left.RecordID != right.RecordID || + left.UserID != right.UserID || + left.PlanCode != right.PlanCode || + left.Source != right.Source || + left.Actor != right.Actor || + left.ReasonCode != right.ReasonCode || + left.ClosedBy != right.ClosedBy || + left.ClosedReasonCode != right.ClosedReasonCode { + return false + } + if !left.StartsAt.Equal(right.StartsAt) || !left.CreatedAt.Equal(right.CreatedAt) { + return false + } + if !optionalTimeEqual(left.EndsAt, right.EndsAt) { + return false + } + return optionalTimeEqual(left.ClosedAt, right.ClosedAt) +} + +func optionalTimeEqual(left *time.Time, right *time.Time) bool { + switch { + case left == nil && right == nil: + return true + case left == nil || right == nil: + return false + default: + return left.Equal(*right) + } +} + +// 
EntitlementSnapshotStore adapts Store to the EntitlementSnapshotStore port. +type EntitlementSnapshotStore struct { + store *Store +} + +// EntitlementSnapshots returns one adapter that exposes the entitlement- +// snapshot store port over Store. +func (store *Store) EntitlementSnapshots() *EntitlementSnapshotStore { + if store == nil { + return nil + } + return &EntitlementSnapshotStore{store: store} +} + +// GetByUserID returns the current entitlement snapshot for userID. +func (adapter *EntitlementSnapshotStore) GetByUserID(ctx context.Context, userID common.UserID) (entitlement.CurrentSnapshot, error) { + return adapter.store.GetEntitlementByUserID(ctx, userID) +} + +// Put stores the current entitlement snapshot for record.UserID. +func (adapter *EntitlementSnapshotStore) Put(ctx context.Context, record entitlement.CurrentSnapshot) error { + return adapter.store.PutEntitlement(ctx, record) +} + +var _ ports.EntitlementSnapshotStore = (*EntitlementSnapshotStore)(nil) + +// EntitlementHistoryStore adapts Store to the EntitlementHistoryStore port. +type EntitlementHistoryStore struct { + store *Store +} + +// EntitlementHistory returns one adapter that exposes the entitlement +// history store port over Store. +func (store *Store) EntitlementHistory() *EntitlementHistoryStore { + if store == nil { + return nil + } + return &EntitlementHistoryStore{store: store} +} + +// Create stores one new entitlement history record. +func (adapter *EntitlementHistoryStore) Create(ctx context.Context, record entitlement.PeriodRecord) error { + return adapter.store.CreateEntitlementRecord(ctx, record) +} + +// GetByRecordID returns the entitlement history record identified by +// recordID. 
+func (adapter *EntitlementHistoryStore) GetByRecordID(ctx context.Context, recordID entitlement.EntitlementRecordID) (entitlement.PeriodRecord, error) { + return adapter.store.GetEntitlementRecordByID(ctx, recordID) +} + +// ListByUserID returns every entitlement history record owned by userID. +func (adapter *EntitlementHistoryStore) ListByUserID(ctx context.Context, userID common.UserID) ([]entitlement.PeriodRecord, error) { + return adapter.store.ListEntitlementRecordsByUserID(ctx, userID) +} + +// Update replaces one stored entitlement history record. +func (adapter *EntitlementHistoryStore) Update(ctx context.Context, record entitlement.PeriodRecord) error { + return adapter.store.UpdateEntitlementRecord(ctx, record) +} + +var _ ports.EntitlementHistoryStore = (*EntitlementHistoryStore)(nil) + +// EntitlementLifecycleStore adapts Store to the EntitlementLifecycleStore +// port. +type EntitlementLifecycleStore struct { + store *Store +} + +// EntitlementLifecycle returns one adapter that exposes the entitlement +// lifecycle store port over Store. +func (store *Store) EntitlementLifecycle() *EntitlementLifecycleStore { + if store == nil { + return nil + } + return &EntitlementLifecycleStore{store: store} +} + +// Grant atomically closes the current free period and starts a new paid +// period. +func (adapter *EntitlementLifecycleStore) Grant(ctx context.Context, input ports.GrantEntitlementInput) error { + return adapter.store.GrantEntitlement(ctx, input) +} + +// Extend appends a paid history segment. +func (adapter *EntitlementLifecycleStore) Extend(ctx context.Context, input ports.ExtendEntitlementInput) error { + return adapter.store.ExtendEntitlement(ctx, input) +} + +// Revoke closes the current paid period and starts a fresh free period. 
func (adapter *EntitlementLifecycleStore) Revoke(ctx context.Context, input ports.RevokeEntitlementInput) error {
	return adapter.store.RevokeEntitlement(ctx, input)
}

// RepairExpired replaces an expired finite paid snapshot with a free state.
func (adapter *EntitlementLifecycleStore) RepairExpired(ctx context.Context, input ports.RepairExpiredEntitlementInput) error {
	return adapter.store.RepairExpiredEntitlement(ctx, input)
}

var _ ports.EntitlementLifecycleStore = (*EntitlementLifecycleStore)(nil)
diff --git a/user/internal/adapters/postgres/userstore/harness_test.go b/user/internal/adapters/postgres/userstore/harness_test.go
new file mode 100644
index 0000000..0452be3
--- /dev/null
+++ b/user/internal/adapters/postgres/userstore/harness_test.go
@@ -0,0 +1,203 @@
package userstore

import (
	"context"
	"database/sql"
	"net/url"
	"os"
	"strings"
	"sync"
	"testing"
	"time"

	"galaxy/postgres"
	"galaxy/user/internal/adapters/postgres/migrations"

	testcontainers "github.com/testcontainers/testcontainers-go"
	tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres"
	"github.com/testcontainers/testcontainers-go/wait"
)

// Package-scoped container settings shared by every test in this package.
const (
	pkgPostgresImage    = "postgres:16-alpine"
	pkgSuperUser        = "galaxy"
	pkgSuperPassword    = "galaxy"
	pkgSuperDatabase    = "galaxy_user"
	pkgServiceRole      = "userservice"
	pkgServicePassword  = "userservice"
	pkgServiceSchema    = "user"
	pkgContainerStartup = 90 * time.Second
	pkgOperationTimeout = 10 * time.Second
)

// One container is started lazily for the whole package and shared by every
// test; pkgContainerErr remembers a failed start so later tests skip fast
// instead of retrying the Docker boot.
var (
	pkgContainerOnce sync.Once
	pkgContainerErr  error
	pkgContainerEnv  *postgresEnv
)

// postgresEnv bundles the running container, the service-role DSN, and the
// shared connection pool used by the tests.
type postgresEnv struct {
	container *tcpostgres.PostgresContainer
	dsn       string
	pool      *sql.DB
}

// ensurePostgresEnv starts the shared container exactly once and skips the
// calling test when the start failed (typically: no Docker available).
func ensurePostgresEnv(t testing.TB) *postgresEnv {
	t.Helper()
	pkgContainerOnce.Do(func() {
		pkgContainerEnv, pkgContainerErr = startPostgresEnv()
	})
	if pkgContainerErr != nil {
		t.Skipf("postgres container start failed (Docker unavailable?): %v", pkgContainerErr)
	}
	return pkgContainerEnv
}

// startPostgresEnv boots the Postgres container, provisions the service role
// and schema, opens and pings the pool, runs migrations, and returns the
// ready-to-use environment. Every failure path tears the container down.
func startPostgresEnv() (*postgresEnv, error) {
	ctx := context.Background()
	container, err := tcpostgres.Run(ctx, pkgPostgresImage,
		tcpostgres.WithDatabase(pkgSuperDatabase),
		tcpostgres.WithUsername(pkgSuperUser),
		tcpostgres.WithPassword(pkgSuperPassword),
		testcontainers.WithWaitStrategy(
			// The official image logs "ready" twice (init restart), so wait
			// for the second occurrence before connecting.
			wait.ForLog("database system is ready to accept connections").
				WithOccurrence(2).
				WithStartupTimeout(pkgContainerStartup),
		),
	)
	if err != nil {
		return nil, err
	}

	baseDSN, err := container.ConnectionString(ctx, "sslmode=disable")
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	if err := provisionRoleAndSchema(ctx, baseDSN); err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	scopedDSN, err := dsnForServiceRole(baseDSN)
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	cfg := postgres.DefaultConfig()
	cfg.PrimaryDSN = scopedDSN
	cfg.OperationTimeout = pkgOperationTimeout
	pool, err := postgres.OpenPrimary(ctx, cfg)
	if err != nil {
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	if err := postgres.Ping(ctx, pool, pkgOperationTimeout); err != nil {
		_ = pool.Close()
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}
	if err := postgres.RunMigrations(ctx, pool, migrations.FS(), "."); err != nil {
		_ = pool.Close()
		_ = testcontainers.TerminateContainer(container)
		return nil, err
	}

	return &postgresEnv{
		container: container,
		dsn:       scopedDSN,
		pool:      pool,
	}, nil
}

// provisionRoleAndSchema creates the dedicated service role and its schema
// over the superuser connection. Statements are idempotent so a re-run
// against an already-provisioned database is harmless.
func provisionRoleAndSchema(ctx context.Context, baseDSN string) error {
	cfg := postgres.DefaultConfig()
	cfg.PrimaryDSN = baseDSN
	cfg.OperationTimeout = pkgOperationTimeout
	db, err := postgres.OpenPrimary(ctx, cfg)
	if err != nil {
		return err
	}
	defer func() { _ = db.Close() }()

	statements := []string{
		`DO $$ BEGIN
	IF NOT EXISTS (SELECT 1 FROM pg_roles WHERE rolname = 'userservice') THEN
	CREATE ROLE userservice LOGIN PASSWORD 'userservice';
	END IF;
	END $$;`,
		`CREATE SCHEMA IF NOT EXISTS "user" AUTHORIZATION userservice;`,
		`GRANT USAGE ON SCHEMA "user" TO userservice;`,
	}
	for _, statement := range statements {
		if _, err := db.ExecContext(ctx, statement); err != nil {
			return err
		}
	}
	return nil
}

// dsnForServiceRole rewrites baseDSN to authenticate as the service role
// with search_path pinned to the service schema. Any other query parameters
// on baseDSN are intentionally dropped.
func dsnForServiceRole(baseDSN string) (string, error) {
	parsed, err := url.Parse(baseDSN)
	if err != nil {
		return "", err
	}
	values := url.Values{}
	values.Set("search_path", pkgServiceSchema)
	values.Set("sslmode", "disable")
	scoped := url.URL{
		Scheme:   parsed.Scheme,
		User:     url.UserPassword(pkgServiceRole, pkgServicePassword),
		Host:     parsed.Host,
		Path:     parsed.Path,
		RawQuery: values.Encode(),
	}
	return scoped.String(), nil
}

// newTestStore returns a Store backed by the package-scoped pool. Every
// invocation truncates the user-owned tables so individual tests start from
// a clean slate while sharing one container start.
func newTestStore(t *testing.T) *Store {
	t.Helper()
	env := ensurePostgresEnv(t)
	truncateAll(t, env.pool)
	store, err := New(Config{DB: env.pool, OperationTimeout: pkgOperationTimeout})
	if err != nil {
		t.Fatalf("new store: %v", err)
	}
	return store
}

// truncateAll wipes every user-service table in one TRUNCATE statement;
// CASCADE follows foreign keys and RESTART IDENTITY resets sequences.
func truncateAll(t *testing.T, db *sql.DB) {
	t.Helper()
	statement := strings.Join([]string{
		"TRUNCATE TABLE",
		"sanction_active, limit_active,",
		"sanction_records, limit_records,",
		"entitlement_snapshots, entitlement_records,",
		"blocked_emails, accounts",
		"RESTART IDENTITY CASCADE",
	}, " ")
	if _, err := db.ExecContext(context.Background(), statement); err != nil {
		t.Fatalf("truncate tables: %v", err)
	}
}

// TestMain runs first when `go test` enters the package. We drive it through
// a TestMain so the container started by the first test is shut down on the
// way out, even when individual tests panic.
+func TestMain(m *testing.M) { + code := m.Run() + if pkgContainerEnv != nil { + if pkgContainerEnv.pool != nil { + _ = pkgContainerEnv.pool.Close() + } + if pkgContainerEnv.container != nil { + _ = testcontainers.TerminateContainer(pkgContainerEnv.container) + } + } + os.Exit(code) +} diff --git a/user/internal/adapters/postgres/userstore/helpers.go b/user/internal/adapters/postgres/userstore/helpers.go new file mode 100644 index 0000000..b531fc7 --- /dev/null +++ b/user/internal/adapters/postgres/userstore/helpers.go @@ -0,0 +1,149 @@ +package userstore + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + "galaxy/user/internal/domain/common" + "galaxy/user/internal/ports" + + "github.com/jackc/pgx/v5/pgconn" +) + +// pgUniqueViolationCode identifies the SQLSTATE returned by PostgreSQL when +// a UNIQUE constraint is violated by INSERT or UPDATE. +const pgUniqueViolationCode = "23505" + +// classifyUniqueViolation maps a PostgreSQL unique-violation error to the +// matching ports sentinel. constraint identifies which UNIQUE constraint name +// the caller cares about so we can surface ports.ErrUserNameConflict for the +// dedicated user-name index. Returns nil when err is not a unique violation +// or does not match constraint. +func classifyUniqueViolation(err error, constraint string, mapped error) error { + var pgErr *pgconn.PgError + if !errors.As(err, &pgErr) || pgErr.Code != pgUniqueViolationCode { + return nil + } + if constraint != "" && pgErr.ConstraintName != constraint { + return nil + } + return mapped +} + +// isUniqueViolation reports whether err is a PostgreSQL unique-violation, +// regardless of constraint name. Useful for "any conflict ⇒ ErrConflict" +// translations on simple INSERT calls. 
+func isUniqueViolation(err error) bool { + var pgErr *pgconn.PgError + if !errors.As(err, &pgErr) { + return false + } + return pgErr.Code == pgUniqueViolationCode +} + +// nullableString returns the trimmed string when s is non-empty, otherwise +// reports a NULL stand-in usable in $-parameter lists. Empty strings are +// stored as NULL so optional columns round-trip through nil. +func nullableString(s string) any { + if s == "" { + return nil + } + return s +} + +// nullableActorID converts an optional ActorID (the zero value indicates +// "no caller supplied this field") to a NULL stand-in for SQL parameters. +func nullableActorID(id common.ActorID) any { + if id.IsZero() { + return nil + } + return id.String() +} + +// nullableActorType mirrors nullableActorID for ActorType. +func nullableActorType(t common.ActorType) any { + if t.IsZero() { + return nil + } + return t.String() +} + +// nullableReasonCode mirrors nullableActorID for ReasonCode. +func nullableReasonCode(code common.ReasonCode) any { + if code.IsZero() { + return nil + } + return code.String() +} + +// nullableUserID mirrors nullableActorID for UserID. +func nullableUserID(id common.UserID) any { + if id.IsZero() { + return nil + } + return id.String() +} + +// nullableTime returns t.UTC() when non-nil, otherwise nil for NULL columns. +func nullableTime(t *time.Time) any { + if t == nil { + return nil + } + return t.UTC() +} + +// nullableCountry returns the upper-cased ISO 3166-1 alpha-2 string when set, +// otherwise nil. +func nullableCountry(code common.CountryCode) any { + if code.IsZero() { + return nil + } + return code.String() +} + +// stringFromNullable trims an optional sql.NullString-like *string (read from +// Postgres COLUMNAR_NULL) into an ActorID/ReasonCode/UserID-friendly string. 
+func stringFromNullable(value *string) string { + if value == nil { + return "" + } + return *value +} + +// timeFromNullable copies an optional *time.Time read from Postgres into a +// new pointer normalised to UTC. +func timeFromNullable(value *time.Time) *time.Time { + if value == nil { + return nil + } + utc := value.UTC() + return &utc +} + +// mapNotFound translates sql.ErrNoRows into ports.ErrNotFound, leaving every +// other error untouched. +func mapNotFound(err error) error { + if errors.Is(err, sql.ErrNoRows) { + return ports.ErrNotFound + } + return err +} + +// withTimeout derives a child context bounded by timeout and prefixes context +// errors with operation. Callers must always invoke the returned cancel. +func withTimeout(ctx context.Context, operation string, timeout time.Duration) (context.Context, context.CancelFunc, error) { + if ctx == nil { + return nil, nil, fmt.Errorf("%s: nil context", operation) + } + if err := ctx.Err(); err != nil { + return nil, nil, fmt.Errorf("%s: %w", operation, err) + } + if timeout <= 0 { + return nil, nil, fmt.Errorf("%s: operation timeout must be positive", operation) + } + bounded, cancel := context.WithTimeout(ctx, timeout) + return bounded, cancel, nil +} diff --git a/user/internal/adapters/postgres/userstore/list_store.go b/user/internal/adapters/postgres/userstore/list_store.go new file mode 100644 index 0000000..87c214f --- /dev/null +++ b/user/internal/adapters/postgres/userstore/list_store.go @@ -0,0 +1,160 @@ +package userstore + +import ( + "context" + "fmt" + "time" + + pgtable "galaxy/user/internal/adapters/postgres/jet/user/table" + "galaxy/user/internal/domain/common" + "galaxy/user/internal/ports" + + pg "github.com/go-jet/jet/v2/postgres" +) + +// ListUserIDs returns one deterministic page of user identifiers ordered by +// `created_at desc`, then `user_id desc`, mirroring the ordering used by the +// previous Redis adapter. 
+// +// The Postgres implementation keeps the listing surface storage-thin: it +// only paginates on `created_at` + `user_id` and does not attempt to push +// the full filter matrix into SQL. The service layer (`adminusers.Lister`) +// continues to load each candidate via the per-user loader and apply the +// filter set in memory, exactly as it did with the Redis adapter. Pushing +// the filter matrix down to SQL is a follow-up optimisation noted in +// `galaxy/user/docs/postgres-migration.md`. +func (store *Store) ListUserIDs(ctx context.Context, input ports.ListUsersInput) (ports.ListUsersResult, error) { + if err := input.Validate(); err != nil { + return ports.ListUsersResult{}, fmt.Errorf("list users in postgres: %w", err) + } + + operationCtx, cancel, err := store.operationContext(ctx, "list users in postgres") + if err != nil { + return ports.ListUsersResult{}, err + } + defer cancel() + + filters := userListFiltersFromPorts(input.Filters) + + var ( + cursorCreatedAt time.Time + cursorUserID common.UserID + cursored bool + ) + if input.PageToken != "" { + cursor, err := decodePageToken(input.PageToken, filters) + if err != nil { + return ports.ListUsersResult{}, fmt.Errorf("list users in postgres: %w", ports.ErrInvalidPageToken) + } + cursorCreatedAt = cursor.CreatedAt + cursorUserID = cursor.UserID + cursored = true + } + + limit := input.PageSize + 1 + rows, err := queryListPage(operationCtx, store, cursored, cursorCreatedAt, cursorUserID, limit) + if err != nil { + return ports.ListUsersResult{}, fmt.Errorf("list users in postgres: %w", err) + } + + result := ports.ListUsersResult{ + UserIDs: make([]common.UserID, 0, min(len(rows), input.PageSize)), + } + visible := min(len(rows), input.PageSize) + for index := range visible { + result.UserIDs = append(result.UserIDs, rows[index].UserID) + } + + if len(rows) > input.PageSize { + last := rows[input.PageSize-1] + token, err := encodePageToken(pageCursor{ + CreatedAt: last.CreatedAt, + UserID: last.UserID, + 
}, filters) + if err != nil { + return ports.ListUsersResult{}, fmt.Errorf("list users in postgres: %w", err) + } + result.NextPageToken = token + } + return result, nil +} + +// listRow is the lightweight projection returned by queryListPage; only +// (created_at, user_id) is needed for the listing index plus cursor token +// generation. +type listRow struct { + CreatedAt time.Time + UserID common.UserID +} + +// queryListPage returns up to limit rows ordered by created_at DESC, user_id +// DESC. When cursored is true, the query starts strictly after the +// (cursorCreatedAt, cursorUserID) tuple per the keyset pagination rule. +func queryListPage(ctx context.Context, store *Store, cursored bool, cursorCreatedAt time.Time, cursorUserID common.UserID, limit int) ([]listRow, error) { + stmt := pg.SELECT(pgtable.Accounts.CreatedAt, pgtable.Accounts.UserID). + FROM(pgtable.Accounts) + + if cursored { + // (created_at, user_id) < (cursorCreatedAt, cursorUserID) expressed as + // the equivalent OR/AND expansion since jet has no row-comparison + // builder. + ts := pg.TimestampzT(cursorCreatedAt.UTC()) + uid := pg.String(cursorUserID.String()) + stmt = stmt.WHERE(pg.OR( + pgtable.Accounts.CreatedAt.LT(ts), + pg.AND( + pgtable.Accounts.CreatedAt.EQ(ts), + pgtable.Accounts.UserID.LT(uid), + ), + )) + } + stmt = stmt. + ORDER_BY(pgtable.Accounts.CreatedAt.DESC(), pgtable.Accounts.UserID.DESC()). + LIMIT(int64(limit)) + + query, args := stmt.Sql() + rows, err := store.db.QueryContext(ctx, query, args...) 
+ if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + out := make([]listRow, 0, limit) + for rows.Next() { + var ( + createdAt time.Time + userID string + ) + if err := rows.Scan(&createdAt, &userID); err != nil { + return nil, err + } + uid := common.UserID(userID) + if err := uid.Validate(); err != nil { + return nil, fmt.Errorf("created_at index member user id: %w", err) + } + out = append(out, listRow{CreatedAt: createdAt.UTC(), UserID: uid}) + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + +// UserList adapts Store to the UserListStore port. +type UserList struct{ store *Store } + +// UserListAdapter returns one adapter that exposes the user-list store port. +func (store *Store) UserListAdapter() *UserList { + if store == nil { + return nil + } + return &UserList{store: store} +} + +// ListUserIDs returns one deterministic page of user identifiers. +func (a *UserList) ListUserIDs(ctx context.Context, input ports.ListUsersInput) (ports.ListUsersResult, error) { + return a.store.ListUserIDs(ctx, input) +} + +var _ ports.UserListStore = (*UserList)(nil) +var _ ports.UserListStore = (*Store)(nil) diff --git a/user/internal/adapters/postgres/userstore/page_token.go b/user/internal/adapters/postgres/userstore/page_token.go new file mode 100644 index 0000000..a5e26b1 --- /dev/null +++ b/user/internal/adapters/postgres/userstore/page_token.go @@ -0,0 +1,198 @@ +package userstore + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "time" + + "galaxy/user/internal/domain/common" + "galaxy/user/internal/domain/entitlement" + "galaxy/user/internal/domain/policy" + "galaxy/user/internal/ports" +) + +// errPageTokenFiltersMismatch reports that a supplied page token was created +// for a different normalised filter set. Callers translate it to +// ports.ErrInvalidPageToken on the boundary. 
+var errPageTokenFiltersMismatch = errors.New("page token filters do not match current filters") + +// pageCursor identifies the last (created_at, user_id) tuple visible on the +// previous listing page. The cursor is paired with a normalised filter +// fingerprint so the token cannot be replayed across a different filter set. +type pageCursor struct { + CreatedAt time.Time + UserID common.UserID +} + +func (cursor pageCursor) Validate() error { + if err := common.ValidateTimestamp("page cursor created at", cursor.CreatedAt); err != nil { + return err + } + if err := cursor.UserID.Validate(); err != nil { + return fmt.Errorf("page cursor user id: %w", err) + } + return nil +} + +// userListFilters mirrors ports.UserListFilters but excludes the fields that +// only the service layer enforces (display_name match, user_name) so token +// replay across a UI re-render that toggles a UI-only filter does not +// invalidate the cursor. +type userListFilters struct { + PaidState entitlement.PaidState + PaidExpiresBefore *time.Time + PaidExpiresAfter *time.Time + DeclaredCountry common.CountryCode + SanctionCode policy.SanctionCode + LimitCode policy.LimitCode + CanLogin *bool + CanCreatePrivateGame *bool + CanJoinGame *bool +} + +// userListFiltersFromPorts copies the listing-stable subset of port-level +// filters into the form embedded into the page token fingerprint. 
+func userListFiltersFromPorts(filters ports.UserListFilters) userListFilters { + return userListFilters{ + PaidState: filters.PaidState, + PaidExpiresBefore: filters.PaidExpiresBefore, + PaidExpiresAfter: filters.PaidExpiresAfter, + DeclaredCountry: filters.DeclaredCountry, + SanctionCode: filters.SanctionCode, + LimitCode: filters.LimitCode, + CanLogin: filters.CanLogin, + CanCreatePrivateGame: filters.CanCreatePrivateGame, + CanJoinGame: filters.CanJoinGame, + } +} + +func (filters userListFilters) Validate() error { + if !filters.PaidState.IsKnown() { + return fmt.Errorf("paid state %q is unsupported", filters.PaidState) + } + if filters.PaidExpiresBefore != nil && filters.PaidExpiresBefore.IsZero() { + return fmt.Errorf("paid expires before must not be zero") + } + if filters.PaidExpiresAfter != nil && filters.PaidExpiresAfter.IsZero() { + return fmt.Errorf("paid expires after must not be zero") + } + if !filters.DeclaredCountry.IsZero() { + if err := filters.DeclaredCountry.Validate(); err != nil { + return fmt.Errorf("declared country: %w", err) + } + } + if filters.SanctionCode != "" && !filters.SanctionCode.IsKnown() { + return fmt.Errorf("sanction code %q is unsupported", filters.SanctionCode) + } + if filters.LimitCode != "" && !filters.LimitCode.IsKnown() { + return fmt.Errorf("limit code %q is unsupported", filters.LimitCode) + } + return nil +} + +// encodePageToken encodes cursor + filters into the frozen opaque page token +// shape used by the trusted admin listing surface. The encoding is identical +// to the previous Redis implementation so existing public clients can keep +// using their stored tokens through the migration cut-over. 
func encodePageToken(cursor pageCursor, filters userListFilters) (string, error) {
	if err := cursor.Validate(); err != nil {
		return "", fmt.Errorf("encode page token: %w", err)
	}
	// The filter fingerprint is embedded so a token can only be replayed
	// against the identical filter set it was issued for.
	fingerprint, err := normaliseFilters(filters)
	if err != nil {
		return "", fmt.Errorf("encode page token: %w", err)
	}
	payload, err := json.Marshal(pageTokenPayload{
		CreatedAt: cursor.CreatedAt.UTC().Format(time.RFC3339Nano),
		UserID:    cursor.UserID.String(),
		Filters:   fingerprint,
	})
	if err != nil {
		return "", fmt.Errorf("encode page token: %w", err)
	}
	return base64.RawURLEncoding.EncodeToString(payload), nil
}

// decodePageToken parses raw and verifies the embedded fingerprint matches
// expected. The token's wire format is preserved across the Redis-to-
// PostgreSQL adapter swap.
func decodePageToken(raw string, expected userListFilters) (pageCursor, error) {
	fingerprint, err := normaliseFilters(expected)
	if err != nil {
		return pageCursor{}, fmt.Errorf("decode page token: %w", err)
	}
	payload, err := base64.RawURLEncoding.DecodeString(raw)
	if err != nil {
		return pageCursor{}, fmt.Errorf("decode page token: %w", err)
	}
	var token pageTokenPayload
	if err := json.Unmarshal(payload, &token); err != nil {
		return pageCursor{}, fmt.Errorf("decode page token: %w", err)
	}
	// A filter mismatch is a distinct sentinel so callers can distinguish
	// "stale token for other filters" from a malformed token.
	if token.Filters != fingerprint {
		return pageCursor{}, errPageTokenFiltersMismatch
	}
	createdAt, err := time.Parse(time.RFC3339Nano, token.CreatedAt)
	if err != nil {
		return pageCursor{}, fmt.Errorf("decode page token: parse created_at: %w", err)
	}
	cursor := pageCursor{CreatedAt: createdAt.UTC(), UserID: common.UserID(token.UserID)}
	if err := cursor.Validate(); err != nil {
		return pageCursor{}, fmt.Errorf("decode page token: %w", err)
	}
	return cursor, nil
}

// pageTokenPayload is the frozen JSON wire shape of one page token before
// base64 encoding.
type pageTokenPayload struct {
	CreatedAt string                 `json:"created_at"`
	UserID    string                 `json:"user_id"`
	Filters   normalisedFilterFields `json:"filters"`
}

// normalisedFilterFields is the canonical, string-normalised fingerprint of
// one filter set. It is a comparable struct so token verification is a plain
// != comparison.
type normalisedFilterFields struct {
	PaidState            string `json:"paid_state,omitempty"`
	PaidExpiresBeforeUTC string `json:"paid_expires_before_utc,omitempty"`
	PaidExpiresAfterUTC  string `json:"paid_expires_after_utc,omitempty"`
	DeclaredCountry      string `json:"declared_country,omitempty"`
	SanctionCode         string `json:"sanction_code,omitempty"`
	LimitCode            string `json:"limit_code,omitempty"`
	CanLogin             string `json:"can_login,omitempty"`
	CanCreatePrivateGame string `json:"can_create_private_game,omitempty"`
	CanJoinGame          string `json:"can_join_game,omitempty"`
}

// normaliseFilters validates filters and renders every field to its canonical
// string form (RFC3339Nano UTC for times, "true"/"false"/"" for optional
// booleans) so equal filter sets always produce equal fingerprints.
func normaliseFilters(filters userListFilters) (normalisedFilterFields, error) {
	if err := filters.Validate(); err != nil {
		return normalisedFilterFields{}, err
	}
	return normalisedFilterFields{
		PaidState:            string(filters.PaidState),
		PaidExpiresBeforeUTC: formatOptionalUTC(filters.PaidExpiresBefore),
		PaidExpiresAfterUTC:  formatOptionalUTC(filters.PaidExpiresAfter),
		DeclaredCountry:      filters.DeclaredCountry.String(),
		SanctionCode:         string(filters.SanctionCode),
		LimitCode:            string(filters.LimitCode),
		CanLogin:             formatOptionalBool(filters.CanLogin),
		CanCreatePrivateGame: formatOptionalBool(filters.CanCreatePrivateGame),
		CanJoinGame:          formatOptionalBool(filters.CanJoinGame),
	}, nil
}

// formatOptionalUTC renders an optional timestamp as RFC3339Nano in UTC, or
// "" when absent.
func formatOptionalUTC(value *time.Time) string {
	if value == nil {
		return ""
	}
	return value.UTC().Format(time.RFC3339Nano)
}

// formatOptionalBool renders an optional boolean as "true"/"false", or ""
// when absent.
func formatOptionalBool(value *bool) string {
	if value == nil {
		return ""
	}
	if *value {
		return "true"
	}
	return "false"
}
diff --git a/user/internal/adapters/postgres/userstore/policy_store.go b/user/internal/adapters/postgres/userstore/policy_store.go
new file mode 100644
index 0000000..356aa57
--- /dev/null
+++ b/user/internal/adapters/postgres/userstore/policy_store.go
@@ -0,0 +1,870 @@
package userstore

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"time"

	pgtable "galaxy/user/internal/adapters/postgres/jet/user/table"
	"galaxy/user/internal/domain/common"
	"galaxy/user/internal/domain/policy"
	"galaxy/user/internal/ports"

	pg "github.com/go-jet/jet/v2/postgres"
)

// sanctionSelectColumns is the canonical SELECT list for sanction_records,
// matching scanSanction's column order.
var sanctionSelectColumns = pg.ColumnList{
	pgtable.SanctionRecords.RecordID,
	pgtable.SanctionRecords.UserID,
	pgtable.SanctionRecords.SanctionCode,
	pgtable.SanctionRecords.Scope,
	pgtable.SanctionRecords.ReasonCode,
	pgtable.SanctionRecords.ActorType,
	pgtable.SanctionRecords.ActorID,
	pgtable.SanctionRecords.AppliedAt,
	pgtable.SanctionRecords.ExpiresAt,
	pgtable.SanctionRecords.RemovedAt,
	pgtable.SanctionRecords.RemovedByType,
	pgtable.SanctionRecords.RemovedByID,
	pgtable.SanctionRecords.RemovedReasonCode,
}

// limitSelectColumns is the canonical SELECT list for limit_records, matching
// scanLimit's column order.
var limitSelectColumns = pg.ColumnList{
	pgtable.LimitRecords.RecordID,
	pgtable.LimitRecords.UserID,
	pgtable.LimitRecords.LimitCode,
	pgtable.LimitRecords.Value,
	pgtable.LimitRecords.ReasonCode,
	pgtable.LimitRecords.ActorType,
	pgtable.LimitRecords.ActorID,
	pgtable.LimitRecords.AppliedAt,
	pgtable.LimitRecords.ExpiresAt,
	pgtable.LimitRecords.RemovedAt,
	pgtable.LimitRecords.RemovedByType,
	pgtable.LimitRecords.RemovedByID,
	pgtable.LimitRecords.RemovedReasonCode,
}

// CreateSanction stores one new sanction history record.
+func (store *Store) CreateSanction(ctx context.Context, record policy.SanctionRecord) error { + if err := record.Validate(); err != nil { + return fmt.Errorf("create sanction in postgres: %w", err) + } + operationCtx, cancel, err := store.operationContext(ctx, "create sanction in postgres") + if err != nil { + return err + } + defer cancel() + return insertSanctionRecord(operationCtx, store.db, record) +} + +func insertSanctionRecord(ctx context.Context, q queryer, record policy.SanctionRecord) error { + stmt := pgtable.SanctionRecords.INSERT( + pgtable.SanctionRecords.RecordID, + pgtable.SanctionRecords.UserID, + pgtable.SanctionRecords.SanctionCode, + pgtable.SanctionRecords.Scope, + pgtable.SanctionRecords.ReasonCode, + pgtable.SanctionRecords.ActorType, + pgtable.SanctionRecords.ActorID, + pgtable.SanctionRecords.AppliedAt, + pgtable.SanctionRecords.ExpiresAt, + pgtable.SanctionRecords.RemovedAt, + pgtable.SanctionRecords.RemovedByType, + pgtable.SanctionRecords.RemovedByID, + pgtable.SanctionRecords.RemovedReasonCode, + ).VALUES( + record.RecordID.String(), + record.UserID.String(), + string(record.SanctionCode), + record.Scope.String(), + record.ReasonCode.String(), + record.Actor.Type.String(), + nullableActorID(record.Actor.ID), + record.AppliedAt.UTC(), + nullableTime(record.ExpiresAt), + nullableTime(record.RemovedAt), + nullableActorType(record.RemovedBy.Type), + nullableActorID(record.RemovedBy.ID), + nullableReasonCode(record.RemovedReasonCode), + ) + + query, args := stmt.Sql() + _, err := q.ExecContext(ctx, query, args...) + if err == nil { + return nil + } + if isUniqueViolation(err) { + return fmt.Errorf("create sanction %q in postgres: %w", record.RecordID, ports.ErrConflict) + } + return fmt.Errorf("create sanction %q in postgres: %w", record.RecordID, err) +} + +// GetSanctionByRecordID returns the sanction history record identified by +// recordID. 
func (store *Store) GetSanctionByRecordID(ctx context.Context, recordID policy.SanctionRecordID) (policy.SanctionRecord, error) {
	if err := recordID.Validate(); err != nil {
		return policy.SanctionRecord{}, fmt.Errorf("get sanction from postgres: %w", err)
	}
	operationCtx, cancel, err := store.operationContext(ctx, "get sanction from postgres")
	if err != nil {
		return policy.SanctionRecord{}, err
	}
	defer cancel()

	stmt := pg.SELECT(sanctionSelectColumns).
		FROM(pgtable.SanctionRecords).
		WHERE(pgtable.SanctionRecords.RecordID.EQ(pg.String(recordID.String())))

	query, args := stmt.Sql()
	row := store.db.QueryRowContext(operationCtx, query, args...)
	record, err := scanSanctionRow(row)
	switch {
	case errors.Is(err, ports.ErrNotFound):
		return policy.SanctionRecord{}, fmt.Errorf("get sanction %q from postgres: %w", recordID, ports.ErrNotFound)
	case err != nil:
		return policy.SanctionRecord{}, fmt.Errorf("get sanction %q from postgres: %w", recordID, err)
	}
	return record, nil
}

// ListSanctionsByUserID returns every sanction history record owned by
// userID, ordered by applied_at ascending.
func (store *Store) ListSanctionsByUserID(ctx context.Context, userID common.UserID) ([]policy.SanctionRecord, error) {
	if err := userID.Validate(); err != nil {
		return nil, fmt.Errorf("list sanctions from postgres: %w", err)
	}
	operationCtx, cancel, err := store.operationContext(ctx, "list sanctions from postgres")
	if err != nil {
		return nil, err
	}
	defer cancel()

	// record_id is the deterministic tie-breaker for records applied at the
	// same instant.
	stmt := pg.SELECT(sanctionSelectColumns).
		FROM(pgtable.SanctionRecords).
		WHERE(pgtable.SanctionRecords.UserID.EQ(pg.String(userID.String()))).
		ORDER_BY(pgtable.SanctionRecords.AppliedAt.ASC(), pgtable.SanctionRecords.RecordID.ASC())

	query, args := stmt.Sql()
	rows, err := store.db.QueryContext(operationCtx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("list sanctions for %q from postgres: %w", userID, err)
	}
	defer func() { _ = rows.Close() }()

	out := make([]policy.SanctionRecord, 0)
	for rows.Next() {
		record, err := scanSanction(rows)
		if err != nil {
			return nil, fmt.Errorf("list sanctions for %q from postgres: %w", userID, err)
		}
		out = append(out, record)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("list sanctions for %q from postgres: %w", userID, err)
	}
	return out, nil
}

// UpdateSanction replaces one stored sanction history record. The matched
// row is identified by record_id; ports.ErrNotFound is returned when no row
// matches.
func (store *Store) UpdateSanction(ctx context.Context, record policy.SanctionRecord) error {
	if err := record.Validate(); err != nil {
		return fmt.Errorf("update sanction in postgres: %w", err)
	}
	operationCtx, cancel, err := store.operationContext(ctx, "update sanction in postgres")
	if err != nil {
		return err
	}
	defer cancel()
	return updateSanctionRecordTx(operationCtx, store.db, record)
}

// updateSanctionRecordTx overwrites every mutable column of the sanction row
// identified by record_id through q. ports.ErrNotFound is returned when no
// row matched.
func updateSanctionRecordTx(ctx context.Context, q queryer, record policy.SanctionRecord) error {
	stmt := pgtable.SanctionRecords.UPDATE(
		pgtable.SanctionRecords.UserID,
		pgtable.SanctionRecords.SanctionCode,
		pgtable.SanctionRecords.Scope,
		pgtable.SanctionRecords.ReasonCode,
		pgtable.SanctionRecords.ActorType,
		pgtable.SanctionRecords.ActorID,
		pgtable.SanctionRecords.AppliedAt,
		pgtable.SanctionRecords.ExpiresAt,
		pgtable.SanctionRecords.RemovedAt,
		pgtable.SanctionRecords.RemovedByType,
		pgtable.SanctionRecords.RemovedByID,
		pgtable.SanctionRecords.RemovedReasonCode,
	).SET(
		record.UserID.String(),
		string(record.SanctionCode),
		record.Scope.String(),
		record.ReasonCode.String(),
		record.Actor.Type.String(),
		nullableActorID(record.Actor.ID),
		record.AppliedAt.UTC(),
		nullableTime(record.ExpiresAt),
		nullableTime(record.RemovedAt),
		nullableActorType(record.RemovedBy.Type),
		nullableActorID(record.RemovedBy.ID),
		nullableReasonCode(record.RemovedReasonCode),
	).WHERE(pgtable.SanctionRecords.RecordID.EQ(pg.String(record.RecordID.String())))

	query, args := stmt.Sql()
	res, err := q.ExecContext(ctx, query, args...)
	if err != nil {
		return fmt.Errorf("update sanction %q in postgres: %w", record.RecordID, err)
	}
	rows, err := res.RowsAffected()
	if err != nil {
		return fmt.Errorf("update sanction %q in postgres: %w", record.RecordID, err)
	}
	if rows == 0 {
		return fmt.Errorf("update sanction %q in postgres: %w", record.RecordID, ports.ErrNotFound)
	}
	return nil
}

// scanSanctionRow adapts scanSanction to *sql.Row, mapping sql.ErrNoRows to
// ports.ErrNotFound.
func scanSanctionRow(row *sql.Row) (policy.SanctionRecord, error) {
	record, err := scanSanction(row)
	if errors.Is(err, sql.ErrNoRows) {
		return policy.SanctionRecord{}, ports.ErrNotFound
	}
	return record, err
}

// scanSanction decodes one result row — in sanctionSelectColumns order —
// into a domain record. NULL columns map to the corresponding zero values.
func scanSanction(row scannableRow) (policy.SanctionRecord, error) {
	var (
		recordID  string
		userID    string
		code      string
		scope     string
		reason    string
		actorType string
		actorID   *string
		appliedAt time.Time
		expiresAt *time.Time
		removedAt *time.Time
		rmByType  *string
		rmByID    *string
		rmReason  *string
	)
	if err := row.Scan(
		&recordID, &userID, &code, &scope, &reason,
		&actorType, &actorID, &appliedAt,
		&expiresAt, &removedAt,
		&rmByType, &rmByID, &rmReason,
	); err != nil {
		return policy.SanctionRecord{}, err
	}
	record := policy.SanctionRecord{
		RecordID:     policy.SanctionRecordID(recordID),
		UserID:       common.UserID(userID),
		SanctionCode: policy.SanctionCode(code),
		Scope:        common.Scope(scope),
		ReasonCode:   common.ReasonCode(reason),
		Actor:        common.ActorRef{Type: common.ActorType(actorType)},
		AppliedAt:    appliedAt.UTC(),
		ExpiresAt:    timeFromNullable(expiresAt),
		RemovedAt:    timeFromNullable(removedAt),
	}
	if actorID != nil {
		record.Actor.ID = common.ActorID(*actorID)
	}
	if rmByType != nil {
		record.RemovedBy.Type = common.ActorType(*rmByType)
	}
	if rmByID != nil {
		record.RemovedBy.ID = common.ActorID(*rmByID)
	}
	if rmReason != nil {
		record.RemovedReasonCode = common.ReasonCode(*rmReason)
	}
	return record, nil
}

// CreateLimit stores one new limit history record.
func (store *Store) CreateLimit(ctx context.Context, record policy.LimitRecord) error {
	if err := record.Validate(); err != nil {
		return fmt.Errorf("create limit in postgres: %w", err)
	}
	operationCtx, cancel, err := store.operationContext(ctx, "create limit in postgres")
	if err != nil {
		return err
	}
	defer cancel()
	return insertLimitRecord(operationCtx, store.db, record)
}

// insertLimitRecord writes one limit history row through q. A unique
// violation (duplicate record_id) is reported as ports.ErrConflict.
func insertLimitRecord(ctx context.Context, q queryer, record policy.LimitRecord) error {
	stmt := pgtable.LimitRecords.INSERT(
		pgtable.LimitRecords.RecordID,
		pgtable.LimitRecords.UserID,
		pgtable.LimitRecords.LimitCode,
		pgtable.LimitRecords.Value,
		pgtable.LimitRecords.ReasonCode,
		pgtable.LimitRecords.ActorType,
		pgtable.LimitRecords.ActorID,
		pgtable.LimitRecords.AppliedAt,
		pgtable.LimitRecords.ExpiresAt,
		pgtable.LimitRecords.RemovedAt,
		pgtable.LimitRecords.RemovedByType,
		pgtable.LimitRecords.RemovedByID,
		pgtable.LimitRecords.RemovedReasonCode,
	).VALUES(
		record.RecordID.String(),
		record.UserID.String(),
		string(record.LimitCode),
		record.Value,
		record.ReasonCode.String(),
		record.Actor.Type.String(),
		nullableActorID(record.Actor.ID),
		record.AppliedAt.UTC(),
		nullableTime(record.ExpiresAt),
		nullableTime(record.RemovedAt),
		nullableActorType(record.RemovedBy.Type),
		nullableActorID(record.RemovedBy.ID),
		nullableReasonCode(record.RemovedReasonCode),
	)

	query, args := stmt.Sql()
	_, err := q.ExecContext(ctx, query, args...)
	if err == nil {
		return nil
	}
	if isUniqueViolation(err) {
		return fmt.Errorf("create limit %q in postgres: %w", record.RecordID, ports.ErrConflict)
	}
	return fmt.Errorf("create limit %q in postgres: %w", record.RecordID, err)
}

// GetLimitByRecordID returns the limit history record identified by recordID.
func (store *Store) GetLimitByRecordID(ctx context.Context, recordID policy.LimitRecordID) (policy.LimitRecord, error) {
	if err := recordID.Validate(); err != nil {
		return policy.LimitRecord{}, fmt.Errorf("get limit from postgres: %w", err)
	}
	operationCtx, cancel, err := store.operationContext(ctx, "get limit from postgres")
	if err != nil {
		return policy.LimitRecord{}, err
	}
	defer cancel()

	stmt := pg.SELECT(limitSelectColumns).
		FROM(pgtable.LimitRecords).
		WHERE(pgtable.LimitRecords.RecordID.EQ(pg.String(recordID.String())))

	query, args := stmt.Sql()
	row := store.db.QueryRowContext(operationCtx, query, args...)
	record, err := scanLimitRow(row)
	switch {
	case errors.Is(err, ports.ErrNotFound):
		return policy.LimitRecord{}, fmt.Errorf("get limit %q from postgres: %w", recordID, ports.ErrNotFound)
	case err != nil:
		return policy.LimitRecord{}, fmt.Errorf("get limit %q from postgres: %w", recordID, err)
	}
	return record, nil
}

// ListLimitsByUserID returns every limit history record owned by userID,
// ordered by applied_at ascending.
func (store *Store) ListLimitsByUserID(ctx context.Context, userID common.UserID) ([]policy.LimitRecord, error) {
	if err := userID.Validate(); err != nil {
		return nil, fmt.Errorf("list limits from postgres: %w", err)
	}
	operationCtx, cancel, err := store.operationContext(ctx, "list limits from postgres")
	if err != nil {
		return nil, err
	}
	defer cancel()

	// record_id is the deterministic tie-breaker for records applied at the
	// same instant.
	stmt := pg.SELECT(limitSelectColumns).
		FROM(pgtable.LimitRecords).
		WHERE(pgtable.LimitRecords.UserID.EQ(pg.String(userID.String()))).
		ORDER_BY(pgtable.LimitRecords.AppliedAt.ASC(), pgtable.LimitRecords.RecordID.ASC())

	query, args := stmt.Sql()
	rows, err := store.db.QueryContext(operationCtx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("list limits for %q from postgres: %w", userID, err)
	}
	defer func() { _ = rows.Close() }()

	out := make([]policy.LimitRecord, 0)
	for rows.Next() {
		record, err := scanLimit(rows)
		if err != nil {
			return nil, fmt.Errorf("list limits for %q from postgres: %w", userID, err)
		}
		out = append(out, record)
	}
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("list limits for %q from postgres: %w", userID, err)
	}
	return out, nil
}

// UpdateLimit replaces one stored limit history record. The matched row is
// identified by record_id; ports.ErrNotFound is returned when no row matches.
func (store *Store) UpdateLimit(ctx context.Context, record policy.LimitRecord) error {
	if err := record.Validate(); err != nil {
		return fmt.Errorf("update limit in postgres: %w", err)
	}
	operationCtx, cancel, err := store.operationContext(ctx, "update limit in postgres")
	if err != nil {
		return err
	}
	defer cancel()
	return updateLimitRecordTx(operationCtx, store.db, record)
}

// updateLimitRecordTx overwrites every mutable column of the limit row
// identified by record_id through q. ports.ErrNotFound is returned when no
// row matched.
func updateLimitRecordTx(ctx context.Context, q queryer, record policy.LimitRecord) error {
	stmt := pgtable.LimitRecords.UPDATE(
		pgtable.LimitRecords.UserID,
		pgtable.LimitRecords.LimitCode,
		pgtable.LimitRecords.Value,
		pgtable.LimitRecords.ReasonCode,
		pgtable.LimitRecords.ActorType,
		pgtable.LimitRecords.ActorID,
		pgtable.LimitRecords.AppliedAt,
		pgtable.LimitRecords.ExpiresAt,
		pgtable.LimitRecords.RemovedAt,
		pgtable.LimitRecords.RemovedByType,
		pgtable.LimitRecords.RemovedByID,
		pgtable.LimitRecords.RemovedReasonCode,
	).SET(
		record.UserID.String(),
		string(record.LimitCode),
		record.Value,
		record.ReasonCode.String(),
		record.Actor.Type.String(),
		nullableActorID(record.Actor.ID),
		record.AppliedAt.UTC(),
		nullableTime(record.ExpiresAt),
		nullableTime(record.RemovedAt),
		nullableActorType(record.RemovedBy.Type),
		nullableActorID(record.RemovedBy.ID),
		nullableReasonCode(record.RemovedReasonCode),
	).WHERE(pgtable.LimitRecords.RecordID.EQ(pg.String(record.RecordID.String())))

	query, args := stmt.Sql()
	res, err := q.ExecContext(ctx, query, args...)
	if err != nil {
		return fmt.Errorf("update limit %q in postgres: %w", record.RecordID, err)
	}
	rows, err := res.RowsAffected()
	if err != nil {
		return fmt.Errorf("update limit %q in postgres: %w", record.RecordID, err)
	}
	if rows == 0 {
		return fmt.Errorf("update limit %q in postgres: %w", record.RecordID, ports.ErrNotFound)
	}
	return nil
}

// scanLimitRow adapts scanLimit to *sql.Row, mapping sql.ErrNoRows to
// ports.ErrNotFound.
func scanLimitRow(row *sql.Row) (policy.LimitRecord, error) {
	record, err := scanLimit(row)
	if errors.Is(err, sql.ErrNoRows) {
		return policy.LimitRecord{}, ports.ErrNotFound
	}
	return record, err
}

// scanLimit decodes one result row — in limitSelectColumns order — into a
// domain record. NULL columns map to the corresponding zero values.
func scanLimit(row scannableRow) (policy.LimitRecord, error) {
	var (
		recordID  string
		userID    string
		code      string
		value     int
		reason    string
		actorType string
		actorID   *string
		appliedAt time.Time
		expiresAt *time.Time
		removedAt *time.Time
		rmByType  *string
		rmByID    *string
		rmReason  *string
	)
	if err := row.Scan(
		&recordID, &userID, &code, &value, &reason,
		&actorType, &actorID, &appliedAt,
		&expiresAt, &removedAt,
		&rmByType, &rmByID, &rmReason,
	); err != nil {
		return policy.LimitRecord{}, err
	}
	record := policy.LimitRecord{
		RecordID:   policy.LimitRecordID(recordID),
		UserID:     common.UserID(userID),
		LimitCode:  policy.LimitCode(code),
		Value:      value,
		ReasonCode: common.ReasonCode(reason),
		Actor:      common.ActorRef{Type: common.ActorType(actorType)},
		AppliedAt:  appliedAt.UTC(),
		ExpiresAt:  timeFromNullable(expiresAt),
		RemovedAt:  timeFromNullable(removedAt),
	}
	if actorID != nil {
		record.Actor.ID = common.ActorID(*actorID)
	}
	if rmByType != nil {
		record.RemovedBy.Type = common.ActorType(*rmByType)
	}
	if rmByID != nil {
		record.RemovedBy.ID = common.ActorID(*rmByID)
	}
	if rmReason != nil {
		record.RemovedReasonCode = common.ReasonCode(*rmReason)
	}
	return record, nil
}

// ApplySanction inserts the new sanction history row and points
// sanction_active at it. Re-applying the same code while another active
// record exists returns ports.ErrConflict.
func (store *Store) ApplySanction(ctx context.Context, input ports.ApplySanctionInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("apply sanction in postgres: %w", err)
	}
	return store.withTx(ctx, "apply sanction in postgres", func(ctx context.Context, tx *sql.Tx) error {
		if err := insertSanctionRecord(ctx, tx, input.NewRecord); err != nil {
			return err
		}
		stmt := pgtable.SanctionActive.INSERT(
			pgtable.SanctionActive.UserID,
			pgtable.SanctionActive.SanctionCode,
			pgtable.SanctionActive.RecordID,
		).VALUES(
			input.NewRecord.UserID.String(),
			string(input.NewRecord.SanctionCode),
			input.NewRecord.RecordID.String(),
		)
		query, args := stmt.Sql()
		if _, err := tx.ExecContext(ctx, query, args...); err != nil {
			if isUniqueViolation(err) {
				return fmt.Errorf("apply sanction %q in postgres: %w", input.NewRecord.RecordID, ports.ErrConflict)
			}
			return fmt.Errorf("apply sanction %q in postgres: %w", input.NewRecord.RecordID, err)
		}
		return nil
	})
}

// RemoveSanction updates the existing sanction record with remove metadata
// and clears the sanction_active row that pointed at it.
func (store *Store) RemoveSanction(ctx context.Context, input ports.RemoveSanctionInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("remove sanction in postgres: %w", err)
	}
	return store.withTx(ctx, "remove sanction in postgres", func(ctx context.Context, tx *sql.Tx) error {
		// Lock the history row and verify it still matches the caller's
		// snapshot before mutating anything.
		if err := lockSanctionMatching(ctx, tx, input.ExpectedActiveRecord); err != nil {
			return fmt.Errorf("remove sanction %q in postgres: %w", input.ExpectedActiveRecord.RecordID, err)
		}
		if err := updateSanctionRecordTx(ctx, tx, input.UpdatedRecord); err != nil {
			return err
		}
		stmt := pgtable.SanctionActive.DELETE().
			WHERE(pg.AND(
				pgtable.SanctionActive.UserID.EQ(pg.String(input.ExpectedActiveRecord.UserID.String())),
				pgtable.SanctionActive.SanctionCode.EQ(pg.String(string(input.ExpectedActiveRecord.SanctionCode))),
				pgtable.SanctionActive.RecordID.EQ(pg.String(input.ExpectedActiveRecord.RecordID.String())),
			))
		query, args := stmt.Sql()
		res, err := tx.ExecContext(ctx, query, args...)
		if err != nil {
			return fmt.Errorf("remove sanction %q in postgres: %w", input.ExpectedActiveRecord.RecordID, err)
		}
		rows, err := res.RowsAffected()
		if err != nil {
			return fmt.Errorf("remove sanction %q in postgres: %w", input.ExpectedActiveRecord.RecordID, err)
		}
		// Zero rows means the active pointer no longer references the
		// expected record — a concurrent mutation — so surface a conflict.
		if rows == 0 {
			return fmt.Errorf("remove sanction %q in postgres: %w", input.ExpectedActiveRecord.RecordID, ports.ErrConflict)
		}
		return nil
	})
}

// SetLimit creates a new active limit (or replaces one) for the user. When
// ExpectedActiveRecord is nil the call must succeed only if no active row
// exists for (user_id, limit_code); otherwise the existing record is
// updated with remove metadata and superseded by NewRecord.
func (store *Store) SetLimit(ctx context.Context, input ports.SetLimitInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("set limit in postgres: %w", err)
	}
	return store.withTx(ctx, "set limit in postgres", func(ctx context.Context, tx *sql.Tx) error {
		if input.ExpectedActiveRecord != nil {
			// Replacement path: lock and verify the superseded record, then
			// stamp it with remove metadata.
			if err := lockLimitMatching(ctx, tx, *input.ExpectedActiveRecord); err != nil {
				return fmt.Errorf("set limit %q in postgres: %w", input.NewRecord.RecordID, err)
			}
			if err := updateLimitRecordTx(ctx, tx, *input.UpdatedActiveRecord); err != nil {
				return err
			}
		} else {
			// First-time path: the caller asserted no active limit exists, so
			// any probed row is a conflict.
			// NOTE(review): FOR UPDATE on an absent row takes no lock, so two
			// concurrent first-time SetLimit calls both pass this probe; the
			// ON_CONFLICT target below then silently replaces rather than
			// erroring — confirm this race is acceptable or covered elsewhere.
			probe := pg.SELECT(pgtable.LimitActive.RecordID).
				FROM(pgtable.LimitActive).
				WHERE(pg.AND(
					pgtable.LimitActive.UserID.EQ(pg.String(input.NewRecord.UserID.String())),
					pgtable.LimitActive.LimitCode.EQ(pg.String(string(input.NewRecord.LimitCode))),
				)).
				FOR(pg.UPDATE())
			probeQuery, probeArgs := probe.Sql()
			row := tx.QueryRowContext(ctx, probeQuery, probeArgs...)
			var marker string
			if err := row.Scan(&marker); err == nil {
				return fmt.Errorf("set limit %q in postgres: %w", input.NewRecord.RecordID, ports.ErrConflict)
			} else if !errors.Is(err, sql.ErrNoRows) {
				return fmt.Errorf("set limit %q in postgres: %w", input.NewRecord.RecordID, err)
			}
		}

		if err := insertLimitRecord(ctx, tx, input.NewRecord); err != nil {
			return err
		}

		// Point (or re-point) the active row at the freshly inserted record.
		upsert := pgtable.LimitActive.INSERT(
			pgtable.LimitActive.UserID,
			pgtable.LimitActive.LimitCode,
			pgtable.LimitActive.RecordID,
			pgtable.LimitActive.Value,
		).VALUES(
			input.NewRecord.UserID.String(),
			string(input.NewRecord.LimitCode),
			input.NewRecord.RecordID.String(),
			input.NewRecord.Value,
		).ON_CONFLICT(pgtable.LimitActive.UserID, pgtable.LimitActive.LimitCode).DO_UPDATE(
			pg.SET(
				pgtable.LimitActive.RecordID.SET(pgtable.LimitActive.EXCLUDED.RecordID),
				pgtable.LimitActive.Value.SET(pgtable.LimitActive.EXCLUDED.Value),
			),
		)
		upsertQuery, upsertArgs := upsert.Sql()
		if _, err := tx.ExecContext(ctx, upsertQuery, upsertArgs...); err != nil {
			return fmt.Errorf("set limit %q in postgres: %w", input.NewRecord.RecordID, err)
		}
		return nil
	})
}

// RemoveLimit updates the limit record with remove metadata and removes the
// active row that referenced it.
func (store *Store) RemoveLimit(ctx context.Context, input ports.RemoveLimitInput) error {
	if err := input.Validate(); err != nil {
		return fmt.Errorf("remove limit in postgres: %w", err)
	}
	return store.withTx(ctx, "remove limit in postgres", func(ctx context.Context, tx *sql.Tx) error {
		if err := lockLimitMatching(ctx, tx, input.ExpectedActiveRecord); err != nil {
			return fmt.Errorf("remove limit %q in postgres: %w", input.ExpectedActiveRecord.RecordID, err)
		}
		if err := updateLimitRecordTx(ctx, tx, input.UpdatedRecord); err != nil {
			return err
		}
		stmt := pgtable.LimitActive.DELETE().
			WHERE(pg.AND(
				pgtable.LimitActive.UserID.EQ(pg.String(input.ExpectedActiveRecord.UserID.String())),
				pgtable.LimitActive.LimitCode.EQ(pg.String(string(input.ExpectedActiveRecord.LimitCode))),
				pgtable.LimitActive.RecordID.EQ(pg.String(input.ExpectedActiveRecord.RecordID.String())),
			))
		query, args := stmt.Sql()
		res, err := tx.ExecContext(ctx, query, args...)
		if err != nil {
			return fmt.Errorf("remove limit %q in postgres: %w", input.ExpectedActiveRecord.RecordID, err)
		}
		rows, err := res.RowsAffected()
		if err != nil {
			return fmt.Errorf("remove limit %q in postgres: %w", input.ExpectedActiveRecord.RecordID, err)
		}
		if rows == 0 {
			return fmt.Errorf("remove limit %q in postgres: %w", input.ExpectedActiveRecord.RecordID, ports.ErrConflict)
		}
		return nil
	})
}

// lockSanctionMatching takes a row lock on the sanction history record and
// verifies it still equals the caller's expected snapshot. It returns
// ports.ErrNotFound when the row is gone and ports.ErrConflict when it has
// diverged.
func lockSanctionMatching(ctx context.Context, tx *sql.Tx, expected policy.SanctionRecord) error {
	stmt := pg.SELECT(sanctionSelectColumns).
		FROM(pgtable.SanctionRecords).
		WHERE(pgtable.SanctionRecords.RecordID.EQ(pg.String(expected.RecordID.String()))).
		FOR(pg.UPDATE())

	query, args := stmt.Sql()
	row := tx.QueryRowContext(ctx, query, args...)
	current, err := scanSanctionRow(row)
	switch {
	case errors.Is(err, ports.ErrNotFound):
		return ports.ErrNotFound
	case err != nil:
		return err
	}
	if !sanctionsEqual(current, expected) {
		return ports.ErrConflict
	}
	return nil
}

// lockLimitMatching is the limit-record counterpart of lockSanctionMatching.
func lockLimitMatching(ctx context.Context, tx *sql.Tx, expected policy.LimitRecord) error {
	stmt := pg.SELECT(limitSelectColumns).
		FROM(pgtable.LimitRecords).
		WHERE(pgtable.LimitRecords.RecordID.EQ(pg.String(expected.RecordID.String()))).
		FOR(pg.UPDATE())

	query, args := stmt.Sql()
	row := tx.QueryRowContext(ctx, query, args...)
	current, err := scanLimitRow(row)
	switch {
	case errors.Is(err, ports.ErrNotFound):
		return ports.ErrNotFound
	case err != nil:
		return err
	}
	if !limitsEqual(current, expected) {
		return ports.ErrConflict
	}
	return nil
}

// sanctionsEqual reports field-wise equality; timestamps are compared with
// time.Time.Equal so location differences do not matter.
func sanctionsEqual(left policy.SanctionRecord, right policy.SanctionRecord) bool {
	if left.RecordID != right.RecordID ||
		left.UserID != right.UserID ||
		left.SanctionCode != right.SanctionCode ||
		left.Scope != right.Scope ||
		left.ReasonCode != right.ReasonCode ||
		left.Actor != right.Actor ||
		left.RemovedBy != right.RemovedBy ||
		left.RemovedReasonCode != right.RemovedReasonCode {
		return false
	}
	if !left.AppliedAt.Equal(right.AppliedAt) {
		return false
	}
	if !optionalTimeEqual(left.ExpiresAt, right.ExpiresAt) {
		return false
	}
	return optionalTimeEqual(left.RemovedAt, right.RemovedAt)
}

// limitsEqual is the limit-record counterpart of sanctionsEqual.
func limitsEqual(left policy.LimitRecord, right policy.LimitRecord) bool {
	if left.RecordID != right.RecordID ||
		left.UserID != right.UserID ||
		left.LimitCode != right.LimitCode ||
		left.Value != right.Value ||
		left.ReasonCode != right.ReasonCode ||
		left.Actor != right.Actor ||
		left.RemovedBy != right.RemovedBy ||
		left.RemovedReasonCode != right.RemovedReasonCode {
		return false
	}
	if !left.AppliedAt.Equal(right.AppliedAt) {
		return false
	}
	if !optionalTimeEqual(left.ExpiresAt, right.ExpiresAt) {
		return false
	}
	return optionalTimeEqual(left.RemovedAt, right.RemovedAt)
}

// SanctionStore adapts Store to the SanctionStore port.
type SanctionStore struct{ store *Store }

// Sanctions returns one adapter that exposes the sanction store port.
func (store *Store) Sanctions() *SanctionStore {
	if store == nil {
		return nil
	}
	return &SanctionStore{store: store}
}

// Create stores one new sanction history record.
func (a *SanctionStore) Create(ctx context.Context, record policy.SanctionRecord) error {
	return a.store.CreateSanction(ctx, record)
}

// GetByRecordID returns the sanction record identified by recordID.
func (a *SanctionStore) GetByRecordID(ctx context.Context, recordID policy.SanctionRecordID) (policy.SanctionRecord, error) {
	return a.store.GetSanctionByRecordID(ctx, recordID)
}

// ListByUserID returns every sanction record owned by userID.
func (a *SanctionStore) ListByUserID(ctx context.Context, userID common.UserID) ([]policy.SanctionRecord, error) {
	return a.store.ListSanctionsByUserID(ctx, userID)
}

// Update replaces one stored sanction record.
func (a *SanctionStore) Update(ctx context.Context, record policy.SanctionRecord) error {
	return a.store.UpdateSanction(ctx, record)
}

var _ ports.SanctionStore = (*SanctionStore)(nil)

// LimitStore adapts Store to the LimitStore port.
type LimitStore struct{ store *Store }

// Limits returns one adapter that exposes the limit store port.
func (store *Store) Limits() *LimitStore {
	if store == nil {
		return nil
	}
	return &LimitStore{store: store}
}

// Create stores one new limit history record.
func (a *LimitStore) Create(ctx context.Context, record policy.LimitRecord) error {
	return a.store.CreateLimit(ctx, record)
}

// GetByRecordID returns the limit record identified by recordID.
func (a *LimitStore) GetByRecordID(ctx context.Context, recordID policy.LimitRecordID) (policy.LimitRecord, error) {
	return a.store.GetLimitByRecordID(ctx, recordID)
}

// ListByUserID returns every limit record owned by userID.
func (a *LimitStore) ListByUserID(ctx context.Context, userID common.UserID) ([]policy.LimitRecord, error) {
	return a.store.ListLimitsByUserID(ctx, userID)
}

// Update replaces one stored limit record.
func (a *LimitStore) Update(ctx context.Context, record policy.LimitRecord) error {
	return a.store.UpdateLimit(ctx, record)
}

// Compile-time conformance check against the port.
var _ ports.LimitStore = (*LimitStore)(nil)

// PolicyLifecycleStore adapts Store to the PolicyLifecycleStore port.
type PolicyLifecycleStore struct{ store *Store }

// PolicyLifecycle returns one adapter that exposes the policy-lifecycle
// store port.
func (store *Store) PolicyLifecycle() *PolicyLifecycleStore {
	if store == nil {
		return nil
	}
	return &PolicyLifecycleStore{store: store}
}

// ApplySanction atomically creates one new active sanction record.
func (a *PolicyLifecycleStore) ApplySanction(ctx context.Context, input ports.ApplySanctionInput) error {
	return a.store.ApplySanction(ctx, input)
}

// RemoveSanction atomically removes one active sanction record.
func (a *PolicyLifecycleStore) RemoveSanction(ctx context.Context, input ports.RemoveSanctionInput) error {
	return a.store.RemoveSanction(ctx, input)
}

// SetLimit atomically creates or replaces one active limit record.
func (a *PolicyLifecycleStore) SetLimit(ctx context.Context, input ports.SetLimitInput) error {
	return a.store.SetLimit(ctx, input)
}

// RemoveLimit atomically removes one active limit record.
func (a *PolicyLifecycleStore) RemoveLimit(ctx context.Context, input ports.RemoveLimitInput) error {
	return a.store.RemoveLimit(ctx, input)
}

// Compile-time conformance check against the port.
var _ ports.PolicyLifecycleStore = (*PolicyLifecycleStore)(nil)
diff --git a/user/internal/adapters/postgres/userstore/store.go b/user/internal/adapters/postgres/userstore/store.go
new file mode 100644
index 0000000..7092fc3
--- /dev/null
+++ b/user/internal/adapters/postgres/userstore/store.go
@@ -0,0 +1,138 @@
// Package userstore implements the PostgreSQL-backed source-of-truth
// persistence used by User Service.
//
// The package owns the on-disk shape of the `user` schema (defined in
// `galaxy/user/internal/adapters/postgres/migrations`) and translates the
// schema-agnostic ports defined under `galaxy/user/internal/ports` into
// concrete `database/sql` operations driven by the pgx driver. Atomic
// composite operations (auth-directory, entitlement-lifecycle, policy-
// lifecycle) execute inside explicit `BEGIN … COMMIT` transactions with
// `SELECT … FOR UPDATE` locks on the rows they mutate.
//
// Stage 3 of `PG_PLAN.md` migrates User Service away from Redis-backed
// durable state. Two Redis Streams (`user:domain_events`,
// `user:lifecycle_events`) remain on Redis for event publication; the
// store is no longer aware of them.
package userstore

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"time"

	"galaxy/user/internal/ports"
)

// Config configures one PostgreSQL-backed user store instance. The store does
// not own the underlying *sql.DB lifecycle: the caller (typically the
// service runtime) opens, instruments, migrates, and closes the pool. The
// store only borrows the pool and bounds individual round trips with
// OperationTimeout.
type Config struct {
	// DB stores the connection pool the store uses for every query.
	DB *sql.DB

	// OperationTimeout bounds one round trip. The store creates a derived
	// context for each operation so callers cannot starve the pool with an
	// unbounded ctx. Multi-statement transactions inherit this bound for the
	// whole BEGIN … COMMIT span.
	OperationTimeout time.Duration
}

// Store persists auth-facing user state in PostgreSQL and exposes the narrow
// atomic auth-facing mutation boundary plus selected entity-store interfaces
// through the same accessor methods (`Accounts`, `BlockedEmails`,
// `EntitlementSnapshots`, `EntitlementHistory`, `EntitlementLifecycle`,
// `Sanctions`, `Limits`, `PolicyLifecycle`) that the previous Redis-backed
// store provided. This keeps the runtime wiring identical between the two
// implementations.
type Store struct {
	db               *sql.DB
	operationTimeout time.Duration
}

// New constructs one PostgreSQL-backed user store from cfg.
func New(cfg Config) (*Store, error) {
	if cfg.DB == nil {
		return nil, errors.New("new postgres user store: db must not be nil")
	}
	if cfg.OperationTimeout <= 0 {
		return nil, errors.New("new postgres user store: operation timeout must be positive")
	}
	return &Store{
		db:               cfg.DB,
		operationTimeout: cfg.OperationTimeout,
	}, nil
}

// Close is a no-op for the PostgreSQL-backed store: the connection pool is
// owned by the caller (the runtime) and closed once the runtime shuts down.
// The method remains so the Redis-store contract can be preserved
// transparently in the runtime wiring.
func (store *Store) Close() error {
	return nil
}

// Ping verifies that the configured PostgreSQL backend is reachable. It runs
// `db.PingContext` under the configured operation timeout.
+func (store *Store) Ping(ctx context.Context) error { + operationCtx, cancel, err := withTimeout(ctx, "ping postgres user store", store.operationTimeout) + if err != nil { + return err + } + defer cancel() + + if err := store.db.PingContext(operationCtx); err != nil { + return fmt.Errorf("ping postgres user store: %w", err) + } + return nil +} + +// withTx runs fn inside a BEGIN … COMMIT transaction bounded by the store's +// operation timeout. It rolls back on any error or panic and returns whatever +// fn returned. The transaction uses the default isolation level +// (`READ COMMITTED`); per-row locking is achieved through `SELECT … FOR +// UPDATE` issued inside fn. +func (store *Store) withTx(ctx context.Context, operation string, fn func(ctx context.Context, tx *sql.Tx) error) error { + operationCtx, cancel, err := withTimeout(ctx, operation, store.operationTimeout) + if err != nil { + return err + } + defer cancel() + + tx, err := store.db.BeginTx(operationCtx, nil) + if err != nil { + return fmt.Errorf("%s: begin: %w", operation, err) + } + + if err := fn(operationCtx, tx); err != nil { + _ = tx.Rollback() + return err + } + + if err := tx.Commit(); err != nil { + return fmt.Errorf("%s: commit: %w", operation, err) + } + return nil +} + +// operationContext bounds one read or write that does not need a transaction +// envelope (single statement). It mirrors store.withTx for non-transactional +// callers. +func (store *Store) operationContext(ctx context.Context, operation string) (context.Context, context.CancelFunc, error) { + return withTimeout(ctx, operation, store.operationTimeout) +} + +// Store directly satisfies the user-account port (its primary entity) and the +// composite auth-directory port. 
The remaining ports +// (BlockedEmailStore, entitlement-*, sanction-*, limit-*, user-list) are +// implemented by adapter types declared in their respective files; those +// adapters are obtained through Accounts(), BlockedEmails(), +// EntitlementSnapshots(), EntitlementHistory(), EntitlementLifecycle(), +// Sanctions(), Limits(), PolicyLifecycle(), and UserList() accessors. +var ( + _ ports.AuthDirectoryStore = (*Store)(nil) + _ ports.UserAccountStore = (*Store)(nil) +) diff --git a/user/internal/adapters/postgres/userstore/store_test.go b/user/internal/adapters/postgres/userstore/store_test.go new file mode 100644 index 0000000..f827e40 --- /dev/null +++ b/user/internal/adapters/postgres/userstore/store_test.go @@ -0,0 +1,656 @@ +package userstore + +import ( + "context" + "errors" + "testing" + "time" + + "galaxy/user/internal/domain/account" + "galaxy/user/internal/domain/authblock" + "galaxy/user/internal/domain/common" + "galaxy/user/internal/domain/entitlement" + "galaxy/user/internal/domain/policy" + "galaxy/user/internal/ports" + + "github.com/stretchr/testify/require" +) + +// All time values are aligned to microseconds because PostgreSQL's +// timestamptz only stores microsecond precision; using nanoseconds here +// would cause round-trip mismatches. 
+var fixtureCreatedAt = time.Unix(1_775_240_000, 0).UTC() + +func validAccount() account.UserAccount { + return account.UserAccount{ + UserID: common.UserID("user-pilot-001"), + Email: common.Email("pilot@example.com"), + UserName: common.UserName("player-aaaaaaaa"), + DisplayName: common.DisplayName("NovaPrime"), + PreferredLanguage: common.LanguageTag("en"), + TimeZone: common.TimeZoneName("Europe/Kaliningrad"), + CreatedAt: fixtureCreatedAt, + UpdatedAt: fixtureCreatedAt, + } +} + +func validFreeSnapshot(userID common.UserID, at time.Time) entitlement.CurrentSnapshot { + return entitlement.CurrentSnapshot{ + UserID: userID, + PlanCode: entitlement.PlanCodeFree, + IsPaid: false, + StartsAt: at.UTC(), + Source: common.Source("auth_signup"), + Actor: common.ActorRef{Type: common.ActorType("auth")}, + ReasonCode: common.ReasonCode("initial_free_entitlement"), + UpdatedAt: at.UTC(), + } +} + +func validFreePeriod(userID common.UserID, recordID entitlement.EntitlementRecordID, at time.Time) entitlement.PeriodRecord { + return entitlement.PeriodRecord{ + RecordID: recordID, + UserID: userID, + PlanCode: entitlement.PlanCodeFree, + Source: common.Source("auth_signup"), + Actor: common.ActorRef{Type: common.ActorType("auth")}, + ReasonCode: common.ReasonCode("initial_free_entitlement"), + StartsAt: at.UTC(), + CreatedAt: at.UTC(), + } +} + +func paidPeriod(userID common.UserID, recordID entitlement.EntitlementRecordID, startsAt, endsAt time.Time) entitlement.PeriodRecord { + end := endsAt.UTC() + return entitlement.PeriodRecord{ + RecordID: recordID, + UserID: userID, + PlanCode: entitlement.PlanCodePaidMonthly, + Source: common.Source("admin"), + Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}, + ReasonCode: common.ReasonCode("manual_grant"), + StartsAt: startsAt.UTC(), + EndsAt: &end, + CreatedAt: startsAt.UTC(), + } +} + +func paidSnapshot(userID common.UserID, startsAt, endsAt, updatedAt time.Time) entitlement.CurrentSnapshot { 
+ end := endsAt.UTC() + return entitlement.CurrentSnapshot{ + UserID: userID, + PlanCode: entitlement.PlanCodePaidMonthly, + IsPaid: true, + StartsAt: startsAt.UTC(), + EndsAt: &end, + Source: common.Source("admin"), + Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}, + ReasonCode: common.ReasonCode("manual_grant"), + UpdatedAt: updatedAt.UTC(), + } +} + +func validSanction(userID common.UserID, code policy.SanctionCode, appliedAt time.Time) policy.SanctionRecord { + return policy.SanctionRecord{ + RecordID: policy.SanctionRecordID("sanction-" + string(code) + "-1"), + UserID: userID, + SanctionCode: code, + Scope: common.Scope("platform"), + ReasonCode: common.ReasonCode("manual_block"), + Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}, + AppliedAt: appliedAt.UTC(), + } +} + +func validLimit(userID common.UserID, code policy.LimitCode, value int, appliedAt time.Time) policy.LimitRecord { + return policy.LimitRecord{ + RecordID: policy.LimitRecordID("limit-" + string(code) + "-1"), + UserID: userID, + LimitCode: code, + Value: value, + ReasonCode: common.ReasonCode("manual_override"), + Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}, + AppliedAt: appliedAt.UTC(), + } +} + +func TestAccountCreateAndLookups(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + record := validAccount() + require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record})) + + got, err := store.GetByUserID(ctx, record.UserID) + require.NoError(t, err) + require.Equal(t, record, got) + + got, err = store.GetByEmail(ctx, record.Email) + require.NoError(t, err) + require.Equal(t, record, got) + + got, err = store.GetByUserName(ctx, record.UserName) + require.NoError(t, err) + require.Equal(t, record, got) + + exists, err := store.ExistsByUserID(ctx, record.UserID) + require.NoError(t, err) + require.True(t, exists) +} + +func 
TestAccountCreateConflictsAreClassified(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + record := validAccount() + require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record})) + + // Same UserID -> generic conflict. + require.True(t, errors.Is(store.Create(ctx, ports.CreateAccountInput{Account: record}), ports.ErrConflict)) + + // Same UserName, different UserID/email -> ErrUserNameConflict (which + // also satisfies errors.Is(ErrConflict)). + clone := validAccount() + clone.UserID = common.UserID("user-pilot-002") + clone.Email = common.Email("pilot2@example.com") + err := store.Create(ctx, ports.CreateAccountInput{Account: clone}) + require.True(t, errors.Is(err, ports.ErrUserNameConflict)) + require.True(t, errors.Is(err, ports.ErrConflict)) + + // Same email, different UserID/user_name -> generic conflict. + clone = validAccount() + clone.UserID = common.UserID("user-pilot-003") + clone.UserName = common.UserName("player-bbbbbbbb") + err = store.Create(ctx, ports.CreateAccountInput{Account: clone}) + require.True(t, errors.Is(err, ports.ErrConflict)) + require.False(t, errors.Is(err, ports.ErrUserNameConflict)) +} + +func TestAccountUpdateRespectsImmutableFieldsAndSoftDelete(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + record := validAccount() + require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record})) + + updated := record + updated.DisplayName = common.DisplayName("HelloWorld") + updated.DeclaredCountry = common.CountryCode("DE") + updated.UpdatedAt = record.UpdatedAt.Add(time.Minute) + require.NoError(t, store.Update(ctx, updated)) + + got, err := store.GetByUserID(ctx, record.UserID) + require.NoError(t, err) + require.Equal(t, updated, got) + + // Mutating user_name must surface as ErrConflict. 
+ mutating := updated + mutating.UserName = common.UserName("player-xxxxxxxx") + require.True(t, errors.Is(store.Update(ctx, mutating), ports.ErrConflict)) + + // Soft-delete via Update sets DeletedAt; ExistsByUserID flips to false. + deletedAt := updated.UpdatedAt.Add(time.Minute) + soft := updated + soft.DeletedAt = &deletedAt + soft.UpdatedAt = deletedAt + require.NoError(t, store.Update(ctx, soft)) + + exists, err := store.ExistsByUserID(ctx, record.UserID) + require.NoError(t, err) + require.False(t, exists) +} + +func TestBlockedEmailUpsertAndGet(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + record := authblock.BlockedEmailSubject{ + Email: common.Email("blocked@example.com"), + ReasonCode: common.ReasonCode("policy_blocked"), + BlockedAt: fixtureCreatedAt, + } + require.NoError(t, store.PutBlockedEmail(ctx, record)) + + got, err := store.GetBlockedEmail(ctx, record.Email) + require.NoError(t, err) + require.Equal(t, record, got) + + // Upsert replaces existing. 
+ updated := record + updated.ReasonCode = common.ReasonCode("admin_blocked") + updated.BlockedAt = record.BlockedAt.Add(time.Hour) + updated.Actor = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")} + require.NoError(t, store.PutBlockedEmail(ctx, updated)) + + got, err = store.GetBlockedEmail(ctx, record.Email) + require.NoError(t, err) + require.Equal(t, updated, got) +} + +func TestResolveByEmailReturnsCreatableExistingBlockedAndDeleted(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + creatable, err := store.ResolveByEmail(ctx, common.Email("nobody@example.com")) + require.NoError(t, err) + require.Equal(t, ports.AuthResolutionKindCreatable, creatable.Kind) + + require.NoError(t, store.PutBlockedEmail(ctx, authblock.BlockedEmailSubject{ + Email: common.Email("blocked@example.com"), + ReasonCode: common.ReasonCode("policy_blocked"), + BlockedAt: fixtureCreatedAt, + })) + blocked, err := store.ResolveByEmail(ctx, common.Email("blocked@example.com")) + require.NoError(t, err) + require.Equal(t, ports.AuthResolutionKindBlocked, blocked.Kind) + require.Equal(t, common.ReasonCode("policy_blocked"), blocked.BlockReasonCode) + + record := validAccount() + require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record})) + existing, err := store.ResolveByEmail(ctx, record.Email) + require.NoError(t, err) + require.Equal(t, ports.AuthResolutionKindExisting, existing.Kind) + require.Equal(t, record.UserID, existing.UserID) + + // Soft-delete the account; the email lookup must now resolve to blocked. 
+ deletedAt := record.UpdatedAt.Add(time.Minute) + soft := record + soft.DeletedAt = &deletedAt + soft.UpdatedAt = deletedAt + require.NoError(t, store.Update(ctx, soft)) + + deletedResult, err := store.ResolveByEmail(ctx, record.Email) + require.NoError(t, err) + require.Equal(t, ports.AuthResolutionKindBlocked, deletedResult.Kind) + require.Equal(t, deletedAccountBlockReasonCode, deletedResult.BlockReasonCode) +} + +func TestEnsureByEmailCoversAllOutcomes(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + record := validAccount() + snapshot := validFreeSnapshot(record.UserID, record.CreatedAt) + period := validFreePeriod(record.UserID, entitlement.EntitlementRecordID("entitlement-initial"), record.CreatedAt) + + created, err := store.EnsureByEmail(ctx, ports.EnsureByEmailInput{ + Email: record.Email, + Account: record, + Entitlement: snapshot, + EntitlementRecord: period, + }) + require.NoError(t, err) + require.Equal(t, ports.EnsureByEmailOutcomeCreated, created.Outcome) + require.Equal(t, record.UserID, created.UserID) + + // Second call with the same email returns existing. The Account input + // describes the would-be-created record if no account existed yet; its + // email must match the request email per ports.EnsureByEmailInput.Validate. + existingCandidate := validSecondAccount() + existingCandidate.Email = record.Email + existing, err := store.EnsureByEmail(ctx, ports.EnsureByEmailInput{ + Email: record.Email, + Account: existingCandidate, + Entitlement: validFreeSnapshot(existingCandidate.UserID, record.CreatedAt), + EntitlementRecord: validFreePeriod(existingCandidate.UserID, entitlement.EntitlementRecordID("entitlement-second"), record.CreatedAt), + }) + require.NoError(t, err) + require.Equal(t, ports.EnsureByEmailOutcomeExisting, existing.Outcome) + require.Equal(t, record.UserID, existing.UserID) + + // Blocked email path. 
+ require.NoError(t, store.PutBlockedEmail(ctx, authblock.BlockedEmailSubject{ + Email: common.Email("blocked@example.com"), + ReasonCode: common.ReasonCode("policy_blocked"), + BlockedAt: fixtureCreatedAt, + })) + blockedAccount := validSecondAccount() + blockedAccount.Email = common.Email("blocked@example.com") + blockedSnapshot := validFreeSnapshot(blockedAccount.UserID, record.CreatedAt) + blockedPeriod := validFreePeriod(blockedAccount.UserID, entitlement.EntitlementRecordID("entitlement-blocked"), record.CreatedAt) + blocked, err := store.EnsureByEmail(ctx, ports.EnsureByEmailInput{ + Email: blockedAccount.Email, + Account: blockedAccount, + Entitlement: blockedSnapshot, + EntitlementRecord: blockedPeriod, + }) + require.NoError(t, err) + require.Equal(t, ports.EnsureByEmailOutcomeBlocked, blocked.Outcome) + require.Equal(t, common.ReasonCode("policy_blocked"), blocked.BlockReasonCode) + + // Soft-deleted account → blocked(account_deleted). + deletedAt := record.UpdatedAt.Add(time.Hour) + soft := record + soft.DeletedAt = &deletedAt + soft.UpdatedAt = deletedAt + require.NoError(t, store.Update(ctx, soft)) + + deletedCandidate := validSecondAccount() + deletedCandidate.Email = record.Email + deletedCandidate.UserID = common.UserID("user-third") + deletedCandidate.UserName = common.UserName("player-cccccccc") + deletedResult, err := store.EnsureByEmail(ctx, ports.EnsureByEmailInput{ + Email: record.Email, + Account: deletedCandidate, + Entitlement: validFreeSnapshot(deletedCandidate.UserID, record.CreatedAt), + EntitlementRecord: validFreePeriod(deletedCandidate.UserID, entitlement.EntitlementRecordID("entitlement-second-2"), record.CreatedAt), + }) + require.NoError(t, err) + require.Equal(t, ports.EnsureByEmailOutcomeBlocked, deletedResult.Outcome) + require.Equal(t, deletedAccountBlockReasonCode, deletedResult.BlockReasonCode) +} + +func validSecondAccount() account.UserAccount { + return account.UserAccount{ + UserID: common.UserID("user-second"), + Email: 
common.Email("second@example.com"), + UserName: common.UserName("player-bbbbbbbb"), + PreferredLanguage: common.LanguageTag("en"), + TimeZone: common.TimeZoneName("UTC"), + CreatedAt: fixtureCreatedAt, + UpdatedAt: fixtureCreatedAt, + } +} + +func TestBlockByUserIDAndBlockByEmail(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + record := validAccount() + require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record})) + + res, err := store.BlockByUserID(ctx, ports.BlockByUserIDInput{ + UserID: record.UserID, + ReasonCode: common.ReasonCode("manual_block"), + BlockedAt: fixtureCreatedAt.Add(time.Hour), + }) + require.NoError(t, err) + require.Equal(t, ports.AuthBlockOutcomeBlocked, res.Outcome) + require.Equal(t, record.UserID, res.UserID) + + // Replay returns AlreadyBlocked. + res, err = store.BlockByUserID(ctx, ports.BlockByUserIDInput{ + UserID: record.UserID, + ReasonCode: common.ReasonCode("manual_block"), + BlockedAt: fixtureCreatedAt.Add(2 * time.Hour), + }) + require.NoError(t, err) + require.Equal(t, ports.AuthBlockOutcomeAlreadyBlocked, res.Outcome) + require.Equal(t, record.UserID, res.UserID) + + // Block by email for a non-existing address records the block with + // nil resolved_user_id. 
+ res, err = store.BlockByEmail(ctx, ports.BlockByEmailInput{ + Email: common.Email("ghost@example.com"), + ReasonCode: common.ReasonCode("policy_blocked"), + BlockedAt: fixtureCreatedAt.Add(time.Hour), + }) + require.NoError(t, err) + require.Equal(t, ports.AuthBlockOutcomeBlocked, res.Outcome) + require.True(t, res.UserID.IsZero()) +} + +func TestEntitlementSnapshotPutAndGet(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + record := validAccount() + require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record})) + + snapshot := validFreeSnapshot(record.UserID, record.CreatedAt) + require.NoError(t, store.PutEntitlement(ctx, snapshot)) + + got, err := store.GetEntitlementByUserID(ctx, record.UserID) + require.NoError(t, err) + require.Equal(t, snapshot, got) + + // Upsert replaces. + paid := paidSnapshot(record.UserID, record.CreatedAt, record.CreatedAt.Add(30*24*time.Hour), record.CreatedAt.Add(time.Minute)) + require.NoError(t, store.PutEntitlement(ctx, paid)) + got, err = store.GetEntitlementByUserID(ctx, record.UserID) + require.NoError(t, err) + require.Equal(t, paid, got) +} + +func TestEntitlementHistoryCRUDAndList(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + record := validAccount() + require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record})) + + first := validFreePeriod(record.UserID, entitlement.EntitlementRecordID("entitlement-1"), record.CreatedAt) + second := paidPeriod(record.UserID, entitlement.EntitlementRecordID("entitlement-2"), record.CreatedAt.Add(time.Hour), record.CreatedAt.Add(48*time.Hour)) + + require.NoError(t, store.CreateEntitlementRecord(ctx, first)) + require.NoError(t, store.CreateEntitlementRecord(ctx, second)) + + require.True(t, errors.Is(store.CreateEntitlementRecord(ctx, first), ports.ErrConflict)) + + got, err := store.GetEntitlementRecordByID(ctx, first.RecordID) + require.NoError(t, err) + require.Equal(t, first, got) + + list, 
err := store.ListEntitlementRecordsByUserID(ctx, record.UserID) + require.NoError(t, err) + require.Len(t, list, 2) + require.Equal(t, first.RecordID, list[0].RecordID) + require.Equal(t, second.RecordID, list[1].RecordID) + + closedAt := record.CreatedAt.Add(2 * time.Hour) + updated := first + updated.ClosedAt = &closedAt + updated.ClosedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")} + updated.ClosedReasonCode = common.ReasonCode("superseded") + require.NoError(t, store.UpdateEntitlementRecord(ctx, updated)) + + got, err = store.GetEntitlementRecordByID(ctx, updated.RecordID) + require.NoError(t, err) + require.Equal(t, updated, got) +} + +func TestEntitlementLifecycleGrantExtendRevokeRepair(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + record := validAccount() + require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record})) + + freeSnap := validFreeSnapshot(record.UserID, record.CreatedAt) + freeRecord := validFreePeriod(record.UserID, entitlement.EntitlementRecordID("entitlement-free-1"), record.CreatedAt) + require.NoError(t, store.PutEntitlement(ctx, freeSnap)) + require.NoError(t, store.CreateEntitlementRecord(ctx, freeRecord)) + + closedAt := record.CreatedAt.Add(time.Hour) + closedFree := freeRecord + closedFree.ClosedAt = &closedAt + closedFree.ClosedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")} + closedFree.ClosedReasonCode = common.ReasonCode("superseded") + + paidStart := closedAt + paidEnd := paidStart.Add(30 * 24 * time.Hour) + paid := paidPeriod(record.UserID, entitlement.EntitlementRecordID("entitlement-paid-1"), paidStart, paidEnd) + paidSnap := paidSnapshot(record.UserID, paidStart, paidEnd, paidStart) + + require.NoError(t, store.GrantEntitlement(ctx, ports.GrantEntitlementInput{ + ExpectedCurrentSnapshot: freeSnap, + ExpectedCurrentRecord: freeRecord, + UpdatedCurrentRecord: closedFree, + NewRecord: paid, + NewSnapshot: 
paidSnap, + })) + + got, err := store.GetEntitlementByUserID(ctx, record.UserID) + require.NoError(t, err) + require.Equal(t, paidSnap, got) + + // Extend with a new paid segment. + extendStart := paidEnd + extendEnd := extendStart.Add(30 * 24 * time.Hour) + extendRecord := paidPeriod(record.UserID, entitlement.EntitlementRecordID("entitlement-paid-2"), extendStart, extendEnd) + extendSnap := paidSnapshot(record.UserID, paidStart, extendEnd, extendStart) + require.NoError(t, store.ExtendEntitlement(ctx, ports.ExtendEntitlementInput{ + ExpectedCurrentSnapshot: paidSnap, + NewRecord: extendRecord, + NewSnapshot: extendSnap, + })) + + // Revoke -> back to free. + revokeAt := extendStart.Add(time.Hour) + revokedPaid := extendRecord + revokedPaid.ClosedAt = &revokeAt + revokedPaid.ClosedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")} + revokedPaid.ClosedReasonCode = common.ReasonCode("revoked") + freeAgain := validFreePeriod(record.UserID, entitlement.EntitlementRecordID("entitlement-free-2"), revokeAt) + freeAgainSnap := validFreeSnapshot(record.UserID, revokeAt) + require.NoError(t, store.RevokeEntitlement(ctx, ports.RevokeEntitlementInput{ + ExpectedCurrentSnapshot: extendSnap, + ExpectedCurrentRecord: extendRecord, + UpdatedCurrentRecord: revokedPaid, + NewRecord: freeAgain, + NewSnapshot: freeAgainSnap, + })) + + got, err = store.GetEntitlementByUserID(ctx, record.UserID) + require.NoError(t, err) + require.Equal(t, freeAgainSnap, got) +} + +func TestEntitlementLifecycleConflictsOnSnapshotMismatch(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + record := validAccount() + require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record})) + freeSnap := validFreeSnapshot(record.UserID, record.CreatedAt) + require.NoError(t, store.PutEntitlement(ctx, freeSnap)) + + stale := freeSnap + stale.UpdatedAt = freeSnap.UpdatedAt.Add(-time.Hour) + freeRecord := validFreePeriod(record.UserID, 
entitlement.EntitlementRecordID("entitlement-free-1"), record.CreatedAt) + require.NoError(t, store.CreateEntitlementRecord(ctx, freeRecord)) + + closedAt := record.CreatedAt.Add(time.Hour) + closedFree := freeRecord + closedFree.ClosedAt = &closedAt + closedFree.ClosedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")} + closedFree.ClosedReasonCode = common.ReasonCode("superseded") + paid := paidPeriod(record.UserID, entitlement.EntitlementRecordID("entitlement-paid-1"), closedAt, closedAt.Add(time.Hour)) + paidSnap := paidSnapshot(record.UserID, closedAt, closedAt.Add(time.Hour), closedAt) + + err := store.GrantEntitlement(ctx, ports.GrantEntitlementInput{ + ExpectedCurrentSnapshot: stale, + ExpectedCurrentRecord: freeRecord, + UpdatedCurrentRecord: closedFree, + NewRecord: paid, + NewSnapshot: paidSnap, + }) + require.True(t, errors.Is(err, ports.ErrConflict)) +} + +func TestPolicyApplyRemoveSanctionAndLimit(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + record := validAccount() + require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: record})) + + sanction := validSanction(record.UserID, policy.SanctionCodeLoginBlock, fixtureCreatedAt.Add(time.Minute)) + require.NoError(t, store.ApplySanction(ctx, ports.ApplySanctionInput{NewRecord: sanction})) + + got, err := store.GetSanctionByRecordID(ctx, sanction.RecordID) + require.NoError(t, err) + require.Equal(t, sanction, got) + + // Re-applying the same sanction code without removing first must return + // ErrConflict because (user_id, sanction_code) is unique on + // sanction_active. 
+ dup := sanction + dup.RecordID = policy.SanctionRecordID("sanction-login_block-2") + require.True(t, errors.Is(store.ApplySanction(ctx, ports.ApplySanctionInput{NewRecord: dup}), ports.ErrConflict)) + + removedAt := sanction.AppliedAt.Add(time.Hour) + updated := sanction + updated.RemovedAt = &removedAt + updated.RemovedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")} + updated.RemovedReasonCode = common.ReasonCode("manual_unblock") + require.NoError(t, store.RemoveSanction(ctx, ports.RemoveSanctionInput{ + ExpectedActiveRecord: sanction, + UpdatedRecord: updated, + })) + + got, err = store.GetSanctionByRecordID(ctx, sanction.RecordID) + require.NoError(t, err) + require.Equal(t, updated, got) + + // Now SetLimit on a fresh code; replay must conflict. + limit := validLimit(record.UserID, policy.LimitCodeMaxOwnedPrivateGames, 5, fixtureCreatedAt.Add(2*time.Minute)) + require.NoError(t, store.SetLimit(ctx, ports.SetLimitInput{NewRecord: limit})) + + dupLimit := limit + dupLimit.RecordID = policy.LimitRecordID("limit-max_owned_private_games-2") + require.True(t, errors.Is(store.SetLimit(ctx, ports.SetLimitInput{NewRecord: dupLimit}), ports.ErrConflict)) + + // SetLimit with ExpectedActiveRecord -> replaces in the active slot. 
+ expected := limit + expected.RemovedAt = nil + expected.RemovedBy = common.ActorRef{} + expected.RemovedReasonCode = "" + supersededTime := limit.AppliedAt.Add(time.Hour) + supersededLimit := limit + supersededLimit.RemovedAt = &supersededTime + supersededLimit.RemovedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")} + supersededLimit.RemovedReasonCode = common.ReasonCode("superseded") + + newLimit := validLimit(record.UserID, policy.LimitCodeMaxOwnedPrivateGames, 7, supersededTime) + newLimit.RecordID = policy.LimitRecordID("limit-max_owned_private_games-3") + require.NoError(t, store.SetLimit(ctx, ports.SetLimitInput{ + ExpectedActiveRecord: &expected, + UpdatedActiveRecord: &supersededLimit, + NewRecord: newLimit, + })) + + gotLimit, err := store.GetLimitByRecordID(ctx, newLimit.RecordID) + require.NoError(t, err) + require.Equal(t, newLimit, gotLimit) +} + +func TestUserListPaginatesNewestFirstAndDetectsFilterMismatch(t *testing.T) { + store := newTestStore(t) + ctx := context.Background() + + base := fixtureCreatedAt + for index, suffix := range []string{"a", "b", "c", "d", "e"} { + acc := validAccount() + acc.UserID = common.UserID("user-list-" + suffix) + acc.Email = common.Email("list-" + suffix + "@example.com") + acc.UserName = common.UserName("player-list" + suffix + "xx") + acc.CreatedAt = base.Add(time.Duration(index) * time.Minute) + acc.UpdatedAt = acc.CreatedAt + require.NoError(t, store.Create(ctx, ports.CreateAccountInput{Account: acc})) + } + + page1, err := store.ListUserIDs(ctx, ports.ListUsersInput{PageSize: 2}) + require.NoError(t, err) + require.Len(t, page1.UserIDs, 2) + require.Equal(t, common.UserID("user-list-e"), page1.UserIDs[0]) + require.Equal(t, common.UserID("user-list-d"), page1.UserIDs[1]) + require.NotEmpty(t, page1.NextPageToken) + + page2, err := store.ListUserIDs(ctx, ports.ListUsersInput{ + PageSize: 2, + PageToken: page1.NextPageToken, + }) + require.NoError(t, err) + require.Len(t, 
page2.UserIDs, 2) + require.Equal(t, common.UserID("user-list-c"), page2.UserIDs[0]) + require.Equal(t, common.UserID("user-list-b"), page2.UserIDs[1]) + + // Mismatched filters must reject the previously-issued token. + mismatched, err := store.ListUserIDs(ctx, ports.ListUsersInput{ + PageSize: 2, + PageToken: page1.NextPageToken, + Filters: ports.UserListFilters{PaidState: entitlement.PaidStatePaid}, + }) + require.True(t, errors.Is(err, ports.ErrInvalidPageToken), "got result %#v err %v", mismatched, err) +} diff --git a/user/internal/adapters/redis/domainevents/publisher.go b/user/internal/adapters/redis/domainevents/publisher.go index 8030b6d..b3c8aa8 100644 --- a/user/internal/adapters/redis/domainevents/publisher.go +++ b/user/internal/adapters/redis/domainevents/publisher.go @@ -4,7 +4,6 @@ package domainevents import ( "context" - "crypto/tls" "errors" "fmt" "strconv" @@ -17,23 +16,11 @@ import ( "go.opentelemetry.io/otel/trace" ) -// Config configures one Redis-backed user domain-event publisher. +// Config configures one Redis-backed user domain-event publisher. The +// connection is supplied externally by the runtime so multiple publishers +// can share one *redis.Client; this struct now carries only stream-shape +// parameters. type Config struct { - // Addr is the Redis network address in host:port form. - Addr string - - // Username is the optional Redis ACL username. - Username string - - // Password is the optional Redis ACL password. - Password string - - // DB is the Redis logical database index. - DB int - - // TLSEnabled enables TLS with a conservative minimum protocol version. - TLSEnabled bool - // Stream identifies the Redis Stream key used for domain events. Stream string @@ -53,13 +40,13 @@ type Publisher struct { operationTimeout time.Duration } -// New constructs a Redis-backed domain-event publisher from cfg. 
-func New(cfg Config) (*Publisher, error) { +// New constructs a Redis-backed domain-event publisher backed by the +// supplied client. The publisher does not own the client; the runtime is +// responsible for closing it. +func New(client *redis.Client, cfg Config) (*Publisher, error) { switch { - case strings.TrimSpace(cfg.Addr) == "": - return nil, errors.New("new redis domain-event publisher: redis addr must not be empty") - case cfg.DB < 0: - return nil, errors.New("new redis domain-event publisher: redis db must not be negative") + case client == nil: + return nil, errors.New("new redis domain-event publisher: redis client must not be nil") case strings.TrimSpace(cfg.Stream) == "": return nil, errors.New("new redis domain-event publisher: stream must not be empty") case cfg.StreamMaxLen <= 0: @@ -68,33 +55,19 @@ func New(cfg Config) (*Publisher, error) { return nil, errors.New("new redis domain-event publisher: operation timeout must be positive") } - options := &redis.Options{ - Addr: cfg.Addr, - Username: cfg.Username, - Password: cfg.Password, - DB: cfg.DB, - Protocol: 2, - DisableIdentity: true, - } - if cfg.TLSEnabled { - options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } - return &Publisher{ - client: redis.NewClient(options), + client: client, stream: cfg.Stream, streamMaxLen: cfg.StreamMaxLen, operationTimeout: cfg.OperationTimeout, }, nil } -// Close releases the underlying Redis client resources. +// Close is a no-op: the client is owned by the runtime, not the publisher. +// The accessor remains for API symmetry with the previous Redis adapter so +// runtime cleanup chains do not need to special-case this surface. 
func (publisher *Publisher) Close() error { - if publisher == nil || publisher.client == nil { - return nil - } - - return publisher.client.Close() + return nil } // Ping verifies that the configured Redis backend is reachable within the diff --git a/user/internal/adapters/redis/domainevents/publisher_test.go b/user/internal/adapters/redis/domainevents/publisher_test.go index 60b37e2..ded87e3 100644 --- a/user/internal/adapters/redis/domainevents/publisher_test.go +++ b/user/internal/adapters/redis/domainevents/publisher_test.go @@ -10,6 +10,7 @@ import ( "galaxy/user/internal/ports" "github.com/alicebob/miniredis/v2" + "github.com/redis/go-redis/v9" "github.com/stretchr/testify/require" ) @@ -17,8 +18,7 @@ func TestPublisherPublishesFlatRedisStreamEntry(t *testing.T) { t.Parallel() server := miniredis.RunT(t) - publisher, err := New(Config{ - Addr: server.Addr(), + publisher, err := New(redis.NewClient(&redis.Options{Addr: server.Addr()}), Config{ Stream: "user:test_events", StreamMaxLen: 5, OperationTimeout: time.Second, @@ -70,8 +70,7 @@ func TestPublisherRejectsInvalidEventBeforeXAdd(t *testing.T) { t.Parallel() server := miniredis.RunT(t) - publisher, err := New(Config{ - Addr: server.Addr(), + publisher, err := New(redis.NewClient(&redis.Options{Addr: server.Addr()}), Config{ Stream: "user:test_events", StreamMaxLen: 5, OperationTimeout: time.Second, diff --git a/user/internal/adapters/redis/lifecycleevents/publisher.go b/user/internal/adapters/redis/lifecycleevents/publisher.go index 0909489..24cc0ed 100644 --- a/user/internal/adapters/redis/lifecycleevents/publisher.go +++ b/user/internal/adapters/redis/lifecycleevents/publisher.go @@ -4,7 +4,6 @@ package lifecycleevents import ( "context" - "crypto/tls" "errors" "fmt" "strconv" @@ -17,23 +16,10 @@ import ( "go.opentelemetry.io/otel/trace" ) -// Config configures one Redis-backed user-lifecycle publisher. +// Config configures one Redis-backed user-lifecycle publisher. 
The +// connection is supplied externally by the runtime so multiple publishers +// can share one *redis.Client. type Config struct { - // Addr is the Redis network address in host:port form. - Addr string - - // Username is the optional Redis ACL username. - Username string - - // Password is the optional Redis ACL password. - Password string - - // DB is the Redis logical database index. - DB int - - // TLSEnabled enables TLS with a conservative minimum protocol version. - TLSEnabled bool - // Stream identifies the Redis Stream key used for lifecycle events. The // default platform key is `user:lifecycle_events`. Stream string @@ -55,13 +41,13 @@ type Publisher struct { operationTimeout time.Duration } -// New constructs a Redis-backed lifecycle-event publisher from cfg. -func New(cfg Config) (*Publisher, error) { +// New constructs a Redis-backed lifecycle-event publisher backed by the +// supplied client. The publisher does not own the client; the runtime is +// responsible for closing it. 
+func New(client *redis.Client, cfg Config) (*Publisher, error) { switch { - case strings.TrimSpace(cfg.Addr) == "": - return nil, errors.New("new redis lifecycle-event publisher: redis addr must not be empty") - case cfg.DB < 0: - return nil, errors.New("new redis lifecycle-event publisher: redis db must not be negative") + case client == nil: + return nil, errors.New("new redis lifecycle-event publisher: redis client must not be nil") case strings.TrimSpace(cfg.Stream) == "": return nil, errors.New("new redis lifecycle-event publisher: stream must not be empty") case cfg.StreamMaxLen <= 0: @@ -70,33 +56,17 @@ func New(cfg Config) (*Publisher, error) { return nil, errors.New("new redis lifecycle-event publisher: operation timeout must be positive") } - options := &redis.Options{ - Addr: cfg.Addr, - Username: cfg.Username, - Password: cfg.Password, - DB: cfg.DB, - Protocol: 2, - DisableIdentity: true, - } - if cfg.TLSEnabled { - options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } - return &Publisher{ - client: redis.NewClient(options), + client: client, stream: cfg.Stream, streamMaxLen: cfg.StreamMaxLen, operationTimeout: cfg.OperationTimeout, }, nil } -// Close releases the underlying Redis client resources. +// Close is a no-op: the client is owned by the runtime. 
func (publisher *Publisher) Close() error { - if publisher == nil || publisher.client == nil { - return nil - } - - return publisher.client.Close() + return nil } // Ping verifies that the configured Redis backend is reachable within the diff --git a/user/internal/adapters/redis/lifecycleevents/publisher_test.go b/user/internal/adapters/redis/lifecycleevents/publisher_test.go index 88ba791..4d00004 100644 --- a/user/internal/adapters/redis/lifecycleevents/publisher_test.go +++ b/user/internal/adapters/redis/lifecycleevents/publisher_test.go @@ -10,6 +10,7 @@ import ( "galaxy/user/internal/ports" "github.com/alicebob/miniredis/v2" + "github.com/redis/go-redis/v9" "github.com/stretchr/testify/require" ) @@ -17,8 +18,7 @@ func TestPublisherPublishesPermanentBlockedEnvelope(t *testing.T) { t.Parallel() server := miniredis.RunT(t) - publisher, err := New(Config{ - Addr: server.Addr(), + publisher, err := New(redis.NewClient(&redis.Options{Addr: server.Addr()}), Config{ Stream: "user:lifecycle_events", StreamMaxLen: 10, OperationTimeout: time.Second, @@ -54,8 +54,7 @@ func TestPublisherOmitsOptionalActorIDAndTraceID(t *testing.T) { t.Parallel() server := miniredis.RunT(t) - publisher, err := New(Config{ - Addr: server.Addr(), + publisher, err := New(redis.NewClient(&redis.Options{Addr: server.Addr()}), Config{ Stream: "user:lifecycle_events", StreamMaxLen: 10, OperationTimeout: time.Second, @@ -86,8 +85,7 @@ func TestPublisherRejectsInvalidEventBeforeXAdd(t *testing.T) { t.Parallel() server := miniredis.RunT(t) - publisher, err := New(Config{ - Addr: server.Addr(), + publisher, err := New(redis.NewClient(&redis.Options{Addr: server.Addr()}), Config{ Stream: "user:lifecycle_events", StreamMaxLen: 10, OperationTimeout: time.Second, @@ -113,8 +111,7 @@ func TestPublisherTrimsBeyondMaxLen(t *testing.T) { t.Parallel() server := miniredis.RunT(t) - publisher, err := New(Config{ - Addr: server.Addr(), + publisher, err := New(redis.NewClient(&redis.Options{Addr: server.Addr()}), 
Config{ Stream: "user:lifecycle_events", StreamMaxLen: 5, OperationTimeout: time.Second, @@ -142,8 +139,7 @@ func TestPublisherPingReportsReachability(t *testing.T) { t.Parallel() server := miniredis.RunT(t) - publisher, err := New(Config{ - Addr: server.Addr(), + publisher, err := New(redis.NewClient(&redis.Options{Addr: server.Addr()}), Config{ Stream: "user:lifecycle_events", StreamMaxLen: 10, OperationTimeout: time.Second, diff --git a/user/internal/adapters/redis/userstore/admin_index.go b/user/internal/adapters/redis/userstore/admin_index.go deleted file mode 100644 index 0521849..0000000 --- a/user/internal/adapters/redis/userstore/admin_index.go +++ /dev/null @@ -1,227 +0,0 @@ -package userstore - -import ( - "context" - "errors" - - "galaxy/user/internal/adapters/redisstate" - "galaxy/user/internal/domain/account" - "galaxy/user/internal/domain/common" - "galaxy/user/internal/domain/entitlement" - "galaxy/user/internal/domain/policy" - "galaxy/user/internal/ports" - - "github.com/redis/go-redis/v9" -) - -var knownSanctionCodes = []policy.SanctionCode{ - policy.SanctionCodeLoginBlock, - policy.SanctionCodePrivateGameCreateBlock, - policy.SanctionCodePrivateGameManageBlock, - policy.SanctionCodeGameJoinBlock, - policy.SanctionCodeProfileUpdateBlock, - policy.SanctionCodePermanentBlock, -} - -var knownLimitCodes = []policy.LimitCode{ - policy.LimitCodeMaxOwnedPrivateGames, - policy.LimitCodeMaxPendingPublicApplications, - policy.LimitCodeMaxActiveGameMemberships, - policy.LimitCodeMaxRegisteredRaceNames, -} - -var knownEligibilityMarkers = []policy.EligibilityMarker{ - policy.EligibilityMarkerCanLogin, - policy.EligibilityMarkerCanCreatePrivateGame, - policy.EligibilityMarkerCanManagePrivateGame, - policy.EligibilityMarkerCanJoinGame, - policy.EligibilityMarkerCanUpdateProfile, -} - -func (store *Store) addCreatedAtIndex( - pipe redis.Pipeliner, - ctx context.Context, - record account.UserAccount, -) { - pipe.ZAdd(ctx, store.keyspace.CreatedAtIndex(), 
redis.Z{ - Score: redisstate.CreatedAtScore(record.CreatedAt), - Member: record.UserID.String(), - }) -} - -func (store *Store) syncDeclaredCountryIndex( - pipe redis.Pipeliner, - ctx context.Context, - previous account.UserAccount, - current account.UserAccount, -) { - if !previous.DeclaredCountry.IsZero() { - pipe.SRem(ctx, store.keyspace.DeclaredCountryIndex(previous.DeclaredCountry), current.UserID.String()) - } - if !current.DeclaredCountry.IsZero() { - pipe.SAdd(ctx, store.keyspace.DeclaredCountryIndex(current.DeclaredCountry), current.UserID.String()) - } -} - -func (store *Store) syncEntitlementIndexes( - pipe redis.Pipeliner, - ctx context.Context, - snapshot entitlement.CurrentSnapshot, -) { - pipe.SRem(ctx, store.keyspace.PaidStateIndex(entitlement.PaidStateFree), snapshot.UserID.String()) - pipe.SRem(ctx, store.keyspace.PaidStateIndex(entitlement.PaidStatePaid), snapshot.UserID.String()) - pipe.SAdd(ctx, store.keyspace.PaidStateIndex(paidStateFromSnapshot(snapshot)), snapshot.UserID.String()) - - pipe.ZRem(ctx, store.keyspace.FinitePaidExpiryIndex(), snapshot.UserID.String()) - if snapshot.HasFiniteExpiry() { - pipe.ZAdd(ctx, store.keyspace.FinitePaidExpiryIndex(), redis.Z{ - Score: redisstate.ExpiryScore(*snapshot.EndsAt), - Member: snapshot.UserID.String(), - }) - } -} - -func (store *Store) syncActiveSanctionCodeIndexes( - pipe redis.Pipeliner, - ctx context.Context, - userID common.UserID, - activeCodes map[policy.SanctionCode]struct{}, -) { - for _, code := range knownSanctionCodes { - pipe.SRem(ctx, store.keyspace.ActiveSanctionCodeIndex(code), userID.String()) - if _, ok := activeCodes[code]; ok { - pipe.SAdd(ctx, store.keyspace.ActiveSanctionCodeIndex(code), userID.String()) - } - } -} - -func (store *Store) syncActiveLimitCodeIndexes( - pipe redis.Pipeliner, - ctx context.Context, - userID common.UserID, - activeCodes map[policy.LimitCode]struct{}, -) { - for _, code := range knownLimitCodes { - pipe.SRem(ctx, 
store.keyspace.ActiveLimitCodeIndex(code), userID.String()) - if _, ok := activeCodes[code]; ok { - pipe.SAdd(ctx, store.keyspace.ActiveLimitCodeIndex(code), userID.String()) - } - } -} - -func (store *Store) syncEligibilityMarkerIndexes( - pipe redis.Pipeliner, - ctx context.Context, - userID common.UserID, - isPaid bool, - activeSanctionCodes map[policy.SanctionCode]struct{}, -) { - values := deriveEligibilityMarkerValues(isPaid, activeSanctionCodes) - - for _, marker := range knownEligibilityMarkers { - pipe.SRem(ctx, store.keyspace.EligibilityMarkerIndex(marker, true), userID.String()) - pipe.SRem(ctx, store.keyspace.EligibilityMarkerIndex(marker, false), userID.String()) - pipe.SAdd(ctx, store.keyspace.EligibilityMarkerIndex(marker, values[marker]), userID.String()) - } -} - -func (store *Store) loadActiveSanctionCodeSet( - ctx context.Context, - getter bytesGetter, - userID common.UserID, -) (map[policy.SanctionCode]struct{}, error) { - activeCodes := make(map[policy.SanctionCode]struct{}, len(knownSanctionCodes)) - - for _, code := range knownSanctionCodes { - _, err := store.loadActiveSanctionRecordID(ctx, getter, store.keyspace.ActiveSanction(userID, code)) - switch { - case err == nil: - activeCodes[code] = struct{}{} - case errors.Is(err, ports.ErrNotFound): - continue - default: - return nil, err - } - } - - return activeCodes, nil -} - -func (store *Store) loadActiveLimitCodeSet( - ctx context.Context, - getter bytesGetter, - userID common.UserID, -) (map[policy.LimitCode]struct{}, error) { - activeCodes := make(map[policy.LimitCode]struct{}, len(knownLimitCodes)) - - for _, code := range knownLimitCodes { - _, err := store.loadActiveLimitRecordID(ctx, getter, store.keyspace.ActiveLimit(userID, code)) - switch { - case err == nil: - activeCodes[code] = struct{}{} - case errors.Is(err, ports.ErrNotFound): - continue - default: - return nil, err - } - } - - return activeCodes, nil -} - -func (store *Store) activeSanctionWatchKeys(userID common.UserID) 
[]string { - keys := make([]string, 0, len(knownSanctionCodes)) - for _, code := range knownSanctionCodes { - keys = append(keys, store.keyspace.ActiveSanction(userID, code)) - } - - return keys -} - -func (store *Store) activeLimitWatchKeys(userID common.UserID) []string { - keys := make([]string, 0, len(knownLimitCodes)) - for _, code := range knownLimitCodes { - keys = append(keys, store.keyspace.ActiveLimit(userID, code)) - } - - return keys -} - -func deriveEligibilityMarkerValues( - isPaid bool, - activeSanctionCodes map[policy.SanctionCode]struct{}, -) map[policy.EligibilityMarker]bool { - if _, permanentBlocked := activeSanctionCodes[policy.SanctionCodePermanentBlock]; permanentBlocked { - return map[policy.EligibilityMarker]bool{ - policy.EligibilityMarkerCanLogin: false, - policy.EligibilityMarkerCanCreatePrivateGame: false, - policy.EligibilityMarkerCanManagePrivateGame: false, - policy.EligibilityMarkerCanJoinGame: false, - policy.EligibilityMarkerCanUpdateProfile: false, - } - } - - _, loginBlocked := activeSanctionCodes[policy.SanctionCodeLoginBlock] - _, createBlocked := activeSanctionCodes[policy.SanctionCodePrivateGameCreateBlock] - _, manageBlocked := activeSanctionCodes[policy.SanctionCodePrivateGameManageBlock] - _, joinBlocked := activeSanctionCodes[policy.SanctionCodeGameJoinBlock] - _, profileBlocked := activeSanctionCodes[policy.SanctionCodeProfileUpdateBlock] - - canLogin := !loginBlocked - - return map[policy.EligibilityMarker]bool{ - policy.EligibilityMarkerCanLogin: canLogin, - policy.EligibilityMarkerCanCreatePrivateGame: canLogin && isPaid && !createBlocked, - policy.EligibilityMarkerCanManagePrivateGame: canLogin && isPaid && !manageBlocked, - policy.EligibilityMarkerCanJoinGame: canLogin && !joinBlocked, - policy.EligibilityMarkerCanUpdateProfile: canLogin && !profileBlocked, - } -} - -func paidStateFromSnapshot(snapshot entitlement.CurrentSnapshot) entitlement.PaidState { - if snapshot.IsPaid { - return entitlement.PaidStatePaid - } 
- - return entitlement.PaidStateFree -} diff --git a/user/internal/adapters/redis/userstore/admin_index_test.go b/user/internal/adapters/redis/userstore/admin_index_test.go deleted file mode 100644 index 66ca496..0000000 --- a/user/internal/adapters/redis/userstore/admin_index_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package userstore - -import ( - "testing" - - "galaxy/user/internal/domain/policy" - - "github.com/stretchr/testify/require" -) - -func TestDeriveEligibilityMarkerValuesCollapsesUnderPermanentBlock(t *testing.T) { - t.Parallel() - - activeCodes := map[policy.SanctionCode]struct{}{ - policy.SanctionCodePermanentBlock: {}, - } - - values := deriveEligibilityMarkerValues(true, activeCodes) - require.False(t, values[policy.EligibilityMarkerCanLogin]) - require.False(t, values[policy.EligibilityMarkerCanCreatePrivateGame]) - require.False(t, values[policy.EligibilityMarkerCanManagePrivateGame]) - require.False(t, values[policy.EligibilityMarkerCanJoinGame]) - require.False(t, values[policy.EligibilityMarkerCanUpdateProfile]) -} - -func TestDeriveEligibilityMarkerValuesPermanentBlockDominatesOtherSanctions(t *testing.T) { - t.Parallel() - - activeCodes := map[policy.SanctionCode]struct{}{ - policy.SanctionCodePermanentBlock: {}, - policy.SanctionCodeLoginBlock: {}, - policy.SanctionCodeGameJoinBlock: {}, - } - - values := deriveEligibilityMarkerValues(false, activeCodes) - for marker, value := range values { - require.Falsef(t, value, "marker %q must be false under permanent_block", marker) - } -} - -func TestDeriveEligibilityMarkerValuesFreeUserWithoutPermanentBlock(t *testing.T) { - t.Parallel() - - values := deriveEligibilityMarkerValues(false, map[policy.SanctionCode]struct{}{}) - - require.True(t, values[policy.EligibilityMarkerCanLogin]) - require.False(t, values[policy.EligibilityMarkerCanCreatePrivateGame]) - require.False(t, values[policy.EligibilityMarkerCanManagePrivateGame]) - require.True(t, values[policy.EligibilityMarkerCanJoinGame]) - 
require.True(t, values[policy.EligibilityMarkerCanUpdateProfile]) -} - -func TestKnownCatalogsIncludeStage22Codes(t *testing.T) { - t.Parallel() - - require.Contains(t, knownSanctionCodes, policy.SanctionCodePermanentBlock) - require.Contains(t, knownLimitCodes, policy.LimitCodeMaxRegisteredRaceNames) -} diff --git a/user/internal/adapters/redis/userstore/admin_list_test.go b/user/internal/adapters/redis/userstore/admin_list_test.go deleted file mode 100644 index 9f5381a..0000000 --- a/user/internal/adapters/redis/userstore/admin_list_test.go +++ /dev/null @@ -1,445 +0,0 @@ -package userstore - -import ( - "context" - "testing" - "time" - - "galaxy/user/internal/adapters/redisstate" - "galaxy/user/internal/domain/common" - "galaxy/user/internal/domain/entitlement" - "galaxy/user/internal/domain/policy" - "galaxy/user/internal/ports" - "galaxy/user/internal/service/adminusers" - "galaxy/user/internal/service/entitlementsvc" - - "github.com/stretchr/testify/require" -) - -func TestListUserIDsCreatedAtPagination(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - base := time.Unix(1_775_240_000, 0).UTC() - - first := validAccountRecord() - first.UserID = common.UserID("user-100") - first.Email = common.Email("u100@example.com") - first.UserName = common.UserName("player-user100aa") - first.CreatedAt = base.Add(-time.Hour) - first.UpdatedAt = first.CreatedAt - - second := validAccountRecord() - second.UserID = common.UserID("user-200") - second.Email = common.Email("u200@example.com") - second.UserName = common.UserName("player-user200aa") - second.CreatedAt = base - second.UpdatedAt = second.CreatedAt - - third := validAccountRecord() - third.UserID = common.UserID("user-300") - third.Email = common.Email("u300@example.com") - third.UserName = common.UserName("player-user300aa") - third.CreatedAt = base - third.UpdatedAt = third.CreatedAt - - require.NoError(t, store.Create(context.Background(), createAccountInput(first))) - require.NoError(t, 
store.Create(context.Background(), createAccountInput(second))) - require.NoError(t, store.Create(context.Background(), createAccountInput(third))) - - firstPage, err := store.ListUserIDs(context.Background(), ports.ListUsersInput{ - PageSize: 2, - Filters: ports.UserListFilters{}, - }) - require.NoError(t, err) - require.Equal(t, []common.UserID{third.UserID, second.UserID}, firstPage.UserIDs) - require.NotEmpty(t, firstPage.NextPageToken) - - secondPage, err := store.ListUserIDs(context.Background(), ports.ListUsersInput{ - PageSize: 2, - PageToken: firstPage.NextPageToken, - Filters: ports.UserListFilters{}, - }) - require.NoError(t, err) - require.Equal(t, []common.UserID{first.UserID}, secondPage.UserIDs) - require.Empty(t, secondPage.NextPageToken) -} - -func TestEnsureByEmailInitialAdminIndexes(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - now := time.Unix(1_775_240_000, 0).UTC() - record := validAccountRecord() - record.DeclaredCountry = common.CountryCode("DE") - record.CreatedAt = now - record.UpdatedAt = now - - result, err := store.EnsureByEmail(context.Background(), ports.EnsureByEmailInput{ - Email: record.Email, - Account: record, - Entitlement: validEntitlementSnapshot(record.UserID, now), - EntitlementRecord: validEntitlementRecord(record.UserID, now), - }) - require.NoError(t, err) - require.Equal(t, ports.EnsureByEmailOutcomeCreated, result.Outcome) - - requireSortedSetScore(t, store, store.keyspace.CreatedAtIndex(), record.UserID.String(), redisstate.CreatedAtScore(record.CreatedAt)) - requireSetContains(t, store, store.keyspace.PaidStateIndex(entitlement.PaidStateFree), record.UserID.String()) - requireSetNotContains(t, store, store.keyspace.PaidStateIndex(entitlement.PaidStatePaid), record.UserID.String()) - requireSetContains(t, store, store.keyspace.DeclaredCountryIndex(record.DeclaredCountry), record.UserID.String()) - requireSetContains(t, store, store.keyspace.EligibilityMarkerIndex(policy.EligibilityMarkerCanLogin, true), 
record.UserID.String()) - requireSetContains(t, store, store.keyspace.EligibilityMarkerIndex(policy.EligibilityMarkerCanCreatePrivateGame, false), record.UserID.String()) - requireSetContains(t, store, store.keyspace.EligibilityMarkerIndex(policy.EligibilityMarkerCanJoinGame, true), record.UserID.String()) -} - -func TestAccountUpdateSyncsDeclaredCountryIndex(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - accountStore := store.Accounts() - record := validAccountRecord() - record.DeclaredCountry = common.CountryCode("DE") - require.NoError(t, accountStore.Create(context.Background(), createAccountInput(record))) - - updated := record - updated.DeclaredCountry = common.CountryCode("FR") - updated.UpdatedAt = record.UpdatedAt.Add(time.Minute) - require.NoError(t, accountStore.Update(context.Background(), updated)) - - requireSetNotContains(t, store, store.keyspace.DeclaredCountryIndex(common.CountryCode("DE")), record.UserID.String()) - requireSetContains(t, store, store.keyspace.DeclaredCountryIndex(common.CountryCode("FR")), record.UserID.String()) -} - -func TestEntitlementLifecycleSyncsAdminIndexes(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - now := time.Unix(1_775_240_000, 0).UTC() - record := validAccountRecord() - record.CreatedAt = now - record.UpdatedAt = now - _, err := store.EnsureByEmail(context.Background(), ports.EnsureByEmailInput{ - Email: record.Email, - Account: record, - Entitlement: validEntitlementSnapshot(record.UserID, now), - EntitlementRecord: validEntitlementRecord(record.UserID, now), - }) - require.NoError(t, err) - - lifecycleStore := store.EntitlementLifecycle() - freeRecord := validEntitlementRecord(record.UserID, now) - freeSnapshot := validEntitlementSnapshot(record.UserID, now) - - grantStartsAt := now.Add(time.Hour) - grantEndsAt := grantStartsAt.Add(30 * 24 * time.Hour) - grantedRecord := paidEntitlementRecord( - entitlement.EntitlementRecordID("entitlement-paid-1"), - record.UserID, - 
entitlement.PlanCodePaidMonthly, - grantStartsAt, - grantEndsAt, - common.Source("admin"), - common.ReasonCode("manual_grant"), - ) - grantedSnapshot := paidEntitlementSnapshot( - record.UserID, - entitlement.PlanCodePaidMonthly, - grantStartsAt, - grantEndsAt, - common.Source("admin"), - common.ReasonCode("manual_grant"), - ) - closedFreeRecord := freeRecord - closedFreeRecord.ClosedAt = timePointer(grantStartsAt) - closedFreeRecord.ClosedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")} - closedFreeRecord.ClosedReasonCode = common.ReasonCode("manual_grant") - - require.NoError(t, lifecycleStore.Grant(context.Background(), ports.GrantEntitlementInput{ - ExpectedCurrentSnapshot: freeSnapshot, - ExpectedCurrentRecord: freeRecord, - UpdatedCurrentRecord: closedFreeRecord, - NewRecord: grantedRecord, - NewSnapshot: grantedSnapshot, - })) - - requireSetContains(t, store, store.keyspace.PaidStateIndex(entitlement.PaidStatePaid), record.UserID.String()) - requireSetNotContains(t, store, store.keyspace.PaidStateIndex(entitlement.PaidStateFree), record.UserID.String()) - requireSortedSetScore(t, store, store.keyspace.FinitePaidExpiryIndex(), record.UserID.String(), redisstate.ExpiryScore(grantEndsAt)) - requireSetContains(t, store, store.keyspace.EligibilityMarkerIndex(policy.EligibilityMarkerCanCreatePrivateGame, true), record.UserID.String()) - - extendedEndsAt := grantEndsAt.Add(30 * 24 * time.Hour) - extensionRecord := paidEntitlementRecord( - entitlement.EntitlementRecordID("entitlement-paid-2"), - record.UserID, - entitlement.PlanCodePaidMonthly, - grantEndsAt, - extendedEndsAt, - common.Source("admin"), - common.ReasonCode("manual_extend"), - ) - extendedSnapshot := paidEntitlementSnapshot( - record.UserID, - entitlement.PlanCodePaidMonthly, - grantStartsAt, - extendedEndsAt, - common.Source("admin"), - common.ReasonCode("manual_extend"), - ) - require.NoError(t, lifecycleStore.Extend(context.Background(), 
ports.ExtendEntitlementInput{ - ExpectedCurrentSnapshot: grantedSnapshot, - NewRecord: extensionRecord, - NewSnapshot: extendedSnapshot, - })) - - requireSortedSetScore(t, store, store.keyspace.FinitePaidExpiryIndex(), record.UserID.String(), redisstate.ExpiryScore(extendedEndsAt)) - - revokeAt := grantEndsAt.Add(12 * time.Hour) - revokedCurrentRecord := extensionRecord - revokedCurrentRecord.ClosedAt = timePointer(revokeAt) - revokedCurrentRecord.ClosedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")} - revokedCurrentRecord.ClosedReasonCode = common.ReasonCode("manual_revoke") - freeAfterRevokeRecord := entitlement.PeriodRecord{ - RecordID: entitlement.EntitlementRecordID("entitlement-free-2"), - UserID: record.UserID, - PlanCode: entitlement.PlanCodeFree, - Source: common.Source("admin"), - Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}, - ReasonCode: common.ReasonCode("manual_revoke"), - StartsAt: revokeAt, - CreatedAt: revokeAt, - } - freeAfterRevokeSnapshot := entitlement.CurrentSnapshot{ - UserID: record.UserID, - PlanCode: entitlement.PlanCodeFree, - IsPaid: false, - StartsAt: revokeAt, - Source: common.Source("admin"), - Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}, - ReasonCode: common.ReasonCode("manual_revoke"), - UpdatedAt: revokeAt, - } - require.NoError(t, lifecycleStore.Revoke(context.Background(), ports.RevokeEntitlementInput{ - ExpectedCurrentSnapshot: extendedSnapshot, - ExpectedCurrentRecord: extensionRecord, - UpdatedCurrentRecord: revokedCurrentRecord, - NewRecord: freeAfterRevokeRecord, - NewSnapshot: freeAfterRevokeSnapshot, - })) - - requireSetContains(t, store, store.keyspace.PaidStateIndex(entitlement.PaidStateFree), record.UserID.String()) - requireSetNotContains(t, store, store.keyspace.PaidStateIndex(entitlement.PaidStatePaid), record.UserID.String()) - requireSortedSetMissing(t, store, 
store.keyspace.FinitePaidExpiryIndex(), record.UserID.String()) - requireSetContains(t, store, store.keyspace.EligibilityMarkerIndex(policy.EligibilityMarkerCanCreatePrivateGame, false), record.UserID.String()) -} - -func TestPolicyLifecycleSyncsAdminIndexes(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - now := time.Unix(1_775_240_000, 0).UTC() - record := validAccountRecord() - record.CreatedAt = now - record.UpdatedAt = now - _, err := store.EnsureByEmail(context.Background(), ports.EnsureByEmailInput{ - Email: record.Email, - Account: record, - Entitlement: validEntitlementSnapshot(record.UserID, now), - EntitlementRecord: validEntitlementRecord(record.UserID, now), - }) - require.NoError(t, err) - - lifecycleStore := store.PolicyLifecycle() - sanctionRecord := policy.SanctionRecord{ - RecordID: policy.SanctionRecordID("sanction-1"), - UserID: record.UserID, - SanctionCode: policy.SanctionCodeLoginBlock, - Scope: common.Scope("auth"), - ReasonCode: common.ReasonCode("manual_block"), - Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}, - AppliedAt: now, - } - require.NoError(t, lifecycleStore.ApplySanction(context.Background(), ports.ApplySanctionInput{ - NewRecord: sanctionRecord, - })) - - requireSetContains(t, store, store.keyspace.ActiveSanctionCodeIndex(policy.SanctionCodeLoginBlock), record.UserID.String()) - requireSetContains(t, store, store.keyspace.EligibilityMarkerIndex(policy.EligibilityMarkerCanLogin, false), record.UserID.String()) - requireSetContains(t, store, store.keyspace.EligibilityMarkerIndex(policy.EligibilityMarkerCanJoinGame, false), record.UserID.String()) - - removedSanction := sanctionRecord - removedAt := now.Add(time.Minute) - removedSanction.RemovedAt = &removedAt - removedSanction.RemovedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-2")} - removedSanction.RemovedReasonCode = common.ReasonCode("manual_remove") - require.NoError(t, 
lifecycleStore.RemoveSanction(context.Background(), ports.RemoveSanctionInput{ - ExpectedActiveRecord: sanctionRecord, - UpdatedRecord: removedSanction, - })) - - requireSetNotContains(t, store, store.keyspace.ActiveSanctionCodeIndex(policy.SanctionCodeLoginBlock), record.UserID.String()) - requireSetContains(t, store, store.keyspace.EligibilityMarkerIndex(policy.EligibilityMarkerCanLogin, true), record.UserID.String()) - - limitRecord := policy.LimitRecord{ - RecordID: policy.LimitRecordID("limit-1"), - UserID: record.UserID, - LimitCode: policy.LimitCodeMaxOwnedPrivateGames, - Value: 5, - ReasonCode: common.ReasonCode("manual_override"), - Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}, - AppliedAt: now.Add(2 * time.Minute), - } - require.NoError(t, lifecycleStore.SetLimit(context.Background(), ports.SetLimitInput{ - NewRecord: limitRecord, - })) - - requireSetContains(t, store, store.keyspace.ActiveLimitCodeIndex(policy.LimitCodeMaxOwnedPrivateGames), record.UserID.String()) - - removedLimit := limitRecord - limitRemovedAt := now.Add(3 * time.Minute) - removedLimit.RemovedAt = &limitRemovedAt - removedLimit.RemovedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-2")} - removedLimit.RemovedReasonCode = common.ReasonCode("manual_remove") - require.NoError(t, lifecycleStore.RemoveLimit(context.Background(), ports.RemoveLimitInput{ - ExpectedActiveRecord: limitRecord, - UpdatedRecord: removedLimit, - })) - - requireSetNotContains(t, store, store.keyspace.ActiveLimitCodeIndex(policy.LimitCodeMaxOwnedPrivateGames), record.UserID.String()) -} - -func TestAdminListerReevaluatesExpiredPaidSnapshots(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - userID := common.UserID("user-123") - now := time.Unix(1_775_240_000, 0).UTC() - record := validAccountRecord() - record.CreatedAt = now.Add(-2 * time.Hour) - record.UpdatedAt = record.CreatedAt - _, err := 
store.EnsureByEmail(context.Background(), ports.EnsureByEmailInput{ - Email: record.Email, - Account: record, - Entitlement: validEntitlementSnapshot(userID, record.CreatedAt), - EntitlementRecord: validEntitlementRecord(userID, record.CreatedAt), - }) - require.NoError(t, err) - - grantStartsAt := now.Add(-90 * time.Minute) - grantEndsAt := now.Add(-30 * time.Minute) - freeRecord := validEntitlementRecord(userID, record.CreatedAt) - freeSnapshot := validEntitlementSnapshot(userID, record.CreatedAt) - grantedRecord := paidEntitlementRecord( - entitlement.EntitlementRecordID("entitlement-paid-expired"), - userID, - entitlement.PlanCodePaidMonthly, - grantStartsAt, - grantEndsAt, - common.Source("admin"), - common.ReasonCode("manual_grant"), - ) - grantedSnapshot := paidEntitlementSnapshot( - userID, - entitlement.PlanCodePaidMonthly, - grantStartsAt, - grantEndsAt, - common.Source("admin"), - common.ReasonCode("manual_grant"), - ) - closedFreeRecord := freeRecord - closedFreeRecord.ClosedAt = timePointer(grantStartsAt) - closedFreeRecord.ClosedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")} - closedFreeRecord.ClosedReasonCode = common.ReasonCode("manual_grant") - require.NoError(t, store.EntitlementLifecycle().Grant(context.Background(), ports.GrantEntitlementInput{ - ExpectedCurrentSnapshot: freeSnapshot, - ExpectedCurrentRecord: freeRecord, - UpdatedCurrentRecord: closedFreeRecord, - NewRecord: grantedRecord, - NewSnapshot: grantedSnapshot, - })) - - reader, err := entitlementsvc.NewReader( - store.EntitlementSnapshots(), - store.EntitlementLifecycle(), - adminStoreClock{now: now}, - adminStoreIDGenerator{entitlementRecordID: entitlement.EntitlementRecordID("entitlement-free-after-expiry")}, - ) - require.NoError(t, err) - lister, err := adminusers.NewLister(store.Accounts(), reader, store.Sanctions(), store.Limits(), adminStoreClock{now: now}, store) - require.NoError(t, err) - - result, err := 
lister.Execute(context.Background(), adminusers.ListUsersInput{PaidState: "free"}) - require.NoError(t, err) - require.Len(t, result.Items, 1) - require.Equal(t, "user-123", result.Items[0].UserID) - require.Equal(t, "free", result.Items[0].Entitlement.PlanCode) - require.False(t, result.Items[0].Entitlement.IsPaid) - - storedSnapshot, err := store.EntitlementSnapshots().GetByUserID(context.Background(), userID) - require.NoError(t, err) - require.Equal(t, entitlement.PlanCodeFree, storedSnapshot.PlanCode) - require.False(t, storedSnapshot.IsPaid) -} - -type adminStoreClock struct { - now time.Time -} - -func (clock adminStoreClock) Now() time.Time { - return clock.now -} - -type adminStoreIDGenerator struct { - entitlementRecordID entitlement.EntitlementRecordID -} - -func (generator adminStoreIDGenerator) NewUserID() (common.UserID, error) { - return "", nil -} - -func (generator adminStoreIDGenerator) NewUserName() (common.UserName, error) { - return "", nil -} - -func (generator adminStoreIDGenerator) NewEntitlementRecordID() (entitlement.EntitlementRecordID, error) { - return generator.entitlementRecordID, nil -} - -func (generator adminStoreIDGenerator) NewSanctionRecordID() (policy.SanctionRecordID, error) { - return "", nil -} - -func (generator adminStoreIDGenerator) NewLimitRecordID() (policy.LimitRecordID, error) { - return "", nil -} - -func requireSetContains(t *testing.T, store *Store, key string, member string) { - t.Helper() - - exists, err := store.client.SIsMember(context.Background(), key, member).Result() - require.NoError(t, err) - require.True(t, exists, "expected %q to contain %q", key, member) -} - -func requireSetNotContains(t *testing.T, store *Store, key string, member string) { - t.Helper() - - exists, err := store.client.SIsMember(context.Background(), key, member).Result() - require.NoError(t, err) - require.False(t, exists, "expected %q not to contain %q", key, member) -} - -func requireSortedSetScore(t *testing.T, store *Store, key 
string, member string, want float64) { - t.Helper() - - got, err := store.client.ZScore(context.Background(), key, member).Result() - require.NoError(t, err) - require.Equal(t, want, got) -} - -func requireSortedSetMissing(t *testing.T, store *Store, key string, member string) { - t.Helper() - - _, err := store.client.ZScore(context.Background(), key, member).Result() - require.Error(t, err) -} diff --git a/user/internal/adapters/redis/userstore/entitlement_store.go b/user/internal/adapters/redis/userstore/entitlement_store.go deleted file mode 100644 index de31b39..0000000 --- a/user/internal/adapters/redis/userstore/entitlement_store.go +++ /dev/null @@ -1,752 +0,0 @@ -package userstore - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "time" - - "galaxy/user/internal/domain/common" - "galaxy/user/internal/domain/entitlement" - "galaxy/user/internal/ports" - - "github.com/redis/go-redis/v9" -) - -type entitlementPeriodRecord struct { - RecordID string `json:"record_id"` - UserID string `json:"user_id"` - PlanCode string `json:"plan_code"` - Source string `json:"source"` - ActorType string `json:"actor_type"` - ActorID *string `json:"actor_id,omitempty"` - ReasonCode string `json:"reason_code"` - StartsAt string `json:"starts_at"` - EndsAt *string `json:"ends_at,omitempty"` - CreatedAt string `json:"created_at"` - ClosedAt *string `json:"closed_at,omitempty"` - ClosedByType *string `json:"closed_by_type,omitempty"` - ClosedByID *string `json:"closed_by_id,omitempty"` - ClosedReasonCode *string `json:"closed_reason_code,omitempty"` -} - -// CreateEntitlementRecord stores one new entitlement history record. 
-func (store *Store) CreateEntitlementRecord(ctx context.Context, record entitlement.PeriodRecord) error { - if err := record.Validate(); err != nil { - return fmt.Errorf("create entitlement record in redis: %w", err) - } - - payload, err := marshalEntitlementPeriodRecord(record) - if err != nil { - return fmt.Errorf("create entitlement record in redis: %w", err) - } - - recordKey := store.keyspace.EntitlementRecord(record.RecordID) - historyKey := store.keyspace.EntitlementHistory(record.UserID) - - operationCtx, cancel, err := store.operationContext(ctx, "create entitlement record in redis") - if err != nil { - return err - } - defer cancel() - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - if err := ensureKeyAbsent(operationCtx, tx, recordKey); err != nil { - return fmt.Errorf("create entitlement record %q in redis: %w", record.RecordID, err) - } - - _, err := tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - pipe.Set(operationCtx, recordKey, payload, 0) - pipe.ZAdd(operationCtx, historyKey, redis.Z{ - Score: float64(record.StartsAt.UTC().UnixMicro()), - Member: record.RecordID.String(), - }) - return nil - }) - if err != nil { - return fmt.Errorf("create entitlement record %q in redis: %w", record.RecordID, err) - } - - return nil - }, recordKey, historyKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("create entitlement record %q in redis: %w", record.RecordID, ports.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// GetEntitlementRecordByRecordID returns the entitlement history record -// identified by recordID. 
-func (store *Store) GetEntitlementRecordByRecordID( - ctx context.Context, - recordID entitlement.EntitlementRecordID, -) (entitlement.PeriodRecord, error) { - if err := recordID.Validate(); err != nil { - return entitlement.PeriodRecord{}, fmt.Errorf("get entitlement record by record id from redis: %w", err) - } - - operationCtx, cancel, err := store.operationContext(ctx, "get entitlement record by record id from redis") - if err != nil { - return entitlement.PeriodRecord{}, err - } - defer cancel() - - record, err := store.loadEntitlementRecord(operationCtx, store.client, recordID) - if err != nil { - switch { - case errors.Is(err, ports.ErrNotFound): - return entitlement.PeriodRecord{}, fmt.Errorf("get entitlement record by record id %q from redis: %w", recordID, ports.ErrNotFound) - default: - return entitlement.PeriodRecord{}, fmt.Errorf("get entitlement record by record id %q from redis: %w", recordID, err) - } - } - - return record, nil -} - -// ListEntitlementRecordsByUserID returns every entitlement history record -// owned by userID. 
-func (store *Store) ListEntitlementRecordsByUserID( - ctx context.Context, - userID common.UserID, -) ([]entitlement.PeriodRecord, error) { - if err := userID.Validate(); err != nil { - return nil, fmt.Errorf("list entitlement records by user id from redis: %w", err) - } - - operationCtx, cancel, err := store.operationContext(ctx, "list entitlement records by user id from redis") - if err != nil { - return nil, err - } - defer cancel() - - recordIDs, err := store.client.ZRange(operationCtx, store.keyspace.EntitlementHistory(userID), 0, -1).Result() - if err != nil { - return nil, fmt.Errorf("list entitlement records by user id %q from redis: %w", userID, err) - } - - records := make([]entitlement.PeriodRecord, 0, len(recordIDs)) - for _, rawRecordID := range recordIDs { - record, err := store.loadEntitlementRecord(operationCtx, store.client, entitlement.EntitlementRecordID(rawRecordID)) - if err != nil { - return nil, fmt.Errorf("list entitlement records by user id %q from redis: %w", userID, err) - } - records = append(records, record) - } - - return records, nil -} - -// UpdateEntitlementRecord replaces one stored entitlement history record. 
-func (store *Store) UpdateEntitlementRecord(ctx context.Context, record entitlement.PeriodRecord) error { - if err := record.Validate(); err != nil { - return fmt.Errorf("update entitlement record in redis: %w", err) - } - - payload, err := marshalEntitlementPeriodRecord(record) - if err != nil { - return fmt.Errorf("update entitlement record in redis: %w", err) - } - - recordKey := store.keyspace.EntitlementRecord(record.RecordID) - - operationCtx, cancel, err := store.operationContext(ctx, "update entitlement record in redis") - if err != nil { - return err - } - defer cancel() - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - if _, err := store.loadEntitlementRecord(operationCtx, tx, record.RecordID); err != nil { - return fmt.Errorf("update entitlement record %q in redis: %w", record.RecordID, err) - } - - _, err := tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - pipe.Set(operationCtx, recordKey, payload, 0) - return nil - }) - if err != nil { - return fmt.Errorf("update entitlement record %q in redis: %w", record.RecordID, err) - } - - return nil - }, recordKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("update entitlement record %q in redis: %w", record.RecordID, ports.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// GrantEntitlement atomically closes the current free history record, creates -// one paid history record, and replaces the current snapshot. 
-func (store *Store) GrantEntitlement(ctx context.Context, input ports.GrantEntitlementInput) error { - if err := input.Validate(); err != nil { - return fmt.Errorf("grant entitlement in redis: %w", err) - } - - updatedCurrentRecordPayload, err := marshalEntitlementPeriodRecord(input.UpdatedCurrentRecord) - if err != nil { - return fmt.Errorf("grant entitlement in redis: %w", err) - } - newRecordPayload, err := marshalEntitlementPeriodRecord(input.NewRecord) - if err != nil { - return fmt.Errorf("grant entitlement in redis: %w", err) - } - newSnapshotPayload, err := marshalEntitlementSnapshotRecord(input.NewSnapshot) - if err != nil { - return fmt.Errorf("grant entitlement in redis: %w", err) - } - - currentRecordKey := store.keyspace.EntitlementRecord(input.ExpectedCurrentRecord.RecordID) - newRecordKey := store.keyspace.EntitlementRecord(input.NewRecord.RecordID) - historyKey := store.keyspace.EntitlementHistory(input.NewRecord.UserID) - snapshotKey := store.keyspace.EntitlementSnapshot(input.NewSnapshot.UserID) - watchedKeys := append( - []string{currentRecordKey, newRecordKey, historyKey, snapshotKey}, - store.activeSanctionWatchKeys(input.NewSnapshot.UserID)..., - ) - - operationCtx, cancel, err := store.operationContext(ctx, "grant entitlement in redis") - if err != nil { - return err - } - defer cancel() - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - storedSnapshot, err := store.loadEntitlementSnapshot(operationCtx, tx, input.ExpectedCurrentSnapshot.UserID) - if err != nil { - return fmt.Errorf("grant entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err) - } - if !equalEntitlementSnapshots(storedSnapshot, input.ExpectedCurrentSnapshot) { - return fmt.Errorf("grant entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, ports.ErrConflict) - } - - storedCurrentRecord, err := store.loadEntitlementRecord(operationCtx, tx, input.ExpectedCurrentRecord.RecordID) - if err != nil { - 
return fmt.Errorf("grant entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err) - } - if !equalEntitlementPeriodRecords(storedCurrentRecord, input.ExpectedCurrentRecord) { - return fmt.Errorf("grant entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, ports.ErrConflict) - } - if err := ensureKeyAbsent(operationCtx, tx, newRecordKey); err != nil { - return fmt.Errorf("grant entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err) - } - activeSanctionCodes, err := store.loadActiveSanctionCodeSet(operationCtx, tx, input.NewSnapshot.UserID) - if err != nil { - return fmt.Errorf("grant entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err) - } - - _, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - pipe.Set(operationCtx, currentRecordKey, updatedCurrentRecordPayload, 0) - pipe.Set(operationCtx, newRecordKey, newRecordPayload, 0) - pipe.ZAdd(operationCtx, historyKey, redis.Z{ - Score: float64(input.NewRecord.StartsAt.UTC().UnixMicro()), - Member: input.NewRecord.RecordID.String(), - }) - pipe.Set(operationCtx, snapshotKey, newSnapshotPayload, 0) - store.syncEntitlementIndexes(pipe, operationCtx, input.NewSnapshot) - store.syncEligibilityMarkerIndexes(pipe, operationCtx, input.NewSnapshot.UserID, input.NewSnapshot.IsPaid, activeSanctionCodes) - return nil - }) - if err != nil { - return fmt.Errorf("grant entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err) - } - - return nil - }, watchedKeys...) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("grant entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, ports.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// ExtendEntitlement atomically appends one paid history segment and replaces -// the current paid snapshot. 
-func (store *Store) ExtendEntitlement(ctx context.Context, input ports.ExtendEntitlementInput) error { - if err := input.Validate(); err != nil { - return fmt.Errorf("extend entitlement in redis: %w", err) - } - - newRecordPayload, err := marshalEntitlementPeriodRecord(input.NewRecord) - if err != nil { - return fmt.Errorf("extend entitlement in redis: %w", err) - } - newSnapshotPayload, err := marshalEntitlementSnapshotRecord(input.NewSnapshot) - if err != nil { - return fmt.Errorf("extend entitlement in redis: %w", err) - } - - newRecordKey := store.keyspace.EntitlementRecord(input.NewRecord.RecordID) - historyKey := store.keyspace.EntitlementHistory(input.NewRecord.UserID) - snapshotKey := store.keyspace.EntitlementSnapshot(input.NewSnapshot.UserID) - watchedKeys := append( - []string{newRecordKey, historyKey, snapshotKey}, - store.activeSanctionWatchKeys(input.NewSnapshot.UserID)..., - ) - - operationCtx, cancel, err := store.operationContext(ctx, "extend entitlement in redis") - if err != nil { - return err - } - defer cancel() - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - storedSnapshot, err := store.loadEntitlementSnapshot(operationCtx, tx, input.ExpectedCurrentSnapshot.UserID) - if err != nil { - return fmt.Errorf("extend entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err) - } - if !equalEntitlementSnapshots(storedSnapshot, input.ExpectedCurrentSnapshot) { - return fmt.Errorf("extend entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, ports.ErrConflict) - } - if err := ensureKeyAbsent(operationCtx, tx, newRecordKey); err != nil { - return fmt.Errorf("extend entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err) - } - activeSanctionCodes, err := store.loadActiveSanctionCodeSet(operationCtx, tx, input.NewSnapshot.UserID) - if err != nil { - return fmt.Errorf("extend entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err) 
- } - - _, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - pipe.Set(operationCtx, newRecordKey, newRecordPayload, 0) - pipe.ZAdd(operationCtx, historyKey, redis.Z{ - Score: float64(input.NewRecord.StartsAt.UTC().UnixMicro()), - Member: input.NewRecord.RecordID.String(), - }) - pipe.Set(operationCtx, snapshotKey, newSnapshotPayload, 0) - store.syncEntitlementIndexes(pipe, operationCtx, input.NewSnapshot) - store.syncEligibilityMarkerIndexes(pipe, operationCtx, input.NewSnapshot.UserID, input.NewSnapshot.IsPaid, activeSanctionCodes) - return nil - }) - if err != nil { - return fmt.Errorf("extend entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err) - } - - return nil - }, watchedKeys...) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("extend entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, ports.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// RevokeEntitlement atomically closes the current paid history record, -// creates one free history record, and replaces the current snapshot. 
-func (store *Store) RevokeEntitlement(ctx context.Context, input ports.RevokeEntitlementInput) error { - if err := input.Validate(); err != nil { - return fmt.Errorf("revoke entitlement in redis: %w", err) - } - - updatedCurrentRecordPayload, err := marshalEntitlementPeriodRecord(input.UpdatedCurrentRecord) - if err != nil { - return fmt.Errorf("revoke entitlement in redis: %w", err) - } - newRecordPayload, err := marshalEntitlementPeriodRecord(input.NewRecord) - if err != nil { - return fmt.Errorf("revoke entitlement in redis: %w", err) - } - newSnapshotPayload, err := marshalEntitlementSnapshotRecord(input.NewSnapshot) - if err != nil { - return fmt.Errorf("revoke entitlement in redis: %w", err) - } - - currentRecordKey := store.keyspace.EntitlementRecord(input.ExpectedCurrentRecord.RecordID) - newRecordKey := store.keyspace.EntitlementRecord(input.NewRecord.RecordID) - historyKey := store.keyspace.EntitlementHistory(input.NewRecord.UserID) - snapshotKey := store.keyspace.EntitlementSnapshot(input.NewSnapshot.UserID) - watchedKeys := append( - []string{currentRecordKey, newRecordKey, historyKey, snapshotKey}, - store.activeSanctionWatchKeys(input.NewSnapshot.UserID)..., - ) - - operationCtx, cancel, err := store.operationContext(ctx, "revoke entitlement in redis") - if err != nil { - return err - } - defer cancel() - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - storedSnapshot, err := store.loadEntitlementSnapshot(operationCtx, tx, input.ExpectedCurrentSnapshot.UserID) - if err != nil { - return fmt.Errorf("revoke entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err) - } - if !equalEntitlementSnapshots(storedSnapshot, input.ExpectedCurrentSnapshot) { - return fmt.Errorf("revoke entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, ports.ErrConflict) - } - - storedCurrentRecord, err := store.loadEntitlementRecord(operationCtx, tx, input.ExpectedCurrentRecord.RecordID) - if err != nil 
{ - return fmt.Errorf("revoke entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err) - } - if !equalEntitlementPeriodRecords(storedCurrentRecord, input.ExpectedCurrentRecord) { - return fmt.Errorf("revoke entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, ports.ErrConflict) - } - if err := ensureKeyAbsent(operationCtx, tx, newRecordKey); err != nil { - return fmt.Errorf("revoke entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err) - } - activeSanctionCodes, err := store.loadActiveSanctionCodeSet(operationCtx, tx, input.NewSnapshot.UserID) - if err != nil { - return fmt.Errorf("revoke entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err) - } - - _, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - pipe.Set(operationCtx, currentRecordKey, updatedCurrentRecordPayload, 0) - pipe.Set(operationCtx, newRecordKey, newRecordPayload, 0) - pipe.ZAdd(operationCtx, historyKey, redis.Z{ - Score: float64(input.NewRecord.StartsAt.UTC().UnixMicro()), - Member: input.NewRecord.RecordID.String(), - }) - pipe.Set(operationCtx, snapshotKey, newSnapshotPayload, 0) - store.syncEntitlementIndexes(pipe, operationCtx, input.NewSnapshot) - store.syncEligibilityMarkerIndexes(pipe, operationCtx, input.NewSnapshot.UserID, input.NewSnapshot.IsPaid, activeSanctionCodes) - return nil - }) - if err != nil { - return fmt.Errorf("revoke entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, err) - } - - return nil - }, watchedKeys...) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("revoke entitlement for user %q in redis: %w", input.ExpectedCurrentSnapshot.UserID, ports.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// RepairExpiredEntitlement atomically replaces one expired finite paid -// snapshot with a materialized free state. 
-func (store *Store) RepairExpiredEntitlement(ctx context.Context, input ports.RepairExpiredEntitlementInput) error { - if err := input.Validate(); err != nil { - return fmt.Errorf("repair expired entitlement in redis: %w", err) - } - - newRecordPayload, err := marshalEntitlementPeriodRecord(input.NewRecord) - if err != nil { - return fmt.Errorf("repair expired entitlement in redis: %w", err) - } - newSnapshotPayload, err := marshalEntitlementSnapshotRecord(input.NewSnapshot) - if err != nil { - return fmt.Errorf("repair expired entitlement in redis: %w", err) - } - - newRecordKey := store.keyspace.EntitlementRecord(input.NewRecord.RecordID) - historyKey := store.keyspace.EntitlementHistory(input.NewRecord.UserID) - snapshotKey := store.keyspace.EntitlementSnapshot(input.NewSnapshot.UserID) - watchedKeys := append( - []string{newRecordKey, historyKey, snapshotKey}, - store.activeSanctionWatchKeys(input.NewSnapshot.UserID)..., - ) - - operationCtx, cancel, err := store.operationContext(ctx, "repair expired entitlement in redis") - if err != nil { - return err - } - defer cancel() - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - storedSnapshot, err := store.loadEntitlementSnapshot(operationCtx, tx, input.ExpectedExpiredSnapshot.UserID) - if err != nil { - return fmt.Errorf("repair expired entitlement for user %q in redis: %w", input.ExpectedExpiredSnapshot.UserID, err) - } - if !equalEntitlementSnapshots(storedSnapshot, input.ExpectedExpiredSnapshot) { - return fmt.Errorf("repair expired entitlement for user %q in redis: %w", input.ExpectedExpiredSnapshot.UserID, ports.ErrConflict) - } - if err := ensureKeyAbsent(operationCtx, tx, newRecordKey); err != nil { - return fmt.Errorf("repair expired entitlement for user %q in redis: %w", input.ExpectedExpiredSnapshot.UserID, err) - } - activeSanctionCodes, err := store.loadActiveSanctionCodeSet(operationCtx, tx, input.NewSnapshot.UserID) - if err != nil { - return fmt.Errorf("repair expired 
entitlement for user %q in redis: %w", input.ExpectedExpiredSnapshot.UserID, err) - } - - _, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - pipe.Set(operationCtx, newRecordKey, newRecordPayload, 0) - pipe.ZAdd(operationCtx, historyKey, redis.Z{ - Score: float64(input.NewRecord.StartsAt.UTC().UnixMicro()), - Member: input.NewRecord.RecordID.String(), - }) - pipe.Set(operationCtx, snapshotKey, newSnapshotPayload, 0) - store.syncEntitlementIndexes(pipe, operationCtx, input.NewSnapshot) - store.syncEligibilityMarkerIndexes(pipe, operationCtx, input.NewSnapshot.UserID, input.NewSnapshot.IsPaid, activeSanctionCodes) - return nil - }) - if err != nil { - return fmt.Errorf("repair expired entitlement for user %q in redis: %w", input.ExpectedExpiredSnapshot.UserID, err) - } - - return nil - }, watchedKeys...) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("repair expired entitlement for user %q in redis: %w", input.ExpectedExpiredSnapshot.UserID, ports.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -func (store *Store) loadEntitlementRecord( - ctx context.Context, - getter bytesGetter, - recordID entitlement.EntitlementRecordID, -) (entitlement.PeriodRecord, error) { - payload, err := getter.Get(ctx, store.keyspace.EntitlementRecord(recordID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return entitlement.PeriodRecord{}, ports.ErrNotFound - case err != nil: - return entitlement.PeriodRecord{}, err - } - - return decodeEntitlementPeriodRecord(payload) -} - -func marshalEntitlementPeriodRecord(record entitlement.PeriodRecord) ([]byte, error) { - encoded := entitlementPeriodRecord{ - RecordID: record.RecordID.String(), - UserID: record.UserID.String(), - PlanCode: string(record.PlanCode), - Source: record.Source.String(), - ActorType: record.Actor.Type.String(), - ReasonCode: record.ReasonCode.String(), - StartsAt: record.StartsAt.UTC().Format(time.RFC3339Nano), - 
CreatedAt: record.CreatedAt.UTC().Format(time.RFC3339Nano), - } - if !record.Actor.ID.IsZero() { - value := record.Actor.ID.String() - encoded.ActorID = &value - } - if record.EndsAt != nil { - value := record.EndsAt.UTC().Format(time.RFC3339Nano) - encoded.EndsAt = &value - } - if record.ClosedAt != nil { - value := record.ClosedAt.UTC().Format(time.RFC3339Nano) - encoded.ClosedAt = &value - } - if !record.ClosedBy.Type.IsZero() { - value := record.ClosedBy.Type.String() - encoded.ClosedByType = &value - } - if !record.ClosedBy.ID.IsZero() { - value := record.ClosedBy.ID.String() - encoded.ClosedByID = &value - } - if !record.ClosedReasonCode.IsZero() { - value := record.ClosedReasonCode.String() - encoded.ClosedReasonCode = &value - } - - return json.Marshal(encoded) -} - -func decodeEntitlementPeriodRecord(payload []byte) (entitlement.PeriodRecord, error) { - var encoded entitlementPeriodRecord - if err := decodeJSONPayload(payload, &encoded); err != nil { - return entitlement.PeriodRecord{}, err - } - - startsAt, err := time.Parse(time.RFC3339Nano, encoded.StartsAt) - if err != nil { - return entitlement.PeriodRecord{}, fmt.Errorf("decode entitlement period record starts_at: %w", err) - } - createdAt, err := time.Parse(time.RFC3339Nano, encoded.CreatedAt) - if err != nil { - return entitlement.PeriodRecord{}, fmt.Errorf("decode entitlement period record created_at: %w", err) - } - - record := entitlement.PeriodRecord{ - RecordID: entitlement.EntitlementRecordID(encoded.RecordID), - UserID: common.UserID(encoded.UserID), - PlanCode: entitlement.PlanCode(encoded.PlanCode), - Source: common.Source(encoded.Source), - Actor: common.ActorRef{Type: common.ActorType(encoded.ActorType)}, - ReasonCode: common.ReasonCode(encoded.ReasonCode), - StartsAt: startsAt.UTC(), - CreatedAt: createdAt.UTC(), - } - if encoded.ActorID != nil { - record.Actor.ID = common.ActorID(*encoded.ActorID) - } - if encoded.EndsAt != nil { - value, err := time.Parse(time.RFC3339Nano, 
*encoded.EndsAt) - if err != nil { - return entitlement.PeriodRecord{}, fmt.Errorf("decode entitlement period record ends_at: %w", err) - } - value = value.UTC() - record.EndsAt = &value - } - if encoded.ClosedAt != nil { - value, err := time.Parse(time.RFC3339Nano, *encoded.ClosedAt) - if err != nil { - return entitlement.PeriodRecord{}, fmt.Errorf("decode entitlement period record closed_at: %w", err) - } - value = value.UTC() - record.ClosedAt = &value - } - if encoded.ClosedByType != nil { - record.ClosedBy.Type = common.ActorType(*encoded.ClosedByType) - } - if encoded.ClosedByID != nil { - record.ClosedBy.ID = common.ActorID(*encoded.ClosedByID) - } - if encoded.ClosedReasonCode != nil { - record.ClosedReasonCode = common.ReasonCode(*encoded.ClosedReasonCode) - } - if err := record.Validate(); err != nil { - return entitlement.PeriodRecord{}, fmt.Errorf("decode entitlement period record: %w", err) - } - - return record, nil -} - -func equalEntitlementSnapshots(left entitlement.CurrentSnapshot, right entitlement.CurrentSnapshot) bool { - return left.UserID == right.UserID && - left.PlanCode == right.PlanCode && - left.IsPaid == right.IsPaid && - left.StartsAt.Equal(right.StartsAt) && - equalOptionalTime(left.EndsAt, right.EndsAt) && - left.Source == right.Source && - left.Actor == right.Actor && - left.ReasonCode == right.ReasonCode && - left.UpdatedAt.Equal(right.UpdatedAt) -} - -func equalEntitlementPeriodRecords(left entitlement.PeriodRecord, right entitlement.PeriodRecord) bool { - return left.RecordID == right.RecordID && - left.UserID == right.UserID && - left.PlanCode == right.PlanCode && - left.Source == right.Source && - left.Actor == right.Actor && - left.ReasonCode == right.ReasonCode && - left.StartsAt.Equal(right.StartsAt) && - equalOptionalTime(left.EndsAt, right.EndsAt) && - left.CreatedAt.Equal(right.CreatedAt) && - equalOptionalTime(left.ClosedAt, right.ClosedAt) && - left.ClosedBy == right.ClosedBy && - left.ClosedReasonCode == 
right.ClosedReasonCode -} - -func equalOptionalTime(left *time.Time, right *time.Time) bool { - switch { - case left == nil && right == nil: - return true - case left == nil || right == nil: - return false - default: - return left.Equal(*right) - } -} - -// EntitlementHistoryStore adapts Store to the existing -// EntitlementHistoryStore port. -type EntitlementHistoryStore struct { - store *Store -} - -// EntitlementHistory returns one adapter that exposes the entitlement-history -// store port over Store. -func (store *Store) EntitlementHistory() *EntitlementHistoryStore { - if store == nil { - return nil - } - - return &EntitlementHistoryStore{store: store} -} - -// Create stores one new entitlement history record. -func (adapter *EntitlementHistoryStore) Create(ctx context.Context, record entitlement.PeriodRecord) error { - return adapter.store.CreateEntitlementRecord(ctx, record) -} - -// GetByRecordID returns the entitlement history record identified by recordID. -func (adapter *EntitlementHistoryStore) GetByRecordID( - ctx context.Context, - recordID entitlement.EntitlementRecordID, -) (entitlement.PeriodRecord, error) { - return adapter.store.GetEntitlementRecordByRecordID(ctx, recordID) -} - -// ListByUserID returns every entitlement history record owned by userID. -func (adapter *EntitlementHistoryStore) ListByUserID( - ctx context.Context, - userID common.UserID, -) ([]entitlement.PeriodRecord, error) { - return adapter.store.ListEntitlementRecordsByUserID(ctx, userID) -} - -// Update replaces one stored entitlement history record. -func (adapter *EntitlementHistoryStore) Update(ctx context.Context, record entitlement.PeriodRecord) error { - return adapter.store.UpdateEntitlementRecord(ctx, record) -} - -var _ ports.EntitlementHistoryStore = (*EntitlementHistoryStore)(nil) - -// EntitlementLifecycleStore adapts Store to the existing -// EntitlementLifecycleStore port. 
-type EntitlementLifecycleStore struct { - store *Store -} - -// EntitlementLifecycle returns one adapter that exposes the atomic -// entitlement-lifecycle store port over Store. -func (store *Store) EntitlementLifecycle() *EntitlementLifecycleStore { - if store == nil { - return nil - } - - return &EntitlementLifecycleStore{store: store} -} - -// Grant atomically applies one free-to-paid transition. -func (adapter *EntitlementLifecycleStore) Grant(ctx context.Context, input ports.GrantEntitlementInput) error { - return adapter.store.GrantEntitlement(ctx, input) -} - -// Extend atomically appends one paid extension segment and updates the current -// snapshot. -func (adapter *EntitlementLifecycleStore) Extend(ctx context.Context, input ports.ExtendEntitlementInput) error { - return adapter.store.ExtendEntitlement(ctx, input) -} - -// Revoke atomically applies one paid-to-free transition. -func (adapter *EntitlementLifecycleStore) Revoke(ctx context.Context, input ports.RevokeEntitlementInput) error { - return adapter.store.RevokeEntitlement(ctx, input) -} - -// RepairExpired atomically repairs one expired finite paid snapshot. 
-func (adapter *EntitlementLifecycleStore) RepairExpired( - ctx context.Context, - input ports.RepairExpiredEntitlementInput, -) error { - return adapter.store.RepairExpiredEntitlement(ctx, input) -} - -var _ ports.EntitlementLifecycleStore = (*EntitlementLifecycleStore)(nil) diff --git a/user/internal/adapters/redis/userstore/list_store.go b/user/internal/adapters/redis/userstore/list_store.go deleted file mode 100644 index e380c3d..0000000 --- a/user/internal/adapters/redis/userstore/list_store.go +++ /dev/null @@ -1,137 +0,0 @@ -package userstore - -import ( - "context" - "errors" - "fmt" - "time" - - "galaxy/user/internal/adapters/redisstate" - "galaxy/user/internal/domain/common" - "galaxy/user/internal/ports" - - "github.com/redis/go-redis/v9" -) - -// ListUserIDs returns one deterministic page of user identifiers ordered by -// `created_at desc`, then `user_id desc`. -func (store *Store) ListUserIDs(ctx context.Context, input ports.ListUsersInput) (ports.ListUsersResult, error) { - if err := input.Validate(); err != nil { - return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", err) - } - - operationCtx, cancel, err := store.operationContext(ctx, "list users in redis") - if err != nil { - return ports.ListUsersResult{}, err - } - defer cancel() - - startIndex := int64(0) - filters := userListFiltersFromPorts(input.Filters) - if input.PageToken != "" { - cursor, err := redisstate.DecodePageToken(input.PageToken, filters) - if err != nil { - return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", ports.ErrInvalidPageToken) - } - - score, err := store.client.ZScore(operationCtx, store.keyspace.CreatedAtIndex(), cursor.UserID.String()).Result() - switch { - case errors.Is(err, redis.Nil): - return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", ports.ErrInvalidPageToken) - case err != nil: - return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", err) - } - if 
!time.UnixMicro(int64(score)).UTC().Equal(cursor.CreatedAt.UTC()) { - return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", ports.ErrInvalidPageToken) - } - - rank, err := store.client.ZRevRank(operationCtx, store.keyspace.CreatedAtIndex(), cursor.UserID.String()).Result() - switch { - case errors.Is(err, redis.Nil): - return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", ports.ErrInvalidPageToken) - case err != nil: - return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", err) - } - - startIndex = rank + 1 - } - - rawPage, err := store.client.ZRevRangeWithScores( - operationCtx, - store.keyspace.CreatedAtIndex(), - startIndex, - startIndex+int64(input.PageSize), - ).Result() - if err != nil { - return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", err) - } - - result := ports.ListUsersResult{ - UserIDs: make([]common.UserID, 0, min(len(rawPage), input.PageSize)), - } - - visibleCount := min(len(rawPage), input.PageSize) - for index := 0; index < visibleCount; index++ { - userID, err := memberUserID(rawPage[index].Member) - if err != nil { - return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", err) - } - result.UserIDs = append(result.UserIDs, userID) - } - - if len(rawPage) > input.PageSize { - lastVisible := rawPage[input.PageSize-1] - lastUserID, err := memberUserID(lastVisible.Member) - if err != nil { - return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", err) - } - token, err := redisstate.EncodePageToken(redisstate.PageCursor{ - CreatedAt: time.UnixMicro(int64(lastVisible.Score)).UTC(), - UserID: lastUserID, - }, filters) - if err != nil { - return ports.ListUsersResult{}, fmt.Errorf("list users in redis: %w", err) - } - result.NextPageToken = token - } - - return result, nil -} - -func userListFiltersFromPorts(filters ports.UserListFilters) redisstate.UserListFilters { - return redisstate.UserListFilters{ - PaidState: filters.PaidState, - PaidExpiresBefore: 
filters.PaidExpiresBefore, - PaidExpiresAfter: filters.PaidExpiresAfter, - DeclaredCountry: filters.DeclaredCountry, - SanctionCode: filters.SanctionCode, - LimitCode: filters.LimitCode, - CanLogin: filters.CanLogin, - CanCreatePrivateGame: filters.CanCreatePrivateGame, - CanJoinGame: filters.CanJoinGame, - } -} - -func memberUserID(member any) (common.UserID, error) { - value, ok := member.(string) - if !ok { - return "", fmt.Errorf("unexpected created-at index member type %T", member) - } - - userID := common.UserID(value) - if err := userID.Validate(); err != nil { - return "", fmt.Errorf("created-at index member user id: %w", err) - } - - return userID, nil -} - -func min(left int, right int) int { - if left < right { - return left - } - - return right -} - -var _ ports.UserListStore = (*Store)(nil) diff --git a/user/internal/adapters/redis/userstore/policy_store.go b/user/internal/adapters/redis/userstore/policy_store.go deleted file mode 100644 index 20fc250..0000000 --- a/user/internal/adapters/redis/userstore/policy_store.go +++ /dev/null @@ -1,445 +0,0 @@ -package userstore - -import ( - "context" - "errors" - "fmt" - "time" - - "galaxy/user/internal/domain/policy" - "galaxy/user/internal/ports" - - "github.com/redis/go-redis/v9" -) - -// ApplySanction atomically creates one new active sanction record. 
-func (store *Store) ApplySanction(ctx context.Context, input ports.ApplySanctionInput) error { - if err := input.Validate(); err != nil { - return fmt.Errorf("apply sanction in redis: %w", err) - } - - recordPayload, err := marshalSanctionRecord(input.NewRecord) - if err != nil { - return fmt.Errorf("apply sanction in redis: %w", err) - } - - recordKey := store.keyspace.SanctionRecord(input.NewRecord.RecordID) - historyKey := store.keyspace.SanctionHistory(input.NewRecord.UserID) - activeKey := store.keyspace.ActiveSanction(input.NewRecord.UserID, input.NewRecord.SanctionCode) - snapshotKey := store.keyspace.EntitlementSnapshot(input.NewRecord.UserID) - watchedKeys := append( - []string{recordKey, historyKey, activeKey, snapshotKey}, - store.activeSanctionWatchKeys(input.NewRecord.UserID)..., - ) - - operationCtx, cancel, err := store.operationContext(ctx, "apply sanction in redis") - if err != nil { - return err - } - defer cancel() - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - if err := ensureKeyAbsent(operationCtx, tx, recordKey); err != nil { - return fmt.Errorf("apply sanction for user %q in redis: %w", input.NewRecord.UserID, err) - } - if err := ensureKeyAbsent(operationCtx, tx, activeKey); err != nil { - return fmt.Errorf("apply sanction for user %q in redis: %w", input.NewRecord.UserID, err) - } - snapshot, err := store.loadEntitlementSnapshot(operationCtx, tx, input.NewRecord.UserID) - if err != nil { - return fmt.Errorf("apply sanction for user %q in redis: %w", input.NewRecord.UserID, err) - } - activeSanctionCodes, err := store.loadActiveSanctionCodeSet(operationCtx, tx, input.NewRecord.UserID) - if err != nil { - return fmt.Errorf("apply sanction for user %q in redis: %w", input.NewRecord.UserID, err) - } - activeSanctionCodes[input.NewRecord.SanctionCode] = struct{}{} - - _, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - pipe.Set(operationCtx, recordKey, recordPayload, 0) - 
pipe.ZAdd(operationCtx, historyKey, redis.Z{ - Score: float64(input.NewRecord.AppliedAt.UTC().UnixMicro()), - Member: input.NewRecord.RecordID.String(), - }) - setActiveSlot(pipe, operationCtx, activeKey, input.NewRecord.RecordID.String(), input.NewRecord.ExpiresAt) - store.syncActiveSanctionCodeIndexes(pipe, operationCtx, input.NewRecord.UserID, activeSanctionCodes) - store.syncEligibilityMarkerIndexes(pipe, operationCtx, input.NewRecord.UserID, snapshot.IsPaid, activeSanctionCodes) - return nil - }) - if err != nil { - return fmt.Errorf("apply sanction for user %q in redis: %w", input.NewRecord.UserID, err) - } - - return nil - }, watchedKeys...) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("apply sanction for user %q in redis: %w", input.NewRecord.UserID, ports.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// RemoveSanction atomically removes one active sanction record. -func (store *Store) RemoveSanction(ctx context.Context, input ports.RemoveSanctionInput) error { - if err := input.Validate(); err != nil { - return fmt.Errorf("remove sanction in redis: %w", err) - } - - updatedPayload, err := marshalSanctionRecord(input.UpdatedRecord) - if err != nil { - return fmt.Errorf("remove sanction in redis: %w", err) - } - - recordKey := store.keyspace.SanctionRecord(input.ExpectedActiveRecord.RecordID) - activeKey := store.keyspace.ActiveSanction(input.ExpectedActiveRecord.UserID, input.ExpectedActiveRecord.SanctionCode) - snapshotKey := store.keyspace.EntitlementSnapshot(input.ExpectedActiveRecord.UserID) - watchedKeys := append( - []string{recordKey, activeKey, snapshotKey}, - store.activeSanctionWatchKeys(input.ExpectedActiveRecord.UserID)..., - ) - - operationCtx, cancel, err := store.operationContext(ctx, "remove sanction in redis") - if err != nil { - return err - } - defer cancel() - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - activeRecordID, err := 
store.loadActiveSanctionRecordID(operationCtx, tx, activeKey) - if err != nil { - return fmt.Errorf("remove sanction for user %q in redis: %w", input.ExpectedActiveRecord.UserID, err) - } - if activeRecordID != input.ExpectedActiveRecord.RecordID { - return fmt.Errorf("remove sanction for user %q in redis: %w", input.ExpectedActiveRecord.UserID, ports.ErrConflict) - } - - storedRecord, err := store.loadSanctionRecord(operationCtx, tx, input.ExpectedActiveRecord.RecordID) - if err != nil { - return fmt.Errorf("remove sanction for user %q in redis: %w", input.ExpectedActiveRecord.UserID, err) - } - if !equalSanctionRecords(storedRecord, input.ExpectedActiveRecord) { - return fmt.Errorf("remove sanction for user %q in redis: %w", input.ExpectedActiveRecord.UserID, ports.ErrConflict) - } - snapshot, err := store.loadEntitlementSnapshot(operationCtx, tx, input.ExpectedActiveRecord.UserID) - if err != nil { - return fmt.Errorf("remove sanction for user %q in redis: %w", input.ExpectedActiveRecord.UserID, err) - } - activeSanctionCodes, err := store.loadActiveSanctionCodeSet(operationCtx, tx, input.ExpectedActiveRecord.UserID) - if err != nil { - return fmt.Errorf("remove sanction for user %q in redis: %w", input.ExpectedActiveRecord.UserID, err) - } - delete(activeSanctionCodes, input.ExpectedActiveRecord.SanctionCode) - - _, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - pipe.Set(operationCtx, recordKey, updatedPayload, 0) - pipe.Del(operationCtx, activeKey) - store.syncActiveSanctionCodeIndexes(pipe, operationCtx, input.ExpectedActiveRecord.UserID, activeSanctionCodes) - store.syncEligibilityMarkerIndexes(pipe, operationCtx, input.ExpectedActiveRecord.UserID, snapshot.IsPaid, activeSanctionCodes) - return nil - }) - if err != nil { - return fmt.Errorf("remove sanction for user %q in redis: %w", input.ExpectedActiveRecord.UserID, err) - } - - return nil - }, watchedKeys...) 
- - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("remove sanction for user %q in redis: %w", input.ExpectedActiveRecord.UserID, ports.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// SetLimit atomically creates or replaces one active limit record. -func (store *Store) SetLimit(ctx context.Context, input ports.SetLimitInput) error { - if err := input.Validate(); err != nil { - return fmt.Errorf("set limit in redis: %w", err) - } - - newRecordPayload, err := marshalLimitRecord(input.NewRecord) - if err != nil { - return fmt.Errorf("set limit in redis: %w", err) - } - - newRecordKey := store.keyspace.LimitRecord(input.NewRecord.RecordID) - historyKey := store.keyspace.LimitHistory(input.NewRecord.UserID) - activeKey := store.keyspace.ActiveLimit(input.NewRecord.UserID, input.NewRecord.LimitCode) - watchedKeys := append( - []string{newRecordKey, historyKey, activeKey}, - store.activeLimitWatchKeys(input.NewRecord.UserID)..., - ) - - operationCtx, cancel, err := store.operationContext(ctx, "set limit in redis") - if err != nil { - return err - } - defer cancel() - if input.ExpectedActiveRecord != nil { - watchedKeys = append(watchedKeys, store.keyspace.LimitRecord(input.ExpectedActiveRecord.RecordID)) - } - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - if err := ensureKeyAbsent(operationCtx, tx, newRecordKey); err != nil { - return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, err) - } - - var updatedPayload []byte - if input.ExpectedActiveRecord == nil { - if err := ensureKeyAbsent(operationCtx, tx, activeKey); err != nil { - return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, err) - } - } else { - activeRecordID, err := store.loadActiveLimitRecordID(operationCtx, tx, activeKey) - if err != nil { - return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, err) - } - if activeRecordID != 
input.ExpectedActiveRecord.RecordID { - return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, ports.ErrConflict) - } - - storedRecord, err := store.loadLimitRecord(operationCtx, tx, input.ExpectedActiveRecord.RecordID) - if err != nil { - return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, err) - } - if !equalLimitRecords(storedRecord, *input.ExpectedActiveRecord) { - return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, ports.ErrConflict) - } - - updatedPayload, err = marshalLimitRecord(*input.UpdatedActiveRecord) - if err != nil { - return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, err) - } - } - activeLimitCodes, err := store.loadActiveLimitCodeSet(operationCtx, tx, input.NewRecord.UserID) - if err != nil { - return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, err) - } - activeLimitCodes[input.NewRecord.LimitCode] = struct{}{} - - _, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - if input.ExpectedActiveRecord != nil { - pipe.Set(operationCtx, store.keyspace.LimitRecord(input.ExpectedActiveRecord.RecordID), updatedPayload, 0) - } - pipe.Set(operationCtx, newRecordKey, newRecordPayload, 0) - pipe.ZAdd(operationCtx, historyKey, redis.Z{ - Score: float64(input.NewRecord.AppliedAt.UTC().UnixMicro()), - Member: input.NewRecord.RecordID.String(), - }) - setActiveSlot(pipe, operationCtx, activeKey, input.NewRecord.RecordID.String(), input.NewRecord.ExpiresAt) - store.syncActiveLimitCodeIndexes(pipe, operationCtx, input.NewRecord.UserID, activeLimitCodes) - return nil - }) - if err != nil { - return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, err) - } - - return nil - }, watchedKeys...) 
- - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("set limit for user %q in redis: %w", input.NewRecord.UserID, ports.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// RemoveLimit atomically removes one active limit record. -func (store *Store) RemoveLimit(ctx context.Context, input ports.RemoveLimitInput) error { - if err := input.Validate(); err != nil { - return fmt.Errorf("remove limit in redis: %w", err) - } - - updatedPayload, err := marshalLimitRecord(input.UpdatedRecord) - if err != nil { - return fmt.Errorf("remove limit in redis: %w", err) - } - - recordKey := store.keyspace.LimitRecord(input.ExpectedActiveRecord.RecordID) - activeKey := store.keyspace.ActiveLimit(input.ExpectedActiveRecord.UserID, input.ExpectedActiveRecord.LimitCode) - watchedKeys := append( - []string{recordKey, activeKey}, - store.activeLimitWatchKeys(input.ExpectedActiveRecord.UserID)..., - ) - - operationCtx, cancel, err := store.operationContext(ctx, "remove limit in redis") - if err != nil { - return err - } - defer cancel() - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - activeRecordID, err := store.loadActiveLimitRecordID(operationCtx, tx, activeKey) - if err != nil { - return fmt.Errorf("remove limit for user %q in redis: %w", input.ExpectedActiveRecord.UserID, err) - } - if activeRecordID != input.ExpectedActiveRecord.RecordID { - return fmt.Errorf("remove limit for user %q in redis: %w", input.ExpectedActiveRecord.UserID, ports.ErrConflict) - } - - storedRecord, err := store.loadLimitRecord(operationCtx, tx, input.ExpectedActiveRecord.RecordID) - if err != nil { - return fmt.Errorf("remove limit for user %q in redis: %w", input.ExpectedActiveRecord.UserID, err) - } - if !equalLimitRecords(storedRecord, input.ExpectedActiveRecord) { - return fmt.Errorf("remove limit for user %q in redis: %w", input.ExpectedActiveRecord.UserID, ports.ErrConflict) - } - activeLimitCodes, err := 
store.loadActiveLimitCodeSet(operationCtx, tx, input.ExpectedActiveRecord.UserID) - if err != nil { - return fmt.Errorf("remove limit for user %q in redis: %w", input.ExpectedActiveRecord.UserID, err) - } - delete(activeLimitCodes, input.ExpectedActiveRecord.LimitCode) - - _, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - pipe.Set(operationCtx, recordKey, updatedPayload, 0) - pipe.Del(operationCtx, activeKey) - store.syncActiveLimitCodeIndexes(pipe, operationCtx, input.ExpectedActiveRecord.UserID, activeLimitCodes) - return nil - }) - if err != nil { - return fmt.Errorf("remove limit for user %q in redis: %w", input.ExpectedActiveRecord.UserID, err) - } - - return nil - }, watchedKeys...) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("remove limit for user %q in redis: %w", input.ExpectedActiveRecord.UserID, ports.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -func (store *Store) loadActiveSanctionRecordID( - ctx context.Context, - getter bytesGetter, - key string, -) (policy.SanctionRecordID, error) { - value, err := getter.Get(ctx, key).Result() - switch { - case errors.Is(err, redis.Nil): - return "", ports.ErrNotFound - case err != nil: - return "", err - } - - recordID := policy.SanctionRecordID(value) - if err := recordID.Validate(); err != nil { - return "", fmt.Errorf("active sanction record id: %w", err) - } - - return recordID, nil -} - -func (store *Store) loadActiveLimitRecordID( - ctx context.Context, - getter bytesGetter, - key string, -) (policy.LimitRecordID, error) { - value, err := getter.Get(ctx, key).Result() - switch { - case errors.Is(err, redis.Nil): - return "", ports.ErrNotFound - case err != nil: - return "", err - } - - recordID := policy.LimitRecordID(value) - if err := recordID.Validate(); err != nil { - return "", fmt.Errorf("active limit record id: %w", err) - } - - return recordID, nil -} - -func setActiveSlot( - pipe redis.Pipeliner, 
- ctx context.Context, - key string, - recordID string, - expiresAt *time.Time, -) { - pipe.Set(ctx, key, recordID, 0) - if expiresAt != nil { - pipe.PExpireAt(ctx, key, expiresAt.UTC()) - } -} - -func equalSanctionRecords(left policy.SanctionRecord, right policy.SanctionRecord) bool { - return left.RecordID == right.RecordID && - left.UserID == right.UserID && - left.SanctionCode == right.SanctionCode && - left.Scope == right.Scope && - left.ReasonCode == right.ReasonCode && - left.Actor == right.Actor && - left.AppliedAt.Equal(right.AppliedAt) && - equalOptionalTime(left.ExpiresAt, right.ExpiresAt) && - equalOptionalTime(left.RemovedAt, right.RemovedAt) && - left.RemovedBy == right.RemovedBy && - left.RemovedReasonCode == right.RemovedReasonCode -} - -func equalLimitRecords(left policy.LimitRecord, right policy.LimitRecord) bool { - return left.RecordID == right.RecordID && - left.UserID == right.UserID && - left.LimitCode == right.LimitCode && - left.Value == right.Value && - left.ReasonCode == right.ReasonCode && - left.Actor == right.Actor && - left.AppliedAt.Equal(right.AppliedAt) && - equalOptionalTime(left.ExpiresAt, right.ExpiresAt) && - equalOptionalTime(left.RemovedAt, right.RemovedAt) && - left.RemovedBy == right.RemovedBy && - left.RemovedReasonCode == right.RemovedReasonCode -} - -// PolicyLifecycleStore adapts Store to the existing PolicyLifecycleStore -// port. -type PolicyLifecycleStore struct { - store *Store -} - -// PolicyLifecycle returns one adapter that exposes the atomic policy-lifecycle -// store port over Store. -func (store *Store) PolicyLifecycle() *PolicyLifecycleStore { - if store == nil { - return nil - } - - return &PolicyLifecycleStore{store: store} -} - -// ApplySanction atomically creates one new active sanction record. 
-func (adapter *PolicyLifecycleStore) ApplySanction(ctx context.Context, input ports.ApplySanctionInput) error { - return adapter.store.ApplySanction(ctx, input) -} - -// RemoveSanction atomically removes one active sanction record. -func (adapter *PolicyLifecycleStore) RemoveSanction(ctx context.Context, input ports.RemoveSanctionInput) error { - return adapter.store.RemoveSanction(ctx, input) -} - -// SetLimit atomically creates or replaces one active limit record. -func (adapter *PolicyLifecycleStore) SetLimit(ctx context.Context, input ports.SetLimitInput) error { - return adapter.store.SetLimit(ctx, input) -} - -// RemoveLimit atomically removes one active limit record. -func (adapter *PolicyLifecycleStore) RemoveLimit(ctx context.Context, input ports.RemoveLimitInput) error { - return adapter.store.RemoveLimit(ctx, input) -} - -var _ ports.PolicyLifecycleStore = (*PolicyLifecycleStore)(nil) diff --git a/user/internal/adapters/redis/userstore/store.go b/user/internal/adapters/redis/userstore/store.go deleted file mode 100644 index 9b2c74c..0000000 --- a/user/internal/adapters/redis/userstore/store.go +++ /dev/null @@ -1,1908 +0,0 @@ -// Package userstore implements the Redis-backed source-of-truth persistence -// used by the first runnable user-service slice. -package userstore - -import ( - "bytes" - "context" - "crypto/tls" - "encoding/json" - "errors" - "fmt" - "io" - "strings" - "time" - - "galaxy/user/internal/adapters/redisstate" - "galaxy/user/internal/domain/account" - "galaxy/user/internal/domain/authblock" - "galaxy/user/internal/domain/common" - "galaxy/user/internal/domain/entitlement" - "galaxy/user/internal/domain/policy" - "galaxy/user/internal/ports" - - "github.com/redis/go-redis/v9" -) - -const mutationRetryLimit = 3 - -// Config configures one Redis-backed user store instance. -type Config struct { - // Addr stores the Redis network address in host:port form. - Addr string - - // Username stores the optional Redis ACL username. 
- Username string - - // Password stores the optional Redis ACL password. - Password string - - // DB stores the Redis logical database index. - DB int - - // TLSEnabled enables TLS with a conservative minimum protocol version. - TLSEnabled bool - - // KeyspacePrefix stores the root prefix of the service-owned Redis keyspace. - KeyspacePrefix string - - // OperationTimeout bounds each Redis round trip performed by the store. - OperationTimeout time.Duration -} - -// Store persists auth-facing user state in Redis and exposes the narrow atomic -// auth-facing mutation boundary plus selected entity-store interfaces. -type Store struct { - client *redis.Client - keyspace redisstate.Keyspace - operationTimeout time.Duration -} - -type accountRecord struct { - UserID string `json:"user_id"` - Email string `json:"email"` - UserName string `json:"user_name"` - DisplayName string `json:"display_name,omitempty"` - PreferredLanguage string `json:"preferred_language"` - TimeZone string `json:"time_zone"` - DeclaredCountry *string `json:"declared_country,omitempty"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - DeletedAt *string `json:"deleted_at,omitempty"` -} - -type blockedEmailRecord struct { - Email string `json:"email"` - ReasonCode string `json:"reason_code"` - BlockedAt string `json:"blocked_at"` - ActorType *string `json:"actor_type,omitempty"` - ActorID *string `json:"actor_id,omitempty"` - ResolvedUserID *string `json:"resolved_user_id,omitempty"` -} - -type entitlementSnapshotRecord struct { - UserID string `json:"user_id"` - PlanCode string `json:"plan_code"` - IsPaid bool `json:"is_paid"` - StartsAt string `json:"starts_at"` - EndsAt *string `json:"ends_at,omitempty"` - Source string `json:"source"` - ActorType string `json:"actor_type"` - ActorID *string `json:"actor_id,omitempty"` - ReasonCode string `json:"reason_code"` - UpdatedAt string `json:"updated_at"` -} - -type sanctionRecord struct { - RecordID string 
`json:"record_id"` - UserID string `json:"user_id"` - SanctionCode string `json:"sanction_code"` - Scope string `json:"scope"` - ReasonCode string `json:"reason_code"` - ActorType string `json:"actor_type"` - ActorID *string `json:"actor_id,omitempty"` - AppliedAt string `json:"applied_at"` - ExpiresAt *string `json:"expires_at,omitempty"` - RemovedAt *string `json:"removed_at,omitempty"` - RemovedByType *string `json:"removed_by_type,omitempty"` - RemovedByID *string `json:"removed_by_id,omitempty"` - RemovedReasonCode *string `json:"removed_reason_code,omitempty"` -} - -type limitRecord struct { - RecordID string `json:"record_id"` - UserID string `json:"user_id"` - LimitCode string `json:"limit_code"` - Value int `json:"value"` - ReasonCode string `json:"reason_code"` - ActorType string `json:"actor_type"` - ActorID *string `json:"actor_id,omitempty"` - AppliedAt string `json:"applied_at"` - ExpiresAt *string `json:"expires_at,omitempty"` - RemovedAt *string `json:"removed_at,omitempty"` - RemovedByType *string `json:"removed_by_type,omitempty"` - RemovedByID *string `json:"removed_by_id,omitempty"` - RemovedReasonCode *string `json:"removed_reason_code,omitempty"` -} - -type bytesGetter interface { - Get(context.Context, string) *redis.StringCmd -} - -// New constructs one Redis-backed user store from cfg. 
-func New(cfg Config) (*Store, error) { - switch { - case strings.TrimSpace(cfg.Addr) == "": - return nil, errors.New("new redis user store: redis addr must not be empty") - case cfg.DB < 0: - return nil, errors.New("new redis user store: redis db must not be negative") - case strings.TrimSpace(cfg.KeyspacePrefix) == "": - return nil, errors.New("new redis user store: redis keyspace prefix must not be empty") - case cfg.OperationTimeout <= 0: - return nil, errors.New("new redis user store: operation timeout must be positive") - } - - options := &redis.Options{ - Addr: cfg.Addr, - Username: cfg.Username, - Password: cfg.Password, - DB: cfg.DB, - Protocol: 2, - DisableIdentity: true, - } - if cfg.TLSEnabled { - options.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} - } - - return &Store{ - client: redis.NewClient(options), - keyspace: redisstate.Keyspace{Prefix: cfg.KeyspacePrefix}, - operationTimeout: cfg.OperationTimeout, - }, nil -} - -// Close releases the underlying Redis client resources. -func (store *Store) Close() error { - if store == nil || store.client == nil { - return nil - } - - return store.client.Close() -} - -// Ping verifies that the configured Redis backend is reachable. -func (store *Store) Ping(ctx context.Context) error { - operationCtx, cancel, err := store.operationContext(ctx, "ping redis user store") - if err != nil { - return err - } - defer cancel() - - if err := store.client.Ping(operationCtx).Err(); err != nil { - return fmt.Errorf("ping redis user store: %w", err) - } - - return nil -} - -// Create stores one new account record together with the exact user-name -// lookup state. 
-func (store *Store) Create(ctx context.Context, input ports.CreateAccountInput) error { - if err := input.Validate(); err != nil { - return fmt.Errorf("create account in redis: %w", err) - } - - accountPayload, err := marshalAccountRecord(input.Account) - if err != nil { - return fmt.Errorf("create account in redis: %w", err) - } - - accountKey := store.keyspace.Account(input.Account.UserID) - emailLookupKey := store.keyspace.EmailLookup(input.Account.Email) - userNameLookupKey := store.keyspace.UserNameLookup(input.Account.UserName) - - operationCtx, cancel, err := store.operationContext(ctx, "create account in redis") - if err != nil { - return err - } - defer cancel() - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - if err := ensureKeyAbsent(operationCtx, tx, accountKey); err != nil { - return fmt.Errorf("create account %q in redis: %w", input.Account.UserID, err) - } - if err := ensureKeyAbsent(operationCtx, tx, emailLookupKey); err != nil { - return fmt.Errorf("create account %q in redis: %w", input.Account.UserID, err) - } - if err := ensureKeyAbsent(operationCtx, tx, userNameLookupKey); err != nil { - if errors.Is(err, ports.ErrConflict) { - return fmt.Errorf("create account %q in redis: %w", input.Account.UserID, ports.ErrUserNameConflict) - } - return fmt.Errorf("create account %q in redis: %w", input.Account.UserID, err) - } - - _, err := tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - pipe.Set(operationCtx, accountKey, accountPayload, 0) - pipe.Set(operationCtx, emailLookupKey, input.Account.UserID.String(), 0) - pipe.Set(operationCtx, userNameLookupKey, input.Account.UserID.String(), 0) - store.addCreatedAtIndex(pipe, operationCtx, input.Account) - store.syncDeclaredCountryIndex(pipe, operationCtx, account.UserAccount{}, input.Account) - return nil - }) - if err != nil { - return fmt.Errorf("create account %q in redis: %w", input.Account.UserID, err) - } - - return nil - }, accountKey, emailLookupKey, 
userNameLookupKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("create account %q in redis: %w", input.Account.UserID, ports.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// GetByUserID returns the stored account identified by userID. -func (store *Store) GetByUserID(ctx context.Context, userID common.UserID) (account.UserAccount, error) { - if err := userID.Validate(); err != nil { - return account.UserAccount{}, fmt.Errorf("get account by user id from redis: %w", err) - } - - operationCtx, cancel, err := store.operationContext(ctx, "get account by user id from redis") - if err != nil { - return account.UserAccount{}, err - } - defer cancel() - - record, err := store.loadAccount(operationCtx, store.client, userID) - if err != nil { - switch { - case errors.Is(err, ports.ErrNotFound): - return account.UserAccount{}, fmt.Errorf("get account by user id %q from redis: %w", userID, ports.ErrNotFound) - default: - return account.UserAccount{}, fmt.Errorf("get account by user id %q from redis: %w", userID, err) - } - } - - return record, nil -} - -// GetByEmail returns the stored account identified by email. 
-func (store *Store) GetByEmail(ctx context.Context, email common.Email) (account.UserAccount, error) { - if err := email.Validate(); err != nil { - return account.UserAccount{}, fmt.Errorf("get account by email from redis: %w", err) - } - - operationCtx, cancel, err := store.operationContext(ctx, "get account by email from redis") - if err != nil { - return account.UserAccount{}, err - } - defer cancel() - - userID, err := store.loadLookupUserID(operationCtx, store.client, store.keyspace.EmailLookup(email)) - if err != nil { - switch { - case errors.Is(err, ports.ErrNotFound): - return account.UserAccount{}, fmt.Errorf("get account by email %q from redis: %w", email, ports.ErrNotFound) - default: - return account.UserAccount{}, fmt.Errorf("get account by email %q from redis: %w", email, err) - } - } - - record, err := store.loadAccount(operationCtx, store.client, userID) - if err != nil { - switch { - case errors.Is(err, ports.ErrNotFound): - return account.UserAccount{}, fmt.Errorf("get account by email %q from redis: lookup references missing user %q", email, userID) - default: - return account.UserAccount{}, fmt.Errorf("get account by email %q from redis: %w", email, err) - } - } - - return record, nil -} - -// GetByUserName returns the stored account identified by the exact stored -// user name. 
-func (store *Store) GetByUserName(ctx context.Context, userName common.UserName) (account.UserAccount, error) { - if err := userName.Validate(); err != nil { - return account.UserAccount{}, fmt.Errorf("get account by user name from redis: %w", err) - } - - operationCtx, cancel, err := store.operationContext(ctx, "get account by user name from redis") - if err != nil { - return account.UserAccount{}, err - } - defer cancel() - - userID, err := store.loadLookupUserID(operationCtx, store.client, store.keyspace.UserNameLookup(userName)) - if err != nil { - switch { - case errors.Is(err, ports.ErrNotFound): - return account.UserAccount{}, fmt.Errorf("get account by user name %q from redis: %w", userName, ports.ErrNotFound) - default: - return account.UserAccount{}, fmt.Errorf("get account by user name %q from redis: %w", userName, err) - } - } - - record, err := store.loadAccount(operationCtx, store.client, userID) - if err != nil { - switch { - case errors.Is(err, ports.ErrNotFound): - return account.UserAccount{}, fmt.Errorf("get account by user name %q from redis: lookup references missing user %q", userName, userID) - default: - return account.UserAccount{}, fmt.Errorf("get account by user name %q from redis: %w", userName, err) - } - } - - return record, nil -} - -// ExistsByUserID reports whether userID currently identifies a stored account -// that is not soft-deleted. Soft-deleted accounts are treated as non-existing -// for external callers per Stage 22. 
-func (store *Store) ExistsByUserID(ctx context.Context, userID common.UserID) (bool, error) { - if err := userID.Validate(); err != nil { - return false, fmt.Errorf("exists by user id from redis: %w", err) - } - - operationCtx, cancel, err := store.operationContext(ctx, "exists by user id from redis") - if err != nil { - return false, err - } - defer cancel() - - record, err := store.loadAccount(operationCtx, store.client, userID) - switch { - case err == nil: - case errors.Is(err, ports.ErrNotFound): - return false, nil - default: - return false, fmt.Errorf("exists by user id %q from redis: %w", userID, err) - } - - if record.IsDeleted() { - return false, nil - } - - return true, nil -} - -// Update replaces the stored account state for record.UserID. `email` and -// `user_name` are immutable; any attempt to mutate them returns -// ports.ErrConflict. -func (store *Store) Update(ctx context.Context, record account.UserAccount) error { - if err := record.Validate(); err != nil { - return fmt.Errorf("update account in redis: %w", err) - } - - accountPayload, err := marshalAccountRecord(record) - if err != nil { - return fmt.Errorf("update account in redis: %w", err) - } - - accountKey := store.keyspace.Account(record.UserID) - emailLookupKey := store.keyspace.EmailLookup(record.Email) - userNameLookupKey := store.keyspace.UserNameLookup(record.UserName) - - operationCtx, cancel, err := store.operationContext(ctx, "update account in redis") - if err != nil { - return err - } - defer cancel() - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - current, err := store.loadAccount(operationCtx, tx, record.UserID) - if err != nil { - return fmt.Errorf("update account %q in redis: %w", record.UserID, err) - } - if current.Email != record.Email || current.UserName != record.UserName { - return fmt.Errorf("update account %q in redis: %w", record.UserID, ports.ErrConflict) - } - - lookupUserID, err := store.loadLookupUserID(operationCtx, tx, 
emailLookupKey) - if err != nil { - return fmt.Errorf("update account %q in redis: %w", record.UserID, err) - } - if lookupUserID != record.UserID { - return fmt.Errorf("update account %q in redis: %w", record.UserID, ports.ErrConflict) - } - - userNameLookupUserID, err := store.loadLookupUserID(operationCtx, tx, userNameLookupKey) - if err != nil { - return fmt.Errorf("update account %q in redis: %w", record.UserID, err) - } - if userNameLookupUserID != record.UserID { - return fmt.Errorf("update account %q in redis: %w", record.UserID, ports.ErrConflict) - } - - _, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - pipe.Set(operationCtx, accountKey, accountPayload, 0) - store.syncDeclaredCountryIndex(pipe, operationCtx, current, record) - return nil - }) - if err != nil { - return fmt.Errorf("update account %q in redis: %w", record.UserID, err) - } - - return nil - }, accountKey, emailLookupKey, userNameLookupKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("update account %q in redis: %w", record.UserID, ports.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// GetBlockedEmail returns the blocked-email subject for email. 
-func (store *Store) GetBlockedEmail(ctx context.Context, email common.Email) (authblock.BlockedEmailSubject, error) { - if err := email.Validate(); err != nil { - return authblock.BlockedEmailSubject{}, fmt.Errorf("get blocked email subject from redis: %w", err) - } - - operationCtx, cancel, err := store.operationContext(ctx, "get blocked email subject from redis") - if err != nil { - return authblock.BlockedEmailSubject{}, err - } - defer cancel() - - record, err := store.loadBlockedEmail(operationCtx, store.client, email) - if err != nil { - switch { - case errors.Is(err, ports.ErrNotFound): - return authblock.BlockedEmailSubject{}, fmt.Errorf("get blocked email subject %q from redis: %w", email, ports.ErrNotFound) - default: - return authblock.BlockedEmailSubject{}, fmt.Errorf("get blocked email subject %q from redis: %w", email, err) - } - } - - return record, nil -} - -// PutBlockedEmail stores or replaces the blocked-email subject for -// record.Email. -func (store *Store) PutBlockedEmail(ctx context.Context, record authblock.BlockedEmailSubject) error { - if err := record.Validate(); err != nil { - return fmt.Errorf("upsert blocked email subject in redis: %w", err) - } - - payload, err := marshalBlockedEmailRecord(record) - if err != nil { - return fmt.Errorf("upsert blocked email subject in redis: %w", err) - } - - operationCtx, cancel, err := store.operationContext(ctx, "upsert blocked email subject in redis") - if err != nil { - return err - } - defer cancel() - - if err := store.client.Set(operationCtx, store.keyspace.BlockedEmailSubject(record.Email), payload, 0).Err(); err != nil { - return fmt.Errorf("upsert blocked email subject %q in redis: %w", record.Email, err) - } - - return nil -} - -// GetEntitlementByUserID returns the current entitlement snapshot for userID. 
-func (store *Store) GetEntitlementByUserID(ctx context.Context, userID common.UserID) (entitlement.CurrentSnapshot, error) { - if err := userID.Validate(); err != nil { - return entitlement.CurrentSnapshot{}, fmt.Errorf("get entitlement snapshot from redis: %w", err) - } - - operationCtx, cancel, err := store.operationContext(ctx, "get entitlement snapshot from redis") - if err != nil { - return entitlement.CurrentSnapshot{}, err - } - defer cancel() - - record, err := store.loadEntitlementSnapshot(operationCtx, store.client, userID) - if err != nil { - switch { - case errors.Is(err, ports.ErrNotFound): - return entitlement.CurrentSnapshot{}, fmt.Errorf("get entitlement snapshot %q from redis: %w", userID, ports.ErrNotFound) - default: - return entitlement.CurrentSnapshot{}, fmt.Errorf("get entitlement snapshot %q from redis: %w", userID, err) - } - } - - return record, nil -} - -// PutEntitlement stores the current entitlement snapshot for record.UserID. -func (store *Store) PutEntitlement(ctx context.Context, record entitlement.CurrentSnapshot) error { - if err := record.Validate(); err != nil { - return fmt.Errorf("put entitlement snapshot in redis: %w", err) - } - - payload, err := marshalEntitlementSnapshotRecord(record) - if err != nil { - return fmt.Errorf("put entitlement snapshot in redis: %w", err) - } - - operationCtx, cancel, err := store.operationContext(ctx, "put entitlement snapshot in redis") - if err != nil { - return err - } - defer cancel() - - if err := store.client.Set(operationCtx, store.keyspace.EntitlementSnapshot(record.UserID), payload, 0).Err(); err != nil { - return fmt.Errorf("put entitlement snapshot %q in redis: %w", record.UserID, err) - } - - return nil -} - -// CreateSanction stores one new sanction history record. 
-func (store *Store) CreateSanction(ctx context.Context, record policy.SanctionRecord) error { - if err := record.Validate(); err != nil { - return fmt.Errorf("create sanction in redis: %w", err) - } - - payload, err := marshalSanctionRecord(record) - if err != nil { - return fmt.Errorf("create sanction in redis: %w", err) - } - - recordKey := store.keyspace.SanctionRecord(record.RecordID) - historyKey := store.keyspace.SanctionHistory(record.UserID) - - operationCtx, cancel, err := store.operationContext(ctx, "create sanction in redis") - if err != nil { - return err - } - defer cancel() - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - if err := ensureKeyAbsent(operationCtx, tx, recordKey); err != nil { - return fmt.Errorf("create sanction %q in redis: %w", record.RecordID, err) - } - - _, err := tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - pipe.Set(operationCtx, recordKey, payload, 0) - pipe.ZAdd(operationCtx, historyKey, redis.Z{ - Score: float64(record.AppliedAt.UTC().UnixMicro()), - Member: record.RecordID.String(), - }) - return nil - }) - if err != nil { - return fmt.Errorf("create sanction %q in redis: %w", record.RecordID, err) - } - - return nil - }, recordKey, historyKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("create sanction %q in redis: %w", record.RecordID, ports.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// GetSanctionByRecordID returns the sanction history record identified by -// recordID. 
-func (store *Store) GetSanctionByRecordID(ctx context.Context, recordID policy.SanctionRecordID) (policy.SanctionRecord, error) { - if err := recordID.Validate(); err != nil { - return policy.SanctionRecord{}, fmt.Errorf("get sanction by record id from redis: %w", err) - } - - operationCtx, cancel, err := store.operationContext(ctx, "get sanction by record id from redis") - if err != nil { - return policy.SanctionRecord{}, err - } - defer cancel() - - record, err := store.loadSanctionRecord(operationCtx, store.client, recordID) - if err != nil { - switch { - case errors.Is(err, ports.ErrNotFound): - return policy.SanctionRecord{}, fmt.Errorf("get sanction by record id %q from redis: %w", recordID, ports.ErrNotFound) - default: - return policy.SanctionRecord{}, fmt.Errorf("get sanction by record id %q from redis: %w", recordID, err) - } - } - - return record, nil -} - -// ListSanctionsByUserID returns every sanction history record owned by userID. -func (store *Store) ListSanctionsByUserID(ctx context.Context, userID common.UserID) ([]policy.SanctionRecord, error) { - if err := userID.Validate(); err != nil { - return nil, fmt.Errorf("list sanctions by user id from redis: %w", err) - } - - operationCtx, cancel, err := store.operationContext(ctx, "list sanctions by user id from redis") - if err != nil { - return nil, err - } - defer cancel() - - recordIDs, err := store.client.ZRange(operationCtx, store.keyspace.SanctionHistory(userID), 0, -1).Result() - if err != nil { - return nil, fmt.Errorf("list sanctions by user id %q from redis: %w", userID, err) - } - - records := make([]policy.SanctionRecord, 0, len(recordIDs)) - for _, rawRecordID := range recordIDs { - record, err := store.loadSanctionRecord(operationCtx, store.client, policy.SanctionRecordID(rawRecordID)) - if err != nil { - return nil, fmt.Errorf("list sanctions by user id %q from redis: %w", userID, err) - } - records = append(records, record) - } - - return records, nil -} - -// UpdateSanction replaces 
one stored sanction history record. -func (store *Store) UpdateSanction(ctx context.Context, record policy.SanctionRecord) error { - if err := record.Validate(); err != nil { - return fmt.Errorf("update sanction in redis: %w", err) - } - - payload, err := marshalSanctionRecord(record) - if err != nil { - return fmt.Errorf("update sanction in redis: %w", err) - } - - recordKey := store.keyspace.SanctionRecord(record.RecordID) - - operationCtx, cancel, err := store.operationContext(ctx, "update sanction in redis") - if err != nil { - return err - } - defer cancel() - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - if _, err := store.loadSanctionRecord(operationCtx, tx, record.RecordID); err != nil { - return fmt.Errorf("update sanction %q in redis: %w", record.RecordID, err) - } - - _, err := tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - pipe.Set(operationCtx, recordKey, payload, 0) - return nil - }) - if err != nil { - return fmt.Errorf("update sanction %q in redis: %w", record.RecordID, err) - } - - return nil - }, recordKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("update sanction %q in redis: %w", record.RecordID, ports.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// CreateLimit stores one new limit history record. 
-func (store *Store) CreateLimit(ctx context.Context, record policy.LimitRecord) error { - if err := record.Validate(); err != nil { - return fmt.Errorf("create limit in redis: %w", err) - } - - payload, err := marshalLimitRecord(record) - if err != nil { - return fmt.Errorf("create limit in redis: %w", err) - } - - recordKey := store.keyspace.LimitRecord(record.RecordID) - historyKey := store.keyspace.LimitHistory(record.UserID) - - operationCtx, cancel, err := store.operationContext(ctx, "create limit in redis") - if err != nil { - return err - } - defer cancel() - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - if err := ensureKeyAbsent(operationCtx, tx, recordKey); err != nil { - return fmt.Errorf("create limit %q in redis: %w", record.RecordID, err) - } - - _, err := tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - pipe.Set(operationCtx, recordKey, payload, 0) - pipe.ZAdd(operationCtx, historyKey, redis.Z{ - Score: float64(record.AppliedAt.UTC().UnixMicro()), - Member: record.RecordID.String(), - }) - return nil - }) - if err != nil { - return fmt.Errorf("create limit %q in redis: %w", record.RecordID, err) - } - - return nil - }, recordKey, historyKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("create limit %q in redis: %w", record.RecordID, ports.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// GetLimitByRecordID returns the limit history record identified by recordID. 
-func (store *Store) GetLimitByRecordID(ctx context.Context, recordID policy.LimitRecordID) (policy.LimitRecord, error) { - if err := recordID.Validate(); err != nil { - return policy.LimitRecord{}, fmt.Errorf("get limit by record id from redis: %w", err) - } - - operationCtx, cancel, err := store.operationContext(ctx, "get limit by record id from redis") - if err != nil { - return policy.LimitRecord{}, err - } - defer cancel() - - record, err := store.loadLimitRecord(operationCtx, store.client, recordID) - if err != nil { - switch { - case errors.Is(err, ports.ErrNotFound): - return policy.LimitRecord{}, fmt.Errorf("get limit by record id %q from redis: %w", recordID, ports.ErrNotFound) - default: - return policy.LimitRecord{}, fmt.Errorf("get limit by record id %q from redis: %w", recordID, err) - } - } - - return record, nil -} - -// ListLimitsByUserID returns every limit history record owned by userID. -func (store *Store) ListLimitsByUserID(ctx context.Context, userID common.UserID) ([]policy.LimitRecord, error) { - if err := userID.Validate(); err != nil { - return nil, fmt.Errorf("list limits by user id from redis: %w", err) - } - - operationCtx, cancel, err := store.operationContext(ctx, "list limits by user id from redis") - if err != nil { - return nil, err - } - defer cancel() - - recordIDs, err := store.client.ZRange(operationCtx, store.keyspace.LimitHistory(userID), 0, -1).Result() - if err != nil { - return nil, fmt.Errorf("list limits by user id %q from redis: %w", userID, err) - } - - records := make([]policy.LimitRecord, 0, len(recordIDs)) - for _, rawRecordID := range recordIDs { - record, err := store.loadLimitRecord(operationCtx, store.client, policy.LimitRecordID(rawRecordID)) - if err != nil { - return nil, fmt.Errorf("list limits by user id %q from redis: %w", userID, err) - } - records = append(records, record) - } - - return records, nil -} - -// UpdateLimit replaces one stored limit history record. 
-func (store *Store) UpdateLimit(ctx context.Context, record policy.LimitRecord) error { - if err := record.Validate(); err != nil { - return fmt.Errorf("update limit in redis: %w", err) - } - - payload, err := marshalLimitRecord(record) - if err != nil { - return fmt.Errorf("update limit in redis: %w", err) - } - - recordKey := store.keyspace.LimitRecord(record.RecordID) - - operationCtx, cancel, err := store.operationContext(ctx, "update limit in redis") - if err != nil { - return err - } - defer cancel() - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - if _, err := store.loadLimitRecord(operationCtx, tx, record.RecordID); err != nil { - return fmt.Errorf("update limit %q in redis: %w", record.RecordID, err) - } - - _, err := tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - pipe.Set(operationCtx, recordKey, payload, 0) - return nil - }) - if err != nil { - return fmt.Errorf("update limit %q in redis: %w", record.RecordID, err) - } - - return nil - }, recordKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return fmt.Errorf("update limit %q in redis: %w", record.RecordID, ports.ErrConflict) - case watchErr != nil: - return watchErr - default: - return nil - } -} - -// ResolveByEmail returns the current coarse auth-facing resolution state for -// email. 
-func (store *Store) ResolveByEmail(ctx context.Context, email common.Email) (ports.ResolveByEmailResult, error) { - if err := email.Validate(); err != nil { - return ports.ResolveByEmailResult{}, fmt.Errorf("resolve by email in redis: %w", err) - } - - operationCtx, cancel, err := store.operationContext(ctx, "resolve by email in redis") - if err != nil { - return ports.ResolveByEmailResult{}, err - } - defer cancel() - - blocked, err := store.loadBlockedEmail(operationCtx, store.client, email) - switch { - case err == nil: - return ports.ResolveByEmailResult{ - Kind: ports.AuthResolutionKindBlocked, - BlockReasonCode: blocked.ReasonCode, - }, nil - case !errors.Is(err, ports.ErrNotFound): - return ports.ResolveByEmailResult{}, fmt.Errorf("resolve by email %q in redis: %w", email, err) - } - - accountRecord, err := store.GetByEmailAccount(operationCtx, email) - switch { - case err == nil: - case errors.Is(err, ports.ErrNotFound): - return ports.ResolveByEmailResult{Kind: ports.AuthResolutionKindCreatable}, nil - default: - return ports.ResolveByEmailResult{}, fmt.Errorf("resolve by email %q in redis: %w", email, err) - } - - if accountRecord.IsDeleted() { - return ports.ResolveByEmailResult{ - Kind: ports.AuthResolutionKindBlocked, - BlockReasonCode: deletedAccountBlockReasonCode, - }, nil - } - - return ports.ResolveByEmailResult{ - Kind: ports.AuthResolutionKindExisting, - UserID: accountRecord.UserID, - }, nil -} - -// deletedAccountBlockReasonCode is the reason_code returned when an auth-facing -// lookup resolves to a soft-deleted account. It is not a real sanction; the -// auth/session service treats it as a blocked outcome and refuses to issue a -// session for the subject. -const deletedAccountBlockReasonCode common.ReasonCode = "account_deleted" - -// EnsureByEmail atomically returns an existing user, creates a new one, or -// reports a blocked outcome for one e-mail subject. 
-func (store *Store) EnsureByEmail(ctx context.Context, input ports.EnsureByEmailInput) (ports.EnsureByEmailResult, error) { - if err := input.Validate(); err != nil { - return ports.EnsureByEmailResult{}, fmt.Errorf("ensure by email in redis: %w", err) - } - - accountPayload, err := marshalAccountRecord(input.Account) - if err != nil { - return ports.EnsureByEmailResult{}, fmt.Errorf("ensure by email in redis: %w", err) - } - entitlementPayload, err := marshalEntitlementSnapshotRecord(input.Entitlement) - if err != nil { - return ports.EnsureByEmailResult{}, fmt.Errorf("ensure by email in redis: %w", err) - } - entitlementRecordPayload, err := marshalEntitlementPeriodRecord(input.EntitlementRecord) - if err != nil { - return ports.EnsureByEmailResult{}, fmt.Errorf("ensure by email in redis: %w", err) - } - operationCtx, cancel, err := store.operationContext(ctx, "ensure by email in redis") - if err != nil { - return ports.EnsureByEmailResult{}, err - } - defer cancel() - - var result ports.EnsureByEmailResult - var handled bool - - accountKey := store.keyspace.Account(input.Account.UserID) - emailLookupKey := store.keyspace.EmailLookup(input.Email) - userNameLookupKey := store.keyspace.UserNameLookup(input.Account.UserName) - blockedEmailKey := store.keyspace.BlockedEmailSubject(input.Email) - entitlementKey := store.keyspace.EntitlementSnapshot(input.Account.UserID) - entitlementRecordKey := store.keyspace.EntitlementRecord(input.EntitlementRecord.RecordID) - entitlementHistoryKey := store.keyspace.EntitlementHistory(input.Account.UserID) - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - blocked, err := store.loadBlockedEmail(operationCtx, tx, input.Email) - switch { - case err == nil: - result = ports.EnsureByEmailResult{ - Outcome: ports.EnsureByEmailOutcomeBlocked, - BlockReasonCode: blocked.ReasonCode, - } - handled = true - return nil - case !errors.Is(err, ports.ErrNotFound): - return fmt.Errorf("ensure by email %q in redis: %w", 
input.Email, err) - } - - userID, err := store.loadLookupUserID(operationCtx, tx, emailLookupKey) - switch { - case err == nil: - record, err := store.loadAccount(operationCtx, tx, userID) - if err != nil { - return fmt.Errorf("ensure by email %q in redis: %w", input.Email, err) - } - if record.IsDeleted() { - result = ports.EnsureByEmailResult{ - Outcome: ports.EnsureByEmailOutcomeBlocked, - BlockReasonCode: deletedAccountBlockReasonCode, - } - handled = true - return nil - } - result = ports.EnsureByEmailResult{ - Outcome: ports.EnsureByEmailOutcomeExisting, - UserID: record.UserID, - } - handled = true - return nil - case !errors.Is(err, ports.ErrNotFound): - return fmt.Errorf("ensure by email %q in redis: %w", input.Email, err) - } - - if err := ensureKeyAbsent(operationCtx, tx, accountKey); err != nil { - return fmt.Errorf("ensure by email %q in redis: %w", input.Email, err) - } - if err := ensureKeyAbsent(operationCtx, tx, userNameLookupKey); err != nil { - if errors.Is(err, ports.ErrConflict) { - return fmt.Errorf("ensure by email %q in redis: %w", input.Email, ports.ErrUserNameConflict) - } - return fmt.Errorf("ensure by email %q in redis: %w", input.Email, err) - } - if err := ensureKeyAbsent(operationCtx, tx, entitlementKey); err != nil { - return fmt.Errorf("ensure by email %q in redis: %w", input.Email, err) - } - if err := ensureKeyAbsent(operationCtx, tx, entitlementRecordKey); err != nil { - return fmt.Errorf("ensure by email %q in redis: %w", input.Email, err) - } - - _, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - pipe.Set(operationCtx, accountKey, accountPayload, 0) - pipe.Set(operationCtx, emailLookupKey, input.Account.UserID.String(), 0) - pipe.Set(operationCtx, userNameLookupKey, input.Account.UserID.String(), 0) - pipe.Set(operationCtx, entitlementKey, entitlementPayload, 0) - pipe.Set(operationCtx, entitlementRecordKey, entitlementRecordPayload, 0) - pipe.ZAdd(operationCtx, entitlementHistoryKey, redis.Z{ - Score: 
float64(input.EntitlementRecord.StartsAt.UTC().UnixMicro()), - Member: input.EntitlementRecord.RecordID.String(), - }) - store.addCreatedAtIndex(pipe, operationCtx, input.Account) - store.syncDeclaredCountryIndex(pipe, operationCtx, account.UserAccount{}, input.Account) - store.syncEntitlementIndexes(pipe, operationCtx, input.Entitlement) - store.syncActiveSanctionCodeIndexes(pipe, operationCtx, input.Account.UserID, map[policy.SanctionCode]struct{}{}) - store.syncActiveLimitCodeIndexes(pipe, operationCtx, input.Account.UserID, map[policy.LimitCode]struct{}{}) - store.syncEligibilityMarkerIndexes(pipe, operationCtx, input.Account.UserID, input.Entitlement.IsPaid, map[policy.SanctionCode]struct{}{}) - return nil - }) - if err != nil { - return fmt.Errorf("ensure by email %q in redis: %w", input.Email, err) - } - - result = ports.EnsureByEmailResult{ - Outcome: ports.EnsureByEmailOutcomeCreated, - UserID: input.Account.UserID, - } - handled = true - return nil - }, blockedEmailKey, emailLookupKey, accountKey, userNameLookupKey, entitlementKey, entitlementRecordKey, entitlementHistoryKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return ports.EnsureByEmailResult{}, fmt.Errorf("ensure by email %q in redis: %w", input.Email, ports.ErrConflict) - case watchErr != nil: - return ports.EnsureByEmailResult{}, watchErr - case !handled: - return ports.EnsureByEmailResult{}, fmt.Errorf("ensure by email %q in redis: unhandled watch result", input.Email) - default: - return result, nil - } -} - -// BlockByUserID applies a block state to the account identified by userID. 
-func (store *Store) BlockByUserID(ctx context.Context, input ports.BlockByUserIDInput) (ports.BlockResult, error) { - if err := input.Validate(); err != nil { - return ports.BlockResult{}, fmt.Errorf("block by user id in redis: %w", err) - } - - operationCtx, cancel, err := store.operationContext(ctx, "block by user id in redis") - if err != nil { - return ports.BlockResult{}, err - } - defer cancel() - - var result ports.BlockResult - var handled bool - - currentAccount, err := store.loadAccount(operationCtx, store.client, input.UserID) - if err != nil { - if errors.Is(err, ports.ErrNotFound) { - return ports.BlockResult{}, fmt.Errorf("block by user id %q in redis: %w", input.UserID, ports.ErrNotFound) - } - return ports.BlockResult{}, fmt.Errorf("block by user id %q in redis: %w", input.UserID, err) - } - if currentAccount.IsDeleted() { - return ports.BlockResult{}, fmt.Errorf("block by user id %q in redis: %w", input.UserID, ports.ErrNotFound) - } - - accountKey := store.keyspace.Account(input.UserID) - blockedEmailKey := store.keyspace.BlockedEmailSubject(currentAccount.Email) - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - accountRecord, err := store.loadAccount(operationCtx, tx, input.UserID) - if err != nil { - return fmt.Errorf("block by user id %q in redis: %w", input.UserID, err) - } - if accountRecord.IsDeleted() { - return fmt.Errorf("block by user id %q in redis: %w", input.UserID, ports.ErrNotFound) - } - - blocked, err := store.loadBlockedEmail(operationCtx, tx, accountRecord.Email) - switch { - case err == nil: - result = ports.BlockResult{ - Outcome: ports.AuthBlockOutcomeAlreadyBlocked, - UserID: input.UserID, - } - if !blocked.ResolvedUserID.IsZero() { - result.UserID = blocked.ResolvedUserID - } - handled = true - return nil - case !errors.Is(err, ports.ErrNotFound): - return fmt.Errorf("block by user id %q in redis: %w", input.UserID, err) - } - - record := authblock.BlockedEmailSubject{ - Email: 
accountRecord.Email, - ReasonCode: input.ReasonCode, - BlockedAt: input.BlockedAt.UTC(), - ResolvedUserID: input.UserID, - } - payload, err := marshalBlockedEmailRecord(record) - if err != nil { - return fmt.Errorf("block by user id %q in redis: %w", input.UserID, err) - } - - _, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - pipe.Set(operationCtx, blockedEmailKey, payload, 0) - return nil - }) - if err != nil { - return fmt.Errorf("block by user id %q in redis: %w", input.UserID, err) - } - - result = ports.BlockResult{ - Outcome: ports.AuthBlockOutcomeBlocked, - UserID: input.UserID, - } - handled = true - return nil - }, accountKey, blockedEmailKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return ports.BlockResult{}, fmt.Errorf("block by user id %q in redis: %w", input.UserID, ports.ErrConflict) - case watchErr != nil: - if errors.Is(watchErr, ports.ErrNotFound) { - return ports.BlockResult{}, fmt.Errorf("block by user id %q in redis: %w", input.UserID, ports.ErrNotFound) - } - return ports.BlockResult{}, watchErr - case !handled: - return ports.BlockResult{}, fmt.Errorf("block by user id %q in redis: unhandled watch result", input.UserID) - default: - return result, nil - } -} - -// BlockByEmail applies a block state to email even when no account exists yet. 
-func (store *Store) BlockByEmail(ctx context.Context, input ports.BlockByEmailInput) (ports.BlockResult, error) { - if err := input.Validate(); err != nil { - return ports.BlockResult{}, fmt.Errorf("block by email in redis: %w", err) - } - - operationCtx, cancel, err := store.operationContext(ctx, "block by email in redis") - if err != nil { - return ports.BlockResult{}, err - } - defer cancel() - - var result ports.BlockResult - var handled bool - - blockedEmailKey := store.keyspace.BlockedEmailSubject(input.Email) - emailLookupKey := store.keyspace.EmailLookup(input.Email) - - watchErr := store.client.Watch(operationCtx, func(tx *redis.Tx) error { - blocked, err := store.loadBlockedEmail(operationCtx, tx, input.Email) - switch { - case err == nil: - result = ports.BlockResult{ - Outcome: ports.AuthBlockOutcomeAlreadyBlocked, - UserID: blocked.ResolvedUserID, - } - handled = true - return nil - case !errors.Is(err, ports.ErrNotFound): - return fmt.Errorf("block by email %q in redis: %w", input.Email, err) - } - - resolvedUserID, err := store.loadLookupUserID(operationCtx, tx, emailLookupKey) - switch { - case err == nil: - if _, err := store.loadAccount(operationCtx, tx, resolvedUserID); err != nil { - return fmt.Errorf("block by email %q in redis: %w", input.Email, err) - } - case !errors.Is(err, ports.ErrNotFound): - return fmt.Errorf("block by email %q in redis: %w", input.Email, err) - default: - resolvedUserID = "" - } - - record := authblock.BlockedEmailSubject{ - Email: input.Email, - ReasonCode: input.ReasonCode, - BlockedAt: input.BlockedAt.UTC(), - } - if !resolvedUserID.IsZero() { - record.ResolvedUserID = resolvedUserID - } - payload, err := marshalBlockedEmailRecord(record) - if err != nil { - return fmt.Errorf("block by email %q in redis: %w", input.Email, err) - } - - _, err = tx.TxPipelined(operationCtx, func(pipe redis.Pipeliner) error { - pipe.Set(operationCtx, blockedEmailKey, payload, 0) - return nil - }) - if err != nil { - return 
fmt.Errorf("block by email %q in redis: %w", input.Email, err) - } - - result = ports.BlockResult{ - Outcome: ports.AuthBlockOutcomeBlocked, - UserID: resolvedUserID, - } - handled = true - return nil - }, blockedEmailKey, emailLookupKey) - - switch { - case errors.Is(watchErr, redis.TxFailedErr): - return ports.BlockResult{}, fmt.Errorf("block by email %q in redis: %w", input.Email, ports.ErrConflict) - case watchErr != nil: - return ports.BlockResult{}, watchErr - case !handled: - return ports.BlockResult{}, fmt.Errorf("block by email %q in redis: unhandled watch result", input.Email) - default: - return result, nil - } -} - -func (store *Store) GetByEmailAccount(ctx context.Context, email common.Email) (account.UserAccount, error) { - userID, err := store.loadLookupUserID(ctx, store.client, store.keyspace.EmailLookup(email)) - if err != nil { - return account.UserAccount{}, err - } - - return store.loadAccount(ctx, store.client, userID) -} - -func (store *Store) loadAccount(ctx context.Context, getter bytesGetter, userID common.UserID) (account.UserAccount, error) { - payload, err := getter.Get(ctx, store.keyspace.Account(userID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return account.UserAccount{}, ports.ErrNotFound - case err != nil: - return account.UserAccount{}, err - } - - return decodeAccountRecord(payload) -} - -func (store *Store) loadLookupUserID(ctx context.Context, getter bytesGetter, key string) (common.UserID, error) { - value, err := getter.Get(ctx, key).Result() - switch { - case errors.Is(err, redis.Nil): - return "", ports.ErrNotFound - case err != nil: - return "", err - } - - userID := common.UserID(value) - if err := userID.Validate(); err != nil { - return "", fmt.Errorf("lookup user id: %w", err) - } - - return userID, nil -} - -func (store *Store) loadBlockedEmail(ctx context.Context, getter bytesGetter, email common.Email) (authblock.BlockedEmailSubject, error) { - payload, err := getter.Get(ctx, 
store.keyspace.BlockedEmailSubject(email)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return authblock.BlockedEmailSubject{}, ports.ErrNotFound - case err != nil: - return authblock.BlockedEmailSubject{}, err - } - - return decodeBlockedEmailRecord(payload) -} - -func (store *Store) loadEntitlementSnapshot(ctx context.Context, getter bytesGetter, userID common.UserID) (entitlement.CurrentSnapshot, error) { - payload, err := getter.Get(ctx, store.keyspace.EntitlementSnapshot(userID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return entitlement.CurrentSnapshot{}, ports.ErrNotFound - case err != nil: - return entitlement.CurrentSnapshot{}, err - } - - return decodeEntitlementSnapshotRecord(payload) -} - -func (store *Store) loadSanctionRecord(ctx context.Context, getter bytesGetter, recordID policy.SanctionRecordID) (policy.SanctionRecord, error) { - payload, err := getter.Get(ctx, store.keyspace.SanctionRecord(recordID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return policy.SanctionRecord{}, ports.ErrNotFound - case err != nil: - return policy.SanctionRecord{}, err - } - - return decodeSanctionRecord(payload) -} - -func (store *Store) loadLimitRecord(ctx context.Context, getter bytesGetter, recordID policy.LimitRecordID) (policy.LimitRecord, error) { - payload, err := getter.Get(ctx, store.keyspace.LimitRecord(recordID)).Bytes() - switch { - case errors.Is(err, redis.Nil): - return policy.LimitRecord{}, ports.ErrNotFound - case err != nil: - return policy.LimitRecord{}, err - } - - return decodeLimitRecord(payload) -} - -func (store *Store) operationContext(ctx context.Context, operation string) (context.Context, context.CancelFunc, error) { - if store == nil || store.client == nil { - return nil, nil, fmt.Errorf("%s: nil store", operation) - } - if ctx == nil { - return nil, nil, fmt.Errorf("%s: nil context", operation) - } - - operationCtx, cancel := context.WithTimeout(ctx, store.operationTimeout) - return operationCtx, 
cancel, nil -} - -func ensureKeyAbsent(ctx context.Context, getter bytesGetter, key string) error { - _, err := getter.Get(ctx, key).Bytes() - switch { - case errors.Is(err, redis.Nil): - return nil - case err != nil: - return err - default: - return ports.ErrConflict - } -} - -func ensureLookupAvailableOrOwned( - ctx context.Context, - getter bytesGetter, - key string, - userID common.UserID, -) error { - currentUserID, err := getter.Get(ctx, key).Result() - switch { - case errors.Is(err, redis.Nil): - return nil - case err != nil: - return err - } - - if currentUserID != userID.String() { - return ports.ErrConflict - } - - return nil -} - -func marshalAccountRecord(record account.UserAccount) ([]byte, error) { - encoded := accountRecord{ - UserID: record.UserID.String(), - Email: record.Email.String(), - UserName: record.UserName.String(), - DisplayName: record.DisplayName.String(), - PreferredLanguage: record.PreferredLanguage.String(), - TimeZone: record.TimeZone.String(), - CreatedAt: record.CreatedAt.UTC().Format(time.RFC3339Nano), - UpdatedAt: record.UpdatedAt.UTC().Format(time.RFC3339Nano), - } - if !record.DeclaredCountry.IsZero() { - value := record.DeclaredCountry.String() - encoded.DeclaredCountry = &value - } - if record.DeletedAt != nil { - value := record.DeletedAt.UTC().Format(time.RFC3339Nano) - encoded.DeletedAt = &value - } - - return json.Marshal(encoded) -} - -func decodeAccountRecord(payload []byte) (account.UserAccount, error) { - var encoded accountRecord - if err := decodeJSONPayload(payload, &encoded); err != nil { - return account.UserAccount{}, err - } - - createdAt, err := time.Parse(time.RFC3339Nano, encoded.CreatedAt) - if err != nil { - return account.UserAccount{}, fmt.Errorf("decode account record created_at: %w", err) - } - updatedAt, err := time.Parse(time.RFC3339Nano, encoded.UpdatedAt) - if err != nil { - return account.UserAccount{}, fmt.Errorf("decode account record updated_at: %w", err) - } - - record := account.UserAccount{ 
- UserID: common.UserID(encoded.UserID), - Email: common.Email(encoded.Email), - UserName: common.UserName(encoded.UserName), - DisplayName: common.DisplayName(encoded.DisplayName), - PreferredLanguage: common.LanguageTag(encoded.PreferredLanguage), - TimeZone: common.TimeZoneName(encoded.TimeZone), - CreatedAt: createdAt.UTC(), - UpdatedAt: updatedAt.UTC(), - } - if encoded.DeclaredCountry != nil { - record.DeclaredCountry = common.CountryCode(*encoded.DeclaredCountry) - } - if encoded.DeletedAt != nil { - deletedAt, err := time.Parse(time.RFC3339Nano, *encoded.DeletedAt) - if err != nil { - return account.UserAccount{}, fmt.Errorf("decode account record deleted_at: %w", err) - } - deletedAt = deletedAt.UTC() - record.DeletedAt = &deletedAt - } - if err := record.Validate(); err != nil { - return account.UserAccount{}, fmt.Errorf("decode account record: %w", err) - } - - return record, nil -} - -func marshalBlockedEmailRecord(record authblock.BlockedEmailSubject) ([]byte, error) { - encoded := blockedEmailRecord{ - Email: record.Email.String(), - ReasonCode: record.ReasonCode.String(), - BlockedAt: record.BlockedAt.UTC().Format(time.RFC3339Nano), - } - if !record.Actor.IsZero() { - actorType := record.Actor.Type.String() - encoded.ActorType = &actorType - if !record.Actor.ID.IsZero() { - actorID := record.Actor.ID.String() - encoded.ActorID = &actorID - } - } - if !record.ResolvedUserID.IsZero() { - resolvedUserID := record.ResolvedUserID.String() - encoded.ResolvedUserID = &resolvedUserID - } - - return json.Marshal(encoded) -} - -func decodeBlockedEmailRecord(payload []byte) (authblock.BlockedEmailSubject, error) { - var encoded blockedEmailRecord - if err := decodeJSONPayload(payload, &encoded); err != nil { - return authblock.BlockedEmailSubject{}, err - } - - blockedAt, err := time.Parse(time.RFC3339Nano, encoded.BlockedAt) - if err != nil { - return authblock.BlockedEmailSubject{}, fmt.Errorf("decode blocked email record blocked_at: %w", err) - } - - record 
:= authblock.BlockedEmailSubject{ - Email: common.Email(encoded.Email), - ReasonCode: common.ReasonCode(encoded.ReasonCode), - BlockedAt: blockedAt.UTC(), - } - if encoded.ActorType != nil { - record.Actor.Type = common.ActorType(*encoded.ActorType) - } - if encoded.ActorID != nil { - record.Actor.ID = common.ActorID(*encoded.ActorID) - } - if encoded.ResolvedUserID != nil { - record.ResolvedUserID = common.UserID(*encoded.ResolvedUserID) - } - if err := record.Validate(); err != nil { - return authblock.BlockedEmailSubject{}, fmt.Errorf("decode blocked email record: %w", err) - } - - return record, nil -} - -func marshalEntitlementSnapshotRecord(record entitlement.CurrentSnapshot) ([]byte, error) { - encoded := entitlementSnapshotRecord{ - UserID: record.UserID.String(), - PlanCode: string(record.PlanCode), - IsPaid: record.IsPaid, - StartsAt: record.StartsAt.UTC().Format(time.RFC3339Nano), - Source: record.Source.String(), - ActorType: record.Actor.Type.String(), - ReasonCode: record.ReasonCode.String(), - UpdatedAt: record.UpdatedAt.UTC().Format(time.RFC3339Nano), - } - if record.EndsAt != nil { - value := record.EndsAt.UTC().Format(time.RFC3339Nano) - encoded.EndsAt = &value - } - if !record.Actor.ID.IsZero() { - value := record.Actor.ID.String() - encoded.ActorID = &value - } - - return json.Marshal(encoded) -} - -func decodeEntitlementSnapshotRecord(payload []byte) (entitlement.CurrentSnapshot, error) { - var encoded entitlementSnapshotRecord - if err := decodeJSONPayload(payload, &encoded); err != nil { - return entitlement.CurrentSnapshot{}, err - } - - startsAt, err := time.Parse(time.RFC3339Nano, encoded.StartsAt) - if err != nil { - return entitlement.CurrentSnapshot{}, fmt.Errorf("decode entitlement snapshot record starts_at: %w", err) - } - updatedAt, err := time.Parse(time.RFC3339Nano, encoded.UpdatedAt) - if err != nil { - return entitlement.CurrentSnapshot{}, fmt.Errorf("decode entitlement snapshot record updated_at: %w", err) - } - - record := 
entitlement.CurrentSnapshot{ - UserID: common.UserID(encoded.UserID), - PlanCode: entitlement.PlanCode(encoded.PlanCode), - IsPaid: encoded.IsPaid, - StartsAt: startsAt.UTC(), - Source: common.Source(encoded.Source), - Actor: common.ActorRef{Type: common.ActorType(encoded.ActorType)}, - ReasonCode: common.ReasonCode(encoded.ReasonCode), - UpdatedAt: updatedAt.UTC(), - } - if encoded.ActorID != nil { - record.Actor.ID = common.ActorID(*encoded.ActorID) - } - if encoded.EndsAt != nil { - value, err := time.Parse(time.RFC3339Nano, *encoded.EndsAt) - if err != nil { - return entitlement.CurrentSnapshot{}, fmt.Errorf("decode entitlement snapshot record ends_at: %w", err) - } - value = value.UTC() - record.EndsAt = &value - } - if err := record.Validate(); err != nil { - return entitlement.CurrentSnapshot{}, fmt.Errorf("decode entitlement snapshot record: %w", err) - } - - return record, nil -} - -func marshalSanctionRecord(record policy.SanctionRecord) ([]byte, error) { - encoded := sanctionRecord{ - RecordID: record.RecordID.String(), - UserID: record.UserID.String(), - SanctionCode: string(record.SanctionCode), - Scope: record.Scope.String(), - ReasonCode: record.ReasonCode.String(), - ActorType: record.Actor.Type.String(), - AppliedAt: record.AppliedAt.UTC().Format(time.RFC3339Nano), - } - if !record.Actor.ID.IsZero() { - value := record.Actor.ID.String() - encoded.ActorID = &value - } - if record.ExpiresAt != nil { - value := record.ExpiresAt.UTC().Format(time.RFC3339Nano) - encoded.ExpiresAt = &value - } - if record.RemovedAt != nil { - value := record.RemovedAt.UTC().Format(time.RFC3339Nano) - encoded.RemovedAt = &value - } - if !record.RemovedBy.Type.IsZero() { - value := record.RemovedBy.Type.String() - encoded.RemovedByType = &value - } - if !record.RemovedBy.ID.IsZero() { - value := record.RemovedBy.ID.String() - encoded.RemovedByID = &value - } - if !record.RemovedReasonCode.IsZero() { - value := record.RemovedReasonCode.String() - encoded.RemovedReasonCode = 
&value - } - - return json.Marshal(encoded) -} - -func decodeSanctionRecord(payload []byte) (policy.SanctionRecord, error) { - var encoded sanctionRecord - if err := decodeJSONPayload(payload, &encoded); err != nil { - return policy.SanctionRecord{}, err - } - - appliedAt, err := time.Parse(time.RFC3339Nano, encoded.AppliedAt) - if err != nil { - return policy.SanctionRecord{}, fmt.Errorf("decode sanction record applied_at: %w", err) - } - - record := policy.SanctionRecord{ - RecordID: policy.SanctionRecordID(encoded.RecordID), - UserID: common.UserID(encoded.UserID), - SanctionCode: policy.SanctionCode(encoded.SanctionCode), - Scope: common.Scope(encoded.Scope), - ReasonCode: common.ReasonCode(encoded.ReasonCode), - Actor: common.ActorRef{Type: common.ActorType(encoded.ActorType)}, - AppliedAt: appliedAt.UTC(), - } - if encoded.ActorID != nil { - record.Actor.ID = common.ActorID(*encoded.ActorID) - } - if encoded.ExpiresAt != nil { - value, err := time.Parse(time.RFC3339Nano, *encoded.ExpiresAt) - if err != nil { - return policy.SanctionRecord{}, fmt.Errorf("decode sanction record expires_at: %w", err) - } - value = value.UTC() - record.ExpiresAt = &value - } - if encoded.RemovedAt != nil { - value, err := time.Parse(time.RFC3339Nano, *encoded.RemovedAt) - if err != nil { - return policy.SanctionRecord{}, fmt.Errorf("decode sanction record removed_at: %w", err) - } - value = value.UTC() - record.RemovedAt = &value - } - if encoded.RemovedByType != nil { - record.RemovedBy.Type = common.ActorType(*encoded.RemovedByType) - } - if encoded.RemovedByID != nil { - record.RemovedBy.ID = common.ActorID(*encoded.RemovedByID) - } - if encoded.RemovedReasonCode != nil { - record.RemovedReasonCode = common.ReasonCode(*encoded.RemovedReasonCode) - } - if err := record.Validate(); err != nil { - return policy.SanctionRecord{}, fmt.Errorf("decode sanction record: %w", err) - } - - return record, nil -} - -func marshalLimitRecord(record policy.LimitRecord) ([]byte, error) { - 
encoded := limitRecord{ - RecordID: record.RecordID.String(), - UserID: record.UserID.String(), - LimitCode: string(record.LimitCode), - Value: record.Value, - ReasonCode: record.ReasonCode.String(), - ActorType: record.Actor.Type.String(), - AppliedAt: record.AppliedAt.UTC().Format(time.RFC3339Nano), - } - if !record.Actor.ID.IsZero() { - value := record.Actor.ID.String() - encoded.ActorID = &value - } - if record.ExpiresAt != nil { - value := record.ExpiresAt.UTC().Format(time.RFC3339Nano) - encoded.ExpiresAt = &value - } - if record.RemovedAt != nil { - value := record.RemovedAt.UTC().Format(time.RFC3339Nano) - encoded.RemovedAt = &value - } - if !record.RemovedBy.Type.IsZero() { - value := record.RemovedBy.Type.String() - encoded.RemovedByType = &value - } - if !record.RemovedBy.ID.IsZero() { - value := record.RemovedBy.ID.String() - encoded.RemovedByID = &value - } - if !record.RemovedReasonCode.IsZero() { - value := record.RemovedReasonCode.String() - encoded.RemovedReasonCode = &value - } - - return json.Marshal(encoded) -} - -func decodeLimitRecord(payload []byte) (policy.LimitRecord, error) { - var encoded limitRecord - if err := decodeJSONPayload(payload, &encoded); err != nil { - return policy.LimitRecord{}, err - } - - appliedAt, err := time.Parse(time.RFC3339Nano, encoded.AppliedAt) - if err != nil { - return policy.LimitRecord{}, fmt.Errorf("decode limit record applied_at: %w", err) - } - - record := policy.LimitRecord{ - RecordID: policy.LimitRecordID(encoded.RecordID), - UserID: common.UserID(encoded.UserID), - LimitCode: policy.LimitCode(encoded.LimitCode), - Value: encoded.Value, - ReasonCode: common.ReasonCode(encoded.ReasonCode), - Actor: common.ActorRef{Type: common.ActorType(encoded.ActorType)}, - AppliedAt: appliedAt.UTC(), - } - if encoded.ActorID != nil { - record.Actor.ID = common.ActorID(*encoded.ActorID) - } - if encoded.ExpiresAt != nil { - value, err := time.Parse(time.RFC3339Nano, *encoded.ExpiresAt) - if err != nil { - return 
policy.LimitRecord{}, fmt.Errorf("decode limit record expires_at: %w", err) - } - value = value.UTC() - record.ExpiresAt = &value - } - if encoded.RemovedAt != nil { - value, err := time.Parse(time.RFC3339Nano, *encoded.RemovedAt) - if err != nil { - return policy.LimitRecord{}, fmt.Errorf("decode limit record removed_at: %w", err) - } - value = value.UTC() - record.RemovedAt = &value - } - if encoded.RemovedByType != nil { - record.RemovedBy.Type = common.ActorType(*encoded.RemovedByType) - } - if encoded.RemovedByID != nil { - record.RemovedBy.ID = common.ActorID(*encoded.RemovedByID) - } - if encoded.RemovedReasonCode != nil { - record.RemovedReasonCode = common.ReasonCode(*encoded.RemovedReasonCode) - } - if err := record.Validate(); err != nil { - return policy.LimitRecord{}, fmt.Errorf("decode limit record: %w", err) - } - - return record, nil -} - -func decodeJSONPayload(payload []byte, target any) error { - decoder := json.NewDecoder(bytes.NewReader(payload)) - decoder.DisallowUnknownFields() - - if err := decoder.Decode(target); err != nil { - return fmt.Errorf("decode JSON payload: %w", err) - } - if err := decoder.Decode(&struct{}{}); err != io.EOF { - if err == nil { - return errors.New("decode JSON payload: unexpected trailing JSON input") - } - - return fmt.Errorf("decode JSON payload: %w", err) - } - - return nil -} - -var ( - _ ports.AuthDirectoryStore = (*Store)(nil) -) - -// AccountStore adapts Store to the existing UserAccountStore port. -type AccountStore struct { - store *Store -} - -// Accounts returns one adapter that exposes the existing user-account store -// port over Store. -func (store *Store) Accounts() *AccountStore { - if store == nil { - return nil - } - - return &AccountStore{store: store} -} - -// Create stores one new account record. 
-func (adapter *AccountStore) Create(ctx context.Context, input ports.CreateAccountInput) error { - return adapter.store.Create(ctx, input) -} - -// GetByUserID returns the stored account identified by userID. -func (adapter *AccountStore) GetByUserID(ctx context.Context, userID common.UserID) (account.UserAccount, error) { - return adapter.store.GetByUserID(ctx, userID) -} - -// GetByEmail returns the stored account identified by email. -func (adapter *AccountStore) GetByEmail(ctx context.Context, email common.Email) (account.UserAccount, error) { - return adapter.store.GetByEmail(ctx, email) -} - -// GetByUserName returns the stored account identified by userName. -func (adapter *AccountStore) GetByUserName(ctx context.Context, userName common.UserName) (account.UserAccount, error) { - return adapter.store.GetByUserName(ctx, userName) -} - -// ExistsByUserID reports whether userID currently identifies a stored -// account. -func (adapter *AccountStore) ExistsByUserID(ctx context.Context, userID common.UserID) (bool, error) { - return adapter.store.ExistsByUserID(ctx, userID) -} - -// Update replaces the stored account state for record.UserID. -func (adapter *AccountStore) Update(ctx context.Context, record account.UserAccount) error { - return adapter.store.Update(ctx, record) -} - -var _ ports.UserAccountStore = (*AccountStore)(nil) - -// BlockedEmailStore adapts Store to the existing BlockedEmailStore port. -type BlockedEmailStore struct { - store *Store -} - -// BlockedEmails returns one adapter that exposes the existing blocked-email -// store port over Store. -func (store *Store) BlockedEmails() *BlockedEmailStore { - if store == nil { - return nil - } - - return &BlockedEmailStore{store: store} -} - -// GetByEmail returns the blocked-email subject for email. 
-func (adapter *BlockedEmailStore) GetByEmail(ctx context.Context, email common.Email) (authblock.BlockedEmailSubject, error) { - return adapter.store.GetBlockedEmail(ctx, email) -} - -// Upsert stores or replaces the blocked-email subject for record.Email. -func (adapter *BlockedEmailStore) Upsert(ctx context.Context, record authblock.BlockedEmailSubject) error { - return adapter.store.PutBlockedEmail(ctx, record) -} - -var _ ports.BlockedEmailStore = (*BlockedEmailStore)(nil) - -// EntitlementSnapshotStore adapts Store to the existing -// EntitlementSnapshotStore port. -type EntitlementSnapshotStore struct { - store *Store -} - -// EntitlementSnapshots returns one adapter that exposes the existing -// entitlement-snapshot store port over Store. -func (store *Store) EntitlementSnapshots() *EntitlementSnapshotStore { - if store == nil { - return nil - } - - return &EntitlementSnapshotStore{store: store} -} - -// GetByUserID returns the current entitlement snapshot for userID. -func (adapter *EntitlementSnapshotStore) GetByUserID(ctx context.Context, userID common.UserID) (entitlement.CurrentSnapshot, error) { - return adapter.store.GetEntitlementByUserID(ctx, userID) -} - -// Put stores the current entitlement snapshot for record.UserID. -func (adapter *EntitlementSnapshotStore) Put(ctx context.Context, record entitlement.CurrentSnapshot) error { - return adapter.store.PutEntitlement(ctx, record) -} - -var _ ports.EntitlementSnapshotStore = (*EntitlementSnapshotStore)(nil) - -// SanctionStore adapts Store to the existing SanctionStore port. -type SanctionStore struct { - store *Store -} - -// Sanctions returns one adapter that exposes the sanction store port over -// Store. -func (store *Store) Sanctions() *SanctionStore { - if store == nil { - return nil - } - - return &SanctionStore{store: store} -} - -// Create stores one new sanction history record. 
-func (adapter *SanctionStore) Create(ctx context.Context, record policy.SanctionRecord) error { - return adapter.store.CreateSanction(ctx, record) -} - -// GetByRecordID returns the sanction history record identified by recordID. -func (adapter *SanctionStore) GetByRecordID(ctx context.Context, recordID policy.SanctionRecordID) (policy.SanctionRecord, error) { - return adapter.store.GetSanctionByRecordID(ctx, recordID) -} - -// ListByUserID returns every sanction history record owned by userID. -func (adapter *SanctionStore) ListByUserID(ctx context.Context, userID common.UserID) ([]policy.SanctionRecord, error) { - return adapter.store.ListSanctionsByUserID(ctx, userID) -} - -// Update replaces one stored sanction history record. -func (adapter *SanctionStore) Update(ctx context.Context, record policy.SanctionRecord) error { - return adapter.store.UpdateSanction(ctx, record) -} - -var _ ports.SanctionStore = (*SanctionStore)(nil) - -// LimitStore adapts Store to the existing LimitStore port. -type LimitStore struct { - store *Store -} - -// Limits returns one adapter that exposes the limit store port over Store. -func (store *Store) Limits() *LimitStore { - if store == nil { - return nil - } - - return &LimitStore{store: store} -} - -// Create stores one new limit history record. -func (adapter *LimitStore) Create(ctx context.Context, record policy.LimitRecord) error { - return adapter.store.CreateLimit(ctx, record) -} - -// GetByRecordID returns the limit history record identified by recordID. -func (adapter *LimitStore) GetByRecordID(ctx context.Context, recordID policy.LimitRecordID) (policy.LimitRecord, error) { - return adapter.store.GetLimitByRecordID(ctx, recordID) -} - -// ListByUserID returns every limit history record owned by userID. 
-func (adapter *LimitStore) ListByUserID(ctx context.Context, userID common.UserID) ([]policy.LimitRecord, error) { - return adapter.store.ListLimitsByUserID(ctx, userID) -} - -// Update replaces one stored limit history record. -func (adapter *LimitStore) Update(ctx context.Context, record policy.LimitRecord) error { - return adapter.store.UpdateLimit(ctx, record) -} - -var _ ports.LimitStore = (*LimitStore)(nil) diff --git a/user/internal/adapters/redis/userstore/store_test.go b/user/internal/adapters/redis/userstore/store_test.go deleted file mode 100644 index 1dbb387..0000000 --- a/user/internal/adapters/redis/userstore/store_test.go +++ /dev/null @@ -1,879 +0,0 @@ -package userstore - -import ( - "context" - "testing" - "time" - - "galaxy/user/internal/domain/account" - "galaxy/user/internal/domain/authblock" - "galaxy/user/internal/domain/common" - "galaxy/user/internal/domain/entitlement" - "galaxy/user/internal/domain/policy" - "galaxy/user/internal/ports" - - "github.com/alicebob/miniredis/v2" - "github.com/stretchr/testify/require" -) - -func TestAccountStoreCreateAndLookups(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - accountStore := store.Accounts() - - record := validAccountRecord() - require.NoError(t, accountStore.Create(context.Background(), createAccountInput(record))) - - byUserID, err := accountStore.GetByUserID(context.Background(), record.UserID) - require.NoError(t, err) - require.Equal(t, record, byUserID) - - byEmail, err := accountStore.GetByEmail(context.Background(), record.Email) - require.NoError(t, err) - require.Equal(t, record, byEmail) - - byUserName, err := accountStore.GetByUserName(context.Background(), record.UserName) - require.NoError(t, err) - require.Equal(t, record, byUserName) - - exists, err := accountStore.ExistsByUserID(context.Background(), record.UserID) - require.NoError(t, err) - require.True(t, exists) -} - -func TestBlockedEmailStoreUpsertAndGet(t *testing.T) { - t.Parallel() - - store := 
newTestStore(t) - blockedEmailStore := store.BlockedEmails() - - record := authblock.BlockedEmailSubject{ - Email: common.Email("blocked@example.com"), - ReasonCode: common.ReasonCode("policy_blocked"), - BlockedAt: time.Unix(1_775_240_100, 0).UTC(), - ResolvedUserID: common.UserID("user-123"), - } - require.NoError(t, blockedEmailStore.Upsert(context.Background(), record)) - - got, err := blockedEmailStore.GetByEmail(context.Background(), record.Email) - require.NoError(t, err) - require.Equal(t, record, got) -} - -func TestEnsureResolveAndBlockFlows(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - now := time.Unix(1_775_240_000, 0).UTC() - accountRecord := validAccountRecord() - entitlementSnapshot := validEntitlementSnapshot(accountRecord.UserID, now) - - created, err := store.EnsureByEmail(context.Background(), ports.EnsureByEmailInput{ - Email: accountRecord.Email, - Account: accountRecord, - Entitlement: entitlementSnapshot, - EntitlementRecord: validEntitlementRecord(accountRecord.UserID, now), - }) - require.NoError(t, err) - require.Equal(t, ports.EnsureByEmailOutcomeCreated, created.Outcome) - - byUserName, err := store.GetByUserName(context.Background(), accountRecord.UserName) - require.NoError(t, err) - require.Equal(t, accountRecord.UserID, byUserName.UserID) - - entitlementHistory, err := store.ListEntitlementRecordsByUserID(context.Background(), accountRecord.UserID) - require.NoError(t, err) - require.Len(t, entitlementHistory, 1) - require.Equal(t, validEntitlementRecord(accountRecord.UserID, now), entitlementHistory[0]) - - resolved, err := store.ResolveByEmail(context.Background(), accountRecord.Email) - require.NoError(t, err) - require.Equal(t, ports.AuthResolutionKindExisting, resolved.Kind) - - blockedByUserID, err := store.BlockByUserID(context.Background(), ports.BlockByUserIDInput{ - UserID: accountRecord.UserID, - ReasonCode: common.ReasonCode("policy_blocked"), - BlockedAt: now.Add(time.Minute), - }) - require.NoError(t, 
err) - require.Equal(t, ports.AuthBlockOutcomeBlocked, blockedByUserID.Outcome) - - repeatedBlock, err := store.BlockByEmail(context.Background(), ports.BlockByEmailInput{ - Email: accountRecord.Email, - ReasonCode: common.ReasonCode("policy_blocked"), - BlockedAt: now.Add(2 * time.Minute), - }) - require.NoError(t, err) - require.Equal(t, ports.AuthBlockOutcomeAlreadyBlocked, repeatedBlock.Outcome) - require.Equal(t, accountRecord.UserID, repeatedBlock.UserID) - - blockedResolution, err := store.ResolveByEmail(context.Background(), accountRecord.Email) - require.NoError(t, err) - require.Equal(t, ports.AuthResolutionKindBlocked, blockedResolution.Kind) - - ensureBlocked, err := store.EnsureByEmail(context.Background(), ports.EnsureByEmailInput{ - Email: accountRecord.Email, - Account: accountRecord, - Entitlement: entitlementSnapshot, - EntitlementRecord: validEntitlementRecord(accountRecord.UserID, now), - }) - require.NoError(t, err) - require.Equal(t, ports.EnsureByEmailOutcomeBlocked, ensureBlocked.Outcome) -} - -func TestBlockedEmailWithoutUserPreventsEnsureCreate(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - now := time.Unix(1_775_240_000, 0).UTC() - accountRecord := validAccountRecord() - entitlementSnapshot := validEntitlementSnapshot(accountRecord.UserID, now) - - blocked, err := store.BlockByEmail(context.Background(), ports.BlockByEmailInput{ - Email: accountRecord.Email, - ReasonCode: common.ReasonCode("policy_blocked"), - BlockedAt: now, - }) - require.NoError(t, err) - require.Equal(t, ports.AuthBlockOutcomeBlocked, blocked.Outcome) - require.True(t, blocked.UserID.IsZero()) - - resolved, err := store.ResolveByEmail(context.Background(), accountRecord.Email) - require.NoError(t, err) - require.Equal(t, ports.AuthResolutionKindBlocked, resolved.Kind) - - ensured, err := store.EnsureByEmail(context.Background(), ports.EnsureByEmailInput{ - Email: accountRecord.Email, - Account: accountRecord, - Entitlement: entitlementSnapshot, - 
EntitlementRecord: validEntitlementRecord(accountRecord.UserID, now), - }) - require.NoError(t, err) - require.Equal(t, ports.EnsureByEmailOutcomeBlocked, ensured.Outcome) - - exists, err := store.ExistsByUserID(context.Background(), accountRecord.UserID) - require.NoError(t, err) - require.False(t, exists) -} - -func TestEnsureByEmailExistingDoesNotOverwriteStoredSettings(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - createdAt := time.Unix(1_775_240_000, 0).UTC() - existingAccount := account.UserAccount{ - UserID: common.UserID("user-existing"), - Email: common.Email("pilot@example.com"), - UserName: common.UserName("player-abcdefgh"), - PreferredLanguage: common.LanguageTag("en"), - TimeZone: common.TimeZoneName("Europe/Kaliningrad"), - CreatedAt: createdAt, - UpdatedAt: createdAt, - } - require.NoError(t, store.Create(context.Background(), createAccountInput(existingAccount))) - - result, err := store.EnsureByEmail(context.Background(), ports.EnsureByEmailInput{ - Email: existingAccount.Email, - Account: account.UserAccount{ - UserID: common.UserID("user-created"), - Email: existingAccount.Email, - UserName: common.UserName("player-newabcde"), - PreferredLanguage: common.LanguageTag("fr-FR"), - TimeZone: common.TimeZoneName("UTC"), - CreatedAt: createdAt.Add(time.Minute), - UpdatedAt: createdAt.Add(time.Minute), - }, - Entitlement: validEntitlementSnapshot(common.UserID("user-created"), createdAt.Add(time.Minute)), - EntitlementRecord: validEntitlementRecord(common.UserID("user-created"), createdAt.Add(time.Minute)), - }) - require.NoError(t, err) - require.Equal(t, ports.EnsureByEmailOutcomeExisting, result.Outcome) - require.Equal(t, existingAccount.UserID, result.UserID) - - storedAccount, err := store.GetByEmail(context.Background(), existingAccount.Email) - require.NoError(t, err) - require.Equal(t, existingAccount, storedAccount) -} - -func TestAccountStoreUpdateDisplayNamePreservesImmutableFields(t *testing.T) { - t.Parallel() - - store := 
newTestStore(t) - accountStore := store.Accounts() - - record := validAccountRecord() - require.NoError(t, accountStore.Create(context.Background(), createAccountInput(record))) - - updated := record - updated.DisplayName = common.DisplayName("NovaPrime") - updated.UpdatedAt = record.UpdatedAt.Add(time.Minute) - - require.NoError(t, accountStore.Update(context.Background(), updated)) - - byUserID, err := accountStore.GetByUserID(context.Background(), record.UserID) - require.NoError(t, err) - require.Equal(t, updated, byUserID) - - byEmail, err := accountStore.GetByEmail(context.Background(), record.Email) - require.NoError(t, err) - require.Equal(t, updated, byEmail) - - byUserName, err := accountStore.GetByUserName(context.Background(), record.UserName) - require.NoError(t, err) - require.Equal(t, updated, byUserName) -} - -func TestAccountStoreUpdateRejectsUserNameMutation(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - accountStore := store.Accounts() - - record := validAccountRecord() - require.NoError(t, accountStore.Create(context.Background(), createAccountInput(record))) - - attempted := record - attempted.UserName = common.UserName("player-changed") - attempted.UpdatedAt = record.UpdatedAt.Add(time.Minute) - - err := accountStore.Update(context.Background(), attempted) - require.ErrorIs(t, err, ports.ErrConflict) -} - -func TestAccountStoreUpdateDeclaredCountryPreservesLookups(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - accountStore := store.Accounts() - - record := validAccountRecord() - require.NoError(t, accountStore.Create(context.Background(), createAccountInput(record))) - - updated := record - updated.DeclaredCountry = common.CountryCode("FR") - updated.UpdatedAt = record.UpdatedAt.Add(time.Minute) - - require.NoError(t, accountStore.Update(context.Background(), updated)) - - byUserID, err := accountStore.GetByUserID(context.Background(), record.UserID) - require.NoError(t, err) - require.Equal(t, updated, byUserID) 
- - byEmail, err := accountStore.GetByEmail(context.Background(), record.Email) - require.NoError(t, err) - require.Equal(t, updated, byEmail) - - byUserName, err := accountStore.GetByUserName(context.Background(), record.UserName) - require.NoError(t, err) - require.Equal(t, updated, byUserName) -} - -func TestAccountStorePersistsSoftDeleteMarker(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - accountStore := store.Accounts() - - record := validAccountRecord() - require.NoError(t, accountStore.Create(context.Background(), createAccountInput(record))) - - deletedAt := record.UpdatedAt.Add(time.Hour) - updated := record - updated.UpdatedAt = deletedAt - updated.DeletedAt = &deletedAt - - require.NoError(t, accountStore.Update(context.Background(), updated)) - - byUserID, err := accountStore.GetByUserID(context.Background(), record.UserID) - require.NoError(t, err) - require.NotNil(t, byUserID.DeletedAt) - require.True(t, byUserID.DeletedAt.Equal(deletedAt)) - require.True(t, byUserID.IsDeleted()) -} - -func TestAccountStoreCreateReturnsUserNameConflict(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - accountStore := store.Accounts() - - first := validAccountRecord() - second := validAccountRecord() - second.UserID = common.UserID("user-456") - second.Email = common.Email("other@example.com") - - require.NoError(t, accountStore.Create(context.Background(), createAccountInput(first))) - - err := accountStore.Create(context.Background(), createAccountInput(second)) - require.ErrorIs(t, err, ports.ErrUserNameConflict) -} - -func TestBlockByUserIDRepeatedCallsStayIdempotent(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - now := time.Unix(1_775_240_000, 0).UTC() - accountRecord := validAccountRecord() - - require.NoError(t, store.Create(context.Background(), createAccountInput(accountRecord))) - - first, err := store.BlockByUserID(context.Background(), ports.BlockByUserIDInput{ - UserID: accountRecord.UserID, - ReasonCode: 
common.ReasonCode("policy_blocked"), - BlockedAt: now, - }) - require.NoError(t, err) - require.Equal(t, ports.AuthBlockOutcomeBlocked, first.Outcome) - - second, err := store.BlockByUserID(context.Background(), ports.BlockByUserIDInput{ - UserID: accountRecord.UserID, - ReasonCode: common.ReasonCode("policy_blocked"), - BlockedAt: now.Add(time.Minute), - }) - require.NoError(t, err) - require.Equal(t, ports.AuthBlockOutcomeAlreadyBlocked, second.Outcome) - require.Equal(t, accountRecord.UserID, second.UserID) -} - -func TestBlockByUserIDUnknownUserReturnsNotFound(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - - _, err := store.BlockByUserID(context.Background(), ports.BlockByUserIDInput{ - UserID: common.UserID("user-missing"), - ReasonCode: common.ReasonCode("policy_blocked"), - BlockedAt: time.Unix(1_775_240_000, 0).UTC(), - }) - require.ErrorIs(t, err, ports.ErrNotFound) -} - -func TestSanctionAndLimitStoresRoundTrip(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - sanctionStore := store.Sanctions() - limitStore := store.Limits() - now := time.Unix(1_775_240_000, 0).UTC() - - sanctionRecord := policy.SanctionRecord{ - RecordID: policy.SanctionRecordID("sanction-1"), - UserID: common.UserID("user-123"), - SanctionCode: policy.SanctionCodeLoginBlock, - Scope: common.Scope("self_service"), - ReasonCode: common.ReasonCode("policy_enforced"), - Actor: common.ActorRef{Type: common.ActorType("service"), ID: common.ActorID("user-service")}, - AppliedAt: now, - } - require.NoError(t, sanctionStore.Create(context.Background(), sanctionRecord)) - - gotSanction, err := sanctionStore.GetByRecordID(context.Background(), sanctionRecord.RecordID) - require.NoError(t, err) - require.Equal(t, sanctionRecord, gotSanction) - - sanctions, err := sanctionStore.ListByUserID(context.Background(), sanctionRecord.UserID) - require.NoError(t, err) - require.Len(t, sanctions, 1) - - expiresAt := now.Add(time.Hour) - sanctionRecord.ExpiresAt = &expiresAt - 
require.NoError(t, sanctionStore.Update(context.Background(), sanctionRecord)) - - gotSanction, err = sanctionStore.GetByRecordID(context.Background(), sanctionRecord.RecordID) - require.NoError(t, err) - require.Equal(t, sanctionRecord.RecordID, gotSanction.RecordID) - require.Equal(t, sanctionRecord.UserID, gotSanction.UserID) - require.Equal(t, sanctionRecord.SanctionCode, gotSanction.SanctionCode) - require.Equal(t, sanctionRecord.Scope, gotSanction.Scope) - require.Equal(t, sanctionRecord.ReasonCode, gotSanction.ReasonCode) - require.Equal(t, sanctionRecord.Actor, gotSanction.Actor) - require.True(t, gotSanction.AppliedAt.Equal(sanctionRecord.AppliedAt)) - require.NotNil(t, gotSanction.ExpiresAt) - require.True(t, gotSanction.ExpiresAt.Equal(*sanctionRecord.ExpiresAt)) - - limitRecord := policy.LimitRecord{ - RecordID: policy.LimitRecordID("limit-1"), - UserID: common.UserID("user-123"), - LimitCode: policy.LimitCodeMaxOwnedPrivateGames, - Value: 3, - ReasonCode: common.ReasonCode("policy_enforced"), - Actor: common.ActorRef{Type: common.ActorType("service"), ID: common.ActorID("user-service")}, - AppliedAt: now, - } - require.NoError(t, limitStore.Create(context.Background(), limitRecord)) - - gotLimit, err := limitStore.GetByRecordID(context.Background(), limitRecord.RecordID) - require.NoError(t, err) - require.Equal(t, limitRecord, gotLimit) - - limits, err := limitStore.ListByUserID(context.Background(), limitRecord.UserID) - require.NoError(t, err) - require.Len(t, limits, 1) - - limitRecord.Value = 5 - require.NoError(t, limitStore.Update(context.Background(), limitRecord)) - - gotLimit, err = limitStore.GetByRecordID(context.Background(), limitRecord.RecordID) - require.NoError(t, err) - require.Equal(t, limitRecord, gotLimit) -} - -func TestPolicyLifecycleApplyAndRemoveSanction(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - lifecycleStore := store.PolicyLifecycle() - sanctionStore := store.Sanctions() - snapshotStore := 
store.EntitlementSnapshots() - now := time.Unix(1_775_240_000, 0).UTC() - userID := common.UserID("user-123") - require.NoError(t, snapshotStore.Put(context.Background(), validEntitlementSnapshot(userID, now))) - - record := policy.SanctionRecord{ - RecordID: policy.SanctionRecordID("sanction-1"), - UserID: userID, - SanctionCode: policy.SanctionCodeLoginBlock, - Scope: common.Scope("auth"), - ReasonCode: common.ReasonCode("manual_block"), - Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}, - AppliedAt: now, - } - require.NoError(t, lifecycleStore.ApplySanction(context.Background(), ports.ApplySanctionInput{ - NewRecord: record, - })) - - activeRecordID, err := store.loadActiveSanctionRecordID( - context.Background(), - store.client, - store.keyspace.ActiveSanction(userID, policy.SanctionCodeLoginBlock), - ) - require.NoError(t, err) - require.Equal(t, record.RecordID, activeRecordID) - - err = lifecycleStore.ApplySanction(context.Background(), ports.ApplySanctionInput{ - NewRecord: policy.SanctionRecord{ - RecordID: policy.SanctionRecordID("sanction-2"), - UserID: userID, - SanctionCode: policy.SanctionCodeLoginBlock, - Scope: common.Scope("auth"), - ReasonCode: common.ReasonCode("manual_block"), - Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-2")}, - AppliedAt: now.Add(time.Minute), - }, - }) - require.ErrorIs(t, err, ports.ErrConflict) - - removed := record - removedAt := now.Add(30 * time.Minute) - removed.RemovedAt = &removedAt - removed.RemovedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-2")} - removed.RemovedReasonCode = common.ReasonCode("manual_remove") - require.NoError(t, lifecycleStore.RemoveSanction(context.Background(), ports.RemoveSanctionInput{ - ExpectedActiveRecord: record, - UpdatedRecord: removed, - })) - - stored, err := sanctionStore.GetByRecordID(context.Background(), record.RecordID) - require.NoError(t, err) - require.Equal(t, 
removed, stored) - - _, err = store.loadActiveSanctionRecordID( - context.Background(), - store.client, - store.keyspace.ActiveSanction(userID, policy.SanctionCodeLoginBlock), - ) - require.ErrorIs(t, err, ports.ErrNotFound) -} - -func TestPolicyLifecycleSetAndRemoveLimit(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - lifecycleStore := store.PolicyLifecycle() - limitStore := store.Limits() - now := time.Unix(1_775_240_000, 0).UTC() - userID := common.UserID("user-123") - - first := policy.LimitRecord{ - RecordID: policy.LimitRecordID("limit-1"), - UserID: userID, - LimitCode: policy.LimitCodeMaxOwnedPrivateGames, - Value: 3, - ReasonCode: common.ReasonCode("manual_override"), - Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}, - AppliedAt: now, - } - require.NoError(t, lifecycleStore.SetLimit(context.Background(), ports.SetLimitInput{ - NewRecord: first, - })) - - activeRecordID, err := store.loadActiveLimitRecordID( - context.Background(), - store.client, - store.keyspace.ActiveLimit(userID, policy.LimitCodeMaxOwnedPrivateGames), - ) - require.NoError(t, err) - require.Equal(t, first.RecordID, activeRecordID) - - second := policy.LimitRecord{ - RecordID: policy.LimitRecordID("limit-2"), - UserID: userID, - LimitCode: policy.LimitCodeMaxOwnedPrivateGames, - Value: 5, - ReasonCode: common.ReasonCode("manual_override"), - Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-2")}, - AppliedAt: now.Add(time.Hour), - } - updatedFirst := first - removedAt := second.AppliedAt - updatedFirst.RemovedAt = &removedAt - updatedFirst.RemovedBy = second.Actor - updatedFirst.RemovedReasonCode = second.ReasonCode - require.NoError(t, lifecycleStore.SetLimit(context.Background(), ports.SetLimitInput{ - ExpectedActiveRecord: &first, - UpdatedActiveRecord: &updatedFirst, - NewRecord: second, - })) - - storedFirst, err := limitStore.GetByRecordID(context.Background(), first.RecordID) - require.NoError(t, 
err) - require.Equal(t, updatedFirst, storedFirst) - - activeRecordID, err = store.loadActiveLimitRecordID( - context.Background(), - store.client, - store.keyspace.ActiveLimit(userID, policy.LimitCodeMaxOwnedPrivateGames), - ) - require.NoError(t, err) - require.Equal(t, second.RecordID, activeRecordID) - - removedSecond := second - removeAt := now.Add(90 * time.Minute) - removedSecond.RemovedAt = &removeAt - removedSecond.RemovedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-3")} - removedSecond.RemovedReasonCode = common.ReasonCode("manual_remove") - require.NoError(t, lifecycleStore.RemoveLimit(context.Background(), ports.RemoveLimitInput{ - ExpectedActiveRecord: second, - UpdatedRecord: removedSecond, - })) - - storedSecond, err := limitStore.GetByRecordID(context.Background(), second.RecordID) - require.NoError(t, err) - require.Equal(t, removedSecond, storedSecond) - - _, err = store.loadActiveLimitRecordID( - context.Background(), - store.client, - store.keyspace.ActiveLimit(userID, policy.LimitCodeMaxOwnedPrivateGames), - ) - require.ErrorIs(t, err, ports.ErrNotFound) -} - -func TestEntitlementLifecycleTransitions(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - historyStore := store.EntitlementHistory() - snapshotStore := store.EntitlementSnapshots() - lifecycleStore := store.EntitlementLifecycle() - userID := common.UserID("user-123") - startedFreeAt := time.Unix(1_775_240_000, 0).UTC() - - freeRecord := validEntitlementRecord(userID, startedFreeAt) - freeSnapshot := validEntitlementSnapshot(userID, startedFreeAt) - require.NoError(t, historyStore.Create(context.Background(), freeRecord)) - require.NoError(t, snapshotStore.Put(context.Background(), freeSnapshot)) - - grantStartsAt := startedFreeAt.Add(24 * time.Hour) - grantEndsAt := grantStartsAt.Add(30 * 24 * time.Hour) - grantedRecord := paidEntitlementRecord( - entitlement.EntitlementRecordID("entitlement-paid-1"), - userID, - 
entitlement.PlanCodePaidMonthly, - grantStartsAt, - grantEndsAt, - common.Source("admin"), - common.ReasonCode("manual_grant"), - ) - grantedSnapshot := paidEntitlementSnapshot( - userID, - entitlement.PlanCodePaidMonthly, - grantStartsAt, - grantEndsAt, - common.Source("admin"), - common.ReasonCode("manual_grant"), - ) - closedFreeRecord := freeRecord - closedFreeRecord.ClosedAt = timePointer(grantStartsAt) - closedFreeRecord.ClosedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")} - closedFreeRecord.ClosedReasonCode = common.ReasonCode("manual_grant") - - require.NoError(t, lifecycleStore.Grant(context.Background(), ports.GrantEntitlementInput{ - ExpectedCurrentSnapshot: freeSnapshot, - ExpectedCurrentRecord: freeRecord, - UpdatedCurrentRecord: closedFreeRecord, - NewRecord: grantedRecord, - NewSnapshot: grantedSnapshot, - })) - - storedSnapshot, err := snapshotStore.GetByUserID(context.Background(), userID) - require.NoError(t, err) - require.Equal(t, grantedSnapshot, storedSnapshot) - - storedFreeRecord, err := historyStore.GetByRecordID(context.Background(), freeRecord.RecordID) - require.NoError(t, err) - require.Equal(t, closedFreeRecord, storedFreeRecord) - - extendedEndsAt := grantEndsAt.Add(30 * 24 * time.Hour) - extensionRecord := paidEntitlementRecord( - entitlement.EntitlementRecordID("entitlement-paid-2"), - userID, - entitlement.PlanCodePaidMonthly, - grantEndsAt, - extendedEndsAt, - common.Source("admin"), - common.ReasonCode("manual_extend"), - ) - extendedSnapshot := paidEntitlementSnapshot( - userID, - entitlement.PlanCodePaidMonthly, - grantStartsAt, - extendedEndsAt, - common.Source("admin"), - common.ReasonCode("manual_extend"), - ) - - require.NoError(t, lifecycleStore.Extend(context.Background(), ports.ExtendEntitlementInput{ - ExpectedCurrentSnapshot: grantedSnapshot, - NewRecord: extensionRecord, - NewSnapshot: extendedSnapshot, - })) - - storedSnapshot, err = 
snapshotStore.GetByUserID(context.Background(), userID) - require.NoError(t, err) - require.Equal(t, extendedSnapshot, storedSnapshot) - - revokeAt := grantEndsAt.Add(12 * time.Hour) - revokedCurrentRecord := extensionRecord - revokedCurrentRecord.ClosedAt = timePointer(revokeAt) - revokedCurrentRecord.ClosedBy = common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")} - revokedCurrentRecord.ClosedReasonCode = common.ReasonCode("manual_revoke") - - freeAfterRevokeRecord := entitlement.PeriodRecord{ - RecordID: entitlement.EntitlementRecordID("entitlement-free-2"), - UserID: userID, - PlanCode: entitlement.PlanCodeFree, - Source: common.Source("admin"), - Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}, - ReasonCode: common.ReasonCode("manual_revoke"), - StartsAt: revokeAt, - CreatedAt: revokeAt, - } - freeAfterRevokeSnapshot := entitlement.CurrentSnapshot{ - UserID: userID, - PlanCode: entitlement.PlanCodeFree, - IsPaid: false, - StartsAt: revokeAt, - Source: common.Source("admin"), - Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}, - ReasonCode: common.ReasonCode("manual_revoke"), - UpdatedAt: revokeAt, - } - - require.NoError(t, lifecycleStore.Revoke(context.Background(), ports.RevokeEntitlementInput{ - ExpectedCurrentSnapshot: extendedSnapshot, - ExpectedCurrentRecord: extensionRecord, - UpdatedCurrentRecord: revokedCurrentRecord, - NewRecord: freeAfterRevokeRecord, - NewSnapshot: freeAfterRevokeSnapshot, - })) - - storedSnapshot, err = snapshotStore.GetByUserID(context.Background(), userID) - require.NoError(t, err) - require.Equal(t, freeAfterRevokeSnapshot, storedSnapshot) - - historyRecords, err := historyStore.ListByUserID(context.Background(), userID) - require.NoError(t, err) - require.Len(t, historyRecords, 4) -} - -func TestRepairExpiredEntitlementMaterializesFreeSnapshot(t *testing.T) { - t.Parallel() - - store := newTestStore(t) - historyStore := 
store.EntitlementHistory() - snapshotStore := store.EntitlementSnapshots() - lifecycleStore := store.EntitlementLifecycle() - userID := common.UserID("user-123") - startsAt := time.Unix(1_775_240_000, 0).UTC() - endsAt := startsAt.Add(24 * time.Hour) - expiredSnapshot := paidEntitlementSnapshot( - userID, - entitlement.PlanCodePaidMonthly, - startsAt, - endsAt, - common.Source("admin"), - common.ReasonCode("manual_grant"), - ) - expiredSnapshot.UpdatedAt = endsAt.Add(24 * time.Hour) - expiredRecord := paidEntitlementRecord( - entitlement.EntitlementRecordID("entitlement-paid-1"), - userID, - entitlement.PlanCodePaidMonthly, - startsAt, - endsAt, - common.Source("admin"), - common.ReasonCode("manual_grant"), - ) - require.NoError(t, historyStore.Create(context.Background(), expiredRecord)) - require.NoError(t, snapshotStore.Put(context.Background(), expiredSnapshot)) - - repairedAt := endsAt.Add(2 * time.Hour) - freeRecord := entitlement.PeriodRecord{ - RecordID: entitlement.EntitlementRecordID("entitlement-free-after-expiry"), - UserID: userID, - PlanCode: entitlement.PlanCodeFree, - Source: common.Source("entitlement_expiry_repair"), - Actor: common.ActorRef{Type: common.ActorType("service"), ID: common.ActorID("user-service")}, - ReasonCode: common.ReasonCode("paid_entitlement_expired"), - StartsAt: endsAt, - CreatedAt: repairedAt, - } - freeSnapshot := entitlement.CurrentSnapshot{ - UserID: userID, - PlanCode: entitlement.PlanCodeFree, - IsPaid: false, - StartsAt: endsAt, - Source: common.Source("entitlement_expiry_repair"), - Actor: common.ActorRef{Type: common.ActorType("service"), ID: common.ActorID("user-service")}, - ReasonCode: common.ReasonCode("paid_entitlement_expired"), - UpdatedAt: repairedAt, - } - - require.NoError(t, lifecycleStore.RepairExpired(context.Background(), ports.RepairExpiredEntitlementInput{ - ExpectedExpiredSnapshot: expiredSnapshot, - NewRecord: freeRecord, - NewSnapshot: freeSnapshot, - })) - - storedSnapshot, err := 
snapshotStore.GetByUserID(context.Background(), userID) - require.NoError(t, err) - require.Equal(t, freeSnapshot, storedSnapshot) - - historyRecords, err := historyStore.ListByUserID(context.Background(), userID) - require.NoError(t, err) - require.Len(t, historyRecords, 2) - require.Equal(t, freeRecord, historyRecords[1]) -} - -func newTestStore(t *testing.T) *Store { - t.Helper() - - server := miniredis.RunT(t) - store, err := New(Config{ - Addr: server.Addr(), - DB: 0, - KeyspacePrefix: "user:test:", - OperationTimeout: 250 * time.Millisecond, - }) - require.NoError(t, err) - t.Cleanup(func() { - _ = store.Close() - }) - - return store -} - -func validAccountRecord() account.UserAccount { - createdAt := time.Unix(1_775_240_000, 0).UTC() - return account.UserAccount{ - UserID: common.UserID("user-123"), - Email: common.Email("pilot@example.com"), - UserName: common.UserName("player-abcdefgh"), - PreferredLanguage: common.LanguageTag("en"), - TimeZone: common.TimeZoneName("Europe/Kaliningrad"), - CreatedAt: createdAt, - UpdatedAt: createdAt, - } -} - -func validEntitlementSnapshot(userID common.UserID, now time.Time) entitlement.CurrentSnapshot { - return entitlement.CurrentSnapshot{ - UserID: userID, - PlanCode: entitlement.PlanCodeFree, - IsPaid: false, - StartsAt: now, - Source: common.Source("auth_registration"), - Actor: common.ActorRef{Type: common.ActorType("service"), ID: common.ActorID("user-service")}, - ReasonCode: common.ReasonCode("initial_free_entitlement"), - UpdatedAt: now, - } -} - -func validEntitlementRecord(userID common.UserID, now time.Time) entitlement.PeriodRecord { - return entitlement.PeriodRecord{ - RecordID: entitlement.EntitlementRecordID("entitlement-" + userID.String()), - UserID: userID, - PlanCode: entitlement.PlanCodeFree, - Source: common.Source("auth_registration"), - Actor: common.ActorRef{Type: common.ActorType("service"), ID: common.ActorID("user-service")}, - ReasonCode: common.ReasonCode("initial_free_entitlement"), - 
StartsAt: now, - CreatedAt: now, - } -} - -func paidEntitlementRecord( - recordID entitlement.EntitlementRecordID, - userID common.UserID, - planCode entitlement.PlanCode, - startsAt time.Time, - endsAt time.Time, - source common.Source, - reasonCode common.ReasonCode, -) entitlement.PeriodRecord { - return entitlement.PeriodRecord{ - RecordID: recordID, - UserID: userID, - PlanCode: planCode, - Source: source, - Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}, - ReasonCode: reasonCode, - StartsAt: startsAt, - EndsAt: timePointer(endsAt), - CreatedAt: startsAt, - } -} - -func paidEntitlementSnapshot( - userID common.UserID, - planCode entitlement.PlanCode, - startsAt time.Time, - endsAt time.Time, - source common.Source, - reasonCode common.ReasonCode, -) entitlement.CurrentSnapshot { - return entitlement.CurrentSnapshot{ - UserID: userID, - PlanCode: planCode, - IsPaid: true, - StartsAt: startsAt, - EndsAt: timePointer(endsAt), - Source: source, - Actor: common.ActorRef{Type: common.ActorType("admin"), ID: common.ActorID("admin-1")}, - ReasonCode: reasonCode, - UpdatedAt: startsAt, - } -} - -func timePointer(value time.Time) *time.Time { - utcValue := value.UTC() - return &utcValue -} - -func createAccountInput(record account.UserAccount) ports.CreateAccountInput { - return ports.CreateAccountInput{ - Account: record, - } -} diff --git a/user/internal/adapters/redisstate/keyspace.go b/user/internal/adapters/redisstate/keyspace.go deleted file mode 100644 index 7e7d72b..0000000 --- a/user/internal/adapters/redisstate/keyspace.go +++ /dev/null @@ -1,193 +0,0 @@ -// Package redisstate defines the frozen Redis logical keyspace and pagination -// helpers used by future User Service storage adapters. 
-package redisstate - -import ( - "encoding/base64" - "fmt" - "strings" - "time" - - "galaxy/user/internal/domain/common" - "galaxy/user/internal/domain/entitlement" - "galaxy/user/internal/domain/policy" -) - -const defaultPrefix = "user:" - -// Keyspace builds the frozen Redis logical keys used by future storage -// adapters. The package intentionally exposes key construction only and does -// not depend on any Redis client. -type Keyspace struct { - // Prefix stores the namespace prefix applied to every key. The zero value - // uses `user:`. - Prefix string -} - -// Account returns the primary user-account key for userID. -func (k Keyspace) Account(userID common.UserID) string { - return k.prefix() + "account:" + encodeKeyComponent(userID.String()) -} - -// EmailLookup returns the exact normalized e-mail lookup key. -func (k Keyspace) EmailLookup(email common.Email) string { - return k.prefix() + "lookup:email:" + encodeKeyComponent(email.String()) -} - -// UserNameLookup returns the exact stored user-name lookup key. -func (k Keyspace) UserNameLookup(userName common.UserName) string { - return k.prefix() + "lookup:user-name:" + encodeKeyComponent(userName.String()) -} - -// BlockedEmailSubject returns the dedicated blocked-email-subject key. -func (k Keyspace) BlockedEmailSubject(email common.Email) string { - return k.prefix() + "blocked-email:" + encodeKeyComponent(email.String()) -} - -// EntitlementRecord returns the primary entitlement history-record key. -func (k Keyspace) EntitlementRecord(recordID entitlement.EntitlementRecordID) string { - return k.prefix() + "entitlement:record:" + encodeKeyComponent(recordID.String()) -} - -// EntitlementHistory returns the per-user entitlement-history index key. -func (k Keyspace) EntitlementHistory(userID common.UserID) string { - return k.prefix() + "entitlement:history:" + encodeKeyComponent(userID.String()) -} - -// EntitlementSnapshot returns the current entitlement-snapshot key. 
-func (k Keyspace) EntitlementSnapshot(userID common.UserID) string { - return k.prefix() + "entitlement:snapshot:" + encodeKeyComponent(userID.String()) -} - -// SanctionRecord returns the primary sanction history-record key. -func (k Keyspace) SanctionRecord(recordID policy.SanctionRecordID) string { - return k.prefix() + "sanction:record:" + encodeKeyComponent(recordID.String()) -} - -// SanctionHistory returns the per-user sanction-history index key. -func (k Keyspace) SanctionHistory(userID common.UserID) string { - return k.prefix() + "sanction:history:" + encodeKeyComponent(userID.String()) -} - -// ActiveSanction returns the per-user active-sanction slot for one sanction -// code. The slot guarantees at most one active sanction per `user_id + -// sanction_code`. -func (k Keyspace) ActiveSanction(userID common.UserID, code policy.SanctionCode) string { - return k.prefix() + "sanction:active:" + encodeKeyComponent(userID.String()) + ":" + encodeKeyComponent(string(code)) -} - -// LimitRecord returns the primary limit history-record key. -func (k Keyspace) LimitRecord(recordID policy.LimitRecordID) string { - return k.prefix() + "limit:record:" + encodeKeyComponent(recordID.String()) -} - -// LimitHistory returns the per-user limit-history index key. -func (k Keyspace) LimitHistory(userID common.UserID) string { - return k.prefix() + "limit:history:" + encodeKeyComponent(userID.String()) -} - -// ActiveLimit returns the per-user active-limit slot for one limit code. The -// slot guarantees at most one active limit per `user_id + limit_code`. -func (k Keyspace) ActiveLimit(userID common.UserID, code policy.LimitCode) string { - return k.prefix() + "limit:active:" + encodeKeyComponent(userID.String()) + ":" + encodeKeyComponent(string(code)) -} - -// CreatedAtIndex returns the deterministic newest-first user-ordering index. 
-func (k Keyspace) CreatedAtIndex() string { - return k.prefix() + "index:created-at" -} - -// PaidStateIndex returns the coarse free-versus-paid index key. -func (k Keyspace) PaidStateIndex(state entitlement.PaidState) string { - return k.prefix() + "index:paid-state:" + encodeKeyComponent(string(state)) -} - -// FinitePaidExpiryIndex returns the finite paid-expiry index key. Lifetime -// plans intentionally do not participate in this index. -func (k Keyspace) FinitePaidExpiryIndex() string { - return k.prefix() + "index:paid-expiry:finite" -} - -// DeclaredCountryIndex returns the current declared-country reverse-lookup -// index key. -func (k Keyspace) DeclaredCountryIndex(code common.CountryCode) string { - return k.prefix() + "index:declared-country:" + encodeKeyComponent(code.String()) -} - -// ActiveSanctionCodeIndex returns the reverse-lookup index key for users with -// an active sanction code. -func (k Keyspace) ActiveSanctionCodeIndex(code policy.SanctionCode) string { - return k.prefix() + "index:active-sanction:" + encodeKeyComponent(string(code)) -} - -// ActiveLimitCodeIndex returns the reverse-lookup index key for users with an -// active limit code. -func (k Keyspace) ActiveLimitCodeIndex(code policy.LimitCode) string { - return k.prefix() + "index:active-limit:" + encodeKeyComponent(string(code)) -} - -// EligibilityMarkerIndex returns the reverse-lookup index key for one derived -// eligibility marker boolean. -func (k Keyspace) EligibilityMarkerIndex(marker policy.EligibilityMarker, value bool) string { - return fmt.Sprintf("%sindex:eligibility:%s:%t", k.prefix(), encodeKeyComponent(string(marker)), value) -} - -// CreatedAtScore returns the frozen ZSET score representation for created-at -// ordering and deterministic pagination. -func CreatedAtScore(createdAt time.Time) float64 { - return float64(createdAt.UTC().UnixMicro()) -} - -// ExpiryScore returns the frozen ZSET score representation for finite paid -// expiry ordering. 
-func ExpiryScore(expiresAt time.Time) float64 { - return float64(expiresAt.UTC().UnixMicro()) -} - -// PageCursor identifies the last seen `(created_at, user_id)` tuple used by -// deterministic newest-first pagination. -type PageCursor struct { - // CreatedAt stores the created-at component of the last seen row. - CreatedAt time.Time - - // UserID stores the user-id tiebreaker component of the last seen row. - UserID common.UserID -} - -// Validate reports whether PageCursor contains a complete cursor tuple. -func (cursor PageCursor) Validate() error { - if err := common.ValidateTimestamp("page cursor created at", cursor.CreatedAt); err != nil { - return err - } - if err := cursor.UserID.Validate(); err != nil { - return fmt.Errorf("page cursor user id: %w", err) - } - - return nil -} - -// ComparePageOrder compares two listing positions using the frozen ordering: -// `created_at desc`, then `user_id desc`. -func ComparePageOrder(left PageCursor, right PageCursor) int { - switch { - case left.CreatedAt.After(right.CreatedAt): - return -1 - case left.CreatedAt.Before(right.CreatedAt): - return 1 - default: - return -strings.Compare(left.UserID.String(), right.UserID.String()) - } -} - -func (k Keyspace) prefix() string { - prefix := strings.TrimSpace(k.Prefix) - if prefix == "" { - return defaultPrefix - } - - return prefix -} - -func encodeKeyComponent(value string) string { - return base64.RawURLEncoding.EncodeToString([]byte(value)) -} diff --git a/user/internal/adapters/redisstate/keyspace_test.go b/user/internal/adapters/redisstate/keyspace_test.go deleted file mode 100644 index 4ad6952..0000000 --- a/user/internal/adapters/redisstate/keyspace_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package redisstate - -import ( - "testing" - "time" - - "galaxy/user/internal/domain/common" - "galaxy/user/internal/domain/entitlement" - "galaxy/user/internal/domain/policy" - - "github.com/stretchr/testify/require" -) - -func TestKeyspaceBuildsStableKeys(t *testing.T) { - 
t.Parallel() - - keyspace := Keyspace{Prefix: "custom:"} - - require.Equal(t, "custom:account:dXNlci0xMjM", keyspace.Account(common.UserID("user-123"))) - require.Equal(t, "custom:lookup:email:cGlsb3RAZXhhbXBsZS5jb20", keyspace.EmailLookup(common.Email("pilot@example.com"))) - require.Equal(t, "custom:lookup:user-name:cGxheWVyLWFiY2RlZmdo", keyspace.UserNameLookup(common.UserName("player-abcdefgh"))) - require.Equal(t, "custom:blocked-email:cGlsb3RAZXhhbXBsZS5jb20", keyspace.BlockedEmailSubject(common.Email("pilot@example.com"))) - require.Equal(t, "custom:entitlement:record:ZW50aXRsZW1lbnQtMTIz", keyspace.EntitlementRecord(entitlement.EntitlementRecordID("entitlement-123"))) - require.Equal(t, "custom:sanction:record:c2FuY3Rpb24tMQ", keyspace.SanctionRecord(policy.SanctionRecordID("sanction-1"))) - require.Equal(t, "custom:limit:record:bGltaXQtMQ", keyspace.LimitRecord(policy.LimitRecordID("limit-1"))) - require.Equal(t, "custom:sanction:active:dXNlci0xMjM:bG9naW5fYmxvY2s", keyspace.ActiveSanction(common.UserID("user-123"), policy.SanctionCodeLoginBlock)) - require.Equal(t, "custom:limit:active:dXNlci0xMjM:bWF4X293bmVkX3ByaXZhdGVfZ2FtZXM", keyspace.ActiveLimit(common.UserID("user-123"), policy.LimitCodeMaxOwnedPrivateGames)) - require.Equal(t, "custom:index:created-at", keyspace.CreatedAtIndex()) - require.Equal(t, "custom:index:paid-state:cGFpZA", keyspace.PaidStateIndex(entitlement.PaidStatePaid)) - require.Equal(t, "custom:index:paid-expiry:finite", keyspace.FinitePaidExpiryIndex()) - require.Equal(t, "custom:index:declared-country:REU", keyspace.DeclaredCountryIndex(common.CountryCode("DE"))) - require.Equal(t, "custom:index:active-sanction:bG9naW5fYmxvY2s", keyspace.ActiveSanctionCodeIndex(policy.SanctionCodeLoginBlock)) - require.Equal(t, "custom:index:active-limit:bWF4X293bmVkX3ByaXZhdGVfZ2FtZXM", keyspace.ActiveLimitCodeIndex(policy.LimitCodeMaxOwnedPrivateGames)) - require.Equal(t, "custom:index:eligibility:Y2FuX2xvZ2lu:true", 
keyspace.EligibilityMarkerIndex(policy.EligibilityMarkerCanLogin, true)) -} - -func TestComparePageOrder(t *testing.T) { - t.Parallel() - - newer := PageCursor{CreatedAt: time.Unix(20, 0).UTC(), UserID: common.UserID("user-200")} - older := PageCursor{CreatedAt: time.Unix(10, 0).UTC(), UserID: common.UserID("user-100")} - sameTimeHigherUserID := PageCursor{CreatedAt: time.Unix(20, 0).UTC(), UserID: common.UserID("user-300")} - - require.Negative(t, ComparePageOrder(newer, older)) - require.Positive(t, ComparePageOrder(older, newer)) - require.Negative(t, ComparePageOrder(sameTimeHigherUserID, newer)) -} - -func TestScoresUseUnixMicro(t *testing.T) { - t.Parallel() - - value := time.Unix(1_775_240_000, 123_000).UTC() - want := float64(value.UnixMicro()) - - require.Equal(t, want, CreatedAtScore(value)) - require.Equal(t, want, ExpiryScore(value)) -} diff --git a/user/internal/adapters/redisstate/page_token.go b/user/internal/adapters/redisstate/page_token.go deleted file mode 100644 index 8a268a4..0000000 --- a/user/internal/adapters/redisstate/page_token.go +++ /dev/null @@ -1,191 +0,0 @@ -package redisstate - -import ( - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "time" - - "galaxy/user/internal/domain/common" - "galaxy/user/internal/domain/entitlement" - "galaxy/user/internal/domain/policy" -) - -var ( - // ErrPageTokenFiltersMismatch reports that a supplied page token was created - // for a different normalized filter set. - ErrPageTokenFiltersMismatch = errors.New("page token filters do not match current filters") -) - -// UserListFilters stores the frozen admin-listing filter set that becomes part -// of the opaque page token fingerprint. -type UserListFilters struct { - // PaidState stores the coarse free-versus-paid filter. - PaidState entitlement.PaidState - - // PaidExpiresBefore stores the optional finite-paid expiry upper bound. - PaidExpiresBefore *time.Time - - // PaidExpiresAfter stores the optional finite-paid expiry lower bound. 
- PaidExpiresAfter *time.Time - - // DeclaredCountry stores the optional declared-country filter. - DeclaredCountry common.CountryCode - - // SanctionCode stores the optional active-sanction filter. - SanctionCode policy.SanctionCode - - // LimitCode stores the optional active-limit filter. - LimitCode policy.LimitCode - - // CanLogin stores the optional login-eligibility filter. - CanLogin *bool - - // CanCreatePrivateGame stores the optional private-game-create eligibility - // filter. - CanCreatePrivateGame *bool - - // CanJoinGame stores the optional join-game eligibility filter. - CanJoinGame *bool -} - -// Validate reports whether UserListFilters is structurally valid. -func (filters UserListFilters) Validate() error { - if !filters.PaidState.IsKnown() { - return fmt.Errorf("paid state %q is unsupported", filters.PaidState) - } - if filters.PaidExpiresBefore != nil && filters.PaidExpiresBefore.IsZero() { - return fmt.Errorf("paid expires before must not be zero") - } - if filters.PaidExpiresAfter != nil && filters.PaidExpiresAfter.IsZero() { - return fmt.Errorf("paid expires after must not be zero") - } - if !filters.DeclaredCountry.IsZero() { - if err := filters.DeclaredCountry.Validate(); err != nil { - return fmt.Errorf("declared country: %w", err) - } - } - if filters.SanctionCode != "" && !filters.SanctionCode.IsKnown() { - return fmt.Errorf("sanction code %q is unsupported", filters.SanctionCode) - } - if filters.LimitCode != "" && !filters.LimitCode.IsKnown() { - return fmt.Errorf("limit code %q is unsupported", filters.LimitCode) - } - - return nil -} - -// EncodePageToken encodes cursor and filters into the frozen opaque page token -// format. 
-func EncodePageToken(cursor PageCursor, filters UserListFilters) (string, error) { - if err := cursor.Validate(); err != nil { - return "", fmt.Errorf("encode page token: %w", err) - } - fingerprint, err := normalizeFilters(filters) - if err != nil { - return "", fmt.Errorf("encode page token: %w", err) - } - - payload, err := json.Marshal(pageTokenPayload{ - CreatedAt: cursor.CreatedAt.UTC().Format(time.RFC3339Nano), - UserID: cursor.UserID.String(), - Filters: fingerprint, - }) - if err != nil { - return "", fmt.Errorf("encode page token: %w", err) - } - - return base64.RawURLEncoding.EncodeToString(payload), nil -} - -// DecodePageToken decodes raw into the frozen page cursor and verifies that -// the embedded normalized filter set matches expectedFilters. -func DecodePageToken(raw string, expectedFilters UserListFilters) (PageCursor, error) { - fingerprint, err := normalizeFilters(expectedFilters) - if err != nil { - return PageCursor{}, fmt.Errorf("decode page token: %w", err) - } - - payload, err := base64.RawURLEncoding.DecodeString(raw) - if err != nil { - return PageCursor{}, fmt.Errorf("decode page token: %w", err) - } - - var token pageTokenPayload - if err := json.Unmarshal(payload, &token); err != nil { - return PageCursor{}, fmt.Errorf("decode page token: %w", err) - } - if token.Filters != fingerprint { - return PageCursor{}, ErrPageTokenFiltersMismatch - } - - createdAt, err := time.Parse(time.RFC3339Nano, token.CreatedAt) - if err != nil { - return PageCursor{}, fmt.Errorf("decode page token: parse created_at: %w", err) - } - - cursor := PageCursor{ - CreatedAt: createdAt.UTC(), - UserID: common.UserID(token.UserID), - } - if err := cursor.Validate(); err != nil { - return PageCursor{}, fmt.Errorf("decode page token: %w", err) - } - - return cursor, nil -} - -type pageTokenPayload struct { - CreatedAt string `json:"created_at"` - UserID string `json:"user_id"` - Filters normalizedFilterPayload `json:"filters"` -} - -type normalizedFilterPayload 
struct { - PaidState string `json:"paid_state,omitempty"` - PaidExpiresBeforeUTC string `json:"paid_expires_before_utc,omitempty"` - PaidExpiresAfterUTC string `json:"paid_expires_after_utc,omitempty"` - DeclaredCountry string `json:"declared_country,omitempty"` - SanctionCode string `json:"sanction_code,omitempty"` - LimitCode string `json:"limit_code,omitempty"` - CanLogin string `json:"can_login,omitempty"` - CanCreatePrivateGame string `json:"can_create_private_game,omitempty"` - CanJoinGame string `json:"can_join_game,omitempty"` -} - -func normalizeFilters(filters UserListFilters) (normalizedFilterPayload, error) { - if err := filters.Validate(); err != nil { - return normalizedFilterPayload{}, err - } - - return normalizedFilterPayload{ - PaidState: string(filters.PaidState), - PaidExpiresBeforeUTC: formatOptionalTime(filters.PaidExpiresBefore), - PaidExpiresAfterUTC: formatOptionalTime(filters.PaidExpiresAfter), - DeclaredCountry: filters.DeclaredCountry.String(), - SanctionCode: string(filters.SanctionCode), - LimitCode: string(filters.LimitCode), - CanLogin: formatOptionalBool(filters.CanLogin), - CanCreatePrivateGame: formatOptionalBool(filters.CanCreatePrivateGame), - CanJoinGame: formatOptionalBool(filters.CanJoinGame), - }, nil -} - -func formatOptionalTime(value *time.Time) string { - if value == nil { - return "" - } - - return value.UTC().Format(time.RFC3339Nano) -} - -func formatOptionalBool(value *bool) string { - if value == nil { - return "" - } - if *value { - return "true" - } - return "false" -} diff --git a/user/internal/adapters/redisstate/page_token_test.go b/user/internal/adapters/redisstate/page_token_test.go deleted file mode 100644 index 8455b04..0000000 --- a/user/internal/adapters/redisstate/page_token_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package redisstate - -import ( - "testing" - "time" - - "galaxy/user/internal/domain/common" - "galaxy/user/internal/domain/entitlement" - "galaxy/user/internal/domain/policy" - - 
"github.com/stretchr/testify/require" -) - -func TestEncodeDecodePageToken(t *testing.T) { - t.Parallel() - - before := time.Unix(1_775_250_000, 0).UTC() - after := time.Unix(1_775_240_000, 0).UTC() - canLogin := true - canCreate := false - canJoin := true - - filters := UserListFilters{ - PaidState: entitlement.PaidStatePaid, - PaidExpiresBefore: &before, - PaidExpiresAfter: &after, - DeclaredCountry: common.CountryCode("DE"), - SanctionCode: policy.SanctionCodeLoginBlock, - LimitCode: policy.LimitCodeMaxOwnedPrivateGames, - CanLogin: &canLogin, - CanCreatePrivateGame: &canCreate, - CanJoinGame: &canJoin, - } - cursor := PageCursor{ - CreatedAt: time.Unix(1_775_240_100, 987_000_000).UTC(), - UserID: common.UserID("user-123"), - } - - token, err := EncodePageToken(cursor, filters) - require.NoError(t, err) - - decoded, err := DecodePageToken(token, filters) - require.NoError(t, err) - require.Equal(t, cursor, decoded) -} - -func TestDecodePageTokenFilterMismatch(t *testing.T) { - t.Parallel() - - cursor := PageCursor{ - CreatedAt: time.Unix(1_775_240_100, 0).UTC(), - UserID: common.UserID("user-123"), - } - filters := UserListFilters{ - PaidState: entitlement.PaidStatePaid, - } - - token, err := EncodePageToken(cursor, filters) - require.NoError(t, err) - - _, err = DecodePageToken(token, UserListFilters{PaidState: entitlement.PaidStateFree}) - require.ErrorIs(t, err, ErrPageTokenFiltersMismatch) -} - -func TestDecodePageTokenRejectsInvalidInput(t *testing.T) { - t.Parallel() - - _, err := DecodePageToken("%%%not-base64%%%", UserListFilters{}) - require.Error(t, err) -} diff --git a/user/internal/app/runtime.go b/user/internal/app/runtime.go index 31b08fd..c8729c9 100644 --- a/user/internal/app/runtime.go +++ b/user/internal/app/runtime.go @@ -3,16 +3,20 @@ package app import ( "context" + "database/sql" "errors" "fmt" "log/slog" "strings" "sync" + "galaxy/postgres" + "galaxy/redisconn" "galaxy/user/internal/adapters/local" + 
"galaxy/user/internal/adapters/postgres/migrations" + pguserstore "galaxy/user/internal/adapters/postgres/userstore" "galaxy/user/internal/adapters/redis/domainevents" "galaxy/user/internal/adapters/redis/lifecycleevents" - "galaxy/user/internal/adapters/redis/userstore" "galaxy/user/internal/adminapi" "galaxy/user/internal/api/internalhttp" "galaxy/user/internal/config" @@ -25,16 +29,14 @@ import ( "galaxy/user/internal/service/policysvc" "galaxy/user/internal/service/selfservice" "galaxy/user/internal/telemetry" + + goredis "github.com/redis/go-redis/v9" ) type pinger interface { Ping(context.Context) error } -type closer interface { - Close() error -} - // Runtime owns the runnable user-service process plus the cleanup functions // that release runtime resources after shutdown. type Runtime struct { @@ -93,61 +95,75 @@ func NewRuntime(ctx context.Context, cfg config.Config, logger *slog.Logger) (*R return telemetryRuntime.Shutdown(shutdownCtx) }) - store, err := userstore.New(userstore.Config{ - Addr: cfg.Redis.Addr, - Username: cfg.Redis.Username, - Password: cfg.Redis.Password, - DB: cfg.Redis.DB, - TLSEnabled: cfg.Redis.TLSEnabled, - KeyspacePrefix: cfg.Redis.KeyspacePrefix, - OperationTimeout: cfg.Redis.OperationTimeout, - }) - if err != nil { - return cleanupOnError(fmt.Errorf("new user-service runtime: redis user store: %w", err)) + // Open the shared Redis master client for both stream publishers. The + // client is owned by the runtime; publishers borrow it through their + // New(client, cfg) constructors. 
+ redisClient := redisconn.NewMasterClient(cfg.Redis.Conn) + if err := redisconn.Instrument(redisClient, + redisconn.WithTracerProvider(telemetryRuntime.TracerProvider()), + redisconn.WithMeterProvider(telemetryRuntime.MeterProvider()), + ); err != nil { + return cleanupOnError(fmt.Errorf("new user-service runtime: instrument redis client: %w", err)) } - runtime.cleanupFns = append(runtime.cleanupFns, store.Close) - - if err := pingDependency(ctx, "redis user store", store); err != nil { + runtime.cleanupFns = append(runtime.cleanupFns, redisClient.Close) + if err := pingRedisClient(ctx, redisClient, cfg.Redis.Conn); err != nil { return cleanupOnError(fmt.Errorf("new user-service runtime: %w", err)) } - domainEventPublisher, err := domainevents.New(domainevents.Config{ - Addr: cfg.Redis.Addr, - Username: cfg.Redis.Username, - Password: cfg.Redis.Password, - DB: cfg.Redis.DB, - TLSEnabled: cfg.Redis.TLSEnabled, + // Open the PostgreSQL pool, attach instrumentation, ping it, and apply + // embedded migrations strictly before any HTTP listener opens. A failure + // at any of these steps is fatal: the service exits with non-zero status. 
+ pgPool, err := postgres.OpenPrimary(ctx, cfg.Postgres.Conn, + postgres.WithTracerProvider(telemetryRuntime.TracerProvider()), + postgres.WithMeterProvider(telemetryRuntime.MeterProvider()), + ) + if err != nil { + return cleanupOnError(fmt.Errorf("new user-service runtime: open postgres primary: %w", err)) + } + runtime.cleanupFns = append(runtime.cleanupFns, pgPool.Close) + unregisterDBStats, err := postgres.InstrumentDBStats(pgPool, + postgres.WithMeterProvider(telemetryRuntime.MeterProvider()), + ) + if err != nil { + return cleanupOnError(fmt.Errorf("new user-service runtime: instrument postgres db stats: %w", err)) + } + runtime.cleanupFns = append(runtime.cleanupFns, unregisterDBStats) + if err := postgres.Ping(ctx, pgPool, cfg.Postgres.Conn.OperationTimeout); err != nil { + return cleanupOnError(fmt.Errorf("new user-service runtime: %w", err)) + } + migrationsFS := migrations.FS() + if err := postgres.RunMigrations(ctx, pgPool, migrationsFS, "."); err != nil { + return cleanupOnError(fmt.Errorf("new user-service runtime: run postgres migrations: %w", err)) + } + + store, err := pguserstore.New(pguserstore.Config{ + DB: pgPool, + OperationTimeout: cfg.Postgres.Conn.OperationTimeout, + }) + if err != nil { + return cleanupOnError(fmt.Errorf("new user-service runtime: postgres user store: %w", err)) + } + if err := pingDependency(ctx, "postgres user store", store); err != nil { + return cleanupOnError(fmt.Errorf("new user-service runtime: %w", err)) + } + + domainEventPublisher, err := domainevents.New(redisClient, domainevents.Config{ Stream: cfg.Redis.DomainEventsStream, StreamMaxLen: cfg.Redis.DomainEventsStreamMaxLen, - OperationTimeout: cfg.Redis.OperationTimeout, + OperationTimeout: cfg.Redis.Conn.OperationTimeout, }) if err != nil { return cleanupOnError(fmt.Errorf("new user-service runtime: redis domain-event publisher: %w", err)) } - runtime.cleanupFns = append(runtime.cleanupFns, domainEventPublisher.Close) - if err := pingDependency(ctx, "redis 
domain-event publisher", domainEventPublisher); err != nil { - return cleanupOnError(fmt.Errorf("new user-service runtime: %w", err)) - } - - lifecycleEventPublisher, err := lifecycleevents.New(lifecycleevents.Config{ - Addr: cfg.Redis.Addr, - Username: cfg.Redis.Username, - Password: cfg.Redis.Password, - DB: cfg.Redis.DB, - TLSEnabled: cfg.Redis.TLSEnabled, + lifecycleEventPublisher, err := lifecycleevents.New(redisClient, lifecycleevents.Config{ Stream: cfg.Redis.LifecycleEventsStream, StreamMaxLen: cfg.Redis.LifecycleEventsStreamMaxLen, - OperationTimeout: cfg.Redis.OperationTimeout, + OperationTimeout: cfg.Redis.Conn.OperationTimeout, }) if err != nil { return cleanupOnError(fmt.Errorf("new user-service runtime: redis lifecycle-event publisher: %w", err)) } - runtime.cleanupFns = append(runtime.cleanupFns, lifecycleEventPublisher.Close) - - if err := pingDependency(ctx, "redis lifecycle-event publisher", lifecycleEventPublisher); err != nil { - return cleanupOnError(fmt.Errorf("new user-service runtime: %w", err)) - } clock := local.Clock{} idGenerator := local.IDGenerator{} @@ -517,4 +533,24 @@ func pingDependency(ctx context.Context, name string, dependency pinger) error { return nil } -var _ closer = (*userstore.Store)(nil) +func pingRedisClient(ctx context.Context, client *goredis.Client, cfg redisconn.Config) error { + pingCtx, cancel := context.WithTimeout(ctx, cfg.OperationTimeout) + defer cancel() + if err := client.Ping(pingCtx).Err(); err != nil { + return fmt.Errorf("ping redis master: %w", err) + } + return nil +} + +// Compile-time guard that the postgres-backed user store implements the +// closer pattern relied on by cleanupFns. Close is a no-op on the postgres +// store; the underlying *sql.DB is closed via cleanupFns appended above. +var _ interface{ Close() error } = (*pguserstore.Store)(nil) + +// Compile-time guard that the postgres-backed user store also satisfies the +// pinger contract used by pingDependency. 
+var _ pinger = (*pguserstore.Store)(nil) + +// Compile-time guard kept from the previous implementation so future readers +// can trust the *sql.DB life cycle remains consistent with cleanupFns. +var _ *sql.DB = (*sql.DB)(nil) diff --git a/user/internal/config/config.go b/user/internal/config/config.go index d61735e..bf0d8b6 100644 --- a/user/internal/config/config.go +++ b/user/internal/config/config.go @@ -3,16 +3,20 @@ package config import ( - "crypto/tls" "fmt" "net" "os" "strconv" "strings" "time" + + "galaxy/postgres" + "galaxy/redisconn" ) const ( + envPrefix = "USERSERVICE" + shutdownTimeoutEnvVar = "USERSERVICE_SHUTDOWN_TIMEOUT" logLevelEnvVar = "USERSERVICE_LOG_LEVEL" @@ -27,13 +31,6 @@ const ( adminHTTPReadTimeoutEnvVar = "USERSERVICE_ADMIN_HTTP_READ_TIMEOUT" adminHTTPIdleTimeoutEnvVar = "USERSERVICE_ADMIN_HTTP_IDLE_TIMEOUT" - redisAddrEnvVar = "USERSERVICE_REDIS_ADDR" - redisUsernameEnvVar = "USERSERVICE_REDIS_USERNAME" - redisPasswordEnvVar = "USERSERVICE_REDIS_PASSWORD" - redisDBEnvVar = "USERSERVICE_REDIS_DB" - redisTLSEnabledEnvVar = "USERSERVICE_REDIS_TLS_ENABLED" - redisOperationTimeoutEnvVar = "USERSERVICE_REDIS_OPERATION_TIMEOUT" - redisKeyspacePrefixEnvVar = "USERSERVICE_REDIS_KEYSPACE_PREFIX" redisDomainEventsStreamEnvVar = "USERSERVICE_REDIS_DOMAIN_EVENTS_STREAM" redisDomainEventsStreamMaxLenEnvVar = "USERSERVICE_REDIS_DOMAIN_EVENTS_STREAM_MAX_LEN" redisLifecycleEventsStreamEnvVar = "USERSERVICE_REDIS_LIFECYCLE_EVENTS_STREAM" @@ -48,26 +45,23 @@ const ( otelStdoutTracesEnabledEnvVar = "USERSERVICE_OTEL_STDOUT_TRACES_ENABLED" otelStdoutMetricsEnabledEnvVar = "USERSERVICE_OTEL_STDOUT_METRICS_ENABLED" - defaultShutdownTimeout = 5 * time.Second - defaultLogLevel = "info" - defaultInternalHTTPAddr = ":8091" - defaultAdminHTTPAddr = "" - defaultReadHeaderTimeout = 2 * time.Second - defaultReadTimeout = 10 * time.Second - defaultIdleTimeout = time.Minute - defaultRequestTimeout = 3 * time.Second - defaultRedisDB = 0 - defaultRedisOperationTimeout 
= 250 * time.Millisecond - defaultRedisKeyspacePrefix = "user:" + defaultShutdownTimeout = 5 * time.Second + defaultLogLevel = "info" + defaultInternalHTTPAddr = ":8091" + defaultAdminHTTPAddr = "" + defaultReadHeaderTimeout = 2 * time.Second + defaultReadTimeout = 10 * time.Second + defaultIdleTimeout = time.Minute + defaultRequestTimeout = 3 * time.Second defaultDomainEventsStream = "user:domain_events" defaultDomainEventsStreamMaxLen = 1024 defaultLifecycleEventsStream = "user:lifecycle_events" defaultLifecycleEventsStreamMaxLen = 1024 - defaultOTelServiceName = "galaxy-user" - otelExporterNone = "none" - otelExporterOTLP = "otlp" - otelProtocolHTTPProtobuf = "http/protobuf" - otelProtocolGRPC = "grpc" + defaultOTelServiceName = "galaxy-user" + otelExporterNone = "none" + otelExporterOTLP = "otlp" + otelProtocolHTTPProtobuf = "http/protobuf" + otelProtocolGRPC = "grpc" ) // Config stores the full user-service process configuration. @@ -85,9 +79,14 @@ type Config struct { // AdminHTTP configures the optional private admin HTTP listener. AdminHTTP AdminHTTPConfig - // Redis configures the Redis-backed user store and domain-event publisher. + // Redis configures the Redis-backed event publishers (domain + lifecycle + // streams) plus the connection topology consumed via `pkg/redisconn`. Redis RedisConfig + // Postgres configures the PostgreSQL-backed durable store consumed via + // `pkg/postgres`. + Postgres PostgresConfig + // Telemetry configures the process-wide OpenTelemetry runtime. Telemetry TelemetryConfig } @@ -171,28 +170,12 @@ func (cfg AdminHTTPConfig) Validate() error { } } -// RedisConfig configures the Redis-backed store and domain-event publisher. +// RedisConfig configures the Redis-backed event publishers and the connection +// topology shared with `pkg/redisconn`. type RedisConfig struct { - // Addr stores the Redis network address. - Addr string - - // Username stores the optional Redis ACL username. 
- Username string - - // Password stores the optional Redis ACL password. - Password string - - // DB stores the Redis logical database index. - DB int - - // TLSEnabled reports whether TLS must be used for Redis connections. - TLSEnabled bool - - // OperationTimeout bounds one Redis round trip. - OperationTimeout time.Duration - - // KeyspacePrefix stores the root prefix of the service-owned Redis keyspace. - KeyspacePrefix string + // Conn carries the connection topology (master, replicas, password, db, + // per-call timeout). Loaded via redisconn.LoadFromEnv("USERSERVICE"). + Conn redisconn.Config // DomainEventsStream stores the Redis Stream key used for auxiliary // post-commit domain events. @@ -203,8 +186,8 @@ type RedisConfig struct { DomainEventsStreamMaxLen int64 // LifecycleEventsStream stores the Redis Stream key used for trusted - // user-lifecycle events (permanent_block, delete) consumed by - // `Game Lobby` for Race Name Directory cascade release. + // user-lifecycle events (permanent_block, delete) consumed by `Game + // Lobby` for Race Name Directory cascade release. LifecycleEventsStream string // LifecycleEventsStreamMaxLen bounds the lifecycle-events Redis Stream @@ -212,27 +195,12 @@ type RedisConfig struct { LifecycleEventsStreamMaxLen int64 } -// TLSConfig returns the conservative TLS configuration used by Redis adapters -// when TLSEnabled is true. -func (cfg RedisConfig) TLSConfig() *tls.Config { - if !cfg.TLSEnabled { - return nil - } - - return &tls.Config{MinVersion: tls.VersionTLS12} -} - // Validate reports whether cfg stores a usable Redis configuration. 
func (cfg RedisConfig) Validate() error { + if err := cfg.Conn.Validate(); err != nil { + return err + } switch { - case strings.TrimSpace(cfg.Addr) == "": - return fmt.Errorf("redis addr must not be empty") - case cfg.DB < 0: - return fmt.Errorf("redis db must not be negative") - case cfg.OperationTimeout <= 0: - return fmt.Errorf("redis operation timeout must be positive") - case strings.TrimSpace(cfg.KeyspacePrefix) == "": - return fmt.Errorf("redis keyspace prefix must not be empty") case strings.TrimSpace(cfg.DomainEventsStream) == "": return fmt.Errorf("redis domain events stream must not be empty") case cfg.DomainEventsStreamMaxLen <= 0: @@ -246,6 +214,20 @@ func (cfg RedisConfig) Validate() error { } } +// PostgresConfig configures the PostgreSQL-backed durable store. It wraps +// the shared `pkg/postgres.Config` so callers receive the same struct shape +// across services. +type PostgresConfig struct { + // Conn stores the primary plus replica DSN topology and pool tuning. + // Loaded via postgres.LoadFromEnv("USERSERVICE"). + Conn postgres.Config +} + +// Validate reports whether cfg stores a usable PostgreSQL configuration. +func (cfg PostgresConfig) Validate() error { + return cfg.Conn.Validate() +} + // TelemetryConfig configures the user-service OpenTelemetry runtime. type TelemetryConfig struct { // ServiceName overrides the default OpenTelemetry service name. @@ -313,7 +295,9 @@ func DefaultAdminHTTPConfig() AdminHTTPConfig { } // DefaultConfig returns the default process configuration with all optional -// values filled. +// values filled. Required connection coordinates (Redis master/password, +// Postgres primary DSN) remain zero-valued and must be supplied via +// LoadFromEnv. 
func DefaultConfig() Config { return Config{ ShutdownTimeout: defaultShutdownTimeout, @@ -329,14 +313,15 @@ func DefaultConfig() Config { }, AdminHTTP: DefaultAdminHTTPConfig(), Redis: RedisConfig{ - DB: defaultRedisDB, - OperationTimeout: defaultRedisOperationTimeout, - KeyspacePrefix: defaultRedisKeyspacePrefix, + Conn: redisconn.DefaultConfig(), DomainEventsStream: defaultDomainEventsStream, DomainEventsStreamMaxLen: defaultDomainEventsStreamMaxLen, LifecycleEventsStream: defaultLifecycleEventsStream, LifecycleEventsStreamMaxLen: defaultLifecycleEventsStreamMaxLen, }, + Postgres: PostgresConfig{ + Conn: postgres.DefaultConfig(), + }, Telemetry: TelemetryConfig{ ServiceName: defaultOTelServiceName, TracesExporter: otelExporterNone, @@ -360,6 +345,9 @@ func (cfg Config) Validate() error { if err := cfg.Redis.Validate(); err != nil { return fmt.Errorf("redis config: %w", err) } + if err := cfg.Postgres.Validate(); err != nil { + return fmt.Errorf("postgres config: %w", err) + } if _, err := parseLogLevel(cfg.Logging.Level); err != nil { return fmt.Errorf("logging config: %w", err) } @@ -370,7 +358,11 @@ func (cfg Config) Validate() error { return nil } -// LoadFromEnv loads Config from the process environment. +// LoadFromEnv loads Config from the process environment. Connection topology +// for Redis and PostgreSQL is delegated to the shared `pkg/redisconn` and +// `pkg/postgres` LoadFromEnv helpers, which enforce the architectural rules +// (mandatory Redis password, deprecated TLS/USERNAME variables hard-fail, +// required Postgres primary DSN). 
func LoadFromEnv() (Config, error) { cfg := DefaultConfig() @@ -413,22 +405,11 @@ func LoadFromEnv() (Config, error) { return Config{}, err } - cfg.Redis.Addr = loadString(redisAddrEnvVar, cfg.Redis.Addr) - cfg.Redis.Username = loadString(redisUsernameEnvVar, cfg.Redis.Username) - cfg.Redis.Password = loadString(redisPasswordEnvVar, cfg.Redis.Password) - cfg.Redis.DB, err = loadInt(redisDBEnvVar, cfg.Redis.DB) + redisConn, err := redisconn.LoadFromEnv(envPrefix) if err != nil { return Config{}, err } - cfg.Redis.TLSEnabled, err = loadBool(redisTLSEnabledEnvVar, cfg.Redis.TLSEnabled) - if err != nil { - return Config{}, err - } - cfg.Redis.OperationTimeout, err = loadDuration(redisOperationTimeoutEnvVar, cfg.Redis.OperationTimeout) - if err != nil { - return Config{}, err - } - cfg.Redis.KeyspacePrefix = loadString(redisKeyspacePrefixEnvVar, cfg.Redis.KeyspacePrefix) + cfg.Redis.Conn = redisConn cfg.Redis.DomainEventsStream = loadString(redisDomainEventsStreamEnvVar, cfg.Redis.DomainEventsStream) cfg.Redis.DomainEventsStreamMaxLen, err = loadInt64(redisDomainEventsStreamMaxLenEnvVar, cfg.Redis.DomainEventsStreamMaxLen) if err != nil { @@ -440,6 +421,12 @@ func LoadFromEnv() (Config, error) { return Config{}, err } + pgConn, err := postgres.LoadFromEnv(envPrefix) + if err != nil { + return Config{}, err + } + cfg.Postgres.Conn = pgConn + cfg.Telemetry.ServiceName = loadString(otelServiceNameEnvVar, cfg.Telemetry.ServiceName) cfg.Telemetry.TracesExporter = normalizeExporterValue(loadString(otelTracesExporterEnvVar, cfg.Telemetry.TracesExporter)) cfg.Telemetry.MetricsExporter = normalizeExporterValue(loadString(otelMetricsExporterEnvVar, cfg.Telemetry.MetricsExporter)) @@ -492,20 +479,6 @@ func loadDuration(envName string, defaultValue time.Duration) (time.Duration, er return duration, nil } -func loadInt(envName string, defaultValue int) (int, error) { - value, ok := os.LookupEnv(envName) - if !ok { - return defaultValue, nil - } - - parsedValue, err := 
strconv.Atoi(strings.TrimSpace(value)) - if err != nil { - return 0, fmt.Errorf("%s: parse int: %w", envName, err) - } - - return parsedValue, nil -} - func loadInt64(envName string, defaultValue int64) (int64, error) { value, ok := os.LookupEnv(envName) if !ok { diff --git a/user/internal/config/config_test.go b/user/internal/config/config_test.go index 82fc75b..36386f7 100644 --- a/user/internal/config/config_test.go +++ b/user/internal/config/config_test.go @@ -1,14 +1,37 @@ package config import ( + "strings" "testing" "time" "github.com/stretchr/testify/require" ) +const ( + redisMasterAddrEnvVar = "USERSERVICE_REDIS_MASTER_ADDR" + redisReplicaAddrsEnvVar = "USERSERVICE_REDIS_REPLICA_ADDRS" + redisPasswordEnvVar = "USERSERVICE_REDIS_PASSWORD" + redisDBEnvVar = "USERSERVICE_REDIS_DB" + redisOperationTimeoutEnvVar = "USERSERVICE_REDIS_OPERATION_TIMEOUT" + redisLegacyAddrEnvVar = "USERSERVICE_REDIS_ADDR" + redisLegacyUsernameEnvVar = "USERSERVICE_REDIS_USERNAME" + redisLegacyTLSEnabledEnvVar = "USERSERVICE_REDIS_TLS_ENABLED" + redisLegacyKeyspacePrefixEnv = "USERSERVICE_REDIS_KEYSPACE_PREFIX" + postgresPrimaryDSNEnvVar = "USERSERVICE_POSTGRES_PRIMARY_DSN" + postgresReplicaDSNsEnvVar = "USERSERVICE_POSTGRES_REPLICA_DSNS" + postgresOperationTimeoutEnvVar = "USERSERVICE_POSTGRES_OPERATION_TIMEOUT" + postgresMaxOpenConnsEnvVar = "USERSERVICE_POSTGRES_MAX_OPEN_CONNS" + postgresMaxIdleConnsEnvVar = "USERSERVICE_POSTGRES_MAX_IDLE_CONNS" + postgresConnMaxLifetimeEnvVar = "USERSERVICE_POSTGRES_CONN_MAX_LIFETIME" + + defaultPostgresDSN = "postgres://userservice:secret@127.0.0.1:5432/galaxy?search_path=user&sslmode=disable" +) + func TestLoadFromEnvUsesDefaults(t *testing.T) { - t.Setenv(redisAddrEnvVar, "127.0.0.1:6379") + t.Setenv(redisMasterAddrEnvVar, "127.0.0.1:6379") + t.Setenv(redisPasswordEnvVar, "secret") + t.Setenv(postgresPrimaryDSNEnvVar, defaultPostgresDSN) cfg, err := LoadFromEnv() require.NoError(t, err) @@ -18,10 +41,18 @@ func TestLoadFromEnvUsesDefaults(t 
*testing.T) { require.Equal(t, defaults.Logging.Level, cfg.Logging.Level) require.Equal(t, defaults.InternalHTTP, cfg.InternalHTTP) require.Equal(t, defaults.AdminHTTP, cfg.AdminHTTP) - require.Equal(t, "127.0.0.1:6379", cfg.Redis.Addr) - require.Equal(t, defaults.Redis.DB, cfg.Redis.DB) + require.Equal(t, "127.0.0.1:6379", cfg.Redis.Conn.MasterAddr) + require.Equal(t, "secret", cfg.Redis.Conn.Password) + require.Equal(t, defaults.Redis.Conn.DB, cfg.Redis.Conn.DB) require.Equal(t, defaults.Redis.DomainEventsStream, cfg.Redis.DomainEventsStream) require.Equal(t, defaults.Redis.DomainEventsStreamMaxLen, cfg.Redis.DomainEventsStreamMaxLen) + require.Equal(t, defaults.Redis.LifecycleEventsStream, cfg.Redis.LifecycleEventsStream) + require.Equal(t, defaults.Redis.LifecycleEventsStreamMaxLen, cfg.Redis.LifecycleEventsStreamMaxLen) + require.Equal(t, defaultPostgresDSN, cfg.Postgres.Conn.PrimaryDSN) + require.Equal(t, defaults.Postgres.Conn.OperationTimeout, cfg.Postgres.Conn.OperationTimeout) + require.Equal(t, defaults.Postgres.Conn.MaxOpenConns, cfg.Postgres.Conn.MaxOpenConns) + require.Equal(t, defaults.Postgres.Conn.MaxIdleConns, cfg.Postgres.Conn.MaxIdleConns) + require.Equal(t, defaults.Postgres.Conn.ConnMaxLifetime, cfg.Postgres.Conn.ConnMaxLifetime) require.Equal(t, defaults.Telemetry, cfg.Telemetry) } @@ -33,15 +64,21 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) { t.Setenv(internalHTTPRequestTimeoutEnvVar, "750ms") t.Setenv(adminHTTPAddrEnvVar, "127.0.0.1:19091") t.Setenv(adminHTTPIdleTimeoutEnvVar, "90s") - t.Setenv(redisAddrEnvVar, "127.0.0.1:6380") - t.Setenv(redisUsernameEnvVar, "alice") - t.Setenv(redisPasswordEnvVar, "secret") + t.Setenv(redisMasterAddrEnvVar, "127.0.0.1:6380") + t.Setenv(redisReplicaAddrsEnvVar, "127.0.0.1:6381,127.0.0.1:6382") + t.Setenv(redisPasswordEnvVar, "redis-secret") t.Setenv(redisDBEnvVar, "3") - t.Setenv(redisTLSEnabledEnvVar, "true") t.Setenv(redisOperationTimeoutEnvVar, "900ms") - t.Setenv(redisKeyspacePrefixEnvVar, 
"user:custom:") t.Setenv(redisDomainEventsStreamEnvVar, "user:test_events") t.Setenv(redisDomainEventsStreamMaxLenEnvVar, "2048") + t.Setenv(redisLifecycleEventsStreamEnvVar, "user:test_lifecycle") + t.Setenv(redisLifecycleEventsStreamMaxLenEnvVar, "512") + t.Setenv(postgresPrimaryDSNEnvVar, defaultPostgresDSN) + t.Setenv(postgresReplicaDSNsEnvVar, "postgres://userservice:secret@replica-a/galaxy?sslmode=disable,postgres://userservice:secret@replica-b/galaxy?sslmode=disable") + t.Setenv(postgresOperationTimeoutEnvVar, "2s") + t.Setenv(postgresMaxOpenConnsEnvVar, "40") + t.Setenv(postgresMaxIdleConnsEnvVar, "8") + t.Setenv(postgresConnMaxLifetimeEnvVar, "45m") t.Setenv(otelServiceNameEnvVar, "galaxy-user-stage12") t.Setenv(otelTracesExporterEnvVar, "otlp") t.Setenv(otelMetricsExporterEnvVar, "otlp") @@ -60,15 +97,24 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) { require.Equal(t, 750*time.Millisecond, cfg.InternalHTTP.RequestTimeout) require.Equal(t, "127.0.0.1:19091", cfg.AdminHTTP.Addr) require.Equal(t, 90*time.Second, cfg.AdminHTTP.IdleTimeout) - require.Equal(t, "127.0.0.1:6380", cfg.Redis.Addr) - require.Equal(t, "alice", cfg.Redis.Username) - require.Equal(t, "secret", cfg.Redis.Password) - require.Equal(t, 3, cfg.Redis.DB) - require.True(t, cfg.Redis.TLSEnabled) - require.Equal(t, 900*time.Millisecond, cfg.Redis.OperationTimeout) - require.Equal(t, "user:custom:", cfg.Redis.KeyspacePrefix) + require.Equal(t, "127.0.0.1:6380", cfg.Redis.Conn.MasterAddr) + require.Equal(t, []string{"127.0.0.1:6381", "127.0.0.1:6382"}, cfg.Redis.Conn.ReplicaAddrs) + require.Equal(t, "redis-secret", cfg.Redis.Conn.Password) + require.Equal(t, 3, cfg.Redis.Conn.DB) + require.Equal(t, 900*time.Millisecond, cfg.Redis.Conn.OperationTimeout) require.Equal(t, "user:test_events", cfg.Redis.DomainEventsStream) require.Equal(t, int64(2048), cfg.Redis.DomainEventsStreamMaxLen) + require.Equal(t, "user:test_lifecycle", cfg.Redis.LifecycleEventsStream) + require.Equal(t, int64(512), 
cfg.Redis.LifecycleEventsStreamMaxLen) + require.Equal(t, defaultPostgresDSN, cfg.Postgres.Conn.PrimaryDSN) + require.Equal(t, []string{ + "postgres://userservice:secret@replica-a/galaxy?sslmode=disable", + "postgres://userservice:secret@replica-b/galaxy?sslmode=disable", + }, cfg.Postgres.Conn.ReplicaDSNs) + require.Equal(t, 2*time.Second, cfg.Postgres.Conn.OperationTimeout) + require.Equal(t, 40, cfg.Postgres.Conn.MaxOpenConns) + require.Equal(t, 8, cfg.Postgres.Conn.MaxIdleConns) + require.Equal(t, 45*time.Minute, cfg.Postgres.Conn.ConnMaxLifetime) require.Equal(t, "galaxy-user-stage12", cfg.Telemetry.ServiceName) require.Equal(t, "otlp", cfg.Telemetry.TracesExporter) require.Equal(t, "otlp", cfg.Telemetry.MetricsExporter) @@ -78,29 +124,90 @@ func TestLoadFromEnvAppliesOverrides(t *testing.T) { require.True(t, cfg.Telemetry.StdoutMetricsEnabled) } +// TestLoadFromEnvRejectsLegacyRedisVars verifies the architectural rule from +// PG_PLAN.md §3 / ARCHITECTURE.md §Persistence Backends: legacy +// USERSERVICE_REDIS_TLS_ENABLED and USERSERVICE_REDIS_USERNAME variables must +// produce a startup error from `pkg/redisconn` so operators see the breaking +// rename immediately. 
+func TestLoadFromEnvRejectsLegacyRedisVars(t *testing.T) { + cases := []struct { + name string + envName string + }{ + {name: "tls_enabled deprecated", envName: redisLegacyTLSEnabledEnvVar}, + {name: "username deprecated", envName: redisLegacyUsernameEnvVar}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Setenv(redisMasterAddrEnvVar, "127.0.0.1:6379") + t.Setenv(redisPasswordEnvVar, "secret") + t.Setenv(postgresPrimaryDSNEnvVar, defaultPostgresDSN) + t.Setenv(tc.envName, "true") + + _, err := LoadFromEnv() + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), "no longer supported")) + }) + } +} + +// TestLoadFromEnvRequiresMandatoryFields covers the architectural rule that +// Redis password, master address and Postgres primary DSN are mandatory; +// missing any one returns a startup error. +func TestLoadFromEnvRequiresMandatoryFields(t *testing.T) { + t.Run("missing redis password", func(t *testing.T) { + t.Setenv(redisMasterAddrEnvVar, "127.0.0.1:6379") + t.Setenv(postgresPrimaryDSNEnvVar, defaultPostgresDSN) + + _, err := LoadFromEnv() + require.Error(t, err) + }) + t.Run("missing redis master addr", func(t *testing.T) { + t.Setenv(redisPasswordEnvVar, "secret") + t.Setenv(postgresPrimaryDSNEnvVar, defaultPostgresDSN) + + _, err := LoadFromEnv() + require.Error(t, err) + }) + t.Run("missing postgres dsn", func(t *testing.T) { + t.Setenv(redisMasterAddrEnvVar, "127.0.0.1:6379") + t.Setenv(redisPasswordEnvVar, "secret") + + _, err := LoadFromEnv() + require.Error(t, err) + }) +} + func TestLoadFromEnvRejectsInvalidValues(t *testing.T) { - tests := []struct { + cases := []struct { name string envName string envVal string }{ {name: "invalid duration", envName: shutdownTimeoutEnvVar, envVal: "later"}, - {name: "invalid bool", envName: redisTLSEnabledEnvVar, envVal: "sometimes"}, {name: "invalid log level", envName: logLevelEnvVar, envVal: "verbose"}, - {name: "invalid int", envName: redisDBEnvVar, envVal: "db-three"}, + 
{name: "invalid redis db", envName: redisDBEnvVar, envVal: "db-three"}, {name: "invalid stream max len", envName: redisDomainEventsStreamMaxLenEnvVar, envVal: "many"}, {name: "invalid traces exporter", envName: otelTracesExporterEnvVar, envVal: "zipkin"}, {name: "invalid metrics protocol", envName: otelExporterOTLPMetricsProtocolEnvVar, envVal: "udp"}, + {name: "invalid postgres operation timeout", envName: postgresOperationTimeoutEnvVar, envVal: "soon"}, + {name: "invalid postgres max open conns", envName: postgresMaxOpenConnsEnvVar, envVal: "none"}, } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Setenv(redisAddrEnvVar, "127.0.0.1:6379") - t.Setenv(tt.envName, tt.envVal) + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Setenv(redisMasterAddrEnvVar, "127.0.0.1:6379") + t.Setenv(redisPasswordEnvVar, "secret") + t.Setenv(postgresPrimaryDSNEnvVar, defaultPostgresDSN) + t.Setenv(tc.envName, tc.envVal) _, err := LoadFromEnv() require.Error(t, err) }) } } + +// Suppress unused-warning for legacy keyspace prefix env reference: keep the +// constant in test scope for documentation, though no current code uses it. 
+var _ = redisLegacyAddrEnvVar +var _ = redisLegacyKeyspacePrefixEnv diff --git a/user/internal/service/lobbyeligibility/service_test.go b/user/internal/service/lobbyeligibility/service_test.go index e508d78..2ac8ba6 100644 --- a/user/internal/service/lobbyeligibility/service_test.go +++ b/user/internal/service/lobbyeligibility/service_test.go @@ -5,15 +5,12 @@ import ( "testing" "time" - "galaxy/user/internal/adapters/redis/userstore" "galaxy/user/internal/domain/account" "galaxy/user/internal/domain/common" "galaxy/user/internal/domain/entitlement" "galaxy/user/internal/domain/policy" "galaxy/user/internal/ports" - "galaxy/user/internal/service/entitlementsvc" - "github.com/alicebob/miniredis/v2" "github.com/stretchr/testify/require" ) @@ -249,66 +246,14 @@ func TestSnapshotReaderExecutePermanentBlockCollapsesMarkers(t *testing.T) { } } -func TestSnapshotReaderExecuteRepairsExpiredPaidSnapshotWithStore(t *testing.T) { - t.Parallel() - - now := time.Unix(1_775_240_500, 0).UTC() - store := newRedisStore(t) - userID := common.UserID("user-123") - accountRecord := validAccountRecord() - - require.NoError(t, store.Accounts().Create(context.Background(), ports.CreateAccountInput{ - Account: accountRecord, - })) - - expiredEndsAt := now.Add(-time.Minute) - require.NoError(t, store.EntitlementSnapshots().Put(context.Background(), entitlement.CurrentSnapshot{ - UserID: userID, - PlanCode: entitlement.PlanCodePaidMonthly, - IsPaid: true, - StartsAt: now.Add(-30 * 24 * time.Hour), - EndsAt: timePointer(expiredEndsAt), - Source: common.Source("billing"), - Actor: common.ActorRef{Type: common.ActorType("billing"), ID: common.ActorID("invoice-1")}, - ReasonCode: common.ReasonCode("renewal"), - UpdatedAt: now.Add(-2 * time.Hour), - })) - - entitlementReader, err := entitlementsvc.NewReader( - store.EntitlementSnapshots(), - store.EntitlementLifecycle(), - fixedClock{now: now}, - fixedIDGenerator{entitlementRecordID: entitlement.EntitlementRecordID("entitlement-expiry-repair")}, 
- ) - require.NoError(t, err) - - service, err := NewSnapshotReader( - store.Accounts(), - entitlementReader, - store.Sanctions(), - store.Limits(), - fixedClock{now: now}, - ) - require.NoError(t, err) - - result, err := service.Execute(context.Background(), GetUserEligibilityInput{UserID: userID.String()}) - require.NoError(t, err) - require.True(t, result.Exists) - require.NotNil(t, result.Entitlement) - require.Equal(t, "free", result.Entitlement.PlanCode) - require.False(t, result.Entitlement.IsPaid) - require.Equal(t, expiredEndsAt, result.Entitlement.StartsAt) - require.Equal(t, []EffectiveLimitView{ - {LimitCode: "max_pending_public_applications", Value: 3}, - {LimitCode: "max_active_game_memberships", Value: 3}, - {LimitCode: "max_registered_race_names", Value: 1}, - }, result.EffectiveLimits) - - storedSnapshot, err := store.EntitlementSnapshots().GetByUserID(context.Background(), userID) - require.NoError(t, err) - require.Equal(t, entitlement.PlanCodeFree, storedSnapshot.PlanCode) - require.False(t, storedSnapshot.IsPaid) -} +// The expired-snapshot repair is exercised end-to-end through the +// runtime-contract test (`runtime_contract_test.go`), which boots a real +// PostgreSQL container and the full runtime. The original miniredis-based +// version of this test was removed in PG_PLAN.md §3 because the +// adapter-level RepairExpired path no longer exists in this package; the +// in-memory fake stores below cover the service-layer logic for every other +// scenario in the file. 
+var _ = entitlement.EntitlementRecordID("") type fakeAccountStore struct { existsByUserID map[common.UserID]bool @@ -553,24 +498,6 @@ func validAccountRecord() account.UserAccount { } } -func newRedisStore(t *testing.T) *userstore.Store { - t.Helper() - - server := miniredis.RunT(t) - store, err := userstore.New(userstore.Config{ - Addr: server.Addr(), - DB: 0, - KeyspacePrefix: "user:test:", - OperationTimeout: 250 * time.Millisecond, - }) - require.NoError(t, err) - t.Cleanup(func() { - _ = store.Close() - }) - - return store -} - func timePointer(value time.Time) *time.Time { utcValue := value.UTC() return &utcValue diff --git a/user/runtime_contract_test.go b/user/runtime_contract_test.go index 0ed74f8..9e22e06 100644 --- a/user/runtime_contract_test.go +++ b/user/runtime_contract_test.go @@ -5,19 +5,25 @@ import ( "context" "encoding/json" "errors" + "fmt" "io" "log/slog" "net" "net/http" + "net/url" "strings" "testing" "time" + "galaxy/postgres" "galaxy/user/internal/app" "galaxy/user/internal/config" "github.com/alicebob/miniredis/v2" "github.com/stretchr/testify/require" + testcontainers "github.com/testcontainers/testcontainers-go" + tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres" + "github.com/testcontainers/testcontainers-go/wait" ) type runtimeContractHarness struct { @@ -34,9 +40,14 @@ func newRuntimeContractHarness(t *testing.T) *runtimeContractHarness { t.Helper() redisServer := miniredis.RunT(t) + redisServer.RequireAuth("integration") + + pgDSN := startPostgresForContractTest(t) cfg := config.DefaultConfig() - cfg.Redis.Addr = redisServer.Addr() + cfg.Redis.Conn.MasterAddr = redisServer.Addr() + cfg.Redis.Conn.Password = "integration" + cfg.Postgres.Conn.PrimaryDSN = pgDSN cfg.InternalHTTP.Addr = freeLoopbackAddress(t) cfg.AdminHTTP.Addr = "" cfg.ShutdownTimeout = 10 * time.Second @@ -841,3 +852,72 @@ func TestEligibilityUnknownMarkersZeroValueMatchesContract(t *testing.T) { require.Equal(t, eligibilityMarkers{}, 
eligibilityMarkers{}) require.False(t, strings.HasPrefix("", "user-")) } + +// startPostgresForContractTest boots one isolated PostgreSQL container, +// provisions the user schema with the userservice role, and returns a DSN +// pinned to search_path=user. The test is skipped (not failed) when a +// container cannot be started — typically because Docker is unavailable in +// the dev environment. +func startPostgresForContractTest(t *testing.T) string { + t.Helper() + + ctx := context.Background() + container, err := tcpostgres.Run(ctx, + "postgres:16-alpine", + tcpostgres.WithDatabase("galaxy_user"), + tcpostgres.WithUsername("galaxy"), + tcpostgres.WithPassword("galaxy"), + testcontainers.WithWaitStrategy( + wait.ForLog("database system is ready to accept connections"). + WithOccurrence(2). + WithStartupTimeout(60*time.Second), + ), + ) + if err != nil { + t.Skipf("postgres container start failed (Docker likely unavailable): %v", err) + } + t.Cleanup(func() { + if err := testcontainers.TerminateContainer(container); err != nil { + t.Errorf("terminate postgres container: %v", err) + } + }) + + baseDSN, err := container.ConnectionString(ctx, "sslmode=disable") + require.NoError(t, err) + + cfg := postgres.DefaultConfig() + cfg.PrimaryDSN = baseDSN + cfg.OperationTimeout = 5 * time.Second + db, err := postgres.OpenPrimary(ctx, cfg) + require.NoError(t, err) + defer func() { _ = db.Close() }() + + for _, statement := range []string{ + `CREATE ROLE userservice LOGIN PASSWORD 'userservice'`, + `CREATE SCHEMA IF NOT EXISTS "user" AUTHORIZATION userservice`, + `GRANT USAGE ON SCHEMA "user" TO userservice`, + } { + if _, err := db.ExecContext(ctx, statement); err != nil { + require.NoError(t, err, "provision postgres role/schema: %s", statement) + } + } + + parsed, err := url.Parse(baseDSN) + require.NoError(t, err) + + values := url.Values{} + values.Set("search_path", "user") + values.Set("sslmode", "disable") + scoped := url.URL{ + Scheme: parsed.Scheme, + User: 
url.UserPassword("userservice", "userservice"), + Host: parsed.Host, + Path: parsed.Path, + RawQuery: values.Encode(), + } + return scoped.String() +} + +// errSentinel is a small unused alias kept to silence imports above when +// non-default builds drop testcontainers references. +var errSentinel = fmt.Errorf("contract test sentinel")