docs: reorder & testing

This commit is contained in:
Ilia Denisov
2026-05-07 00:58:53 +03:00
committed by GitHub
parent f446c6a2ac
commit 604fe40bcf
148 changed files with 9150 additions and 2757 deletions
+6 -44
View File
@@ -1,50 +1,12 @@
package session
import (
"context"
"errors"
"fmt"
)
import "context"
// BackendLookup is the slice of backend's REST surface that the
// session-cache layer depends on. The narrow interface keeps this
// package free of any backendclient import. The canonical
// implementation is `*backendclient.RESTClient`; tests can supply a
// fake.
type BackendLookup interface {
	// LookupSession resolves deviceSessionID to its Record. An
	// unknown session is reported with an error satisfying
	// errors.Is(err, ErrNotFound).
	LookupSession(ctx context.Context, deviceSessionID string) (Record, error)
}
// BackendCache resolves authenticated device sessions by issuing one
// synchronous REST call to backend per request. It replaces the
// previous Redis-backed projection with this thin wrapper; gateway no
// longer keeps a process-local snapshot. See ARCHITECTURE.md §11
// «backend (sync REST), no Redis projection».
type BackendCache struct {
	// backend performs the per-request REST lookup; never nil after
	// NewBackendCache (the constructor rejects a nil lookup).
	backend BackendLookup
}
// NewBackendCache constructs a Cache that delegates every Lookup to
// backend over REST. backend must not be nil.
func NewBackendCache(backend BackendLookup) (*BackendCache, error) {
if backend == nil {
return nil, errors.New("session.NewBackendCache: backend lookup must not be nil")
}
return &BackendCache{backend: backend}, nil
}
// Lookup resolves deviceSessionID via backend. All errors — including
// ErrNotFound — are wrapped with a "session backend cache" prefix via
// %w, so callers must test with errors.Is(err, ErrNotFound); a plain
// equality check (err == ErrNotFound) will NOT match the wrapped error.
func (c *BackendCache) Lookup(ctx context.Context, deviceSessionID string) (Record, error) {
	// Defensive guards: a nil receiver or nil lookup indicates a
	// construction bug; fail loudly rather than panic on the hot path.
	if c == nil {
		return Record{}, errors.New("session backend cache: nil cache")
	}
	if c.backend == nil {
		return Record{}, errors.New("session backend cache: nil backend lookup")
	}
	rec, err := c.backend.LookupSession(ctx, deviceSessionID)
	if err != nil {
		return Record{}, fmt.Errorf("session backend cache: %w", err)
	}
	return rec, nil
}

// Compile-time check that *BackendCache satisfies Cache.
var _ Cache = (*BackendCache)(nil)
+238
View File
@@ -0,0 +1,238 @@
package session
import (
"container/list"
"context"
"errors"
"fmt"
"sync"
"time"
"go.uber.org/zap"
)
// DefaultMaxEntries is the LRU bound applied when MemoryCacheOptions
// does not supply a positive MaxEntries. Holds well below the per-process
// memory budget for the documented MVP scale (≤10K active accounts,
// ≤100K device sessions).
const DefaultMaxEntries = 50_000
// DefaultTTL is the safety-net freshness window applied when
// MemoryCacheOptions does not supply a positive TTL. Push events drive
// invalidation in the steady state; the TTL guards against missed
// events (cursor aged out, gateway restart) by forcing a fresh backend
// lookup at most once per window.
const DefaultTTL = 10 * time.Minute
// MemoryCache is the canonical Cache implementation. Hot-path Lookup
// reads serve from a process-local LRU + TTL map; misses delegate to
// BackendLookup and seed the cache. session_invalidation push events
// flip cached records to a revoked status without a backend
// roundtrip, after which Lookup returns the revoked record straight
// from memory and gateway rejects the request.
//
// MemoryCache is safe for concurrent use.
type MemoryCache struct {
	// mu guards every mutable field below; all public methods take it.
	mu sync.Mutex
	// entries maps device_session_id to its element in order; the
	// element's Value is a *memoryEntry.
	entries map[string]*list.Element
	// byUser maps user_id to the set of that user's cached session
	// ids, so MarkAllRevokedForUser avoids a full-cache scan.
	byUser map[string]map[string]struct{}
	// order is the LRU list: front = most recently used, back = next
	// eviction candidate.
	order *list.List
	// max is the entry bound; ttl the freshness window. Both are
	// defaulted by NewMemoryCache when the options leave them <= 0.
	max int
	ttl time.Duration
	// backend serves cache misses; never nil after NewMemoryCache.
	backend BackendLookup
	// now is time.Now unless overridden via MemoryCacheOptions.Now.
	now func() time.Time
	// logger is never nil (defaults to zap.NewNop, named "session.cache").
	logger *zap.Logger
}
// memoryEntry is the value stored inside the LRU list. The key
// duplication keeps Element.Value self-describing for eviction:
// evictLocked can delete entries[key] without a reverse lookup.
type memoryEntry struct {
	key       string    // device_session_id this entry is stored under
	record    Record    // cached state; Status may be flipped in place by the Mark* methods
	expiresAt time.Time // absolute deadline after which Lookup refetches from backend
}
// MemoryCacheOptions tunes the cache. The zero value is usable: every
// field falls back to a documented default inside NewMemoryCache.
type MemoryCacheOptions struct {
	// MaxEntries bounds the number of cached records. Zero or
	// negative values default to DefaultMaxEntries.
	MaxEntries int
	// TTL bounds how long a cached entry serves the hot path before
	// a fresh backend lookup. Zero or negative values default to
	// DefaultTTL.
	TTL time.Duration
	// Now overrides time.Now for tests.
	Now func() time.Time
	// Logger is named "session.cache". A nil value uses zap.NewNop.
	Logger *zap.Logger
}
// NewMemoryCache constructs a MemoryCache. backend must not be nil.
// Non-positive or nil options fall back to the package defaults.
func NewMemoryCache(backend BackendLookup, opts MemoryCacheOptions) (*MemoryCache, error) {
	if backend == nil {
		return nil, errors.New("session.NewMemoryCache: backend lookup must not be nil")
	}

	// Normalize options; maxEntries deliberately avoids shadowing the
	// built-in max.
	maxEntries, ttl := opts.MaxEntries, opts.TTL
	if maxEntries <= 0 {
		maxEntries = DefaultMaxEntries
	}
	if ttl <= 0 {
		ttl = DefaultTTL
	}
	clock := opts.Now
	if clock == nil {
		clock = time.Now
	}
	log := opts.Logger
	if log == nil {
		log = zap.NewNop()
	}

	return &MemoryCache{
		entries: make(map[string]*list.Element, maxEntries),
		byUser:  make(map[string]map[string]struct{}),
		order:   list.New(),
		max:     maxEntries,
		ttl:     ttl,
		backend: backend,
		now:     clock,
		logger:  log.Named("session.cache"),
	}, nil
}
// Lookup serves deviceSessionID from the cache. A miss (or an entry
// past its TTL) triggers a backend lookup and seeds the cache before
// returning. Concurrent Lookups for the same key are not coalesced —
// that level of optimisation is not needed at the documented MVP
// scale.
func (c *MemoryCache) Lookup(ctx context.Context, deviceSessionID string) (Record, error) {
	if c == nil {
		return Record{}, errors.New("session memory cache: nil cache")
	}
	if deviceSessionID == "" {
		return Record{}, ErrNotFound
	}
	c.mu.Lock()
	if elem, ok := c.entries[deviceSessionID]; ok {
		entry := elem.Value.(*memoryEntry)
		if entry.expiresAt.After(c.now()) {
			c.order.MoveToFront(elem)
			rec := entry.record
			c.mu.Unlock()
			return rec, nil
		}
		// Expired — evict and fall through to backend.
		c.evictLocked(elem)
	}
	c.mu.Unlock()

	// NOTE(review): a MarkRevoked/MarkAllRevokedForUser arriving
	// between the unlock above and the insert below is overwritten by
	// the fresh record, so the push event is only honoured again at
	// the next TTL expiry — confirm this window is acceptable for the
	// invalidation SLO.
	rec, err := c.backend.LookupSession(ctx, deviceSessionID)
	if err != nil {
		return Record{}, fmt.Errorf("session memory cache: %w", err)
	}
	c.mu.Lock()
	// Stamp the TTL from the post-fetch clock so a slow backend
	// roundtrip does not silently shorten the entry's lifetime
	// (previously the pre-fetch timestamp was used).
	c.insertLocked(deviceSessionID, rec, c.now().Add(c.ttl))
	c.mu.Unlock()
	return rec, nil
}
// MarkRevoked flips the cached record for deviceSessionID to a
// revoked status. Calling on a missing entry is a no-op.
func (c *MemoryCache) MarkRevoked(deviceSessionID string) {
	if c == nil || deviceSessionID == "" {
		return
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	// Flip in place; the entry keeps its LRU position and TTL.
	if elem, ok := c.entries[deviceSessionID]; ok {
		elem.Value.(*memoryEntry).record.Status = StatusRevoked
	}
}
// MarkAllRevokedForUser flips every cached record whose UserID is
// userID to revoked. The user index bounds the work to the user's
// session set rather than the whole cache.
func (c *MemoryCache) MarkAllRevokedForUser(userID string) {
	if c == nil || userID == "" {
		return
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	// Ranging over a missing (nil) set is a harmless zero-iteration loop.
	for sid := range c.byUser[userID] {
		elem, ok := c.entries[sid]
		if !ok {
			continue
		}
		elem.Value.(*memoryEntry).record.Status = StatusRevoked
	}
}
// Len returns the current number of cached entries. Useful for
// metrics and tests.
func (c *MemoryCache) Len() int {
	if c == nil {
		return 0
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	// The LRU list length equals the entry count by construction.
	count := c.order.Len()
	return count
}
// insertLocked stores rec under deviceSessionID, refreshing an
// existing entry in place or pushing a new one (evicting the LRU tail
// when over capacity). The caller holds c.mu.
func (c *MemoryCache) insertLocked(deviceSessionID string, rec Record, expiresAt time.Time) {
	if existing, ok := c.entries[deviceSessionID]; ok {
		entry := existing.Value.(*memoryEntry)
		// If the record's owner changed, drop the stale byUser
		// association first: otherwise MarkAllRevokedForUser on the
		// previous user would wrongly revoke a session it no longer
		// owns, and the stale set entry would leak.
		if prev := entry.record.UserID; prev != "" && prev != rec.UserID {
			if set := c.byUser[prev]; set != nil {
				delete(set, deviceSessionID)
				if len(set) == 0 {
					delete(c.byUser, prev)
				}
			}
		}
		entry.record = rec
		entry.expiresAt = expiresAt
		c.order.MoveToFront(existing)
		c.indexUserLocked(deviceSessionID, rec.UserID)
		return
	}
	elem := c.order.PushFront(&memoryEntry{
		key:       deviceSessionID,
		record:    rec,
		expiresAt: expiresAt,
	})
	c.entries[deviceSessionID] = elem
	c.indexUserLocked(deviceSessionID, rec.UserID)
	// Enforce the LRU bound after the insert so the new entry itself
	// is never the eviction victim.
	if c.order.Len() > c.max {
		if oldest := c.order.Back(); oldest != nil {
			c.evictLocked(oldest)
		}
	}
}
// evictLocked removes elem from every internal index (LRU list, entry
// map, and the owner's byUser set). The caller holds c.mu.
func (c *MemoryCache) evictLocked(elem *list.Element) {
	entry := elem.Value.(*memoryEntry)
	c.order.Remove(elem)
	delete(c.entries, entry.key)
	set, ok := c.byUser[entry.record.UserID]
	if !ok {
		return
	}
	delete(set, entry.key)
	// Drop empty per-user sets so byUser does not accumulate garbage.
	if len(set) == 0 {
		delete(c.byUser, entry.record.UserID)
	}
}
// indexUserLocked associates deviceSessionID with userID in byUser.
// An empty userID is not indexed. The caller holds c.mu.
func (c *MemoryCache) indexUserLocked(deviceSessionID, userID string) {
	if userID == "" {
		return
	}
	// Lazily create the per-user set on first association.
	if c.byUser[userID] == nil {
		c.byUser[userID] = make(map[string]struct{})
	}
	c.byUser[userID][deviceSessionID] = struct{}{}
}

// Compile-time check that *MemoryCache satisfies Cache.
var _ Cache = (*MemoryCache)(nil)
+204
View File
@@ -0,0 +1,204 @@
package session_test
import (
"context"
"errors"
"sync"
"sync/atomic"
"testing"
"time"
"galaxy/gateway/internal/session"
)
// stubLookup is the BackendLookup test fake. hits counts every
// LookupSession call; records is the canonical source of truth keyed
// by device_session_id.
type stubLookup struct {
	mu       sync.Mutex                // guards records and notFound
	records  map[string]session.Record // canonical records keyed by device_session_id
	hits     atomic.Int64              // incremented on every LookupSession call, even when notFound
	notFound bool                      // when true, every lookup returns session.ErrNotFound
}
// newStubLookup returns an empty fake ready for put/LookupSession.
func newStubLookup() *stubLookup {
	return &stubLookup{records: map[string]session.Record{}}
}
// put registers rec under its DeviceSessionID, replacing any previous
// record for the same id.
func (s *stubLookup) put(rec session.Record) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.records[rec.DeviceSessionID] = rec
}
// LookupSession implements session.BackendLookup. Every call counts
// as a hit, including those answered with ErrNotFound.
func (s *stubLookup) LookupSession(_ context.Context, deviceSessionID string) (session.Record, error) {
	s.hits.Add(1)
	s.mu.Lock()
	defer s.mu.Unlock()
	if !s.notFound {
		if rec, ok := s.records[deviceSessionID]; ok {
			return rec, nil
		}
	}
	return session.Record{}, session.ErrNotFound
}
// A second lookup for the same id must be answered from memory, not
// by another backend call.
func TestMemoryCacheLookupHitsCacheAfterFirstFetch(t *testing.T) {
	backend := newStubLookup()
	backend.put(session.Record{DeviceSessionID: "a", UserID: "u1", Status: session.StatusActive})
	cache, err := session.NewMemoryCache(backend, session.MemoryCacheOptions{
		MaxEntries: 10,
		TTL:        time.Hour,
	})
	if err != nil {
		t.Fatalf("NewMemoryCache: %v", err)
	}
	ctx := context.Background()
	if _, err := cache.Lookup(ctx, "a"); err != nil {
		t.Fatalf("first lookup: %v", err)
	}
	if _, err := cache.Lookup(ctx, "a"); err != nil {
		t.Fatalf("second lookup: %v", err)
	}
	if got := backend.hits.Load(); got != 1 {
		t.Fatalf("backend hits = %d, want 1 (cache should serve the second call)", got)
	}
}
// Advancing a manual clock past the TTL must force a second backend
// fetch even though the entry is still in the LRU.
func TestMemoryCacheLookupRefreshesOnTTLExpiry(t *testing.T) {
	backend := newStubLookup()
	backend.put(session.Record{DeviceSessionID: "a", UserID: "u1", Status: session.StatusActive})
	current := time.Unix(1_000_000, 0)
	cache, err := session.NewMemoryCache(backend, session.MemoryCacheOptions{
		MaxEntries: 10,
		TTL:        100 * time.Millisecond,
		Now:        func() time.Time { return current },
	})
	if err != nil {
		t.Fatalf("NewMemoryCache: %v", err)
	}
	ctx := context.Background()
	if _, err := cache.Lookup(ctx, "a"); err != nil {
		t.Fatalf("first lookup: %v", err)
	}
	// Jump well past the 100ms TTL.
	current = current.Add(200 * time.Millisecond)
	if _, err := cache.Lookup(ctx, "a"); err != nil {
		t.Fatalf("post-TTL lookup: %v", err)
	}
	if got := backend.hits.Load(); got != 2 {
		t.Fatalf("backend hits = %d, want 2 (TTL expiry should refetch)", got)
	}
}
// MarkRevoked must flip the cached record in place without touching
// the backend.
func TestMemoryCacheMarkRevokedFlipsCachedRecord(t *testing.T) {
	backend := newStubLookup()
	backend.put(session.Record{DeviceSessionID: "a", UserID: "u1", Status: session.StatusActive})
	cache, err := session.NewMemoryCache(backend, session.MemoryCacheOptions{MaxEntries: 10, TTL: time.Hour})
	if err != nil {
		t.Fatalf("NewMemoryCache: %v", err)
	}
	ctx := context.Background()
	if _, err := cache.Lookup(ctx, "a"); err != nil {
		t.Fatalf("first lookup: %v", err)
	}
	cache.MarkRevoked("a")
	rec, err := cache.Lookup(ctx, "a")
	if err != nil {
		t.Fatalf("post-revoke lookup: %v", err)
	}
	if rec.Status != session.StatusRevoked {
		t.Fatalf("status = %q, want %q", rec.Status, session.StatusRevoked)
	}
	if got := backend.hits.Load(); got != 1 {
		t.Fatalf("backend hits = %d, want 1 (MarkRevoked must not refetch)", got)
	}
}
// MarkAllRevokedForUser must flip every session of the target user
// and leave other users' sessions untouched.
func TestMemoryCacheMarkAllRevokedForUserFlipsAllSessions(t *testing.T) {
	backend := newStubLookup()
	for _, rec := range []session.Record{
		{DeviceSessionID: "a", UserID: "u1", Status: session.StatusActive},
		{DeviceSessionID: "b", UserID: "u1", Status: session.StatusActive},
		{DeviceSessionID: "c", UserID: "u2", Status: session.StatusActive},
	} {
		backend.put(rec)
	}
	cache, err := session.NewMemoryCache(backend, session.MemoryCacheOptions{MaxEntries: 10, TTL: time.Hour})
	if err != nil {
		t.Fatalf("NewMemoryCache: %v", err)
	}
	ctx := context.Background()
	for _, sid := range []string{"a", "b", "c"} {
		if _, err := cache.Lookup(ctx, sid); err != nil {
			t.Fatalf("seed %s: %v", sid, err)
		}
	}
	cache.MarkAllRevokedForUser("u1")
	// Both of u1's sessions flip…
	for _, sid := range []string{"a", "b"} {
		rec, err := cache.Lookup(ctx, sid)
		if err != nil {
			t.Fatalf("post-revoke lookup %s: %v", sid, err)
		}
		if rec.Status != session.StatusRevoked {
			t.Fatalf("session %s status = %q, want revoked", sid, rec.Status)
		}
	}
	// …while u2's stays active.
	rec, err := cache.Lookup(ctx, "c")
	if err != nil {
		t.Fatalf("post-revoke lookup c: %v", err)
	}
	if rec.Status != session.StatusActive {
		t.Fatalf("session c status = %q, want active (other user)", rec.Status)
	}
}
// Seeding a third entry into a two-slot cache must evict the least
// recently used one; re-looking it up must hit the backend again.
func TestMemoryCacheLRUEvictsLeastRecentlyUsed(t *testing.T) {
	backend := newStubLookup()
	for _, rec := range []session.Record{
		{DeviceSessionID: "a", UserID: "u1", Status: session.StatusActive},
		{DeviceSessionID: "b", UserID: "u2", Status: session.StatusActive},
		{DeviceSessionID: "c", UserID: "u3", Status: session.StatusActive},
	} {
		backend.put(rec)
	}
	cache, err := session.NewMemoryCache(backend, session.MemoryCacheOptions{MaxEntries: 2, TTL: time.Hour})
	if err != nil {
		t.Fatalf("NewMemoryCache: %v", err)
	}
	ctx := context.Background()
	if _, err := cache.Lookup(ctx, "a"); err != nil {
		t.Fatalf("seed a: %v", err)
	}
	if _, err := cache.Lookup(ctx, "b"); err != nil {
		t.Fatalf("seed b: %v", err)
	}
	if _, err := cache.Lookup(ctx, "c"); err != nil {
		t.Fatalf("seed c: %v", err)
	}
	if got := cache.Len(); got != 2 {
		t.Fatalf("Len = %d, want 2", got)
	}
	before := backend.hits.Load()
	if _, err := cache.Lookup(ctx, "a"); err != nil {
		t.Fatalf("re-lookup a: %v", err)
	}
	if got := backend.hits.Load(); got != before+1 {
		t.Fatalf("backend hits = %d, want +1 (a was evicted)", got-before)
	}
}
// ErrNotFound from the backend must survive the cache's %w wrapping
// so callers can detect it with errors.Is.
func TestMemoryCachePropagatesBackendNotFound(t *testing.T) {
	backend := newStubLookup()
	backend.notFound = true
	cache, err := session.NewMemoryCache(backend, session.MemoryCacheOptions{MaxEntries: 4, TTL: time.Hour})
	if err != nil {
		t.Fatalf("NewMemoryCache: %v", err)
	}
	if _, err := cache.Lookup(context.Background(), "missing"); !errors.Is(err, session.ErrNotFound) {
		t.Fatalf("Lookup error = %v, want ErrNotFound", err)
	}
}
+19 -3
View File
@@ -14,13 +14,29 @@ var (
)
// Cache resolves authenticated device-session state from the gateway
// hot path. The canonical implementation is *MemoryCache: a
// process-local LRU + TTL store that falls back to backend's
// `/api/v1/internal/sessions/{id}` on miss and listens for
// `session_invalidation` push events from backend so revoked sessions
// are reflected immediately without a fresh backend lookup.
//
// The Mark* methods are called by the push dispatcher. They flip
// cached entries to revoked status; subsequent Lookups serve the
// revoked record directly so authenticated traffic on those sessions
// is rejected at the edge before reaching backend.
type Cache interface {
	// Lookup returns the cached record for deviceSessionID. Implementations must
	// wrap ErrNotFound when the cache does not contain the requested record.
	Lookup(ctx context.Context, deviceSessionID string) (Record, error)

	// MarkRevoked flips the cached record for deviceSessionID to a
	// revoked status. Calling on a missing entry is a no-op.
	MarkRevoked(deviceSessionID string)

	// MarkAllRevokedForUser flips every cached record belonging to
	// userID to a revoked status. Calling on a user with no cached
	// sessions is a no-op.
	MarkAllRevokedForUser(userID string)
}
// Status identifies the cached lifecycle state of a device session.