feat: runtime manager
@@ -0,0 +1,85 @@
package app

import (
	"context"
	"errors"
	"fmt"
	"time"

	"galaxy/redisconn"
	"galaxy/rtmanager/internal/config"
	"galaxy/rtmanager/internal/telemetry"

	"github.com/docker/docker/client"
	"github.com/redis/go-redis/v9"
)

// newRedisClient builds the master Redis client from cfg via the shared
// `pkg/redisconn` helper. Replica clients are not opened in this iteration
// per ARCHITECTURE.md §Persistence Backends; they will be wired when read
// routing is introduced.
func newRedisClient(cfg config.RedisConfig) *redis.Client {
	return redisconn.NewMasterClient(cfg.Conn)
}

// instrumentRedisClient attaches the OpenTelemetry tracing and metrics
// instrumentation to redisClient when telemetryRuntime is available. The
// actual instrumentation lives in `pkg/redisconn` so every Galaxy service
// shares one surface.
func instrumentRedisClient(redisClient *redis.Client, telemetryRuntime *telemetry.Runtime) error {
	if redisClient == nil {
		return errors.New("instrument redis client: nil client")
	}
	if telemetryRuntime == nil {
		return nil
	}
	return redisconn.Instrument(redisClient,
		redisconn.WithTracerProvider(telemetryRuntime.TracerProvider()),
		redisconn.WithMeterProvider(telemetryRuntime.MeterProvider()),
	)
}

// pingRedis performs a single Redis PING bounded by
// cfg.Conn.OperationTimeout to confirm that the configured Redis endpoint
// is reachable at startup.
func pingRedis(ctx context.Context, cfg config.RedisConfig, redisClient *redis.Client) error {
	return redisconn.Ping(ctx, redisClient, cfg.Conn.OperationTimeout)
}
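
// Illustrative composition (not part of this commit): the bootstrap layer is
// assumed to wire the three Redis helpers roughly as sketched below. Names
// such as cfg.Redis, telemetryRuntime, and ctx are placeholders for values
// the caller already holds, not identifiers defined here.
//
//	redisClient := newRedisClient(cfg.Redis)
//	if err := instrumentRedisClient(redisClient, telemetryRuntime); err != nil {
//		return fmt.Errorf("bootstrap redis: %w", err)
//	}
//	if err := pingRedis(ctx, cfg.Redis, redisClient); err != nil {
//		return fmt.Errorf("bootstrap redis: %w", err)
//	}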

// newDockerClient constructs a Docker SDK client for cfg.Host with an
// optional API version override. The bootstrap layer opens and pings
// the client; the production Docker adapter wraps it for the service
// layer.
func newDockerClient(cfg config.DockerConfig) (*client.Client, error) {
	options := []client.Opt{client.WithHost(cfg.Host)}
	if cfg.APIVersion == "" {
		options = append(options, client.WithAPIVersionNegotiation())
	} else {
		options = append(options, client.WithVersion(cfg.APIVersion))
	}

	docker, err := client.NewClientWithOpts(options...)
	if err != nil {
		return nil, fmt.Errorf("new docker client: %w", err)
	}
	return docker, nil
}

// pingDocker bounds one Docker daemon ping under timeout and returns a
// wrapped error so startup failures are easy to spot in service logs.
func pingDocker(ctx context.Context, dockerClient *client.Client, timeout time.Duration) error {
	if dockerClient == nil {
		return errors.New("ping docker: nil client")
	}
	if timeout <= 0 {
		return errors.New("ping docker: timeout must be positive")
	}

	pingCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	if _, err := dockerClient.Ping(pingCtx); err != nil {
		return fmt.Errorf("ping docker: %w", err)
	}
	return nil
}
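
// Illustrative composition (not part of this commit): the Docker helpers are
// assumed to be wired during bootstrap in the same shape as the Redis ones.
// cfg.Docker and its PingTimeout field are placeholders; the actual config
// fields live in galaxy/rtmanager/internal/config.
//
//	dockerClient, err := newDockerClient(cfg.Docker)
//	if err != nil {
//		return fmt.Errorf("bootstrap docker: %w", err)
//	}
//	if err := pingDocker(ctx, dockerClient, cfg.Docker.PingTimeout); err != nil {
//		return fmt.Errorf("bootstrap docker: %w", err)
//	}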