feat: runtime manager
This commit is contained in:
@@ -0,0 +1,139 @@
|
||||
package harness
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// EngineImageRef is the canonical tag the lobbyrtm boundary suite (and
// any future suite that needs the galaxy/game engine binary) builds and
// runs against. The `-lobbyrtm-it` suffix differs from the
// `-rtm-it` tag the service-local rtmanager/integration harness uses, so
// an operator running both suites locally cannot accidentally consume
// the wrong image, and `docker image rm` of one suite's leftovers does
// not remove the other suite's tag.
const EngineImageRef = "galaxy/game:1.0.0-lobbyrtm-it"
|
||||
|
||||
const (
	// imageBuildTimeout bounds the `docker build` of the engine image
	// (see buildEngineImage); the build context is the whole workspace,
	// so allow generously more time than the daemon ping below.
	imageBuildTimeout = 10 * time.Minute
	// dockerDaemonPingTimeout bounds the `docker version` probe used to
	// decide whether to skip Docker-dependent suites (see pingDockerDaemon).
	dockerDaemonPingTimeout = 5 * time.Second
)
|
||||
|
||||
var (
	// engineImageOnce makes the engine-image build run at most once per
	// test process; engineImageErr caches the build's verdict so every
	// subsequent EnsureGalaxyGameImage caller sees the same result.
	engineImageOnce sync.Once
	engineImageErr  error

	// dockerAvailableOnce makes the daemon reachability probe run at most
	// once per test process; dockerAvailableErr caches its verdict so
	// later callers skip without paying the ping cost again.
	dockerAvailableOnce sync.Once
	dockerAvailableErr  error
)
|
||||
|
||||
// RequireDockerDaemon skips the calling test when no Docker daemon is
// reachable from this process. Suites that need Docker but stand up
// testcontainers (Postgres/Redis) before any RTM-specific helper
// should call this helper first so the skip path runs *before* the
// testcontainer client probes the daemon and fails hard.
//
// The underlying probe runs once per process and its verdict is cached
// (see requireDockerDaemon), so calling this from many tests is cheap.
func RequireDockerDaemon(t testing.TB) {
	t.Helper()
	requireDockerDaemon(t)
}
|
||||
|
||||
// EnsureGalaxyGameImage builds the galaxy/game engine image from the
|
||||
// workspace root once per test process and returns the canonical tag.
|
||||
// On hosts without a reachable Docker daemon the helper calls `t.Skip`
|
||||
// so suites stay green when `/var/run/docker.sock` is missing and
|
||||
// `DOCKER_HOST` is unset.
|
||||
//
|
||||
// The build is wrapped in `sync.Once`; concurrent suite invocations
|
||||
// share the same image. The Dockerfile path and build context match
|
||||
// `rtmanager/integration/harness/docker.go::buildAndTagEngineImage` —
|
||||
// galaxy's `go.work` resolves `galaxy/{model,error,...}` only when the
|
||||
// workspace root is the build context.
|
||||
func EnsureGalaxyGameImage(t testing.TB) string {
|
||||
t.Helper()
|
||||
requireDockerDaemon(t)
|
||||
|
||||
engineImageOnce.Do(func() {
|
||||
engineImageErr = buildEngineImage()
|
||||
})
|
||||
if engineImageErr != nil {
|
||||
t.Fatalf("integration harness: build galaxy/game image: %v", engineImageErr)
|
||||
}
|
||||
return EngineImageRef
|
||||
}
|
||||
|
||||
func buildEngineImage() error {
|
||||
root, err := workspaceRoot()
|
||||
if err != nil {
|
||||
return fmt.Errorf("resolve workspace root: %w", err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), imageBuildTimeout)
|
||||
defer cancel()
|
||||
|
||||
dockerfilePath := filepath.Join("game", "Dockerfile")
|
||||
cmd := exec.CommandContext(ctx, "docker", "build",
|
||||
"-f", dockerfilePath,
|
||||
"-t", EngineImageRef,
|
||||
".",
|
||||
)
|
||||
cmd.Dir = root
|
||||
cmd.Env = append(os.Environ(), "DOCKER_BUILDKIT=1")
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("docker build (-f %s) in %s: %w; output:\n%s",
|
||||
dockerfilePath, root, err, strings.TrimSpace(string(output)))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// requireDockerDaemon skips the calling test when no Docker daemon is
|
||||
// reachable from this process. The check runs once per process and
|
||||
// caches the verdict so successive callers do not pay the ping cost.
|
||||
func requireDockerDaemon(t testing.TB) {
|
||||
t.Helper()
|
||||
dockerAvailableOnce.Do(func() {
|
||||
dockerAvailableErr = pingDockerDaemon()
|
||||
})
|
||||
if dockerAvailableErr != nil {
|
||||
t.Skipf("integration harness: docker daemon unavailable: %v", dockerAvailableErr)
|
||||
}
|
||||
}
|
||||
|
||||
func pingDockerDaemon() error {
|
||||
if os.Getenv("DOCKER_HOST") == "" {
|
||||
if _, err := os.Stat("/var/run/docker.sock"); err != nil {
|
||||
return fmt.Errorf("set DOCKER_HOST or expose /var/run/docker.sock: %w", err)
|
||||
}
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), dockerDaemonPingTimeout)
|
||||
defer cancel()
|
||||
cmd := exec.CommandContext(ctx, "docker", "version", "--format", "{{.Server.Version}}")
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("docker version: %w; output:\n%s", err, strings.TrimSpace(string(output)))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// workspaceRoot resolves the absolute path of the galaxy/ workspace
// root by anchoring on this file's location. The harness lives at
// `galaxy/integration/internal/harness/engineimage.go`; the workspace
// root is three directories up.
func workspaceRoot() (string, error) {
	_, thisFile, _, ok := runtime.Caller(0)
	if !ok {
		return "", errors.New("resolve runtime caller for workspace root")
	}
	// Three levels up from .../integration/internal/harness/.
	return filepath.Clean(filepath.Join(filepath.Dir(thisFile), "..", "..", "..")), nil
}
|
||||
Reference in New Issue
Block a user