package harness

import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
	"sync"
	"testing"
	"time"

	cerrdefs "github.com/containerd/errdefs"
	"github.com/docker/docker/api/types/network"
	dockerclient "github.com/docker/docker/client"
)

// Engine image tags used by the integration suite. `EngineImageRef` is
// the image we actually build from `galaxy/game/Dockerfile`;
// `PatchedEngineImageRef` is the same image content tagged at a higher
// semver patch so the patch lifecycle test exercises the
// `semver_patch_only` validation against a real image. Keeping both at
// the same digest avoids a redundant build.
const (
	EngineImageRef        = "galaxy/game:1.0.0-rtm-it"
	PatchedEngineImageRef = "galaxy/game:1.0.1-rtm-it"

	dockerNetworkPrefix = "rtmanager-it-"

	dockerPingTimeout    = 5 * time.Second
	dockerNetworkTimeout = 30 * time.Second
	imageBuildTimeout    = 10 * time.Minute
)

// DockerEnv carries the per-package Docker client plus the workspace
// root used by image builds. The client is opened lazily on the first
// EnsureDocker call and closed by ShutdownDocker at TestMain exit.
type DockerEnv struct {
	client        *dockerclient.Client
	workspaceRoot string
}

// Client returns the harness-owned Docker SDK client. Tests use it
// directly for "external actions" the harness does not wrap (e.g.,
// removing a running container behind RTM's back in `health_test`).
func (env *DockerEnv) Client() *dockerclient.Client { return env.client }

// WorkspaceRoot returns the absolute path of the galaxy/ workspace
// root. It is exported so the runtime helper can resolve the host
// game-state root relative to it if a test needs a deterministic
// location, though the default places state under `t.ArtifactDir()`.
func (env *DockerEnv) WorkspaceRoot() string { return env.workspaceRoot }

var (
	dockerOnce sync.Once
	dockerEnv  *DockerEnv
	dockerErr  error

	imageOnce sync.Once
	imageErr  error
)

// EnsureDocker opens the shared Docker SDK client and verifies the
// daemon is reachable. When the daemon is unavailable the helper calls
// `t.Skip` so suites stay green on hosts without `/var/run/docker.sock`
// or `DOCKER_HOST`.
func EnsureDocker(t testing.TB) *DockerEnv {
	t.Helper()
	dockerOnce.Do(func() {
		dockerEnv, dockerErr = openDocker()
	})
	if dockerErr != nil {
		t.Skipf("rtmanager integration: docker daemon unavailable: %v", dockerErr)
	}
	return dockerEnv
}
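
// A minimal usage sketch (illustrative; `TestDaemonOnly` is not a real
// test in this suite): a test that only needs the daemon grabs the
// shared client and skips automatically when Docker is absent.
//
//	func TestDaemonOnly(t *testing.T) {
//		env := harness.EnsureDocker(t) // skips the test if no daemon
//		if _, err := env.Client().Ping(context.Background()); err != nil {
//			t.Fatalf("ping: %v", err)
//		}
//	}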

// EnsureEngineImage builds the `galaxy/game` engine image from the
// workspace root once per package run via `sync.Once`, then tags the
// resulting image at both `EngineImageRef` and `PatchedEngineImageRef`
// so the patch lifecycle has a second semver-valid tag to point at.
// Subsequent calls re-use the cached image. Any test that asks for the
// engine image must invoke this helper first; it is intentionally
// separate from `EnsureDocker` so suites that only need the daemon
// (e.g., a future "Docker network missing" negative test) do not pay
// the build cost.
func EnsureEngineImage(t testing.TB) string {
	t.Helper()
	env := EnsureDocker(t)
	imageOnce.Do(func() {
		imageErr = buildAndTagEngineImage(env)
	})
	if imageErr != nil {
		t.Skipf("rtmanager integration: build galaxy/game image: %v", imageErr)
	}
	return EngineImageRef
}
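
// A hedged sketch of how a patch-lifecycle test might consume both tags
// (the test body is hypothetical; only EnsureEngineImage and the two
// exported refs come from this file):
//
//	func TestPatchLifecycle(t *testing.T) {
//		base := harness.EnsureEngineImage(t) // always EngineImageRef
//		// ... create a game on `base`, then patch it to
//		// harness.PatchedEngineImageRef and assert the swap ...
//		_ = base
//	}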

// EnsureNetwork creates a uniquely-named Docker bridge network for the
// caller's test and registers cleanup. Each test gets its own network
// so concurrent scenarios cannot collide on the per-game DNS hostname.
func EnsureNetwork(t testing.TB) string {
	t.Helper()
	env := EnsureDocker(t)
	name := dockerNetworkPrefix + uniqueSuffix(t)

	createCtx, cancel := context.WithTimeout(context.Background(), dockerNetworkTimeout)
	defer cancel()
	if _, err := env.client.NetworkCreate(createCtx, name, network.CreateOptions{Driver: "bridge"}); err != nil {
		t.Fatalf("rtmanager integration: create docker network %q: %v", name, err)
	}
	t.Cleanup(func() {
		removeCtx, removeCancel := context.WithTimeout(context.Background(), dockerNetworkTimeout)
		defer removeCancel()
		if err := env.client.NetworkRemove(removeCtx, name); err != nil && !cerrdefs.IsNotFound(err) {
			t.Logf("rtmanager integration: remove docker network %q: %v", name, err)
		}
	})
	return name
}
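
// Typical composition in a scenario test, sketched (the surrounding
// test body is hypothetical; the two helper calls are the real API):
//
//	func TestGameLifecycle(t *testing.T) {
//		image := harness.EnsureEngineImage(t) // built once per package
//		netName := harness.EnsureNetwork(t)   // removed via t.Cleanup
//		// ... start a container from `image` attached to `netName` ...
//		_, _ = image, netName
//	}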

// ShutdownDocker closes the shared Docker SDK client. `TestMain`
// invokes it after `m.Run`. The harness deliberately leaves the engine
// image in the local Docker cache so the next package run benefits
// from the layer cache; operators can `docker image rm` the
// `*-rtm-it` tags by hand if a stale image gets in the way.
func ShutdownDocker() {
	if dockerEnv == nil {
		return
	}
	if dockerEnv.client != nil {
		_ = dockerEnv.client.Close()
	}
	dockerEnv = nil
}
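
// The `TestMain` wiring mentioned above, as a minimal sketch (assumes
// the conventional os.Exit pattern; not copied from the test packages):
//
//	func TestMain(m *testing.M) {
//		code := m.Run()
//		harness.ShutdownDocker()
//		os.Exit(code)
//	}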

// uniqueSuffix returns 8 hex characters of randomness suitable for a
// per-test resource name. The same helper is used in
// `internal/adapters/docker/smoke_test.go`; we duplicate it instead of
// importing because `_test.go`-only helpers cannot be exported.
func uniqueSuffix(t testing.TB) string {
	t.Helper()
	buf := make([]byte, 4)
	if _, err := rand.Read(buf); err != nil {
		t.Fatalf("rtmanager integration: read random suffix: %v", err)
	}
	return hex.EncodeToString(buf)
}

func openDocker() (*DockerEnv, error) {
	if os.Getenv("DOCKER_HOST") == "" {
		if _, err := os.Stat("/var/run/docker.sock"); err != nil {
			return nil, fmt.Errorf("set DOCKER_HOST or expose /var/run/docker.sock: %w", err)
		}
	}

	client, err := dockerclient.NewClientWithOpts(
		dockerclient.FromEnv,
		dockerclient.WithAPIVersionNegotiation(),
	)
	if err != nil {
		return nil, fmt.Errorf("new docker client: %w", err)
	}

	pingCtx, cancel := context.WithTimeout(context.Background(), dockerPingTimeout)
	defer cancel()
	if _, err := client.Ping(pingCtx); err != nil {
		_ = client.Close()
		return nil, fmt.Errorf("ping docker daemon: %w", err)
	}

	root, err := workspaceRoot()
	if err != nil {
		_ = client.Close()
		return nil, fmt.Errorf("resolve workspace root: %w", err)
	}

	return &DockerEnv{
		client:        client,
		workspaceRoot: root,
	}, nil
}

// buildAndTagEngineImage invokes `docker build` against the workspace
// root context to materialise the `galaxy/game` image, then tags the
// resulting image at the patch tag. Shelling out to the CLI keeps the
// implementation tiny; using the SDK would require streaming a tar
// of the workspace root, which is heavy and duplicates what the CLI
// already optimises. The workspace-root build context is required by
// `galaxy/game` (see `galaxy/game/README.md` §Build).
func buildAndTagEngineImage(env *DockerEnv) error {
	if env == nil {
		return errors.New("nil docker env")
	}
	ctx, cancel := context.WithTimeout(context.Background(), imageBuildTimeout)
	defer cancel()

	dockerfilePath := filepath.Join("game", "Dockerfile")
	cmd := exec.CommandContext(ctx, "docker", "build",
		"-f", dockerfilePath,
		"-t", EngineImageRef,
		".",
	)
	cmd.Dir = env.workspaceRoot
	cmd.Env = append(os.Environ(), "DOCKER_BUILDKIT=1")
	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("docker build (-f %s) in %s: %w; output:\n%s",
			dockerfilePath, env.workspaceRoot, err, strings.TrimSpace(string(output)))
	}

	if err := env.client.ImageTag(ctx, EngineImageRef, PatchedEngineImageRef); err != nil {
		return fmt.Errorf("tag %s as %s: %w", EngineImageRef, PatchedEngineImageRef, err)
	}
	return nil
}

// workspaceRoot resolves the absolute path of the galaxy/ workspace
// root by anchoring on this file's location. The harness lives at
// `galaxy/rtmanager/integration/harness/docker.go`, so the workspace
// root is three directories up. Mirrors the `cmd/jetgen` strategy.
func workspaceRoot() (string, error) {
	_, file, _, ok := runtime.Caller(0)
	if !ok {
		return "", errors.New("resolve runtime caller for workspace root")
	}
	dir := filepath.Dir(file)
	// dir = .../galaxy/rtmanager/integration/harness
	root := filepath.Clean(filepath.Join(dir, "..", "..", ".."))
	return root, nil
}