feat: backend service

This commit is contained in:
Ilia Denisov
2026-05-06 10:14:55 +03:00
committed by GitHub
parent 3e2622757e
commit f446c6a2ac
1486 changed files with 49720 additions and 266401 deletions
+427
View File
@@ -0,0 +1,427 @@
package dockerclient
import (
"context"
"errors"
"fmt"
"io"
"strconv"
"strings"
"time"
cerrdefs "github.com/containerd/errdefs"
"github.com/moby/moby/api/types/container"
"github.com/moby/moby/api/types/mount"
"github.com/moby/moby/api/types/network"
mobyclient "github.com/moby/moby/client"
)
// enginePort is the in-container HTTP port the engine listens on. Galaxy
// never publishes the port to the host; it is reachable only through
// Docker DNS on the user-defined network. Run bakes this port into the
// EngineEndpoint URL it returns.
const enginePort = 8080
// Adapter is the production *Client implementation backed by
// `github.com/moby/moby/client`. Use NewAdapter to construct it.
type Adapter struct {
	// docker is the underlying Moby SDK client; never nil after NewAdapter.
	docker *mobyclient.Client
	// clock supplies the fallback wall-clock used by Run when the daemon
	// does not report a parseable start time; defaults to time.Now.
	clock func() time.Time
}
// AdapterConfig configures an Adapter.
type AdapterConfig struct {
	// Docker is the underlying Moby client. Must be non-nil; NewAdapter
	// rejects a nil value.
	Docker *mobyclient.Client
	// Clock supplies the wall-clock used when the daemon does not
	// return a parseable started_at value. Defaults to time.Now.
	Clock func() time.Time
}
// NewAdapter wraps a moby client with the dockerclient port surface.
// It fails fast on a nil cfg.Docker and substitutes time.Now when no
// clock override is supplied.
func NewAdapter(cfg AdapterConfig) (*Adapter, error) {
	if cfg.Docker == nil {
		return nil, errors.New("dockerclient: nil moby client")
	}
	a := &Adapter{docker: cfg.Docker, clock: cfg.Clock}
	if a.clock == nil {
		a.clock = time.Now
	}
	return a, nil
}
// EnsureNetwork returns nil when the named user-defined network exists
// on the daemon; ErrNetworkMissing otherwise. Adapter never creates
// networks itself — operators provision the network ahead of time.
func (a *Adapter) EnsureNetwork(ctx context.Context, name string) error {
	_, err := a.docker.NetworkInspect(ctx, name, mobyclient.NetworkInspectOptions{})
	switch {
	case err == nil:
		return nil
	case cerrdefs.IsNotFound(err):
		return ErrNetworkMissing
	default:
		return fmt.Errorf("dockerclient: inspect network %q: %w", name, err)
	}
}
// PullImage pulls ref according to policy. The pull stream is fully
// drained synchronously so callers know the image is ready when this
// returns nil.
func (a *Adapter) PullImage(ctx context.Context, ref string, policy PullPolicy) error {
	if !policy.IsKnown() {
		return ErrInvalidPullPolicy
	}
	switch policy {
	case PullPolicyNever:
		// Never pull: succeed only when the image is already local.
		_, err := a.InspectImage(ctx, ref)
		return err
	case PullPolicyIfMissing:
		_, err := a.InspectImage(ctx, ref)
		if err == nil {
			return nil
		}
		if !errors.Is(err, ErrImageNotFound) {
			return err
		}
		// Image absent locally — fall through to the pull below.
	}
	stream, err := a.docker.ImagePull(ctx, ref, mobyclient.ImagePullOptions{})
	if err != nil {
		return fmt.Errorf("%w: pull %q: %v", ErrImagePullFailed, ref, err)
	}
	if _, err := io.Copy(io.Discard, stream); err != nil {
		_ = stream.Close()
		return fmt.Errorf("%w: drain %q: %v", ErrImagePullFailed, ref, err)
	}
	if err := stream.Close(); err != nil {
		return fmt.Errorf("%w: close %q: %v", ErrImagePullFailed, ref, err)
	}
	return nil
}
// InspectImage returns the labels of ref. Maps daemon `not found` to
// ErrImageNotFound.
func (a *Adapter) InspectImage(ctx context.Context, ref string) (ImageInspect, error) {
	res, err := a.docker.ImageInspect(ctx, ref)
	if err != nil {
		if cerrdefs.IsNotFound(err) {
			return ImageInspect{}, ErrImageNotFound
		}
		return ImageInspect{}, fmt.Errorf("dockerclient: inspect image %q: %w", ref, err)
	}
	info := ImageInspect{Ref: ref}
	if cfg := res.Config; cfg != nil {
		info.Labels = cloneStringMap(cfg.Labels)
	}
	return info, nil
}
// InspectContainer returns the metadata for idOrName. Maps daemon
// `not found` to ErrContainerNotFound.
func (a *Adapter) InspectContainer(ctx context.Context, idOrName string) (ContainerInspect, error) {
	res, err := a.docker.ContainerInspect(ctx, idOrName, mobyclient.ContainerInspectOptions{})
	if err == nil {
		return mapContainerInspect(res.Container), nil
	}
	if cerrdefs.IsNotFound(err) {
		return ContainerInspect{}, ErrContainerNotFound
	}
	return ContainerInspect{}, fmt.Errorf("dockerclient: inspect container %q: %w", idOrName, err)
}
// Run pulls the image (per spec.PullPolicy), creates the container with
// the documented label set, attaches it to spec.Network, starts it, and
// returns the canonical engine endpoint URL.
//
// Validation rejects blank Name/Image/Network/Hostname up front. A zero
// spec.PullPolicy defaults to PullPolicyIfMissing. If the start call
// fails, the freshly-created container is force-removed (best effort)
// so no half-started container leaks.
func (a *Adapter) Run(ctx context.Context, spec RunSpec) (RunResult, error) {
	if strings.TrimSpace(spec.Name) == "" {
		return RunResult{}, errors.New("dockerclient: run: name must not be empty")
	}
	if strings.TrimSpace(spec.Image) == "" {
		return RunResult{}, errors.New("dockerclient: run: image must not be empty")
	}
	if strings.TrimSpace(spec.Network) == "" {
		return RunResult{}, errors.New("dockerclient: run: network must not be empty")
	}
	if strings.TrimSpace(spec.Hostname) == "" {
		return RunResult{}, errors.New("dockerclient: run: hostname must not be empty")
	}
	policy := spec.PullPolicy
	if policy == "" {
		policy = PullPolicyIfMissing
	}
	if err := a.PullImage(ctx, spec.Image, policy); err != nil {
		return RunResult{}, err
	}
	// Flatten the env map into the KEY=VALUE slice Docker expects.
	envSlice := make([]string, 0, len(spec.Env))
	for k, v := range spec.Env {
		envSlice = append(envSlice, k+"="+v)
	}
	// Merge the managed-by-Galaxy marker into the caller's labels; the
	// marker wins over any caller-supplied value for the same key.
	labels := make(map[string]string, len(spec.Labels)+1)
	for k, v := range spec.Labels {
		labels[k] = v
	}
	labels[ManagedLabel] = ManagedLabelValue
	// Translate BindMounts into Docker bind-type mounts.
	mounts := make([]mount.Mount, 0, len(spec.BindMounts))
	for _, b := range spec.BindMounts {
		mounts = append(mounts, mount.Mount{
			Type:     mount.TypeBind,
			Source:   b.HostPath,
			Target:   b.MountPath,
			ReadOnly: b.ReadOnly,
		})
	}
	// Resource limits: only set what the spec asked for so the daemon
	// defaults apply otherwise.
	resources := container.Resources{}
	if spec.CPUQuota > 0 {
		// Convert decimal cpus into NanoCPUs (1.0 = 1e9).
		resources.NanoCPUs = int64(spec.CPUQuota * 1e9)
	}
	if mem, err := parseMemoryString(spec.Memory); err != nil {
		return RunResult{}, err
	} else if mem > 0 {
		resources.Memory = mem
	}
	if spec.PIDsLimit > 0 {
		pl := int64(spec.PIDsLimit)
		resources.PidsLimit = &pl
	}
	// Log driver configuration; LogOpts is a comma-separated key=value
	// list and is validated here before the create call.
	logConfig := container.LogConfig{}
	if spec.LogDriver != "" {
		logConfig.Type = spec.LogDriver
	}
	if spec.LogOpts != "" {
		opts, err := parseLogOpts(spec.LogOpts)
		if err != nil {
			return RunResult{}, err
		}
		logConfig.Config = opts
	}
	hostCfg := &container.HostConfig{
		NetworkMode:    container.NetworkMode(spec.Network),
		Mounts:         mounts,
		LogConfig:      logConfig,
		Resources:      resources,
		AutoRemove:     false,
		ReadonlyRootfs: false,
		RestartPolicy: container.RestartPolicy{
			Name: container.RestartPolicyOnFailure,
		},
	}
	// Attach to the user-defined network with the hostname as a DNS
	// alias so the engine endpoint URL resolves via Docker DNS.
	netCfg := &network.NetworkingConfig{
		EndpointsConfig: map[string]*network.EndpointSettings{
			spec.Network: {
				Aliases: []string{spec.Hostname},
			},
		},
	}
	created, err := a.docker.ContainerCreate(ctx, mobyclient.ContainerCreateOptions{
		Name: spec.Name,
		Config: &container.Config{
			Hostname: spec.Hostname,
			Image:    spec.Image,
			Env:      envSlice,
			Cmd:      spec.Cmd,
			Labels:   labels,
		},
		HostConfig:       hostCfg,
		NetworkingConfig: netCfg,
	})
	if err != nil {
		return RunResult{}, fmt.Errorf("dockerclient: create container %q: %w", spec.Name, err)
	}
	if _, err := a.docker.ContainerStart(ctx, created.ID, mobyclient.ContainerStartOptions{}); err != nil {
		// Best-effort: try to remove the freshly-created container so we
		// do not leak a half-started one.
		_, _ = a.docker.ContainerRemove(ctx, created.ID, mobyclient.ContainerRemoveOptions{Force: true})
		return RunResult{}, fmt.Errorf("dockerclient: start container %q: %w", spec.Name, err)
	}
	// Prefer the daemon-reported start time; fall back to the local
	// clock when inspection fails or the value is unparseable.
	startedAt := a.clock()
	if inspect, err := a.docker.ContainerInspect(ctx, created.ID, mobyclient.ContainerInspectOptions{}); err == nil {
		if inspect.Container.State != nil && inspect.Container.State.StartedAt != "" {
			if parsed, perr := time.Parse(time.RFC3339Nano, inspect.Container.State.StartedAt); perr == nil {
				startedAt = parsed
			}
		}
	}
	return RunResult{
		ContainerID:    created.ID,
		EngineEndpoint: fmt.Sprintf("http://%s:%d", spec.Hostname, enginePort),
		StartedAt:      startedAt,
	}, nil
}
// Stop sends SIGTERM to idOrName and waits up to timeoutSeconds before
// forcibly killing it. Maps daemon `not found` to ErrContainerNotFound.
// A negative timeoutSeconds leaves the daemon's default grace period in
// effect.
func (a *Adapter) Stop(ctx context.Context, idOrName string, timeoutSeconds int) error {
	var opts mobyclient.ContainerStopOptions
	if timeoutSeconds >= 0 {
		secs := timeoutSeconds
		opts.Timeout = &secs
	}
	_, err := a.docker.ContainerStop(ctx, idOrName, opts)
	switch {
	case err == nil:
		return nil
	case cerrdefs.IsNotFound(err):
		return ErrContainerNotFound
	default:
		return fmt.Errorf("dockerclient: stop %q: %w", idOrName, err)
	}
}
// Remove deletes idOrName. Idempotent: nil when the container is
// already gone.
func (a *Adapter) Remove(ctx context.Context, idOrName string) error {
	_, err := a.docker.ContainerRemove(ctx, idOrName, mobyclient.ContainerRemoveOptions{Force: true})
	switch {
	case err == nil, cerrdefs.IsNotFound(err):
		// Already-removed counts as success.
		return nil
	default:
		return fmt.Errorf("dockerclient: remove %q: %w", idOrName, err)
	}
}
// List returns container summaries that match filter. The label map is
// translated into daemon-side `label` filters; an empty filter matches
// every container (stopped ones included).
func (a *Adapter) List(ctx context.Context, filter ListFilter) ([]ContainerSummary, error) {
	labelFilters := mobyclient.Filters{}
	for key, val := range filter.Labels {
		if val == "" {
			// Bare key: match any container carrying the label.
			labelFilters.Add("label", key)
		} else {
			labelFilters.Add("label", key+"="+val)
		}
	}
	res, err := a.docker.ContainerList(ctx, mobyclient.ContainerListOptions{
		All:     true,
		Filters: labelFilters,
	})
	if err != nil {
		return nil, fmt.Errorf("dockerclient: list: %w", err)
	}
	summaries := make([]ContainerSummary, len(res.Items))
	for i, item := range res.Items {
		summaries[i] = mapContainerSummary(item)
	}
	return summaries, nil
}
// mapContainerInspect translates the Moby inspect payload into the
// package-local ContainerInspect shape consumed by `internal/runtime`.
func mapContainerInspect(c container.InspectResponse) ContainerInspect {
	out := ContainerInspect{
		ID:       c.ID,
		Name:     strings.TrimPrefix(c.Name, "/"), // the daemon reports names with a leading slash
		ImageRef: c.Image,
	}
	if c.Config != nil {
		out.Hostname = c.Config.Hostname
		out.Labels = cloneStringMap(c.Config.Labels)
		if out.ImageRef == "" {
			// Fall back to the create-time image reference when the
			// top-level field is unset.
			out.ImageRef = c.Config.Image
		}
	}
	if c.State != nil {
		out.Status = string(c.State.Status)
		out.ExitCode = c.State.ExitCode
		// The daemon encodes unset timestamps as the RFC3339 zero time;
		// the IsZero guard maps those to Go zero values instead.
		if t, err := time.Parse(time.RFC3339Nano, c.State.StartedAt); err == nil && !t.IsZero() {
			out.StartedAt = t
		}
		if t, err := time.Parse(time.RFC3339Nano, c.State.FinishedAt); err == nil && !t.IsZero() {
			out.FinishedAt = t
		}
		if c.State.Health != nil {
			out.Health = string(c.State.Health.Status)
		}
	}
	return out
}
// mapContainerSummary translates one `docker ps` row into the
// package-local ContainerSummary shape.
func mapContainerSummary(s container.Summary) ContainerSummary {
	out := ContainerSummary{
		ID:       s.ID,
		ImageRef: s.Image,
		Status:   string(s.State),
		Labels:   cloneStringMap(s.Labels),
	}
	if len(s.Names) > 0 {
		// The daemon prefixes names with `/`; expose the bare name.
		out.Name = strings.TrimPrefix(s.Names[0], "/")
	}
	// NOTE(review): s.Created is the container *creation* time, not the
	// start time — the list API does not carry StartedAt. Confirm that
	// consumers of ContainerSummary.StartedAt tolerate this
	// approximation.
	out.StartedAt = time.Unix(s.Created, 0).UTC()
	return out
}
// cloneStringMap returns an independent copy of in. Nil and empty maps
// both normalize to nil so callers can rely on a nil zero value.
func cloneStringMap(in map[string]string) map[string]string {
	if len(in) == 0 {
		return nil
	}
	dup := make(map[string]string, len(in))
	for key, val := range in {
		dup[key] = val
	}
	return dup
}
// parseMemoryString accepts the docker `--memory` short forms (e.g.
// `512m`, `1g`, `1.5g`) and returns the corresponding byte count. An
// empty string yields 0 (no memory limit). Unknown formats produce an
// error.
//
// The suffix vocabulary is b/k/m/g (case-insensitive, powers of 1024).
// Fractional quantities such as `1.5g` are accepted — the docker CLI
// allows them — and are truncated toward zero after scaling.
func parseMemoryString(raw string) (int64, error) {
	raw = strings.TrimSpace(raw)
	if raw == "" {
		return 0, nil
	}
	multiplier := int64(1)
	digits := raw
	switch last := raw[len(raw)-1]; last {
	case 'b', 'B':
		multiplier = 1
		digits = raw[:len(raw)-1]
	case 'k', 'K':
		multiplier = 1 << 10
		digits = raw[:len(raw)-1]
	case 'm', 'M':
		multiplier = 1 << 20
		digits = raw[:len(raw)-1]
	case 'g', 'G':
		multiplier = 1 << 30
		digits = raw[:len(raw)-1]
	default:
		// No recognised suffix: the value must be a plain byte count.
		if last < '0' || last > '9' {
			return 0, fmt.Errorf("dockerclient: invalid memory suffix in %q", raw)
		}
	}
	if strings.Contains(digits, ".") {
		// Fractional quantity (e.g. `1.5g`): scale in float space and
		// truncate to whole bytes.
		f, err := strconv.ParseFloat(digits, 64)
		if err != nil {
			return 0, fmt.Errorf("dockerclient: parse memory %q: %w", raw, err)
		}
		if f < 0 {
			return 0, fmt.Errorf("dockerclient: memory must be non-negative, got %q", raw)
		}
		return int64(f * float64(multiplier)), nil
	}
	n, err := strconv.ParseInt(digits, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("dockerclient: parse memory %q: %w", raw, err)
	}
	if n < 0 {
		return 0, fmt.Errorf("dockerclient: memory must be non-negative, got %q", raw)
	}
	return n * multiplier, nil
}
// parseLogOpts splits a comma-separated `key=value` list into a map.
// Blank segments are skipped; a segment without `=` or with an empty
// key is rejected. Keys and values are whitespace-trimmed.
func parseLogOpts(raw string) (map[string]string, error) {
	opts := make(map[string]string)
	for _, segment := range strings.Split(raw, ",") {
		segment = strings.TrimSpace(segment)
		if segment == "" {
			continue
		}
		key, val, found := strings.Cut(segment, "=")
		if !found {
			return nil, fmt.Errorf("dockerclient: log opt %q must be key=value", segment)
		}
		if key = strings.TrimSpace(key); key == "" {
			return nil, fmt.Errorf("dockerclient: log opt %q has empty key", segment)
		}
		opts[key] = strings.TrimSpace(val)
	}
	return opts, nil
}
@@ -0,0 +1,84 @@
package dockerclient
import (
"strings"
"testing"
)
// TestPullPolicyIsKnown verifies the closed PullPolicy vocabulary:
// all three declared policies are known, anything else is not.
func TestPullPolicyIsKnown(t *testing.T) {
	for _, p := range []PullPolicy{PullPolicyIfMissing, PullPolicyAlways, PullPolicyNever} {
		if !p.IsKnown() {
			t.Errorf("PullPolicy(%q).IsKnown() = false, want true", p)
		}
	}
	for _, p := range []PullPolicy{"", "other"} {
		if p.IsKnown() {
			t.Errorf("PullPolicy(%q).IsKnown() = true, want false", p)
		}
	}
}
// TestParseMemoryString checks the accepted docker --memory short forms
// against their expected byte counts.
func TestParseMemoryString(t *testing.T) {
	expectations := map[string]int64{
		"":     0,
		" ":    0,
		"512":  512,
		"512b": 512,
		"4k":   4 * 1024,
		"1m":   1 * 1024 * 1024,
		"512M": 512 * 1024 * 1024,
		"2g":   2 * 1024 * 1024 * 1024,
	}
	for raw, want := range expectations {
		got, err := parseMemoryString(raw)
		if err != nil {
			t.Errorf("parseMemoryString(%q) returned error: %v", raw, err)
			continue
		}
		if got != want {
			t.Errorf("parseMemoryString(%q) = %d, want %d", raw, got, want)
		}
	}
}
// TestParseMemoryStringRejectsInvalid checks that malformed memory
// strings produce an error.
func TestParseMemoryStringRejectsInvalid(t *testing.T) {
	for _, raw := range []string{"abc", "1x", "-1m"} {
		if _, err := parseMemoryString(raw); err == nil {
			t.Errorf("parseMemoryString(%q) expected error, got nil", raw)
		}
	}
}
// TestParseLogOpts checks the happy path: two key=value pairs parse
// into the expected map entries.
func TestParseLogOpts(t *testing.T) {
	opts, err := parseLogOpts("max-size=10m,max-file=3")
	if err != nil {
		t.Fatalf("parseLogOpts unexpected error: %v", err)
	}
	for key, want := range map[string]string{"max-size": "10m", "max-file": "3"} {
		if got := opts[key]; got != want {
			t.Errorf("parseLogOpts()[%q] = %q, want %q", key, got, want)
		}
	}
}
// TestParseLogOptsRejectsMissingValue checks that a segment without
// `=` fails with the key=value error message.
func TestParseLogOptsRejectsMissingValue(t *testing.T) {
	_, err := parseLogOpts("solo,foo=bar")
	if err == nil {
		t.Fatal("expected key=value error, got nil")
	}
	if !strings.Contains(err.Error(), "key=value") {
		t.Errorf("expected key=value error, got %v", err)
	}
}
// TestCloneStringMapNilSafe checks the nil normalization, that the
// clone carries the source's entries, and that mutating the clone does
// not leak back into the source.
func TestCloneStringMapNilSafe(t *testing.T) {
	if got := cloneStringMap(nil); got != nil {
		t.Errorf("cloneStringMap(nil) = %v, want nil", got)
	}
	src := map[string]string{"a": "1"}
	got := cloneStringMap(src)
	// Fixed: the original never asserted the clone's contents before
	// mutating it, so a clone that dropped entries would still pass.
	if len(got) != 1 || got["a"] != "1" {
		t.Fatalf("cloneStringMap(%v) = %v, want an equal copy", src, got)
	}
	got["a"] = "mutated"
	if src["a"] != "1" {
		t.Errorf("cloneStringMap leaks mutation: %v", src)
	}
}
+37
View File
@@ -0,0 +1,37 @@
package dockerclient
import "context"
// Client is the narrow Docker port consumed by `internal/runtime`. The
// production adapter is *Adapter (see adapter.go); tests substitute a
// hand-rolled stub or generated mock.
//
// Method semantics:
//
//   - EnsureNetwork verifies a user-defined Docker network exists on
//     the daemon. Adapter never creates networks.
//   - PullImage pulls ref according to policy. Implementations must
//     honour PullPolicyNever by skipping the pull and returning nil
//     when the image is already present, or ErrImageNotFound otherwise.
//   - InspectImage / InspectContainer return ErrImageNotFound /
//     ErrContainerNotFound for missing inputs.
//   - Run creates and starts one container. The returned RunResult
//     carries the container id, the stable engine endpoint URL, and
//     the wall-clock observed by the daemon.
//   - Stop sends SIGTERM and waits up to timeoutSeconds before
//     SIGKILL. Returns ErrContainerNotFound when the target is already
//     gone.
//   - Remove deletes the container. Idempotent: nil when already
//     removed.
//   - List returns container summaries that match filter. Adapter
//     translates filter.Labels into the daemon-side filters argument.
type Client interface {
	EnsureNetwork(ctx context.Context, name string) error
	PullImage(ctx context.Context, ref string, policy PullPolicy) error
	InspectImage(ctx context.Context, ref string) (ImageInspect, error)
	InspectContainer(ctx context.Context, idOrName string) (ContainerInspect, error)
	Run(ctx context.Context, spec RunSpec) (RunResult, error)
	Stop(ctx context.Context, idOrName string, timeoutSeconds int) error
	Remove(ctx context.Context, idOrName string) error
	List(ctx context.Context, filter ListFilter) ([]ContainerSummary, error)
}
+36
View File
@@ -0,0 +1,36 @@
package dockerclient
import "errors"
// Sentinel errors returned by the production adapter and consumed by
// `internal/runtime`. Tests substitute their own implementations of
// Client and may return these sentinels verbatim or wrap them with
// extra context via fmt.Errorf("...: %w", ...). Callers compare with
// errors.Is, never with ==.
var (
	// ErrNetworkMissing is returned by EnsureNetwork when the configured
	// user-defined Docker network does not exist on the daemon.
	// `internal/runtime` treats this as a fatal startup error — Galaxy
	// never creates Docker networks itself.
	ErrNetworkMissing = errors.New("dockerclient: network missing")
	// ErrImageNotFound is returned by InspectImage / PullImage(never)
	// when the image is absent locally and the active pull policy
	// forbids fetching it.
	ErrImageNotFound = errors.New("dockerclient: image not found")
	// ErrContainerNotFound is returned by InspectContainer / Stop /
	// Remove when no container with the supplied id or name exists.
	// `internal/runtime` treats this as an idempotent miss for Stop and
	// Remove and as a removed-container signal for InspectContainer.
	ErrContainerNotFound = errors.New("dockerclient: container not found")
	// ErrInvalidPullPolicy is returned by Run / PullImage when the
	// supplied PullPolicy is not part of the closed vocabulary.
	ErrInvalidPullPolicy = errors.New("dockerclient: invalid pull policy")
	// ErrImagePullFailed wraps every PullImage failure path returned to
	// the caller so `internal/runtime` can attribute the failure to the
	// pull stage rather than to container creation. The unwrap chain
	// preserves the underlying daemon error for logs and metrics.
	ErrImagePullFailed = errors.New("dockerclient: image pull failed")
)
+223
View File
@@ -0,0 +1,223 @@
// Package dockerclient is the narrow Docker API surface consumed by
// `internal/runtime`. Its sole responsibility is to translate between the
// runtime domain and the Moby SDK; no orchestration, persistence, or
// notification logic lives in this package.
//
// The package is intentionally small. The implementation only surfaces the
// container-lifecycle calls the runtime module needs (`EnsureNetwork`,
// `PullImage`, `InspectImage`, `InspectContainer`, `Run`, `Stop`,
// `Remove`, `List`); any future functionality is introduced as an
// additive method on the `Client` interface so the runtime package can
// adopt it without round-tripping through Moby SDK type imports.
//
// Production wiring uses *Adapter, which delegates to
// `github.com/moby/moby/client`. Unit tests in `internal/runtime` and
// elsewhere should mock the `Client` interface directly rather than
// reaching into Moby types.
package dockerclient
import (
"time"
)
// PullPolicy enumerates the supported image-pull behaviours documented in
// `backend/README.md` §4 under `BACKEND_RUNTIME_IMAGE_PULL_POLICY`.
type PullPolicy string

const (
	// PullPolicyIfMissing pulls the image only when it is absent from the
	// local Docker daemon.
	PullPolicyIfMissing PullPolicy = "if_missing"
	// PullPolicyAlways pulls the image on every Run.
	PullPolicyAlways PullPolicy = "always"
	// PullPolicyNever skips the pull and fails Run when the image is
	// absent locally.
	PullPolicyNever PullPolicy = "never"
)

// IsKnown reports whether p belongs to the closed PullPolicy vocabulary.
func (p PullPolicy) IsKnown() bool {
	return p == PullPolicyIfMissing || p == PullPolicyAlways || p == PullPolicyNever
}
// ManagedLabel is the Docker container label runtime stamps on every
// engine container so the reconciler and the events listener can
// identify Galaxy-managed containers from unrelated workloads sharing
// the daemon. The adapter merges it into RunSpec.Labels at create time.
const ManagedLabel = "galaxy.backend"

// ManagedLabelValue is the string value paired with `ManagedLabel`.
const ManagedLabelValue = "1"
// RunSpec is the request shape used by Client.Run. Producers populate
// it inside `runtime.Service.StartGame`. Name, Image, Network, and
// Hostname are mandatory; Run rejects blank values for any of them.
type RunSpec struct {
	// Name is the container name (typically `galaxy-game-{game_id}`).
	Name string
	// Image is the resolved image reference (e.g.
	// `galaxy-game:0.1.0`).
	Image string
	// Hostname is the container hostname; the engine endpoint URL
	// `http://galaxy-game-{game_id}:8080` resolves through Docker DNS
	// against this name on the user-defined network.
	Hostname string
	// Network is the user-defined Docker network name the container
	// attaches to.
	Network string
	// Env lists the environment variables forwarded to the container.
	Env map[string]string
	// Cmd overrides the entrypoint arguments. Production callers leave
	// it nil so the engine image's CMD runs.
	Cmd []string
	// Labels are applied at create time. The adapter merges
	// `ManagedLabel=ManagedLabelValue` into this map automatically;
	// callers may add more entries.
	Labels map[string]string
	// BindMounts describe the host-to-container bind mounts. Galaxy
	// uses exactly one in MVP (the per-game state directory).
	BindMounts []BindMount
	// LogDriver is the Docker log-driver name (e.g. `json-file`).
	// Empty leaves the daemon default in place.
	LogDriver string
	// LogOpts is the comma-separated `key=value` list forwarded to the
	// log driver. May be empty.
	LogOpts string
	// CPUQuota is the `--cpus` value applied as a resource limit.
	// Zero or negative means no limit.
	CPUQuota float64
	// Memory is the `--memory` value (e.g. `512m`) applied as a
	// resource limit. Empty means no limit.
	Memory string
	// PIDsLimit is the `--pids-limit` value. Zero or negative means
	// no limit.
	PIDsLimit int
	// PullPolicy selects how Run resolves a missing image. Defaults to
	// PullPolicyIfMissing when zero.
	PullPolicy PullPolicy
}
// BindMount stores one host-to-container bind mount.
type BindMount struct {
	// HostPath is the absolute host path bound into the container.
	HostPath string
	// MountPath is the absolute in-container path the host directory
	// is mounted at.
	MountPath string
	// ReadOnly mounts the host path read-only when true.
	ReadOnly bool
}
// RunResult is the response shape returned by Client.Run.
type RunResult struct {
	// ContainerID identifies the created container.
	ContainerID string
	// EngineEndpoint is the URL Galaxy uses to reach the engine. The
	// adapter synthesises it as `http://{Hostname}:8080`.
	EngineEndpoint string
	// StartedAt is the wall-clock observed by the daemon for the start
	// event; the adapter falls back to its own clock when the daemon
	// value is missing or unparseable.
	StartedAt time.Time
}
// ImageInspect carries the subset of `docker image inspect` fields the
// runtime reads.
type ImageInspect struct {
	// Ref is the image reference the inspection was scoped to.
	Ref string
	// Labels are the image-level labels (e.g. `com.galaxy.cpu_quota`).
	// Nil when the image declares none.
	Labels map[string]string
}
// ContainerInspect carries the subset of `docker inspect` fields the
// runtime reads from a running or exited container.
type ContainerInspect struct {
	// ID identifies the container.
	ID string
	// Name is the container name (without leading `/`).
	Name string
	// ImageRef is the image reference the container was started from.
	ImageRef string
	// Hostname is the container hostname.
	Hostname string
	// Labels are the container labels assigned at create time.
	Labels map[string]string
	// Status is the verbatim Docker `State.Status` value (e.g.
	// `running`, `exited`).
	Status string
	// Health is the verbatim Docker `State.Health.Status` value
	// (e.g. `healthy`, `unhealthy`). Empty when the image declares no
	// HEALTHCHECK.
	Health string
	// StartedAt is the daemon-observed start wall-clock. Zero when the
	// daemon reported no (or an unparseable) start time.
	StartedAt time.Time
	// FinishedAt is the daemon-observed exit wall-clock. Zero when the
	// container is still running.
	FinishedAt time.Time
	// ExitCode is the exit code reported by the daemon. Zero when the
	// container is still running.
	ExitCode int
}
// ContainerSummary carries the subset of `docker ps` fields the runtime
// reads.
type ContainerSummary struct {
	// ID identifies the container.
	ID string
	// Name is the container name (without leading `/`).
	Name string
	// ImageRef is the image reference.
	ImageRef string
	// Hostname is the container hostname.
	// NOTE(review): the adapter's List mapping never populates this
	// field (`docker ps` rows do not carry a hostname) — confirm
	// whether any consumer relies on it.
	Hostname string
	// Labels are the container labels assigned at create time.
	Labels map[string]string
	// Status is the verbatim Docker `State.Status` value.
	Status string
	// StartedAt is the daemon-observed start wall-clock.
	// NOTE(review): the adapter fills this from the summary's Created
	// timestamp, so for listed containers it is the creation time, not
	// the start time.
	StartedAt time.Time
}
// ListFilter narrows the ContainerList result. Empty fields match
// everything.
type ListFilter struct {
	// Labels lists `key=value` label pairs that must all be present on
	// the container. A pair with an empty value matches any container
	// carrying the key. Empty matches every container.
	Labels map[string]string
}