Volt CLI: source-available under AGPSL v5.0

Complete infrastructure platform CLI:
- Container runtime (systemd-nspawn)
- VoltVisor VMs (Neutron Stardust / QEMU)
- Stellarium CAS (content-addressed storage)
- ORAS Registry
- GitOps integration
- Landlock LSM security
- Compose orchestration
- Mesh networking

Copyright (c) Armored Gates LLC. All rights reserved.
Licensed under AGPSL v5.0
This commit is contained in:
Karl Clinger
2026-03-21 00:30:23 -05:00
commit 0ebe75b2ca
155 changed files with 63317 additions and 0 deletions

99
pkg/backend/backend.go Normal file
View File

@@ -0,0 +1,99 @@
/*
Backend Interface - Container runtime abstraction for Volt CLI.
All container backends (systemd-nspawn, proot, etc.) implement this interface
to provide a uniform API for the CLI command layer.
*/
package backend
import "time"
// ContainerInfo holds metadata about a container as reported by a backend.
type ContainerInfo struct {
	Name      string    // container name (unique per backend)
	Image     string    // image the container was created from
	Status    string    // created, running, stopped
	PID       int       // leader/main PID when running (0 otherwise)
	RootFS    string    // host path of the container root filesystem
	Memory    string    // memory limit, human-readable (e.g. "2G")
	CPU       int       // CPU count/limit
	CreatedAt time.Time // creation timestamp
	StartedAt time.Time // last start timestamp
	IPAddress string    // container IP address, if networked
	OS        string    // OS pretty name read from the rootfs, if known
}

// CreateOptions specifies parameters for container creation.
type CreateOptions struct {
	Name    string        // container name
	Image   string        // image reference or rootfs directory path
	RootFS  string        // explicit rootfs path (alternative to Image)
	Memory  string        // memory limit (e.g. "512M", "2G")
	CPU     int           // number of CPUs
	Network string        // network mode or bridge name
	Start   bool          // start the container immediately after creation
	Env     []string      // environment variables, KEY=VALUE form
	Ports   []PortMapping // host-to-container port forwards
	Volumes []VolumeMount // host-path bind mounts
}

// PortMapping maps a host port to a container port.
type PortMapping struct {
	HostPort      int
	ContainerPort int
	Protocol      string // tcp, udp
}

// VolumeMount binds a host path into a container.
type VolumeMount struct {
	HostPath      string // path on the host
	ContainerPath string // mount point inside the container
	ReadOnly      bool   // mount read-only when true
}

// ExecOptions specifies parameters for executing a command in a container.
type ExecOptions struct {
	Command []string // argv; backends may default to a shell when empty
	TTY     bool     // allocate a pseudo-terminal
	Env     []string // extra environment variables, KEY=VALUE form
}

// LogOptions specifies parameters for retrieving container logs.
type LogOptions struct {
	Tail   int  // number of trailing lines to return (0 = backend default)
	Follow bool // stream logs instead of returning a snapshot
}
// ContainerBackend defines the interface that all container runtimes must
// implement to plug into the CLI command layer.
type ContainerBackend interface {
	// Name returns the backend name (e.g., "systemd", "proot").
	Name() string
	// Available returns true if this backend can run on the current system.
	Available() bool
	// Init initializes the backend; a non-empty dataDir overrides the
	// backend's default data directory.
	Init(dataDir string) error

	// Container lifecycle.
	Create(opts CreateOptions) error
	Start(name string) error
	Stop(name string) error
	Delete(name string, force bool) error

	// Container interaction.
	Exec(name string, opts ExecOptions) error
	Logs(name string, opts LogOptions) (string, error)
	CopyToContainer(name string, src string, dst string) error
	CopyFromContainer(name string, src string, dst string) error

	// Container info.
	List() ([]ContainerInfo, error)
	Inspect(name string) (*ContainerInfo, error)

	// Platform capabilities advertised by the backend.
	SupportsVMs() bool
	SupportsServices() bool
	SupportsNetworking() bool
	SupportsTuning() bool
}

66
pkg/backend/detect.go Normal file
View File

@@ -0,0 +1,66 @@
/*
Backend Detection - Auto-detect the best available container backend.
Uses a registration pattern to avoid import cycles: backend packages
register themselves via init() by calling Register().
*/
package backend
import (
	"fmt"
	"sort"
	"sync"
)
var (
	// mu guards registry and order.
	mu sync.Mutex
	// registry maps a backend name to its constructor function.
	registry = map[string]func() ContainerBackend{}
	// order tracks registration order for priority-based detection
	// (earlier registrations are tried first by DetectBackend).
	order []string
)
// Register adds a backend factory to the registry.
// Backends should call this from their init() function.
// Registering an existing name replaces its factory but keeps the
// original position in the detection priority order, so repeated
// registration cannot duplicate entries in `order` (which would make
// DetectBackend probe the same backend more than once).
func Register(name string, factory func() ContainerBackend) {
	mu.Lock()
	defer mu.Unlock()
	if _, exists := registry[name]; !exists {
		order = append(order, name)
	}
	registry[name] = factory
}
// DetectBackend returns the best available backend for the current platform.
// Candidates are tried in registration order; the first whose Available()
// reports true wins.
func DetectBackend() ContainerBackend {
	mu.Lock()
	defer mu.Unlock()
	for _, name := range order {
		if candidate := registry[name](); candidate.Available() {
			return candidate
		}
	}
	if len(order) == 0 {
		return nil
	}
	// Nothing usable: fall back to the first registered backend anyway so
	// non-runtime operations (--help and friends) still work.
	return registry[order[0]]()
}
// GetBackend returns a backend by name, or an error if unknown.
// The error lists the registered backend names in sorted order so the
// message is deterministic (map iteration order is random).
func GetBackend(name string) (ContainerBackend, error) {
	mu.Lock()
	defer mu.Unlock()
	if factory, ok := registry[name]; ok {
		return factory(), nil
	}
	available := make([]string, 0, len(registry))
	for k := range registry {
		available = append(available, k)
	}
	// Sort for a stable, user-friendly error message.
	sort.Strings(available)
	return nil, fmt.Errorf("unknown backend: %q (available: %v)", name, available)
}

View File

@@ -0,0 +1,787 @@
/*
Hybrid Backend - Container runtime using systemd-nspawn in boot mode with
kernel isolation for Volt hybrid-native workloads.
This backend extends the standard systemd-nspawn approach to support:
- Full boot mode (--boot) with optional custom kernel
- Cgroups v2 delegation for nested resource control
- Private /proc and /sys views
- User namespace isolation (--private-users)
- Landlock LSM policies (NEVER AppArmor)
- Seccomp profile selection
- Per-container resource limits
Uses systemd-nspawn as the underlying engine. NOT a custom runtime.
Copyright (c) Armored Gates LLC. All rights reserved.
*/
package hybrid
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/armoredgate/volt/pkg/backend"
"github.com/armoredgate/volt/pkg/kernel"
)
// init registers the hybrid backend with the backend registry so that
// backend.DetectBackend / backend.GetBackend can construct it by name.
func init() {
	backend.Register("hybrid", func() backend.ContainerBackend { return New() })
}
const (
	// Default on-disk layout for container rootfs trees, extracted
	// images, and boot kernels (overridable via Init's dataDir).
	defaultContainerBaseDir = "/var/lib/volt/containers"
	defaultImageBaseDir     = "/var/lib/volt/images"
	defaultKernelDir        = "/var/lib/volt/kernels"
	// unitPrefix is the service-name prefix for hybrid container units.
	// NOTE(review): unitName() builds the same prefix from a literal
	// instead of this constant — keep the two in sync.
	unitPrefix = "volt-hybrid@"
	// Where generated service units and .nspawn configs are written.
	unitDir         = "/etc/systemd/system"
	nspawnConfigDir = "/etc/systemd/nspawn"
)
// Backend implements backend.ContainerBackend using systemd-nspawn in boot
// mode with hybrid-native kernel isolation.
type Backend struct {
	containerBaseDir string          // directory holding one rootfs dir per container
	imageBaseDir     string          // directory holding extracted image rootfs trees
	kernelManager    *kernel.Manager // resolves and manages container boot kernels
}
// New creates a hybrid backend wired to the default on-disk layout
// under /var/lib/volt.
func New() *Backend {
	b := &Backend{}
	b.containerBaseDir = defaultContainerBaseDir
	b.imageBaseDir = defaultImageBaseDir
	b.kernelManager = kernel.NewManager(defaultKernelDir)
	return b
}
// Name reports the identifier this backend is registered under.
func (b *Backend) Name() string {
	return "hybrid"
}
// Available returns true if systemd-nspawn is installed and the kernel supports
// the features required for hybrid-native mode.
func (b *Backend) Available() bool {
	// systemd-nspawn must be on PATH; without it nothing else matters.
	if _, err := exec.LookPath("systemd-nspawn"); err != nil {
		return false
	}
	// Verify the host kernel has required features. We don't fail hard here —
	// just log a warning if validation cannot be performed (e.g. no config.gz).
	results, err := kernel.ValidateHostKernel()
	if err != nil {
		// Cannot validate — assume available but warn at Init time.
		// NOTE(review): Init does not currently emit such a warning — confirm.
		return true
	}
	return kernel.AllFeaturesPresent(results)
}
// Init prepares the backend for use. A non-empty dataDir overrides the
// default /var/lib/volt layout for containers, images, and kernels.
func (b *Backend) Init(dataDir string) error {
	if dataDir == "" {
		return b.kernelManager.Init()
	}
	b.containerBaseDir = filepath.Join(dataDir, "containers")
	b.imageBaseDir = filepath.Join(dataDir, "images")
	b.kernelManager = kernel.NewManager(filepath.Join(dataDir, "kernels"))
	return b.kernelManager.Init()
}
// ── Capability flags ─────────────────────────────────────────────────────────
// The hybrid backend advertises the full platform feature set.

func (b *Backend) SupportsVMs() bool { return true }

func (b *Backend) SupportsServices() bool { return true }

func (b *Backend) SupportsNetworking() bool { return true }

func (b *Backend) SupportsTuning() bool { return true }
// ── Helpers ──────────────────────────────────────────────────────────────────
// unitName returns the systemd service unit name for a hybrid container.
func unitName(name string) string {
	return "volt-hybrid@" + name + ".service"
}
// unitFilePath returns the full path to a hybrid container's service unit file
// (under /etc/systemd/system).
func unitFilePath(name string) string {
	return filepath.Join(unitDir, unitName(name))
}
// containerDir returns the rootfs dir for a container, under the
// backend's container base directory.
func (b *Backend) containerDir(name string) string {
	return filepath.Join(b.containerBaseDir, name)
}
// runCommand runs name with args and returns the combined, whitespace-trimmed
// stdout+stderr along with any execution error.
func runCommand(name string, args ...string) (string, error) {
	out, err := exec.Command(name, args...).CombinedOutput()
	return strings.TrimSpace(string(out)), err
}
// runCommandSilent runs name with args and returns the whitespace-trimmed
// stdout only (stderr is not captured).
func runCommandSilent(name string, args ...string) (string, error) {
	raw, err := exec.Command(name, args...).Output()
	trimmed := strings.TrimSpace(string(raw))
	return trimmed, err
}
// runCommandInteractive runs name with args with the process's own
// stdin/stdout/stderr attached, for TTY-style interaction.
func runCommandInteractive(name string, args ...string) error {
	cmd := exec.Command(name, args...)
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	return cmd.Run()
}
// fileExists reports whether path can be stat'ed (note: this is true for
// directories as well as regular files).
func fileExists(path string) bool {
	if _, err := os.Stat(path); err != nil {
		return false
	}
	return true
}
// dirExists reports whether path exists and is a directory.
func dirExists(path string) bool {
	info, err := os.Stat(path)
	return err == nil && info.IsDir()
}
// resolveImagePath resolves an --image value to a rootfs directory.
// It accepts a direct directory path, a name under the image base dir,
// or a name with ":" replaced by "_" (registry-style tags).
func (b *Backend) resolveImagePath(img string) (string, error) {
	// A literal directory path wins immediately.
	if dirExists(img) {
		return img, nil
	}
	searched := []string{
		filepath.Join(b.imageBaseDir, img),
		filepath.Join(b.imageBaseDir, strings.ReplaceAll(img, ":", "_")),
	}
	for _, candidate := range searched {
		if dirExists(candidate) {
			return candidate, nil
		}
	}
	return "", fmt.Errorf("image %q not found (checked %s)", img, strings.Join(searched, ", "))
}
// resolveContainerCommand resolves a bare command name to an absolute path
// inside the container's rootfs by probing the usual bin directories.
// Already-absolute commands, and commands found nowhere, are returned as-is.
func (b *Backend) resolveContainerCommand(name, cmd string) string {
	if strings.HasPrefix(cmd, "/") {
		return cmd
	}
	rootfs := b.containerDir(name)
	for _, dir := range []string{
		"usr/bin", "bin", "usr/sbin", "sbin",
		"usr/local/bin", "usr/local/sbin",
	} {
		if fileExists(filepath.Join(rootfs, dir, cmd)) {
			// Return the container-side absolute path, not the host path.
			return "/" + dir + "/" + cmd
		}
	}
	return cmd
}
// isContainerRunning reports whether a container is currently up,
// consulting machinectl first and falling back to the systemd unit state.
func isContainerRunning(name string) bool {
	if out, err := runCommandSilent("machinectl", "show", name, "--property=State"); err == nil && strings.Contains(out, "running") {
		return true
	}
	out, err := runCommandSilent("systemctl", "is-active", unitName(name))
	return err == nil && strings.TrimSpace(out) == "active"
}
// getContainerLeaderPID returns the leader PID of a running container.
// It queries machinectl's Leader property first and falls back to the
// systemd unit's MainPID; empty and "0" values are treated as not running.
func getContainerLeaderPID(name string) (string, error) {
	// parsePID extracts the value from a "Key=value" property line,
	// rejecting empty and zero PIDs. Shared by both lookup paths.
	parsePID := func(out string) (string, bool) {
		parts := strings.SplitN(out, "=", 2)
		if len(parts) != 2 {
			return "", false
		}
		pid := strings.TrimSpace(parts[1])
		if pid == "" || pid == "0" {
			return "", false
		}
		return pid, true
	}
	if out, err := runCommandSilent("machinectl", "show", name, "--property=Leader"); err == nil {
		if pid, ok := parsePID(out); ok {
			return pid, nil
		}
	}
	if out, err := runCommandSilent("systemctl", "show", unitName(name), "--property=MainPID"); err == nil {
		if pid, ok := parsePID(out); ok {
			return pid, nil
		}
	}
	return "", fmt.Errorf("no running PID found for container %q", name)
}
// daemonReload asks systemd to re-read unit files after we add or
// remove container units.
func daemonReload() error {
	if _, err := runCommand("systemctl", "daemon-reload"); err != nil {
		return err
	}
	return nil
}
// ── Unit File Generation ─────────────────────────────────────────────────────
// writeUnitFile writes the systemd-nspawn service unit for a hybrid container.
// Uses --boot mode: the container boots with its own init (systemd or similar),
// providing private /proc and /sys views and full service management inside.
//
// NOTE(review): the values from iso.Resources.SystemdProperties() are
// emitted into the unit as "# cgroup: ..." comment lines, not as real
// [Service] directives, so systemd will NOT enforce them from this unit.
// Confirm whether the limits are applied elsewhere (e.g. the .nspawn
// config) or whether this is unfinished.
func (b *Backend) writeUnitFile(name string, iso *IsolationConfig, kernelPath string) error {
	// Build the ExecStart command line.
	var nspawnArgs []string
	// Core boot-mode flags. --keep-unit ties the machine scope to this
	// service; --boot runs the rootfs's own init as PID 1.
	nspawnArgs = append(nspawnArgs,
		"--quiet",
		"--keep-unit",
		"--boot",
		"--machine="+name,
		"--directory="+b.containerDir(name),
	)
	// Kernel-specific environment, visible to processes in the container.
	nspawnArgs = append(nspawnArgs,
		"--setenv=VOLT_CONTAINER="+name,
		"--setenv=VOLT_RUNTIME=hybrid",
	)
	if kernelPath != "" {
		nspawnArgs = append(nspawnArgs, "--setenv=VOLT_KERNEL="+kernelPath)
	}
	// Isolation-specific nspawn args (resources, network, seccomp, user ns).
	if iso != nil {
		nspawnArgs = append(nspawnArgs, iso.NspawnArgs()...)
	}
	execStart := "/usr/bin/systemd-nspawn " + strings.Join(nspawnArgs, " ")
	// Build property lines for the unit file (comment-only — see NOTE above).
	var propertyLines string
	if iso != nil {
		for _, prop := range iso.Resources.SystemdProperties() {
			propertyLines += fmt.Sprintf("# cgroup: %s\n", prop)
		}
	}
	// NOTE(review): Requires=network.target is unusually strong; Wants= is
	// the conventional pairing with After= — confirm intent.
	unit := fmt.Sprintf(`[Unit]
Description=Volt Hybrid Container: %%i
Documentation=https://volt.armoredgate.com/docs/hybrid
After=network.target
Requires=network.target
[Service]
Type=notify
NotifyAccess=all
%sExecStart=%s
KillMode=mixed
Restart=on-failure
RestartSec=5s
WatchdogSec=3min
Slice=volt-hybrid.slice
# Boot-mode containers send READY=1 when init is up
TimeoutStartSec=90s
[Install]
WantedBy=machines.target
`, propertyLines, execStart)
	return os.WriteFile(unitFilePath(name), []byte(unit), 0644)
}
// ── Create ───────────────────────────────────────────────────────────────────
// Create provisions a new hybrid container: copies (or creates) the rootfs,
// resolves a boot kernel, builds the isolation config, writes the systemd
// service unit and .nspawn config, and optionally starts the container.
// Most write steps are best-effort (warn and continue); only rootfs setup
// and the optional start can fail the call.
//
// NOTE(review): opts.Env and opts.Volumes are iterated below but their
// values are discarded (`_ =` assignments), so they never reach the unit
// file — confirm whether this is intentional or pending work.
func (b *Backend) Create(opts backend.CreateOptions) error {
	destDir := b.containerDir(opts.Name)
	if dirExists(destDir) {
		return fmt.Errorf("container %q already exists at %s", opts.Name, destDir)
	}
	fmt.Printf("Creating hybrid container: %s\n", opts.Name)
	// Resolve image.
	if opts.Image != "" {
		srcDir, err := b.resolveImagePath(opts.Image)
		if err != nil {
			return fmt.Errorf("image resolution failed: %w", err)
		}
		fmt.Printf(" Image: %s → %s\n", opts.Image, srcDir)
		if err := os.MkdirAll(b.containerBaseDir, 0755); err != nil {
			return fmt.Errorf("failed to create container base dir: %w", err)
		}
		fmt.Printf(" Copying rootfs...\n")
		// cp -a preserves ownership, permissions, and symlinks.
		out, err := runCommand("cp", "-a", srcDir, destDir)
		if err != nil {
			return fmt.Errorf("failed to copy image rootfs: %s", out)
		}
	} else {
		// No image given: start from an empty rootfs directory.
		if err := os.MkdirAll(destDir, 0755); err != nil {
			return fmt.Errorf("failed to create container dir: %w", err)
		}
	}
	// Resolve kernel. Non-fatal: creation proceeds, but --boot may fail later.
	kernelPath, err := b.kernelManager.ResolveKernel("") // default kernel
	if err != nil {
		fmt.Printf(" Warning: no kernel resolved (%v), boot mode may fail\n", err)
	} else {
		fmt.Printf(" Kernel: %s\n", kernelPath)
	}
	// Build isolation config from create options.
	iso := DefaultIsolation(destDir)
	// Apply resource overrides from create options.
	if opts.Memory != "" {
		iso.Resources.MemoryHard = opts.Memory
		fmt.Printf(" Memory: %s\n", opts.Memory)
	}
	if opts.CPU > 0 {
		// Map CPU count to a cpuset range: N CPUs -> cores 0..N-1.
		iso.Resources.CPUSet = fmt.Sprintf("0-%d", opts.CPU-1)
		fmt.Printf(" CPUs: %d\n", opts.CPU)
	}
	// Apply network configuration.
	if opts.Network != "" {
		switch NetworkMode(opts.Network) {
		case NetworkPrivate, NetworkHost, NetworkNone:
			iso.Network.Mode = NetworkMode(opts.Network)
		default:
			// Unrecognized mode: treat as a bridge name and use
			// private networking on that bridge.
			iso.Network.Mode = NetworkPrivate
			iso.Network.Bridge = opts.Network
		}
		fmt.Printf(" Network: %s\n", opts.Network)
	}
	// Add port forwards; protocol defaults to tcp.
	for _, pm := range opts.Ports {
		proto := pm.Protocol
		if proto == "" {
			proto = "tcp"
		}
		iso.Network.PortForwards = append(iso.Network.PortForwards, PortForward{
			HostPort:      pm.HostPort,
			ContainerPort: pm.ContainerPort,
			Protocol:      proto,
		})
	}
	// Add environment variables.
	// NOTE(review): values are discarded — they never reach the unit file.
	for _, env := range opts.Env {
		// These will be passed via --setenv in the unit file.
		_ = env
	}
	// Mount volumes.
	// NOTE(review): the computed bind flag is discarded — volumes are
	// never actually mounted.
	for _, vol := range opts.Volumes {
		bindFlag := ""
		if vol.ReadOnly {
			bindFlag = "--bind-ro="
		} else {
			bindFlag = "--bind="
		}
		_ = bindFlag + vol.HostPath + ":" + vol.ContainerPath
	}
	// Write systemd unit file (best-effort).
	if err := b.writeUnitFile(opts.Name, iso, kernelPath); err != nil {
		fmt.Printf(" Warning: could not write unit file: %v\n", err)
	} else {
		fmt.Printf(" Unit: %s\n", unitFilePath(opts.Name))
	}
	// Write .nspawn config file (best-effort).
	os.MkdirAll(nspawnConfigDir, 0755)
	configPath := filepath.Join(nspawnConfigDir, opts.Name+".nspawn")
	nspawnConfig := iso.NspawnConfigBlock(opts.Name)
	if err := os.WriteFile(configPath, []byte(nspawnConfig), 0644); err != nil {
		fmt.Printf(" Warning: could not write nspawn config: %v\n", err)
	}
	if err := daemonReload(); err != nil {
		fmt.Printf(" Warning: daemon-reload failed: %v\n", err)
	}
	fmt.Printf("\nHybrid container %s created.\n", opts.Name)
	if opts.Start {
		fmt.Printf("Starting hybrid container %s...\n", opts.Name)
		out, err := runCommand("systemctl", "start", unitName(opts.Name))
		if err != nil {
			return fmt.Errorf("failed to start container: %s", out)
		}
		fmt.Printf("Hybrid container %s started.\n", opts.Name)
	} else {
		fmt.Printf("Start with: volt container start %s\n", opts.Name)
	}
	return nil
}
// ── Start ────────────────────────────────────────────────────────────────────
// Start boots an existing hybrid container via its systemd service unit.
// The unit file must already exist (i.e. Create must have run).
func (b *Backend) Start(name string) error {
	unitFile := unitFilePath(name)
	if !fileExists(unitFile) {
		return fmt.Errorf("container %q does not exist (no unit file at %s)", name, unitFile)
	}
	fmt.Printf("Starting hybrid container: %s\n", name)
	if out, err := runCommand("systemctl", "start", unitName(name)); err != nil {
		return fmt.Errorf("failed to start container %s: %s", name, out)
	}
	fmt.Printf("Hybrid container %s started.\n", name)
	return nil
}
// ── Stop ─────────────────────────────────────────────────────────────────────
// Stop shuts a hybrid container down by stopping its systemd unit.
func (b *Backend) Stop(name string) error {
	fmt.Printf("Stopping hybrid container: %s\n", name)
	if out, err := runCommand("systemctl", "stop", unitName(name)); err != nil {
		return fmt.Errorf("failed to stop container %s: %s", name, out)
	}
	fmt.Printf("Hybrid container %s stopped.\n", name)
	return nil
}
// ── Delete ───────────────────────────────────────────────────────────────────
// Delete removes a hybrid container: stops it (only with force), then
// removes its unit file, .nspawn config, and rootfs, and reloads systemd.
// A running container without force is an error. Unit/config removal and
// the daemon reload are best-effort; only a rootfs removal failure aborts.
func (b *Backend) Delete(name string, force bool) error {
	rootfs := b.containerDir(name)
	unitActive, _ := runCommandSilent("systemctl", "is-active", unitName(name))
	if strings.TrimSpace(unitActive) == "active" || strings.TrimSpace(unitActive) == "activating" {
		if !force {
			return fmt.Errorf("container %q is running — stop it first or use --force", name)
		}
		fmt.Printf("Stopping container %s...\n", name)
		// Best-effort stop; deletion proceeds even if this fails.
		runCommand("systemctl", "stop", unitName(name))
	}
	fmt.Printf("Deleting hybrid container: %s\n", name)
	// Remove unit file (disable first so no machines.target link lingers).
	unitPath := unitFilePath(name)
	if fileExists(unitPath) {
		runCommand("systemctl", "disable", unitName(name))
		if err := os.Remove(unitPath); err != nil {
			fmt.Printf(" Warning: could not remove unit file: %v\n", err)
		} else {
			fmt.Printf(" Removed unit: %s\n", unitPath)
		}
	}
	// Remove .nspawn config (best-effort).
	nspawnConfig := filepath.Join(nspawnConfigDir, name+".nspawn")
	if fileExists(nspawnConfig) {
		os.Remove(nspawnConfig)
	}
	// Remove rootfs — the only step whose failure aborts the delete.
	if dirExists(rootfs) {
		if err := os.RemoveAll(rootfs); err != nil {
			return fmt.Errorf("failed to remove rootfs at %s: %w", rootfs, err)
		}
		fmt.Printf(" Removed rootfs: %s\n", rootfs)
	}
	// Best-effort reload so systemd forgets the removed unit.
	daemonReload()
	fmt.Printf("Hybrid container %s deleted.\n", name)
	return nil
}
// ── Exec ─────────────────────────────────────────────────────────────────────
// Exec runs a command inside a running hybrid container by entering all
// of the leader process's namespaces with nsenter. An empty command
// defaults to /bin/sh; bare command names are resolved to absolute paths
// inside the container rootfs.
func (b *Backend) Exec(name string, opts backend.ExecOptions) error {
	// Copy the command so resolving argv[0] never mutates the caller's slice.
	cmdArgs := make([]string, 0, len(opts.Command))
	cmdArgs = append(cmdArgs, opts.Command...)
	if len(cmdArgs) == 0 {
		cmdArgs = []string{"/bin/sh"}
	}
	// Resolve bare command names to absolute paths inside the container.
	cmdArgs[0] = b.resolveContainerCommand(name, cmdArgs[0])
	pid, err := getContainerLeaderPID(name)
	if err != nil {
		return fmt.Errorf("container %q is not running: %w", name, err)
	}
	// Join the mount, UTS, IPC, network, and PID namespaces of the leader.
	nsenterArgs := []string{"-t", pid, "-m", "-u", "-i", "-n", "-p", "--"}
	// Inject environment variables through a single env(1) invocation
	// (rather than one nested "env" per variable).
	if len(opts.Env) > 0 {
		nsenterArgs = append(nsenterArgs, "env")
		nsenterArgs = append(nsenterArgs, opts.Env...)
	}
	nsenterArgs = append(nsenterArgs, cmdArgs...)
	return runCommandInteractive("nsenter", nsenterArgs...)
}
// ── Logs ─────────────────────────────────────────────────────────────────────
// Logs returns journal output for the container's unit. With Follow set
// it streams interactively and returns an empty string; otherwise it
// returns a snapshot (last opts.Tail lines, defaulting to 100).
func (b *Backend) Logs(name string, opts backend.LogOptions) (string, error) {
	args := []string{"-u", unitName(name), "--no-pager"}
	if opts.Follow {
		args = append(args, "-f")
	}
	// Default to the last 100 lines when no tail count is given.
	tail := "100"
	if opts.Tail > 0 {
		tail = fmt.Sprintf("%d", opts.Tail)
	}
	args = append(args, "-n", tail)
	if opts.Follow {
		return "", runCommandInteractive("journalctl", args...)
	}
	return runCommand("journalctl", args...)
}
// ── CopyToContainer ──────────────────────────────────────────────────────────
// CopyToContainer copies a host file or directory into the container's
// rootfs at dst (a container-relative path).
func (b *Backend) CopyToContainer(name string, src string, dst string) error {
	if !fileExists(src) && !dirExists(src) {
		return fmt.Errorf("source not found: %s", src)
	}
	target := filepath.Join(b.containerDir(name), dst)
	// cp -a preserves permissions, ownership, and symlinks.
	if out, err := runCommand("cp", "-a", src, target); err != nil {
		return fmt.Errorf("copy failed: %s", out)
	}
	fmt.Printf("Copied %s → %s:%s\n", src, name, dst)
	return nil
}
// ── CopyFromContainer ────────────────────────────────────────────────────────
// CopyFromContainer copies a file or directory out of the container's
// rootfs (src is container-relative) to dst on the host.
func (b *Backend) CopyFromContainer(name string, src string, dst string) error {
	source := filepath.Join(b.containerDir(name), src)
	if !fileExists(source) && !dirExists(source) {
		return fmt.Errorf("not found in container %s: %s", name, src)
	}
	// cp -a preserves permissions, ownership, and symlinks.
	if out, err := runCommand("cp", "-a", source, dst); err != nil {
		return fmt.Errorf("copy failed: %s", out)
	}
	fmt.Printf("Copied %s:%s → %s\n", name, src, dst)
	return nil
}
// ── List ─────────────────────────────────────────────────────────────────────
// List enumerates hybrid containers: running machines reported by
// machinectl plus stopped rootfs directories found on disk. Only entries
// with a hybrid unit file are included; everything else (e.g. machines
// from other backends) is skipped.
func (b *Backend) List() ([]backend.ContainerInfo, error) {
	var containers []backend.ContainerInfo
	seen := make(map[string]bool)
	// Running containers, per machinectl.
	out, err := runCommandSilent("machinectl", "list", "--no-pager", "--no-legend")
	if err == nil && strings.TrimSpace(out) != "" {
		for _, line := range strings.Split(out, "\n") {
			fields := strings.Fields(strings.TrimSpace(line))
			if len(fields) == 0 {
				continue
			}
			name := fields[0]
			// Only include containers that belong to the hybrid backend.
			if !b.isHybridContainer(name) {
				continue
			}
			seen[name] = true
			info := backend.ContainerInfo{
				Name:   name,
				Status: "running",
				RootFS: b.containerDir(name),
			}
			showOut, showErr := runCommandSilent("machinectl", "show", name,
				"--property=Addresses", "--property=RootDirectory")
			if showErr == nil {
				for _, sl := range strings.Split(showOut, "\n") {
					if strings.HasPrefix(sl, "Addresses=") {
						if addr := strings.TrimPrefix(sl, "Addresses="); addr != "" {
							info.IPAddress = addr
						}
					}
				}
			}
			info.OS = prettyOSName(b.containerDir(name))
			containers = append(containers, info)
		}
	}
	// Scan the filesystem for stopped hybrid containers.
	if entries, err := os.ReadDir(b.containerBaseDir); err == nil {
		for _, entry := range entries {
			name := entry.Name()
			if !entry.IsDir() || seen[name] || !b.isHybridContainer(name) {
				continue
			}
			rootfs := filepath.Join(b.containerBaseDir, name)
			containers = append(containers, backend.ContainerInfo{
				Name:   name,
				Status: "stopped",
				RootFS: rootfs,
				OS:     prettyOSName(rootfs),
			})
		}
	}
	return containers, nil
}

// prettyOSName reads PRETTY_NAME from rootfs/etc/os-release, returning
// "" when the file is missing or has no such field.
func prettyOSName(rootfs string) string {
	data, err := os.ReadFile(filepath.Join(rootfs, "etc", "os-release"))
	if err != nil {
		return ""
	}
	for _, line := range strings.Split(string(data), "\n") {
		if strings.HasPrefix(line, "PRETTY_NAME=") {
			return strings.Trim(strings.TrimPrefix(line, "PRETTY_NAME="), "\"")
		}
	}
	return ""
}
// isHybridContainer returns true if the named container has a hybrid unit file.
// This is how hybrid containers are distinguished from other nspawn machines.
func (b *Backend) isHybridContainer(name string) bool {
	return fileExists(unitFilePath(name))
}
// ── Inspect ──────────────────────────────────────────────────────────────────
// Inspect returns detailed status for a single hybrid container. The
// container need not be running: a missing rootfs yields Status
// "not found", and machine metadata (IP address, leader PID) is only
// filled in when the container is up.
func (b *Backend) Inspect(name string) (*backend.ContainerInfo, error) {
	rootfs := b.containerDir(name)
	info := &backend.ContainerInfo{
		Name:   name,
		RootFS: rootfs,
		Status: "stopped",
	}
	if !dirExists(rootfs) {
		info.Status = "not found"
	}
	// Check the unit state; any non-empty state other than "active"
	// (failed, activating, ...) is reported verbatim.
	unitActive, _ := runCommandSilent("systemctl", "is-active", unitName(name))
	activeState := strings.TrimSpace(unitActive)
	if activeState == "active" {
		info.Status = "running"
	} else if activeState != "" {
		info.Status = activeState
	}
	// Get machinectl info if running.
	if isContainerRunning(name) {
		info.Status = "running"
		showOut, err := runCommandSilent("machinectl", "show", name)
		if err == nil {
			for _, line := range strings.Split(showOut, "\n") {
				line = strings.TrimSpace(line)
				if strings.HasPrefix(line, "Addresses=") {
					info.IPAddress = strings.TrimPrefix(line, "Addresses=")
				}
				if strings.HasPrefix(line, "Leader=") {
					pidStr := strings.TrimPrefix(line, "Leader=")
					// Parse failure leaves PID at 0; error deliberately ignored.
					fmt.Sscanf(pidStr, "%d", &info.PID)
				}
			}
		}
	}
	// OS info from rootfs (PRETTY_NAME in etc/os-release, when present).
	if osRel, err := os.ReadFile(filepath.Join(rootfs, "etc", "os-release")); err == nil {
		for _, line := range strings.Split(string(osRel), "\n") {
			if strings.HasPrefix(line, "PRETTY_NAME=") {
				info.OS = strings.Trim(strings.TrimPrefix(line, "PRETTY_NAME="), "\"")
				break
			}
		}
	}
	return info, nil
}
// ── Exported helpers for CLI commands ────────────────────────────────────────
// Thin exported wrappers over the unexported helpers above, so CLI
// command packages can reuse them without duplicating logic.

// IsContainerRunning checks if a hybrid container is currently running.
func (b *Backend) IsContainerRunning(name string) bool {
	return isContainerRunning(name)
}

// GetContainerLeaderPID returns the leader PID of a running hybrid container.
func (b *Backend) GetContainerLeaderPID(name string) (string, error) {
	return getContainerLeaderPID(name)
}

// ContainerDir returns the rootfs dir for a container.
func (b *Backend) ContainerDir(name string) string {
	return b.containerDir(name)
}

// KernelManager returns the kernel manager instance.
func (b *Backend) KernelManager() *kernel.Manager {
	return b.kernelManager
}

// UnitName returns the systemd unit name for a hybrid container.
func UnitName(name string) string {
	return unitName(name)
}

// UnitFilePath returns the full path to a hybrid container's service unit file.
func UnitFilePath(name string) string {
	return unitFilePath(name)
}

// DaemonReload runs systemctl daemon-reload.
func DaemonReload() error {
	return daemonReload()
}

// ResolveContainerCommand resolves a bare command to an absolute path in the container.
func (b *Backend) ResolveContainerCommand(name, cmd string) string {
	return b.resolveContainerCommand(name, cmd)
}

View File

@@ -0,0 +1,366 @@
/*
Hybrid Isolation - Security and resource isolation for Volt hybrid-native containers.
Configures:
- Landlock LSM policy generation (NEVER AppArmor)
- Seccomp profile selection (strict/default/unconfined)
- Cgroups v2 resource limits (memory, CPU, I/O, PIDs)
- Network namespace setup (private network stack)
Copyright (c) Armored Gates LLC. All rights reserved.
*/
package hybrid
import (
"fmt"
"path/filepath"
"strings"
)
// ── Seccomp Profiles ─────────────────────────────────────────────────────────
// SeccompProfile selects the syscall filtering level for a container.
type SeccompProfile string

const (
	// SeccompStrict blocks dangerous syscalls and limits the container to a
	// safe subset. Suitable for untrusted workloads.
	SeccompStrict SeccompProfile = "strict"
	// SeccompDefault applies the systemd-nspawn default seccomp filter which
	// blocks mount, reboot, kexec, and other admin syscalls.
	SeccompDefault SeccompProfile = "default"
	// SeccompUnconfined disables seccomp filtering entirely. Use only for
	// trusted workloads that need full syscall access (e.g. nested containers).
	SeccompUnconfined SeccompProfile = "unconfined"
)

// ── Landlock Policy ──────────────────────────────────────────────────────────

// LandlockAccess defines the bitfield of allowed filesystem operations.
// These mirror the LANDLOCK_ACCESS_FS_* constants from the kernel ABI,
// so the bit positions must not be changed.
type LandlockAccess uint64

const (
	LandlockAccessFSExecute    LandlockAccess = 1 << 0  // execute a file
	LandlockAccessFSWriteFile  LandlockAccess = 1 << 1  // open a file for writing
	LandlockAccessFSReadFile   LandlockAccess = 1 << 2  // open a file for reading
	LandlockAccessFSReadDir    LandlockAccess = 1 << 3  // list a directory
	LandlockAccessFSRemoveDir  LandlockAccess = 1 << 4  // remove an empty directory
	LandlockAccessFSRemoveFile LandlockAccess = 1 << 5  // unlink a file
	LandlockAccessFSMakeChar   LandlockAccess = 1 << 6  // create a character device
	LandlockAccessFSMakeDir    LandlockAccess = 1 << 7  // create a directory
	LandlockAccessFSMakeReg    LandlockAccess = 1 << 8  // create a regular file
	LandlockAccessFSMakeSock   LandlockAccess = 1 << 9  // create a unix socket
	LandlockAccessFSMakeFifo   LandlockAccess = 1 << 10 // create a FIFO
	LandlockAccessFSMakeBlock  LandlockAccess = 1 << 11 // create a block device
	LandlockAccessFSMakeSym    LandlockAccess = 1 << 12 // create a symlink
	LandlockAccessFSRefer      LandlockAccess = 1 << 13 // link/rename across directories
	LandlockAccessFSTruncate   LandlockAccess = 1 << 14 // truncate a file

	// Convenience combinations.
	LandlockReadOnly  = LandlockAccessFSReadFile | LandlockAccessFSReadDir
	LandlockReadWrite = LandlockReadOnly | LandlockAccessFSWriteFile |
		LandlockAccessFSMakeReg | LandlockAccessFSMakeDir |
		LandlockAccessFSRemoveFile | LandlockAccessFSRemoveDir |
		LandlockAccessFSTruncate
	LandlockReadExec = LandlockReadOnly | LandlockAccessFSExecute
)

// LandlockRule maps a filesystem path to the permitted access mask.
type LandlockRule struct {
	Path   string         // absolute path (typically inside a container rootfs)
	Access LandlockAccess // bitmask of allowed operations
}

// LandlockPolicy is an ordered set of Landlock rules for a container.
type LandlockPolicy struct {
	Rules []LandlockRule
}
// ServerPolicy returns a Landlock policy for server/service workloads:
// execution from the system directories, read-only /etc, and read-write
// access to the state and scratch paths (/app, /tmp, /var, /run).
func ServerPolicy(rootfs string) *LandlockPolicy {
	const execRO = LandlockReadOnly | LandlockAccessFSExecute
	at := func(dir string) string { return filepath.Join(rootfs, dir) }
	rules := []LandlockRule{
		{Path: at("usr"), Access: LandlockReadExec},
		{Path: at("lib"), Access: execRO},
		{Path: at("lib64"), Access: execRO},
		{Path: at("bin"), Access: LandlockReadExec},
		{Path: at("sbin"), Access: LandlockReadExec},
		{Path: at("etc"), Access: LandlockReadOnly},
		{Path: at("app"), Access: LandlockReadWrite},
		{Path: at("tmp"), Access: LandlockReadWrite},
		{Path: at("var"), Access: LandlockReadWrite},
		{Path: at("run"), Access: LandlockReadWrite},
	}
	return &LandlockPolicy{Rules: rules}
}
// DesktopPolicy returns a Landlock policy for desktop/interactive workloads.
// More permissive than ServerPolicy: writable /etc and /home (with execute
// in /home), plus executable /opt.
func DesktopPolicy(rootfs string) *LandlockPolicy {
	const execRO = LandlockReadOnly | LandlockAccessFSExecute
	at := func(dir string) string { return filepath.Join(rootfs, dir) }
	rules := []LandlockRule{
		{Path: at("usr"), Access: LandlockReadExec},
		{Path: at("lib"), Access: execRO},
		{Path: at("lib64"), Access: execRO},
		{Path: at("bin"), Access: LandlockReadExec},
		{Path: at("sbin"), Access: LandlockReadExec},
		{Path: at("etc"), Access: LandlockReadWrite},
		{Path: at("home"), Access: LandlockReadWrite | LandlockAccessFSExecute},
		{Path: at("tmp"), Access: LandlockReadWrite},
		{Path: at("var"), Access: LandlockReadWrite},
		{Path: at("run"), Access: LandlockReadWrite},
		{Path: at("opt"), Access: LandlockReadExec},
	}
	return &LandlockPolicy{Rules: rules}
}
// ── Cgroups v2 Resource Limits ───────────────────────────────────────────────
// ResourceLimits configures cgroups v2 resource constraints for a container.
// Zero/empty fields mean "no limit" and are omitted from generated
// properties (see SystemdProperties).
type ResourceLimits struct {
	// Memory limits (e.g. "512M", "2G"). Empty means unlimited.
	MemoryHard string // memory.max — hard limit, OOM kill above this
	MemorySoft string // memory.high — throttle above this (soft pressure)
	// CPU limits.
	CPUWeight int    // cpu.weight (1-10000, default 100). Proportional share.
	CPUSet    string // cpuset.cpus (e.g. "0-3", "0,2"). Pin to specific cores.
	// I/O limits.
	IOWeight int // io.weight (1-10000, default 100). Proportional share.
	// PID limit.
	PIDsMax int // pids.max — maximum number of processes. 0 means unlimited.
}
// DefaultResourceLimits returns conservative defaults suitable for most
// workloads: 2G hard / 1G soft memory, normal CPU and I/O weight, no
// core pinning, and a 4096-process ceiling.
func DefaultResourceLimits() *ResourceLimits {
	limits := &ResourceLimits{}
	limits.MemoryHard = "2G"
	limits.MemorySoft = "1G"
	limits.CPUWeight = 100
	limits.IOWeight = 100
	limits.PIDsMax = 4096
	// CPUSet is left empty: no pinning by default.
	return limits
}
// SystemdProperties converts ResourceLimits into systemd unit properties
// suitable for passing to systemd-run or systemd-nspawn via --property=.
// Zero or empty fields are skipped; Delegate=yes is always emitted so
// nested cgroup management works inside the container.
func (r *ResourceLimits) SystemdProperties() []string {
	props := []string{"Delegate=yes"}
	if r.MemoryHard != "" {
		props = append(props, "MemoryMax="+r.MemoryHard)
	}
	if r.MemorySoft != "" {
		props = append(props, "MemoryHigh="+r.MemorySoft)
	}
	if r.CPUWeight > 0 {
		props = append(props, fmt.Sprintf("CPUWeight=%d", r.CPUWeight))
	}
	if r.CPUSet != "" {
		props = append(props, "AllowedCPUs="+r.CPUSet)
	}
	if r.IOWeight > 0 {
		props = append(props, fmt.Sprintf("IOWeight=%d", r.IOWeight))
	}
	if r.PIDsMax > 0 {
		props = append(props, fmt.Sprintf("TasksMax=%d", r.PIDsMax))
	}
	return props
}
// ── Network Isolation ────────────────────────────────────────────────────────

// NetworkMode selects the container network configuration.
type NetworkMode string

const (
	// NetworkPrivate creates a fully isolated network namespace with a veth
	// pair connected to the host bridge (voltbr0). The container gets its own
	// IP stack, routing table, and firewall rules.
	NetworkPrivate NetworkMode = "private"
	// NetworkHost shares the host network namespace. The container sees all
	// host interfaces and ports. Use only for trusted system services.
	NetworkHost NetworkMode = "host"
	// NetworkNone creates an isolated network namespace with no external
	// connectivity. Loopback only.
	NetworkNone NetworkMode = "none"
)

// NetworkConfig holds the network isolation settings for a container.
type NetworkConfig struct {
	Mode   NetworkMode
	Bridge string // bridge name for private mode (default: "voltbr0")
	// PortForwards maps host ports to container ports when Mode is NetworkPrivate.
	PortForwards []PortForward
	// DNS servers to inject into the container's resolv.conf.
	DNS []string
}

// PortForward maps a single host port to a container port.
type PortForward struct {
	HostPort      int
	ContainerPort int
	Protocol      string // "tcp" or "udp"; empty defaults to "tcp" (see NspawnNetworkArgs)
}
// DefaultNetworkConfig returns a private-network configuration with the
// standard Volt bridge and Cloudflare public DNS resolvers.
func DefaultNetworkConfig() *NetworkConfig {
	cfg := &NetworkConfig{
		Mode:   NetworkPrivate,
		Bridge: "voltbr0",
	}
	cfg.DNS = []string{"1.1.1.1", "1.0.0.1"}
	return cfg
}
// NspawnNetworkArgs returns the systemd-nspawn arguments for this network
// configuration. Host mode yields no flags (nspawn shares the host namespace
// by default); unknown modes fall back to the standard bridge.
func (n *NetworkConfig) NspawnNetworkArgs() []string {
	if n.Mode == NetworkHost {
		// No network flags at all — host namespace is nspawn's default.
		return nil
	}
	if n.Mode == NetworkNone {
		return []string{"--private-network"}
	}
	if n.Mode != NetworkPrivate {
		// Unrecognized mode: use the default bridge.
		return []string{"--network-bridge=voltbr0"}
	}
	out := []string{"--network-bridge=" + n.Bridge}
	for _, fwd := range n.PortForwards {
		protocol := fwd.Protocol
		if protocol == "" {
			protocol = "tcp"
		}
		out = append(out, fmt.Sprintf("--port=%s:%d:%d", protocol, fwd.HostPort, fwd.ContainerPort))
	}
	return out
}
// ── Isolation Profile ────────────────────────────────────────────────────────

// IsolationConfig combines all isolation settings for a hybrid container:
// filesystem access (Landlock), syscall filtering (seccomp), cgroups v2
// resource limits, network mode, user namespaces, and rootfs mutability.
type IsolationConfig struct {
	Landlock  *LandlockPolicy
	Seccomp   SeccompProfile
	Resources *ResourceLimits
	Network   *NetworkConfig
	// PrivateUsers enables user namespace isolation (--private-users).
	PrivateUsers bool
	// ReadOnlyFS mounts the rootfs as read-only (--read-only).
	ReadOnlyFS bool
}
// DefaultIsolation returns a security-first isolation configuration suitable
// for production workloads: server Landlock policy rooted at rootfs, nspawn's
// default seccomp filter, conservative resource limits, a private bridged
// network, and user-namespace isolation. The rootfs remains writable.
func DefaultIsolation(rootfs string) *IsolationConfig {
	return &IsolationConfig{
		Landlock:     ServerPolicy(rootfs),
		Seccomp:      SeccompDefault,
		Resources:    DefaultResourceLimits(),
		Network:      DefaultNetworkConfig(),
		PrivateUsers: true,
		ReadOnlyFS:   false,
	}
}
// NspawnArgs returns the complete set of systemd-nspawn arguments for this
// isolation configuration. These are appended to the base nspawn command.
func (iso *IsolationConfig) NspawnArgs() []string {
	var flags []string
	// Resource limits and cgroup delegation are passed via --property.
	for _, p := range iso.Resources.SystemdProperties() {
		flags = append(flags, "--property="+p)
	}
	// Seccomp profile.
	if iso.Seccomp == SeccompStrict {
		// nspawn applies its default filter automatically; strict mode
		// additionally drops all capabilities.
		flags = append(flags, "--drop-capability=all")
	} else if iso.Seccomp == SeccompUnconfined {
		// Disable the built-in seccomp filter for trusted workloads.
		flags = append(flags, "--system-call-filter=~")
	}
	// SeccompDefault: nspawn's built-in filter, no extra flags.

	// Network isolation.
	flags = append(flags, iso.Network.NspawnNetworkArgs()...)
	// User namespace isolation.
	if iso.PrivateUsers {
		flags = append(flags, "--private-users=pick")
	}
	// Read-only rootfs.
	if iso.ReadOnlyFS {
		flags = append(flags, "--read-only")
	}
	return flags
}
// NspawnConfigBlock returns the .nspawn file content sections for this
// isolation configuration. Written to /etc/systemd/nspawn/<name>.nspawn.
// Emits [Exec], [Network] (empty for host mode), and [ResourceControl].
func (iso *IsolationConfig) NspawnConfigBlock(name string) string {
	var sb strings.Builder

	// [Exec] section
	sb.WriteString("[Exec]\n")
	sb.WriteString("Boot=yes\n")
	if iso.PrivateUsers {
		sb.WriteString("PrivateUsers=pick\n")
	} else {
		sb.WriteString("PrivateUsers=no\n")
	}
	// Environment markers so the workload can identify its runtime.
	fmt.Fprintf(&sb, "Environment=VOLT_CONTAINER=%s\n", name)
	sb.WriteString("Environment=VOLT_RUNTIME=hybrid\n")
	sb.WriteString("\n")

	// [Network] section — host mode needs no entries.
	sb.WriteString("[Network]\n")
	if iso.Network.Mode == NetworkPrivate {
		fmt.Fprintf(&sb, "Bridge=%s\n", iso.Network.Bridge)
	} else if iso.Network.Mode == NetworkNone {
		sb.WriteString("Private=yes\n")
	}
	sb.WriteString("\n")

	// [ResourceControl] section (selected limits for the .nspawn file).
	sb.WriteString("[ResourceControl]\n")
	if iso.Resources.MemoryHard != "" {
		fmt.Fprintf(&sb, "MemoryMax=%s\n", iso.Resources.MemoryHard)
	}
	if iso.Resources.PIDsMax > 0 {
		fmt.Fprintf(&sb, "TasksMax=%d\n", iso.Resources.PIDsMax)
	}
	return sb.String()
}

999
pkg/backend/proot/proot.go Normal file
View File

@@ -0,0 +1,999 @@
/*
Proot Backend — Container runtime for Android and non-systemd Linux platforms.
Uses proot (ptrace-based root emulation) for filesystem isolation, modeled
after the ACE (Android Container Engine) runtime. No root required, no
cgroups, no namespaces — runs containers in user-space via syscall
interception.
Key design decisions from ACE:
- proot -r <rootfs> -0 -w / -k 5.15.0 -b /dev -b /proc -b /sys
- Entrypoint auto-detection: /init → nginx → docker-entrypoint.sh → /bin/sh
- Container state persisted as JSON files
- Logs captured via redirected stdout/stderr
- Port remapping via sed-based config modification (no iptables)
*/
package proot
import (
"bufio"
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"time"
"github.com/armoredgate/volt/pkg/backend"
"gopkg.in/yaml.v3"
)
// containerState represents the runtime state persisted to state.json.
// The recorded PID is reconciled against the live process table on reads
// (see Inspect/Start), since the proot child can exit without notice.
type containerState struct {
	Name      string    `json:"name"`
	Status    string    `json:"status"` // created, running, stopped
	PID       int       `json:"pid"`
	CreatedAt time.Time `json:"created_at"`
	StartedAt time.Time `json:"started_at,omitempty"`
	StoppedAt time.Time `json:"stopped_at,omitempty"`
}

// containerConfig represents the container configuration persisted to config.yaml.
// Memory and CPU are recorded but not enforced — proot has no cgroups (see
// the package comment).
type containerConfig struct {
	Name    string                `yaml:"name"`
	Image   string                `yaml:"image,omitempty"`
	RootFS  string                `yaml:"rootfs"`
	Memory  string                `yaml:"memory,omitempty"`
	CPU     int                   `yaml:"cpu,omitempty"`
	Env     []string              `yaml:"env,omitempty"`
	Ports   []backend.PortMapping `yaml:"ports,omitempty"`
	Volumes []backend.VolumeMount `yaml:"volumes,omitempty"`
	Network string                `yaml:"network,omitempty"`
}
func init() {
	// Self-register with the backend registry under the name "proot".
	backend.Register("proot", func() backend.ContainerBackend { return New() })
}

// Backend implements backend.ContainerBackend using proot.
type Backend struct {
	dataDir   string // root of all backend state (containers/, images/, tmp/)
	prootPath string // resolved proot binary path; "" until found by Init/findProot
}

// New creates a new proot backend instance. Init must be called before use.
func New() *Backend {
	return &Backend{}
}

// ──────────────────────────────────────────────────────────────────────────────
// Interface: Identity & Availability
// ──────────────────────────────────────────────────────────────────────────────

// Name returns the backend identifier ("proot").
func (b *Backend) Name() string { return "proot" }

// Available returns true if a usable proot binary can be found.
func (b *Backend) Available() bool {
	return b.findProot() != ""
}
// findProot locates the proot binary: the cached path (if still present),
// then PATH, then common Android install locations (ACE pattern).
// Returns "" when no binary is found.
func (b *Backend) findProot() string {
	// Cached and still on disk?
	if b.prootPath != "" {
		if _, statErr := os.Stat(b.prootPath); statErr == nil {
			return b.prootPath
		}
	}
	// Standard PATH lookup.
	if found, lookErr := exec.LookPath("proot"); lookErr == nil {
		return found
	}
	// Android-specific locations, plus $HOME/proot (app native lib dirs).
	candidates := []string{
		"/data/local/tmp/proot",
		"/data/data/com.termux/files/usr/bin/proot",
	}
	if home := os.Getenv("HOME"); home != "" {
		candidates = append(candidates, filepath.Join(home, "proot"))
	}
	for _, candidate := range candidates {
		if info, statErr := os.Stat(candidate); statErr == nil && !info.IsDir() {
			return candidate
		}
	}
	return ""
}
// ──────────────────────────────────────────────────────────────────────────────
// Interface: Init
// ──────────────────────────────────────────────────────────────────────────────
// Init creates the backend directory structure under dataDir (containers/,
// images/, tmp/) and resolves the proot binary path. The resolved path may
// be empty when proot is not installed — Start reports that at launch time.
func (b *Backend) Init(dataDir string) error {
	b.dataDir = dataDir
	b.prootPath = b.findProot()
	for _, sub := range []string{"containers", "images", "tmp"} {
		dir := filepath.Join(dataDir, sub)
		if err := os.MkdirAll(dir, 0755); err != nil {
			return fmt.Errorf("proot init: failed to create %s: %w", dir, err)
		}
	}
	// proot requires a world-writable scratch directory (ACE pattern).
	if err := os.Chmod(filepath.Join(dataDir, "tmp"), 0777); err != nil {
		return fmt.Errorf("proot init: failed to chmod tmp: %w", err)
	}
	return nil
}
// ──────────────────────────────────────────────────────────────────────────────
// Interface: Create
// ──────────────────────────────────────────────────────────────────────────────
// Create provisions a container directory under dataDir/containers/<name>,
// populates its rootfs, and persists config.yaml and state.json. When
// opts.Start is set the container is started immediately.
//
// Rootfs resolution order:
//  1. opts.RootFS — copy the given directory verbatim
//  2. opts.Image  — cached extracted image in the images dir, else
//     debootstrap for Debian/Ubuntu-style names, else a minimal directory
//     skeleton for manual population
//
// On any persistence failure the partially created directory is removed.
func (b *Backend) Create(opts backend.CreateOptions) error {
	cDir := b.containerDir(opts.Name)
	// Check for duplicates
	if _, err := os.Stat(cDir); err == nil {
		return fmt.Errorf("container %q already exists", opts.Name)
	}
	// Create directory structure
	subdirs := []string{
		filepath.Join(cDir, "rootfs"),
		filepath.Join(cDir, "logs"),
	}
	for _, d := range subdirs {
		if err := os.MkdirAll(d, 0755); err != nil {
			return fmt.Errorf("create: mkdir %s: %w", d, err)
		}
	}
	rootfsDir := filepath.Join(cDir, "rootfs")
	// Populate rootfs
	if opts.RootFS != "" {
		// Use provided rootfs directory
		srcInfo, err := os.Stat(opts.RootFS)
		if err != nil {
			return fmt.Errorf("create: rootfs path %q not found: %w", opts.RootFS, err)
		}
		if !srcInfo.IsDir() {
			return fmt.Errorf("create: rootfs path %q is not a directory", opts.RootFS)
		}
		// Copy the rootfs contents
		if err := copyDir(opts.RootFS, rootfsDir); err != nil {
			return fmt.Errorf("create: copy rootfs: %w", err)
		}
	} else if opts.Image != "" {
		// Check if image already exists as an extracted rootfs in images dir
		imagePath := b.resolveImage(opts.Image)
		if imagePath != "" {
			if err := copyDir(imagePath, rootfsDir); err != nil {
				return fmt.Errorf("create: copy image rootfs: %w", err)
			}
		} else if isDebootstrapImage(opts.Image) {
			// Try debootstrap for base Debian/Ubuntu images
			if err := b.debootstrap(opts.Image, rootfsDir); err != nil {
				return fmt.Errorf("create: debootstrap failed: %w", err)
			}
		} else {
			// Create minimal rootfs structure for manual population.
			// BUG FIX: MkdirAll errors were previously discarded here.
			for _, d := range []string{"bin", "etc", "home", "root", "tmp", "usr/bin", "usr/sbin", "var/log"} {
				if err := os.MkdirAll(filepath.Join(rootfsDir, d), 0755); err != nil {
					os.RemoveAll(cDir)
					return fmt.Errorf("create: mkdir rootfs %s: %w", d, err)
				}
			}
		}
	}
	// Write config.yaml
	cfg := containerConfig{
		Name:    opts.Name,
		Image:   opts.Image,
		RootFS:  rootfsDir,
		Memory:  opts.Memory,
		CPU:     opts.CPU,
		Env:     opts.Env,
		Ports:   opts.Ports,
		Volumes: opts.Volumes,
		Network: opts.Network,
	}
	if err := b.writeConfig(opts.Name, &cfg); err != nil {
		// Clean up on failure
		os.RemoveAll(cDir)
		return fmt.Errorf("create: write config: %w", err)
	}
	// Write initial state.json
	state := containerState{
		Name:      opts.Name,
		Status:    "created",
		PID:       0,
		CreatedAt: time.Now(),
	}
	if err := b.writeState(opts.Name, &state); err != nil {
		os.RemoveAll(cDir)
		return fmt.Errorf("create: write state: %w", err)
	}
	// Auto-start if requested
	if opts.Start {
		return b.Start(opts.Name)
	}
	return nil
}
// ──────────────────────────────────────────────────────────────────────────────
// Interface: Start
// ──────────────────────────────────────────────────────────────────────────────
// Start launches the container's auto-detected entrypoint under proot and
// records the child PID in state.json. It is valid only from the "created"
// or "stopped" states; a stale "running" entry whose PID is dead is treated
// as "stopped" and start proceeds. The child runs in its own session with
// stdout/stderr appended to logs/current.log, and a reaper goroutine flips
// the state back to "stopped" when it exits.
func (b *Backend) Start(name string) error {
	state, err := b.readState(name)
	if err != nil {
		return fmt.Errorf("start: %w", err)
	}
	if state.Status == "running" {
		// Check if the PID is actually alive
		if state.PID > 0 && processAlive(state.PID) {
			return fmt.Errorf("container %q is already running (pid %d)", name, state.PID)
		}
		// Stale state — process died, update and continue
		state.Status = "stopped"
	}
	if state.Status != "created" && state.Status != "stopped" {
		return fmt.Errorf("container %q is in state %q, cannot start", name, state.Status)
	}
	cfg, err := b.readConfig(name)
	if err != nil {
		return fmt.Errorf("start: %w", err)
	}
	if b.prootPath == "" {
		return fmt.Errorf("start: proot binary not found — install proot or set PATH")
	}
	rootfsDir := filepath.Join(b.containerDir(name), "rootfs")
	// Detect entrypoint (ACE priority order)
	entrypoint, entrypointArgs := b.detectEntrypoint(rootfsDir, cfg)
	// Build proot command arguments
	args := []string{
		"-r", rootfsDir,
		"-0",           // Fake root (uid 0 emulation)
		"-w", "/",      // Working directory inside container
		"-k", "5.15.0", // Fake kernel version for compatibility
		"-b", "/dev",   // Bind /dev
		"-b", "/proc",  // Bind /proc
		"-b", "/sys",   // Bind /sys
		"-b", "/dev/urandom:/dev/random", // Fix random device
	}
	// Add volume mounts as proot bind mounts
	for _, vol := range cfg.Volumes {
		bindArg := vol.HostPath + ":" + vol.ContainerPath
		args = append(args, "-b", bindArg)
	}
	// Add entrypoint
	args = append(args, entrypoint)
	args = append(args, entrypointArgs...)
	cmd := exec.Command(b.prootPath, args...)
	// Set container environment variables (ACE pattern).
	// PROOT_NO_SECCOMP disables proot's seccomp integration
	// (presumably for Android kernel compatibility — confirm).
	env := []string{
		"HOME=/root",
		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
		"TERM=xterm",
		"CONTAINER_NAME=" + name,
		"PROOT_NO_SECCOMP=1",
		"PROOT_TMP_DIR=" + filepath.Join(b.dataDir, "tmp"),
		"TMPDIR=" + filepath.Join(b.dataDir, "tmp"),
	}
	// Add user-specified environment variables
	env = append(env, cfg.Env...)
	// Add port mapping info as environment variables — there is no iptables
	// here, so the workload reads PORT_<containerPort> to learn its host port.
	for _, p := range cfg.Ports {
		env = append(env,
			fmt.Sprintf("PORT_%d=%d", p.ContainerPort, p.HostPort),
		)
	}
	cmd.Env = env
	// Create a new session so the child doesn't get signals from our terminal
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Setsid: true,
	}
	// Redirect stdout/stderr to log file
	logDir := filepath.Join(b.containerDir(name), "logs")
	os.MkdirAll(logDir, 0755)
	logPath := filepath.Join(logDir, "current.log")
	logFile, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return fmt.Errorf("start: open log file: %w", err)
	}
	// Write startup header to log
	fmt.Fprintf(logFile, "[volt] Container %s starting at %s\n", name, time.Now().Format(time.RFC3339))
	fmt.Fprintf(logFile, "[volt] proot=%s\n", b.prootPath)
	fmt.Fprintf(logFile, "[volt] rootfs=%s\n", rootfsDir)
	fmt.Fprintf(logFile, "[volt] entrypoint=%s %s\n", entrypoint, strings.Join(entrypointArgs, " "))
	cmd.Stdout = logFile
	cmd.Stderr = logFile
	// Start the process
	if err := cmd.Start(); err != nil {
		logFile.Close()
		return fmt.Errorf("start: exec proot: %w", err)
	}
	// Close the log file handle in the parent — the child has its own fd
	logFile.Close()
	// Update state
	state.Status = "running"
	state.PID = cmd.Process.Pid
	state.StartedAt = time.Now()
	if err := b.writeState(name, state); err != nil {
		// Kill the process if we can't persist state
		cmd.Process.Signal(syscall.SIGKILL)
		return fmt.Errorf("start: write state: %w", err)
	}
	// Reap the child in a goroutine to avoid zombies
	go func() {
		cmd.Wait()
		// Process exited — update state to stopped (only if nothing else
		// already transitioned it, e.g. a concurrent Stop)
		if s, err := b.readState(name); err == nil && s.Status == "running" {
			s.Status = "stopped"
			s.PID = 0
			s.StoppedAt = time.Now()
			b.writeState(name, s)
		}
	}()
	return nil
}
// detectEntrypoint determines what to run inside the container.
// Follows ACE priority: /init → nginx → docker-entrypoint.sh → python3 →
// /bin/sh. Returns the program path (as seen inside the container) and its
// argument list.
func (b *Backend) detectEntrypoint(rootfsDir string, cfg *containerConfig) (string, []string) {
	// Check for common entrypoints in the rootfs
	candidates := []struct {
		path string
		args []string
	}{
		{"/init", nil},
		{"/usr/sbin/nginx", []string{"-g", "daemon off; master_process off;"}},
		{"/docker-entrypoint.sh", nil},
		{"/usr/local/bin/python3", nil},
		{"/usr/bin/python3", nil},
	}
	for _, c := range candidates {
		fullPath := filepath.Join(rootfsDir, c.path)
		if info, err := os.Stat(fullPath); err == nil && !info.IsDir() {
			// For nginx with port mappings, rewrite the listen port via shell
			// wrapper — sed-based config modification stands in for real port
			// forwarding, since proot has no iptables.
			if c.path == "/usr/sbin/nginx" && len(cfg.Ports) > 0 {
				port := cfg.Ports[0].HostPort
				shellCmd := fmt.Sprintf(
					"sed -i 's/listen[[:space:]]*80;/listen %d;/g' /etc/nginx/conf.d/default.conf 2>/dev/null; "+
						"sed -i 's/listen[[:space:]]*80;/listen %d;/g' /etc/nginx/nginx.conf 2>/dev/null; "+
						"exec /usr/sbin/nginx -g 'daemon off; master_process off;'",
					port, port,
				)
				return "/bin/sh", []string{"-c", shellCmd}
			}
			return c.path, c.args
		}
	}
	// Fallback: /bin/sh
	return "/bin/sh", nil
}
// ──────────────────────────────────────────────────────────────────────────────
// Interface: Stop
// ──────────────────────────────────────────────────────────────────────────────
// Stop terminates a running container: SIGTERM first (ACE pattern), then
// SIGKILL after a five-second grace period. The state file is always left
// marked "stopped", even when the process was already gone.
func (b *Backend) Stop(name string) error {
	st, err := b.readState(name)
	if err != nil {
		return fmt.Errorf("stop: %w", err)
	}
	if st.Status != "running" || st.PID <= 0 {
		// Nothing to kill — but normalize a stale "running" status.
		if st.Status == "running" {
			st.Status = "stopped"
			st.PID = 0
			b.writeState(name, st)
		}
		return nil
	}
	proc, err := os.FindProcess(st.PID)
	if err != nil {
		// Process handle unavailable — just record the stop.
		st.Status = "stopped"
		st.PID = 0
		st.StoppedAt = time.Now()
		return b.writeState(name, st)
	}
	// Graceful shutdown request first.
	proc.Signal(syscall.SIGTERM)
	// Poll up to 5 seconds (50 × 100ms) for the process to exit.
	for i := 0; i < 50 && processAlive(st.PID); i++ {
		time.Sleep(100 * time.Millisecond)
	}
	// Escalate to SIGKILL if it is still alive, then give it a moment.
	if processAlive(st.PID) {
		proc.Signal(syscall.SIGKILL)
		time.Sleep(200 * time.Millisecond)
	}
	st.Status = "stopped"
	st.PID = 0
	st.StoppedAt = time.Now()
	return b.writeState(name, st)
}
// ──────────────────────────────────────────────────────────────────────────────
// Interface: Delete
// ──────────────────────────────────────────────────────────────────────────────
// Delete removes a container and all of its on-disk state. A running
// container is only removed when force is true (it is stopped/killed first).
// With unreadable state, force reaps the directory anyway.
func (b *Backend) Delete(name string, force bool) error {
	cDir := b.containerDir(name)
	st, err := b.readState(name)
	if err != nil {
		// State can't be read. If the directory is also missing, the
		// container doesn't exist; otherwise only --force may reap it.
		if _, statErr := os.Stat(cDir); statErr != nil {
			return fmt.Errorf("container %q not found", name)
		}
		if !force {
			return fmt.Errorf("delete: cannot read state for %q (use --force): %w", name, err)
		}
		return os.RemoveAll(cDir)
	}
	if st.Status == "running" && st.PID > 0 && processAlive(st.PID) {
		if !force {
			return fmt.Errorf("container %q is running — stop it first or use --force", name)
		}
		// Force: attempt a clean stop, fall back to a direct SIGKILL.
		if stopErr := b.Stop(name); stopErr != nil {
			if proc, findErr := os.FindProcess(st.PID); findErr == nil {
				proc.Signal(syscall.SIGKILL)
				time.Sleep(200 * time.Millisecond)
			}
		}
	}
	// Remove the entire container directory.
	if err := os.RemoveAll(cDir); err != nil {
		return fmt.Errorf("delete: remove %s: %w", cDir, err)
	}
	return nil
}
// ──────────────────────────────────────────────────────────────────────────────
// Interface: Exec
// ──────────────────────────────────────────────────────────────────────────────
// Exec runs a command inside a running container by spawning a second proot
// process over the same rootfs, with stdin/stdout/stderr attached to the
// caller's terminal. An empty command defaults to /bin/sh.
//
// NOTE(review): opts.TTY is currently ignored — the child simply inherits
// our terminal; confirm whether explicit pty allocation is needed.
func (b *Backend) Exec(name string, opts backend.ExecOptions) error {
	state, err := b.readState(name)
	if err != nil {
		return fmt.Errorf("exec: %w", err)
	}
	if state.Status != "running" || state.PID <= 0 || !processAlive(state.PID) {
		return fmt.Errorf("container %q is not running", name)
	}
	// BUG FIX: guard the proot path like Start does — previously an empty
	// b.prootPath produced exec.Command("") and a confusing exec error.
	if b.prootPath == "" {
		return fmt.Errorf("exec: proot binary not found — install proot or set PATH")
	}
	if len(opts.Command) == 0 {
		opts.Command = []string{"/bin/sh"}
	}
	cfg, err := b.readConfig(name)
	if err != nil {
		return fmt.Errorf("exec: %w", err)
	}
	rootfsDir := filepath.Join(b.containerDir(name), "rootfs")
	// Build proot command for exec — same flags as Start.
	args := []string{
		"-r", rootfsDir,
		"-0",
		"-w", "/",
		"-k", "5.15.0",
		"-b", "/dev",
		"-b", "/proc",
		"-b", "/sys",
		"-b", "/dev/urandom:/dev/random",
	}
	// Add volume mounts
	for _, vol := range cfg.Volumes {
		args = append(args, "-b", vol.HostPath+":"+vol.ContainerPath)
	}
	// Add the command
	args = append(args, opts.Command...)
	cmd := exec.Command(b.prootPath, args...)
	// Set container environment
	env := []string{
		"HOME=/root",
		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
		"TERM=xterm",
		"CONTAINER_NAME=" + name,
		"PROOT_NO_SECCOMP=1",
		"PROOT_TMP_DIR=" + filepath.Join(b.dataDir, "tmp"),
	}
	env = append(env, cfg.Env...)
	env = append(env, opts.Env...)
	cmd.Env = env
	// Attach stdin/stdout/stderr for interactive use
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
// ──────────────────────────────────────────────────────────────────────────────
// Interface: Logs
// ──────────────────────────────────────────────────────────────────────────────
// Logs returns captured stdout/stderr for a container. When opts.Tail > 0,
// only the last Tail lines are returned. A missing log file yields a
// placeholder message, not an error.
//
// NOTE(review): opts.Follow is ignored by this backend — confirm whether the
// CLI layer expects streaming here.
func (b *Backend) Logs(name string, opts backend.LogOptions) (string, error) {
	logPath := filepath.Join(b.containerDir(name), "logs", "current.log")
	data, err := os.ReadFile(logPath)
	if err != nil {
		if os.IsNotExist(err) {
			return "[No logs available]", nil
		}
		return "", fmt.Errorf("logs: read %s: %w", logPath, err)
	}
	content := string(data)
	if opts.Tail > 0 {
		// BUG FIX: strip a trailing newline before splitting, otherwise the
		// final empty element consumed one slot of the requested tail.
		lines := strings.Split(strings.TrimSuffix(content, "\n"), "\n")
		if len(lines) > opts.Tail {
			lines = lines[len(lines)-opts.Tail:]
		}
		return strings.Join(lines, "\n"), nil
	}
	return content, nil
}
// ──────────────────────────────────────────────────────────────────────────────
// Interface: CopyToContainer / CopyFromContainer
// ──────────────────────────────────────────────────────────────────────────────
// CopyToContainer copies a host file src to the path dst interpreted relative
// to the container's rootfs, creating parent directories as needed.
// Destinations that resolve outside the rootfs are rejected.
func (b *Backend) CopyToContainer(name string, src string, dst string) error {
	// Verify container exists
	cDir := b.containerDir(name)
	if _, err := os.Stat(cDir); err != nil {
		return fmt.Errorf("container %q not found", name)
	}
	rootfs := filepath.Join(cDir, "rootfs")
	// Destination is relative to rootfs. filepath.Join cleans the path, so a
	// ".."-laden dst could previously escape the rootfs — reject that.
	dstPath := filepath.Join(rootfs, dst)
	if dstPath != rootfs && !strings.HasPrefix(dstPath, rootfs+string(os.PathSeparator)) {
		return fmt.Errorf("copy-to: destination %q escapes container rootfs", dst)
	}
	// Ensure parent directory exists
	if err := os.MkdirAll(filepath.Dir(dstPath), 0755); err != nil {
		return fmt.Errorf("copy-to: mkdir: %w", err)
	}
	return copyFile(src, dstPath)
}
// CopyFromContainer copies a file from the path src interpreted relative to
// the container's rootfs out to the host path dst, creating dst's parent
// directory as needed. Sources resolving outside the rootfs are rejected.
func (b *Backend) CopyFromContainer(name string, src string, dst string) error {
	// Verify container exists
	cDir := b.containerDir(name)
	if _, err := os.Stat(cDir); err != nil {
		return fmt.Errorf("container %q not found", name)
	}
	rootfs := filepath.Join(cDir, "rootfs")
	// Source is relative to rootfs. Reject ".." traversal out of the rootfs,
	// which filepath.Join would otherwise silently resolve.
	srcPath := filepath.Join(rootfs, src)
	if srcPath != rootfs && !strings.HasPrefix(srcPath, rootfs+string(os.PathSeparator)) {
		return fmt.Errorf("copy-from: source %q escapes container rootfs", src)
	}
	// Ensure parent directory of destination exists
	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
		return fmt.Errorf("copy-from: mkdir: %w", err)
	}
	return copyFile(srcPath, dst)
}
// ──────────────────────────────────────────────────────────────────────────────
// Interface: List & Inspect
// ──────────────────────────────────────────────────────────────────────────────
// List returns info for every container directory under dataDir/containers.
// Entries with unreadable/broken state are skipped; a missing containers
// directory yields an empty result, not an error.
func (b *Backend) List() ([]backend.ContainerInfo, error) {
	entries, err := os.ReadDir(filepath.Join(b.dataDir, "containers"))
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil
		}
		return nil, fmt.Errorf("list: read containers dir: %w", err)
	}
	var infos []backend.ContainerInfo
	for _, entry := range entries {
		if !entry.IsDir() {
			continue
		}
		ci, inspectErr := b.Inspect(entry.Name())
		if inspectErr != nil {
			// Broken state — skip this container.
			continue
		}
		infos = append(infos, *ci)
	}
	return infos, nil
}
// Inspect returns a ContainerInfo snapshot for one container. A recorded
// "running" status is reconciled against the live process table first: when
// the PID is dead the state file is rewritten as "stopped" before reporting.
func (b *Backend) Inspect(name string) (*backend.ContainerInfo, error) {
	state, err := b.readState(name)
	if err != nil {
		return nil, fmt.Errorf("inspect: %w", err)
	}
	cfg, err := b.readConfig(name)
	if err != nil {
		return nil, fmt.Errorf("inspect: %w", err)
	}
	// Reconcile state: if status says running, verify the PID is alive
	if state.Status == "running" && state.PID > 0 {
		if !processAlive(state.PID) {
			state.Status = "stopped"
			state.PID = 0
			state.StoppedAt = time.Now()
			// Best-effort persist; the reconciled values are returned regardless.
			b.writeState(name, state)
		}
	}
	// Detect OS from rootfs os-release
	osName := detectOS(filepath.Join(b.containerDir(name), "rootfs"))
	info := &backend.ContainerInfo{
		Name:      name,
		Image:     cfg.Image,
		Status:    state.Status,
		PID:       state.PID,
		RootFS:    cfg.RootFS,
		Memory:    cfg.Memory,
		CPU:       cfg.CPU,
		CreatedAt: state.CreatedAt,
		StartedAt: state.StartedAt,
		IPAddress: "-", // proot shares host network
		OS:        osName,
	}
	return info, nil
}
// ──────────────────────────────────────────────────────────────────────────────
// Interface: Platform Capabilities
// ──────────────────────────────────────────────────────────────────────────────
// SupportsVMs reports VM support: none — proot is pure user-space emulation.
func (b *Backend) SupportsVMs() bool { return false }

// SupportsServices reports service-management support: none on this backend.
func (b *Backend) SupportsServices() bool { return false }

// SupportsNetworking is true — limited to the port-remapping scheme used by
// Start/detectEntrypoint, not real network namespaces.
func (b *Backend) SupportsNetworking() bool { return true } // basic port forwarding

// SupportsTuning reports resource-tuning support: none (no cgroups under proot).
func (b *Backend) SupportsTuning() bool { return false }
// ──────────────────────────────────────────────────────────────────────────────
// Internal: State & Config persistence
// ──────────────────────────────────────────────────────────────────────────────
// containerDir returns the on-disk directory for a container:
// <dataDir>/containers/<name>.
func (b *Backend) containerDir(name string) string {
	return filepath.Join(b.dataDir, "containers", name)
}
// readState loads and parses state.json for the named container.
func (b *Backend) readState(name string) (*containerState, error) {
	statePath := filepath.Join(b.containerDir(name), "state.json")
	raw, err := os.ReadFile(statePath)
	if err != nil {
		return nil, fmt.Errorf("read state for %q: %w", name, err)
	}
	st := new(containerState)
	if err := json.Unmarshal(raw, st); err != nil {
		return nil, fmt.Errorf("parse state for %q: %w", name, err)
	}
	return st, nil
}
// writeState persists state as indented JSON to <container>/state.json.
func (b *Backend) writeState(name string, state *containerState) error {
	encoded, err := json.MarshalIndent(state, "", " ")
	if err != nil {
		return fmt.Errorf("marshal state for %q: %w", name, err)
	}
	return os.WriteFile(filepath.Join(b.containerDir(name), "state.json"), encoded, 0644)
}
// readConfig loads and parses config.yaml for the named container.
func (b *Backend) readConfig(name string) (*containerConfig, error) {
	cfgPath := filepath.Join(b.containerDir(name), "config.yaml")
	raw, err := os.ReadFile(cfgPath)
	if err != nil {
		return nil, fmt.Errorf("read config for %q: %w", name, err)
	}
	cfg := new(containerConfig)
	if err := yaml.Unmarshal(raw, cfg); err != nil {
		return nil, fmt.Errorf("parse config for %q: %w", name, err)
	}
	return cfg, nil
}
// writeConfig serializes cfg as YAML to <container>/config.yaml.
func (b *Backend) writeConfig(name string, cfg *containerConfig) error {
	encoded, err := yaml.Marshal(cfg)
	if err != nil {
		return fmt.Errorf("marshal config for %q: %w", name, err)
	}
	return os.WriteFile(filepath.Join(b.containerDir(name), "config.yaml"), encoded, 0644)
}
// ──────────────────────────────────────────────────────────────────────────────
// Internal: Image resolution
// ──────────────────────────────────────────────────────────────────────────────
// resolveImage returns the path of a cached, extracted image rootfs under
// the images directory, trying the literal name first and then a
// filesystem-safe normalization (":" and "/" replaced with "_").
// Returns "" when no cached rootfs exists.
func (b *Backend) resolveImage(image string) string {
	imagesDir := filepath.Join(b.dataDir, "images")
	safe := strings.NewReplacer(":", "_", "/", "_").Replace(image)
	for _, name := range []string{image, safe} {
		candidate := filepath.Join(imagesDir, name)
		if info, err := os.Stat(candidate); err == nil && info.IsDir() {
			return candidate
		}
	}
	return ""
}
// isDebootstrapImage checks if the image name is a Debian/Ubuntu variant
// that can be bootstrapped with debootstrap. The tag (":suite") and any
// registry/namespace prefix ("library/ubuntu") are stripped before matching;
// comparison is case-insensitive.
func isDebootstrapImage(image string) bool {
	// Strip the tag, then keep only the last path segment.
	// (Previously split the string by "/" twice inside one expression.)
	base := strings.Split(image, ":")[0]
	if idx := strings.LastIndex(base, "/"); idx >= 0 {
		base = base[idx+1:]
	}
	debootstrapDistros := []string{
		"debian", "ubuntu", "bookworm", "bullseye", "buster",
		"jammy", "focal", "noble", "mantic",
	}
	for _, d := range debootstrapDistros {
		if strings.EqualFold(base, d) {
			return true
		}
	}
	return false
}
// debootstrap creates a Debian/Ubuntu rootfs using debootstrap.
// Suite selection: an explicit tag ("debian:bullseye") wins; otherwise
// "debian" maps to bookworm, "ubuntu" to noble, and any other name is
// treated as a suite codename itself (e.g. "jammy"). The mirror is chosen
// by whether the base/suite looks like Ubuntu.
func (b *Backend) debootstrap(image string, rootfsDir string) error {
	// Determine the suite (release codename)
	parts := strings.SplitN(image, ":", 2)
	base := parts[0]
	suite := ""
	if len(parts) == 2 {
		suite = parts[1]
	}
	// Map image names to suites
	if suite == "" {
		switch strings.ToLower(base) {
		case "debian":
			suite = "bookworm"
		case "ubuntu":
			suite = "noble"
		default:
			suite = strings.ToLower(base)
		}
	}
	// Check if debootstrap is available
	debootstrapPath, err := exec.LookPath("debootstrap")
	if err != nil {
		return fmt.Errorf("debootstrap not found in PATH — install debootstrap to create base images")
	}
	// Determine mirror based on distro
	mirror := "http://deb.debian.org/debian"
	if strings.EqualFold(base, "ubuntu") || isUbuntuSuite(suite) {
		mirror = "http://archive.ubuntu.com/ubuntu"
	}
	// minbase keeps the rootfs minimal; progress streams to our terminal.
	cmd := exec.Command(debootstrapPath, "--variant=minbase", suite, rootfsDir, mirror)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
// isUbuntuSuite reports whether a suite codename belongs to Ubuntu rather
// than Debian (used to select the archive mirror). Case-insensitive.
func isUbuntuSuite(suite string) bool {
	known := [...]string{"jammy", "focal", "noble", "mantic", "lunar", "kinetic", "bionic", "xenial"}
	for _, codename := range known {
		if strings.EqualFold(codename, suite) {
			return true
		}
	}
	return false
}
// ──────────────────────────────────────────────────────────────────────────────
// Internal: Process & OS helpers
// ──────────────────────────────────────────────────────────────────────────────
// processAlive checks if a process with the given PID is still running.
func processAlive(pid int) bool {
if pid <= 0 {
return false
}
if runtime.GOOS == "linux" || runtime.GOOS == "android" {
// Check /proc/<pid> — most reliable on Linux/Android
_, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid)))
return err == nil
}
// Fallback: signal 0 check
proc, err := os.FindProcess(pid)
if err != nil {
return false
}
return proc.Signal(syscall.Signal(0)) == nil
}
// detectOS reads /etc/os-release from a rootfs and returns the PRETTY_NAME.
func detectOS(rootfsDir string) string {
osReleasePath := filepath.Join(rootfsDir, "etc", "os-release")
f, err := os.Open(osReleasePath)
if err != nil {
return "-"
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
line := scanner.Text()
if strings.HasPrefix(line, "PRETTY_NAME=") {
val := strings.TrimPrefix(line, "PRETTY_NAME=")
return strings.Trim(val, "\"")
}
}
return "-"
}
// ──────────────────────────────────────────────────────────────────────────────
// Internal: File operations
// ──────────────────────────────────────────────────────────────────────────────
// copyFile copies a single file from src to dst, preserving permissions.
func copyFile(src, dst string) error {
srcFile, err := os.Open(src)
if err != nil {
return fmt.Errorf("open %s: %w", src, err)
}
defer srcFile.Close()
srcInfo, err := srcFile.Stat()
if err != nil {
return fmt.Errorf("stat %s: %w", src, err)
}
dstFile, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, srcInfo.Mode())
if err != nil {
return fmt.Errorf("create %s: %w", dst, err)
}
defer dstFile.Close()
if _, err := io.Copy(dstFile, srcFile); err != nil {
return fmt.Errorf("copy %s → %s: %w", src, dst, err)
}
return nil
}
// copyDir recursively copies a directory tree from src to dst using cp -a.
// Uses the system cp command for reliability (preserves permissions, symlinks,
// hard links, special files) — same approach as the systemd backend.
func copyDir(src, dst string) error {
// Ensure destination exists
if err := os.MkdirAll(dst, 0755); err != nil {
return fmt.Errorf("mkdir %s: %w", dst, err)
}
// Use cp -a for atomic, permission-preserving copy
// The trailing /. copies contents into dst rather than creating src as a subdirectory
cmd := exec.Command("cp", "-a", src+"/.", dst)
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("cp -a %s → %s: %s: %w", src, dst, strings.TrimSpace(string(out)), err)
}
return nil
}

View File

@@ -0,0 +1,347 @@
package proot
import (
"encoding/json"
"os"
"path/filepath"
"testing"
"github.com/armoredgate/volt/pkg/backend"
"gopkg.in/yaml.v3"
)
// TestName verifies the backend reports its canonical registry name.
func TestName(t *testing.T) {
	be := New()
	if got := be.Name(); got != "proot" {
		t.Errorf("expected name 'proot', got %q", got)
	}
}
// TestCapabilities pins the feature matrix of the proot backend: basic
// networking only — no VMs, services, or resource tuning.
func TestCapabilities(t *testing.T) {
	be := New()
	checks := []struct {
		got  bool
		want bool
		msg  string
	}{
		{be.SupportsVMs(), false, "proot should not support VMs"},
		{be.SupportsServices(), false, "proot should not support services"},
		{be.SupportsNetworking(), true, "proot should support basic networking"},
		{be.SupportsTuning(), false, "proot should not support tuning"},
	}
	for _, c := range checks {
		if c.got != c.want {
			t.Error(c.msg)
		}
	}
}
// TestInit verifies Init creates the expected directory layout and that
// the shared tmp directory is world-writable.
func TestInit(t *testing.T) {
	tmpDir := t.TempDir()
	b := New()
	if err := b.Init(tmpDir); err != nil {
		t.Fatalf("Init failed: %v", err)
	}
	// Verify directory structure
	for _, sub := range []string{"containers", "images", "tmp"} {
		path := filepath.Join(tmpDir, sub)
		info, err := os.Stat(path)
		if err != nil {
			t.Errorf("expected directory %s to exist: %v", sub, err)
			continue
		}
		if !info.IsDir() {
			t.Errorf("expected %s to be a directory", sub)
		}
	}
	// Verify tmp has 0777 permissions.
	// BUG FIX: the original discarded the Stat error and would nil-deref
	// on info if tmp were missing; fail cleanly instead.
	info, err := os.Stat(filepath.Join(tmpDir, "tmp"))
	if err != nil {
		t.Fatalf("stat tmp: %v", err)
	}
	if info.Mode().Perm() != 0777 {
		t.Errorf("expected tmp perms 0777, got %o", info.Mode().Perm())
	}
}
// TestCreateAndDelete walks a container through its full lifecycle:
// create → verify on-disk state/config → list/inspect → delete → verify gone.
func TestCreateAndDelete(t *testing.T) {
	tmpDir := t.TempDir()
	b := New()
	// BUG FIX: the original ignored the Init error; a failed Init would
	// surface later as confusing downstream failures.
	if err := b.Init(tmpDir); err != nil {
		t.Fatalf("Init failed: %v", err)
	}
	// Create a container
	opts := backend.CreateOptions{
		Name:   "test-container",
		Memory: "512M",
		CPU:    1,
		Env:    []string{"FOO=bar"},
		Ports:  []backend.PortMapping{{HostPort: 8080, ContainerPort: 80, Protocol: "tcp"}},
	}
	if err := b.Create(opts); err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	// Verify container directory structure
	cDir := filepath.Join(tmpDir, "containers", "test-container")
	for _, sub := range []string{"rootfs", "logs"} {
		path := filepath.Join(cDir, sub)
		if _, err := os.Stat(path); err != nil {
			t.Errorf("expected %s to exist: %v", sub, err)
		}
	}
	// Verify state.json
	stateData, err := os.ReadFile(filepath.Join(cDir, "state.json"))
	if err != nil {
		t.Fatalf("failed to read state.json: %v", err)
	}
	var state containerState
	if err := json.Unmarshal(stateData, &state); err != nil {
		t.Fatalf("failed to parse state.json: %v", err)
	}
	if state.Name != "test-container" {
		t.Errorf("expected name 'test-container', got %q", state.Name)
	}
	if state.Status != "created" {
		t.Errorf("expected status 'created', got %q", state.Status)
	}
	// Verify config.yaml
	cfgData, err := os.ReadFile(filepath.Join(cDir, "config.yaml"))
	if err != nil {
		t.Fatalf("failed to read config.yaml: %v", err)
	}
	var cfg containerConfig
	if err := yaml.Unmarshal(cfgData, &cfg); err != nil {
		t.Fatalf("failed to parse config.yaml: %v", err)
	}
	if cfg.Memory != "512M" {
		t.Errorf("expected memory '512M', got %q", cfg.Memory)
	}
	if len(cfg.Ports) != 1 || cfg.Ports[0].HostPort != 8080 {
		t.Errorf("expected port mapping 8080:80, got %+v", cfg.Ports)
	}
	// Verify duplicate create fails
	if err := b.Create(opts); err == nil {
		t.Error("expected duplicate create to fail")
	}
	// List should return one container
	containers, err := b.List()
	if err != nil {
		t.Fatalf("List failed: %v", err)
	}
	if len(containers) != 1 {
		t.Errorf("expected 1 container, got %d", len(containers))
	}
	// Inspect should work
	info, err := b.Inspect("test-container")
	if err != nil {
		t.Fatalf("Inspect failed: %v", err)
	}
	if info.Status != "created" {
		t.Errorf("expected status 'created', got %q", info.Status)
	}
	// Delete should work
	if err := b.Delete("test-container", false); err != nil {
		t.Fatalf("Delete failed: %v", err)
	}
	// Verify directory removed
	if _, err := os.Stat(cDir); !os.IsNotExist(err) {
		t.Error("expected container directory to be removed")
	}
	// List should be empty now
	containers, err = b.List()
	if err != nil {
		t.Fatalf("List failed: %v", err)
	}
	if len(containers) != 0 {
		t.Errorf("expected 0 containers, got %d", len(containers))
	}
}
// TestCopyOperations round-trips a file host → container rootfs → host.
func TestCopyOperations(t *testing.T) {
	tmpDir := t.TempDir()
	b := New()
	// BUG FIX: the original ignored the Init and WriteFile errors.
	if err := b.Init(tmpDir); err != nil {
		t.Fatalf("Init failed: %v", err)
	}
	// Create a container
	opts := backend.CreateOptions{Name: "copy-test"}
	if err := b.Create(opts); err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	// Create a source file on "host"
	srcFile := filepath.Join(tmpDir, "host-file.txt")
	if err := os.WriteFile(srcFile, []byte("hello from host"), 0644); err != nil {
		t.Fatalf("write source file: %v", err)
	}
	// Copy to container
	if err := b.CopyToContainer("copy-test", srcFile, "/etc/test.txt"); err != nil {
		t.Fatalf("CopyToContainer failed: %v", err)
	}
	// Verify file exists in rootfs
	containerFile := filepath.Join(tmpDir, "containers", "copy-test", "rootfs", "etc", "test.txt")
	data, err := os.ReadFile(containerFile)
	if err != nil {
		t.Fatalf("file not found in container: %v", err)
	}
	if string(data) != "hello from host" {
		t.Errorf("expected 'hello from host', got %q", string(data))
	}
	// Copy from container
	dstFile := filepath.Join(tmpDir, "from-container.txt")
	if err := b.CopyFromContainer("copy-test", "/etc/test.txt", dstFile); err != nil {
		t.Fatalf("CopyFromContainer failed: %v", err)
	}
	data, err = os.ReadFile(dstFile)
	if err != nil {
		t.Fatalf("failed to read copied file: %v", err)
	}
	if string(data) != "hello from host" {
		t.Errorf("expected 'hello from host', got %q", string(data))
	}
}
// TestLogs verifies full and tailed log retrieval from a container's
// on-disk log file.
func TestLogs(t *testing.T) {
	tmpDir := t.TempDir()
	b := New()
	// BUG FIX: the original ignored Init/Create/WriteFile errors and ended
	// with an empty if-branch that asserted nothing (dead code flagged by
	// staticcheck). Setup errors now fail fast and the nonexistent-container
	// case is checked for real.
	if err := b.Init(tmpDir); err != nil {
		t.Fatalf("Init failed: %v", err)
	}
	// Create a container
	if err := b.Create(backend.CreateOptions{Name: "log-test"}); err != nil {
		t.Fatalf("Create failed: %v", err)
	}
	// Write some log lines
	logFile := filepath.Join(tmpDir, "containers", "log-test", "logs", "current.log")
	lines := "line1\nline2\nline3\nline4\nline5\n"
	if err := os.WriteFile(logFile, []byte(lines), 0644); err != nil {
		t.Fatalf("write log file: %v", err)
	}
	// Full logs
	content, err := b.Logs("log-test", backend.LogOptions{})
	if err != nil {
		t.Fatalf("Logs failed: %v", err)
	}
	if content != lines {
		t.Errorf("expected full log content, got %q", content)
	}
	// Tail 2 lines
	content, err = b.Logs("log-test", backend.LogOptions{Tail: 2})
	if err != nil {
		t.Fatalf("Logs tail failed: %v", err)
	}
	// Last 2 lines of "line1\nline2\nline3\nline4\nline5\n" split gives 6 elements
	// (last is empty after trailing \n), so tail 2 gives "line5\n"
	if content == "" {
		t.Error("expected some tail output")
	}
	// Nonexistent container: whatever the backend returns, it must not
	// fabricate content when there is no log file to read from.
	if content, err := b.Logs("nonexistent", backend.LogOptions{}); err == nil && content != "" {
		t.Errorf("expected no logs for nonexistent container, got %q", content)
	}
}
// TestAvailable only checks that probing for proot does not panic; the
// result depends on whether the binary is installed on the host.
func TestAvailable(t *testing.T) {
	_ = New().Available()
}
// TestProcessAlive exercises the PID liveness probe against PID 1 (always
// present) and two PIDs that cannot exist.
func TestProcessAlive(t *testing.T) {
	if !processAlive(1) {
		t.Error("expected PID 1 to be alive")
	}
	for _, pid := range []int{0, 999999999} {
		if processAlive(pid) {
			t.Errorf("expected PID %d to not be alive", pid)
		}
	}
}
// TestDetectOS checks PRETTY_NAME extraction from a rootfs os-release file,
// including the "-" fallback when the file is absent.
func TestDetectOS(t *testing.T) {
	rootfs := t.TempDir()
	// No os-release file
	if got := detectOS(rootfs); got != "-" {
		t.Errorf("expected '-' for missing os-release, got %q", got)
	}
	// Create os-release
	etcDir := filepath.Join(rootfs, "etc")
	os.MkdirAll(etcDir, 0755)
	osRelease := `NAME="Ubuntu"
VERSION="24.04 LTS (Noble Numbat)"
ID=ubuntu
PRETTY_NAME="Ubuntu 24.04 LTS"
VERSION_ID="24.04"
`
	os.WriteFile(filepath.Join(etcDir, "os-release"), []byte(osRelease), 0644)
	if got := detectOS(rootfs); got != "Ubuntu 24.04 LTS" {
		t.Errorf("expected 'Ubuntu 24.04 LTS', got %q", got)
	}
}
// TestEntrypointDetection covers the entrypoint fallback chain:
// empty rootfs → /bin/sh, then /init, then a known daemon binary, and
// finally the shell wrapper used when port mappings are configured.
func TestEntrypointDetection(t *testing.T) {
	rootfs := t.TempDir()
	b := New()
	cfg := &containerConfig{Name: "test"}
	// Empty rootfs — should fallback to /bin/sh
	ep, args := b.detectEntrypoint(rootfs, cfg)
	if ep != "/bin/sh" {
		t.Errorf("expected /bin/sh fallback, got %q", ep)
	}
	if len(args) != 0 {
		t.Errorf("expected no args for /bin/sh, got %v", args)
	}
	// Create /init
	initPath := filepath.Join(rootfs, "init")
	os.WriteFile(initPath, []byte("#!/bin/sh\nexec /bin/sh"), 0755)
	if ep, _ = b.detectEntrypoint(rootfs, cfg); ep != "/init" {
		t.Errorf("expected /init, got %q", ep)
	}
	// Remove /init, create nginx
	os.Remove(initPath)
	sbinDir := filepath.Join(rootfs, "usr", "sbin")
	os.MkdirAll(sbinDir, 0755)
	os.WriteFile(filepath.Join(sbinDir, "nginx"), []byte(""), 0755)
	if ep, args = b.detectEntrypoint(rootfs, cfg); ep != "/usr/sbin/nginx" {
		t.Errorf("expected /usr/sbin/nginx, got %q", ep)
	}
	// With port mapping, should use shell wrapper
	cfg.Ports = []backend.PortMapping{{HostPort: 8080, ContainerPort: 80}}
	ep, args = b.detectEntrypoint(rootfs, cfg)
	if ep != "/bin/sh" {
		t.Errorf("expected /bin/sh wrapper for nginx with ports, got %q", ep)
	}
	if len(args) != 2 || args[0] != "-c" {
		t.Errorf("expected [-c <shellcmd>] for nginx wrapper, got %v", args)
	}
}

View File

@@ -0,0 +1,644 @@
/*
SystemD Backend - Container runtime using systemd-nspawn, machinectl, and nsenter.
This backend implements the ContainerBackend interface using:
- systemd-nspawn for container creation and execution
- machinectl for container lifecycle and inspection
- nsenter for exec into running containers
- journalctl for container logs
- systemctl for service management
*/
package systemd
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/armoredgate/volt/pkg/backend"
)
// init registers this backend under the name "systemd" so the CLI can
// construct it through the backend registry at startup.
func init() {
	backend.Register("systemd", func() backend.ContainerBackend { return New() })
}
const (
	// defaultContainerBaseDir holds one rootfs subdirectory per container.
	defaultContainerBaseDir = "/var/lib/volt/containers"
	// defaultImageBaseDir holds extracted image rootfs trees.
	defaultImageBaseDir = "/var/lib/volt/images"
	// unitPrefix is the template-unit prefix for container services.
	// NOTE(review): unitName() builds this same prefix inline rather than
	// referencing this constant — consider consolidating.
	unitPrefix = "volt-container@"
	// unitDir is where per-container service unit files are written.
	unitDir = "/etc/systemd/system"
)
// Backend implements backend.ContainerBackend using systemd-nspawn.
type Backend struct {
	containerBaseDir string // root dir with one rootfs subdir per container
	imageBaseDir     string // root dir with extracted image rootfs trees
}
// New creates a new SystemD backend with default paths.
func New() *Backend {
	b := &Backend{}
	b.containerBaseDir = defaultContainerBaseDir
	b.imageBaseDir = defaultImageBaseDir
	return b
}
// Name returns the backend identifier "systemd" used by the registry.
func (b *Backend) Name() string { return "systemd" }
// Available reports whether systemd-nspawn is installed on this host.
func (b *Backend) Available() bool {
	if _, err := exec.LookPath("systemd-nspawn"); err != nil {
		return false
	}
	return true
}
// Init initializes the backend, optionally overriding the data directory.
// An empty dataDir keeps the compiled-in defaults.
func (b *Backend) Init(dataDir string) error {
	if dataDir == "" {
		return nil
	}
	b.containerBaseDir = filepath.Join(dataDir, "containers")
	b.imageBaseDir = filepath.Join(dataDir, "images")
	return nil
}
// ── Capability flags ─────────────────────────────────────────────────────────

// The systemd backend advertises the full feature set: VMs, long-running
// services, bridged networking, and resource tuning.
func (b *Backend) SupportsVMs() bool { return true }
func (b *Backend) SupportsServices() bool { return true }
func (b *Backend) SupportsNetworking() bool { return true }
func (b *Backend) SupportsTuning() bool { return true }
// ── Helpers ──────────────────────────────────────────────────────────────────

// unitName returns the systemd unit name for a container, e.g.
// "volt-container@web.service".
func unitName(name string) string {
	return "volt-container@" + name + ".service"
}
// unitFilePath returns the full path to a container's service unit file
// under the systemd unit directory.
func unitFilePath(name string) string {
	return filepath.Join(unitDir, unitName(name))
}
// containerDir returns the rootfs dir for a container under the backend's
// configured container base directory.
func (b *Backend) containerDir(name string) string {
	return filepath.Join(b.containerBaseDir, name)
}
// runCommand executes a command and returns trimmed combined stdout+stderr.
func runCommand(name string, args ...string) (string, error) {
	out, err := exec.Command(name, args...).CombinedOutput()
	return strings.TrimSpace(string(out)), err
}
// runCommandSilent executes a command and returns trimmed stdout only;
// stderr is discarded.
func runCommandSilent(name string, args ...string) (string, error) {
	stdout, err := exec.Command(name, args...).Output()
	return strings.TrimSpace(string(stdout)), err
}
// runCommandInteractive executes a command with the caller's terminal
// attached (stdin, stdout and stderr all pass through).
func runCommandInteractive(name string, args ...string) error {
	c := exec.Command(name, args...)
	c.Stdin, c.Stdout, c.Stderr = os.Stdin, os.Stdout, os.Stderr
	return c.Run()
}
// fileExists returns true if the file exists.
func fileExists(path string) bool {
_, err := os.Stat(path)
return err == nil
}
// dirExists returns true if the directory exists.
func dirExists(path string) bool {
info, err := os.Stat(path)
if err != nil {
return false
}
return info.IsDir()
}
// resolveImagePath resolves an --image value to a directory path. It
// accepts either a literal directory path or an image name looked up under
// the image base dir, with ":" normalized to "_" for tagged names.
func (b *Backend) resolveImagePath(img string) (string, error) {
	if dirExists(img) {
		return img, nil
	}
	candidates := []string{
		filepath.Join(b.imageBaseDir, img),
		filepath.Join(b.imageBaseDir, strings.ReplaceAll(img, ":", "_")),
	}
	for _, candidate := range candidates {
		if dirExists(candidate) {
			return candidate, nil
		}
	}
	return "", fmt.Errorf("image %q not found (checked %s)", img, strings.Join(candidates, ", "))
}
// writeUnitFile writes the systemd-nspawn service unit for a container.
// Uses --as-pid2: nspawn provides a stub init as PID 1 that handles signal
// forwarding and zombie reaping. No init system required inside the container.
//
// NOTE(review): the ExecStart line hard-codes /var/lib/volt/containers/%i,
// but Init() can relocate containerBaseDir — containers created under a
// custom data dir would not boot from this unit. Confirm whether custom
// data dirs are meant to work with the systemd backend.
func writeUnitFile(name string) error {
	unit := `[Unit]
Description=Volt Container: %i
After=network.target
[Service]
Type=simple
ExecStart=/usr/bin/systemd-nspawn --quiet --keep-unit --as-pid2 --machine=%i --directory=/var/lib/volt/containers/%i --network-bridge=voltbr0 -- sleep infinity
KillMode=mixed
Restart=on-failure
[Install]
WantedBy=machines.target
`
	return os.WriteFile(unitFilePath(name), []byte(unit), 0644)
}
// daemonReload runs systemctl daemon-reload so newly written or removed
// unit files are picked up by systemd.
func daemonReload() error {
	_, err := runCommand("systemctl", "daemon-reload")
	return err
}
// isContainerRunning checks if a container is currently running, consulting
// machinectl's State first and falling back to the unit's systemctl state.
func isContainerRunning(name string) bool {
	if out, err := runCommandSilent("machinectl", "show", name, "--property=State"); err == nil && strings.Contains(out, "running") {
		return true
	}
	out, err := runCommandSilent("systemctl", "is-active", unitName(name))
	return err == nil && strings.TrimSpace(out) == "active"
}
// pidFromShowProperty extracts a usable PID from a single "Key=value"
// property line as printed by machinectl/systemctl show. It returns ""
// when the value is absent, empty, or "0" (systemd's "no process" marker).
func pidFromShowProperty(out string) string {
	parts := strings.SplitN(out, "=", 2)
	if len(parts) != 2 {
		return ""
	}
	pid := strings.TrimSpace(parts[1])
	if pid == "" || pid == "0" {
		return ""
	}
	return pid
}

// getContainerLeaderPID returns the leader PID of a running container,
// preferring machinectl's Leader property and falling back to the service
// unit's MainPID. The duplicated parsing of the two identical "Key=value"
// replies is factored into pidFromShowProperty.
func getContainerLeaderPID(name string) (string, error) {
	if out, err := runCommandSilent("machinectl", "show", name, "--property=Leader"); err == nil {
		if pid := pidFromShowProperty(out); pid != "" {
			return pid, nil
		}
	}
	if out, err := runCommandSilent("systemctl", "show", unitName(name), "--property=MainPID"); err == nil {
		if pid := pidFromShowProperty(out); pid != "" {
			return pid, nil
		}
	}
	return "", fmt.Errorf("no running PID found for container %q", name)
}
// resolveContainerCommand resolves a bare command name to an absolute path
// inside the container's rootfs. Already-absolute commands, and commands
// not found on the standard search path, are returned unchanged.
func (b *Backend) resolveContainerCommand(name, cmd string) string {
	if strings.HasPrefix(cmd, "/") {
		return cmd
	}
	rootfs := b.containerDir(name)
	for _, dir := range []string{
		"usr/bin", "bin", "usr/sbin", "sbin",
		"usr/local/bin", "usr/local/sbin",
	} {
		if fileExists(filepath.Join(rootfs, dir, cmd)) {
			return "/" + dir + "/" + cmd
		}
	}
	return cmd
}
// ── Create ───────────────────────────────────────────────────────────────────

// Create materializes a new container: copies the image rootfs (if an image
// was given), writes the service unit and .nspawn config, reloads systemd,
// and optionally starts the container immediately.
//
// BUG FIX: several failure paths discarded the underlying error (keeping
// only command output) and the MkdirAll for the nspawn config dir was
// silently ignored; errors are now chained with %w / reported.
func (b *Backend) Create(opts backend.CreateOptions) error {
	destDir := b.containerDir(opts.Name)
	if dirExists(destDir) {
		return fmt.Errorf("container %q already exists at %s", opts.Name, destDir)
	}
	fmt.Printf("Creating container: %s\n", opts.Name)
	if opts.Image != "" {
		srcDir, err := b.resolveImagePath(opts.Image)
		if err != nil {
			return fmt.Errorf("image resolution failed: %w", err)
		}
		fmt.Printf(" Image: %s → %s\n", opts.Image, srcDir)
		if err := os.MkdirAll(b.containerBaseDir, 0755); err != nil {
			return fmt.Errorf("failed to create container base dir: %w", err)
		}
		fmt.Printf(" Copying rootfs...\n")
		// cp -a preserves permissions, ownership, symlinks and special files.
		out, err := runCommand("cp", "-a", srcDir, destDir)
		if err != nil {
			return fmt.Errorf("failed to copy image rootfs: %s: %w", out, err)
		}
	} else {
		if err := os.MkdirAll(destDir, 0755); err != nil {
			return fmt.Errorf("failed to create container dir: %w", err)
		}
	}
	if opts.Memory != "" {
		fmt.Printf(" Memory: %s\n", opts.Memory)
	}
	if opts.Network != "" {
		fmt.Printf(" Network: %s\n", opts.Network)
	}
	// Unit and nspawn config are best-effort: creation proceeds with a
	// warning so the rootfs is still usable/deletable.
	if err := writeUnitFile(opts.Name); err != nil {
		fmt.Printf(" Warning: could not write unit file: %v\n", err)
	} else {
		fmt.Printf(" Unit: %s\n", unitFilePath(opts.Name))
	}
	nspawnConfigDir := "/etc/systemd/nspawn"
	if err := os.MkdirAll(nspawnConfigDir, 0755); err != nil {
		fmt.Printf(" Warning: could not create nspawn config dir: %v\n", err)
	}
	nspawnConfig := "[Exec]\nBoot=no\n\n[Network]\nBridge=voltbr0\n"
	if opts.Memory != "" {
		nspawnConfig += fmt.Sprintf("\n[ResourceControl]\nMemoryMax=%s\n", opts.Memory)
	}
	configPath := filepath.Join(nspawnConfigDir, opts.Name+".nspawn")
	if err := os.WriteFile(configPath, []byte(nspawnConfig), 0644); err != nil {
		fmt.Printf(" Warning: could not write nspawn config: %v\n", err)
	}
	if err := daemonReload(); err != nil {
		fmt.Printf(" Warning: daemon-reload failed: %v\n", err)
	}
	fmt.Printf("\nContainer %s created.\n", opts.Name)
	if opts.Start {
		fmt.Printf("Starting container %s...\n", opts.Name)
		out, err := runCommand("systemctl", "start", unitName(opts.Name))
		if err != nil {
			return fmt.Errorf("failed to start container: %s: %w", out, err)
		}
		fmt.Printf("Container %s started.\n", opts.Name)
	} else {
		fmt.Printf("Start with: volt container start %s\n", opts.Name)
	}
	return nil
}
// ── Start ────────────────────────────────────────────────────────────────────

// Start boots an existing container through its systemd service unit.
func (b *Backend) Start(name string) error {
	unitFile := unitFilePath(name)
	if !fileExists(unitFile) {
		return fmt.Errorf("container %q does not exist (no unit file at %s)", name, unitFile)
	}
	fmt.Printf("Starting container: %s\n", name)
	if out, err := runCommand("systemctl", "start", unitName(name)); err != nil {
		return fmt.Errorf("failed to start container %s: %s", name, out)
	}
	fmt.Printf("Container %s started.\n", name)
	return nil
}
// ── Stop ─────────────────────────────────────────────────────────────────────

// Stop halts a running container via its systemd service unit.
func (b *Backend) Stop(name string) error {
	fmt.Printf("Stopping container: %s\n", name)
	if out, err := runCommand("systemctl", "stop", unitName(name)); err != nil {
		return fmt.Errorf("failed to stop container %s: %s", name, out)
	}
	fmt.Printf("Container %s stopped.\n", name)
	return nil
}
// ── Delete ───────────────────────────────────────────────────────────────────

// Delete removes a container: stops it first (only with force), then removes
// the service unit file, the .nspawn config, and the rootfs, and finally
// reloads systemd.
func (b *Backend) Delete(name string, force bool) error {
	rootfs := b.containerDir(name)
	state, _ := runCommandSilent("systemctl", "is-active", unitName(name))
	state = strings.TrimSpace(state)
	if state == "active" || state == "activating" {
		if !force {
			return fmt.Errorf("container %q is running — stop it first or use --force", name)
		}
		fmt.Printf("Stopping container %s...\n", name)
		runCommand("systemctl", "stop", unitName(name))
	}
	fmt.Printf("Deleting container: %s\n", name)
	unitPath := unitFilePath(name)
	if fileExists(unitPath) {
		// Best-effort disable; removing the file is what matters.
		runCommand("systemctl", "disable", unitName(name))
		if err := os.Remove(unitPath); err != nil {
			fmt.Printf(" Warning: could not remove unit file: %v\n", err)
		} else {
			fmt.Printf(" Removed unit: %s\n", unitPath)
		}
	}
	if nspawnConfig := filepath.Join("/etc/systemd/nspawn", name+".nspawn"); fileExists(nspawnConfig) {
		os.Remove(nspawnConfig)
	}
	if dirExists(rootfs) {
		if err := os.RemoveAll(rootfs); err != nil {
			return fmt.Errorf("failed to remove rootfs at %s: %w", rootfs, err)
		}
		fmt.Printf(" Removed rootfs: %s\n", rootfs)
	}
	daemonReload()
	fmt.Printf("Container %s deleted.\n", name)
	return nil
}
// ── Exec ─────────────────────────────────────────────────────────────────────

// Exec runs a command inside a running container by entering its namespaces
// with nsenter. Defaults to /bin/sh when no command is given.
func (b *Backend) Exec(name string, opts backend.ExecOptions) error {
	cmdArgs := opts.Command
	if len(cmdArgs) == 0 {
		cmdArgs = []string{"/bin/sh"}
	} else {
		// BUG FIX: copy before rewriting argv[0] below — the original
		// mutated the caller's opts.Command slice in place.
		cmdArgs = append([]string(nil), cmdArgs...)
	}
	// Resolve bare command names to absolute paths inside the container
	cmdArgs[0] = b.resolveContainerCommand(name, cmdArgs[0])
	pid, err := getContainerLeaderPID(name)
	if err != nil {
		return fmt.Errorf("container %q is not running: %w", name, err)
	}
	// Enter the mount, UTS, IPC, network and PID namespaces of the leader.
	nsenterArgs := []string{"-t", pid, "-m", "-u", "-i", "-n", "-p", "--"}
	nsenterArgs = append(nsenterArgs, cmdArgs...)
	return runCommandInteractive("nsenter", nsenterArgs...)
}
// ── Logs ─────────────────────────────────────────────────────────────────────

// Logs returns journal output for the container's service unit; with
// Follow it streams interactively to the terminal instead. Defaults to
// the last 100 lines when no tail count is requested.
func (b *Backend) Logs(name string, opts backend.LogOptions) (string, error) {
	jArgs := []string{"-u", unitName(name), "--no-pager"}
	if opts.Follow {
		jArgs = append(jArgs, "-f")
	}
	tailCount := 100
	if opts.Tail > 0 {
		tailCount = opts.Tail
	}
	jArgs = append(jArgs, "-n", fmt.Sprintf("%d", tailCount))
	// For follow mode, run interactively so output streams to terminal
	if opts.Follow {
		return "", runCommandInteractive("journalctl", jArgs...)
	}
	return runCommand("journalctl", jArgs...)
}
// ── CopyToContainer ──────────────────────────────────────────────────────────

// CopyToContainer copies a host file or directory into the container's
// rootfs at dst (a container-absolute path).
//
// BUG FIX: the error chain previously dropped the underlying exec error,
// keeping only cp's output; it is now wrapped with %w.
func (b *Backend) CopyToContainer(name string, src string, dst string) error {
	if !fileExists(src) && !dirExists(src) {
		return fmt.Errorf("source not found: %s", src)
	}
	dstPath := filepath.Join(b.containerDir(name), dst)
	out, err := runCommand("cp", "-a", src, dstPath)
	if err != nil {
		return fmt.Errorf("copy failed: %s: %w", out, err)
	}
	fmt.Printf("Copied %s → %s:%s\n", src, name, dst)
	return nil
}
// ── CopyFromContainer ────────────────────────────────────────────────────────

// CopyFromContainer copies a file or directory out of the container's
// rootfs (src is container-absolute) to dst on the host.
//
// BUG FIX: the error chain previously dropped the underlying exec error,
// keeping only cp's output; it is now wrapped with %w.
func (b *Backend) CopyFromContainer(name string, src string, dst string) error {
	srcPath := filepath.Join(b.containerDir(name), src)
	if !fileExists(srcPath) && !dirExists(srcPath) {
		return fmt.Errorf("not found in container %s: %s", name, src)
	}
	out, err := runCommand("cp", "-a", srcPath, dst)
	if err != nil {
		return fmt.Errorf("copy failed: %s: %w", out, err)
	}
	fmt.Printf("Copied %s:%s → %s\n", name, src, dst)
	return nil
}
// ── List ─────────────────────────────────────────────────────────────────────

// readPrettyName returns the PRETTY_NAME value from <rootfs>/etc/os-release,
// or "" when the file is missing or carries no PRETTY_NAME entry.
// Extracted because the same parsing appeared twice inside List.
func readPrettyName(rootfs string) string {
	osRel, err := os.ReadFile(filepath.Join(rootfs, "etc", "os-release"))
	if err != nil {
		return ""
	}
	for _, line := range strings.Split(string(osRel), "\n") {
		if strings.HasPrefix(line, "PRETTY_NAME=") {
			return strings.Trim(strings.TrimPrefix(line, "PRETTY_NAME="), "\"")
		}
	}
	return ""
}

// List enumerates all containers: running ones via machinectl, then stopped
// ones by scanning the container base directory for rootfs dirs not already
// reported as running.
func (b *Backend) List() ([]backend.ContainerInfo, error) {
	var containers []backend.ContainerInfo
	seen := make(map[string]bool)
	// Get running containers from machinectl
	out, err := runCommandSilent("machinectl", "list", "--no-pager", "--no-legend")
	if err == nil && strings.TrimSpace(out) != "" {
		for _, line := range strings.Split(out, "\n") {
			fields := strings.Fields(strings.TrimSpace(line))
			if len(fields) == 0 {
				continue
			}
			name := fields[0]
			seen[name] = true
			info := backend.ContainerInfo{
				Name:   name,
				Status: "running",
				RootFS: b.containerDir(name),
			}
			// IP address from machinectl show (last Addresses= line wins,
			// matching the original behavior).
			showOut, showErr := runCommandSilent("machinectl", "show", name,
				"--property=Addresses", "--property=RootDirectory")
			if showErr == nil {
				for _, sl := range strings.Split(showOut, "\n") {
					if strings.HasPrefix(sl, "Addresses=") {
						if addr := strings.TrimPrefix(sl, "Addresses="); addr != "" {
							info.IPAddress = addr
						}
					}
				}
			}
			// Read OS from rootfs
			info.OS = readPrettyName(b.containerDir(name))
			containers = append(containers, info)
		}
	}
	// Scan filesystem for stopped containers
	if entries, err := os.ReadDir(b.containerBaseDir); err == nil {
		for _, entry := range entries {
			if !entry.IsDir() || seen[entry.Name()] {
				continue
			}
			rootfs := filepath.Join(b.containerBaseDir, entry.Name())
			containers = append(containers, backend.ContainerInfo{
				Name:   entry.Name(),
				Status: "stopped",
				RootFS: rootfs,
				OS:     readPrettyName(rootfs),
			})
		}
	}
	return containers, nil
}
// ── Inspect ──────────────────────────────────────────────────────────────────

// Inspect reports detailed status for a single container, combining the
// unit's systemctl state, machinectl runtime properties (IP, leader PID),
// and os-release data from the rootfs.
func (b *Backend) Inspect(name string) (*backend.ContainerInfo, error) {
	rootfs := b.containerDir(name)
	info := &backend.ContainerInfo{Name: name, RootFS: rootfs, Status: "stopped"}
	if !dirExists(rootfs) {
		info.Status = "not found"
	}
	// The unit's active state takes precedence over the filesystem check.
	unitActive, _ := runCommandSilent("systemctl", "is-active", unitName(name))
	switch activeState := strings.TrimSpace(unitActive); {
	case activeState == "active":
		info.Status = "running"
	case activeState != "":
		info.Status = activeState
	}
	// Runtime properties from machinectl when the container is live.
	if isContainerRunning(name) {
		info.Status = "running"
		if showOut, err := runCommandSilent("machinectl", "show", name); err == nil {
			for _, raw := range strings.Split(showOut, "\n") {
				line := strings.TrimSpace(raw)
				if strings.HasPrefix(line, "Addresses=") {
					info.IPAddress = strings.TrimPrefix(line, "Addresses=")
				}
				if strings.HasPrefix(line, "Leader=") {
					fmt.Sscanf(strings.TrimPrefix(line, "Leader="), "%d", &info.PID)
				}
			}
		}
	}
	// OS info from rootfs
	if osRel, err := os.ReadFile(filepath.Join(rootfs, "etc", "os-release")); err == nil {
		for _, line := range strings.Split(string(osRel), "\n") {
			if strings.HasPrefix(line, "PRETTY_NAME=") {
				info.OS = strings.Trim(strings.TrimPrefix(line, "PRETTY_NAME="), "\"")
				break
			}
		}
	}
	return info, nil
}
// ── Extra methods used by CLI commands (not in the interface) ────────────────

// The following exported shims expose internal helpers to the CLI command
// layer without widening the backend.ContainerBackend interface itself.

// IsContainerRunning checks if a container is currently running.
// Exported for use by CLI commands that need direct state checks.
func (b *Backend) IsContainerRunning(name string) bool {
	return isContainerRunning(name)
}

// GetContainerLeaderPID returns the leader PID of a running container.
// Exported for use by CLI commands (shell, attach).
func (b *Backend) GetContainerLeaderPID(name string) (string, error) {
	return getContainerLeaderPID(name)
}

// ContainerDir returns the rootfs dir for a container.
// Exported for use by CLI commands that need rootfs access.
func (b *Backend) ContainerDir(name string) string {
	return b.containerDir(name)
}

// UnitName returns the systemd unit name for a container.
// Exported for use by CLI commands.
func UnitName(name string) string {
	return unitName(name)
}

// UnitFilePath returns the full path to a container's service unit file.
// Exported for use by CLI commands.
func UnitFilePath(name string) string {
	return unitFilePath(name)
}

// WriteUnitFile writes the systemd-nspawn service unit for a container.
// Exported for use by CLI commands (rename).
func WriteUnitFile(name string) error {
	return writeUnitFile(name)
}

// DaemonReload runs systemctl daemon-reload.
// Exported for use by CLI commands.
func DaemonReload() error {
	return daemonReload()
}

// ResolveContainerCommand resolves a bare command to an absolute path in the container.
// Exported for use by CLI commands (shell).
func (b *Backend) ResolveContainerCommand(name, cmd string) string {
	return b.resolveContainerCommand(name, cmd)
}