Volt CLI: source-available under AGPSL v5.0

Complete infrastructure platform CLI:
- Container runtime (systemd-nspawn)
- VoltVisor VMs (Neutron Stardust / QEMU)
- Stellarium CAS (content-addressed storage)
- ORAS Registry
- GitOps integration
- Landlock LSM security
- Compose orchestration
- Mesh networking

Copyright (c) Armored Gates LLC. All rights reserved.
Licensed under AGPSL v5.0
This commit is contained in:
Karl Clinger
2026-03-21 00:30:23 -05:00
commit 0ebe75b2ca
155 changed files with 63317 additions and 0 deletions

View File

@@ -0,0 +1,304 @@
#!/bin/bash
# ──────────────────────────────────────────────────────────────────────────────
# Volt Hybrid Integration Tests — Container Mode Lifecycle
#
# Walks a standard container workload through its whole life:
#   1. Create container from manifest/image
#   2. Start and verify running (process visible, network reachable)
#   3. Execute a command inside the container
#   4. Stop gracefully
#   5. Destroy and verify cleanup
#   6. CAS dedup: two containers from same image share objects
#
# Requires: root, systemd-nspawn, base image at /var/lib/volt/images/ubuntu_24.04
# ──────────────────────────────────────────────────────────────────────────────

# Deliberately no `set -e`: individual check failures are tallied by the
# pass/fail helpers rather than aborting the whole run.
set -u -o pipefail

source "$(dirname "$0")/test_helpers.sh"

# ── Prerequisites ─────────────────────────────────────────────────────────────
# Must be root, with the volt binary and systemd-nspawn available.
require_root
require_volt
require_nspawn

BASE_IMAGE="/var/lib/volt/images/ubuntu_24.04"

# No base image means there is nothing meaningful to test — skip, don't fail.
if ! require_image "$BASE_IMAGE"; then
  echo "SKIP: No base image. Run: sudo debootstrap noble $BASE_IMAGE http://archive.ubuntu.com/ubuntu"
  exit 0
fi

# Tear down any workloads the helpers registered, on every exit path.
trap cleanup_all EXIT

echo "⚡ Volt Hybrid Integration Tests — Container Mode Lifecycle"
echo "════════════════════════════════════════════════════════════════"
# ── 1. Create container ──────────────────────────────────────────────────────
section "📦 1. Create Container"

CON1=$(test_name "lifecycle")

# Capture the creation output; assert on the captured exit status.
output=$(create_container "$CON1" "$BASE_IMAGE" 2>&1)
rc=$?
assert_ok "Create container '$CON1'" test "$rc" -eq 0

# Creation must materialize three artifacts: rootfs, systemd unit, nspawn config.
assert_dir_exists "Container rootfs exists" "/var/lib/volt/containers/$CON1"
assert_file_exists "Unit file exists" "/etc/systemd/system/volt-hybrid@${CON1}.service"
assert_file_exists "Nspawn config exists" "/etc/systemd/nspawn/${CON1}.nspawn"

# The generated unit should launch nspawn in full --boot mode.
unit_path="/etc/systemd/system/volt-hybrid@${CON1}.service"
if grep -q -- "--boot" "$unit_path" 2>/dev/null; then
  pass "Unit file uses --boot mode"
else
  fail "Unit file uses --boot mode" "expected --boot in unit file"
fi
# ── 2. Start and verify running ─────────────────────────────────────────────
section "🚀 2. Start Container"

output=$(start_workload "$CON1" 2>&1)
rc=$?
assert_ok "Start container '$CON1'" test "$rc" -eq 0

# Poll until the machine reaches the running state (30s budget).
if wait_running "$CON1" 30; then
  pass "Container reached running state"
else
  fail "Container reached running state" "timed out after 30s"
fi

# A started machine must be registered with machined.
if sudo machinectl list --no-legend --no-pager 2>/dev/null | grep -q "$CON1"; then
  pass "Container visible in machinectl list"
else
  fail "Container visible in machinectl list"
fi

# The leader PID is the container's init as seen from the host. It must be
# recorded, non-zero, and backed by a live /proc entry.
# NOTE: LEADER_PID is reused later by the stop-phase checks — keep the name.
LEADER_PID=$(get_leader_pid "$CON1")
if [[ -n "$LEADER_PID" && "$LEADER_PID" != "0" ]]; then
  pass "Leader PID is set (PID=$LEADER_PID)"
else
  fail "Leader PID is set" "got: '$LEADER_PID'"
fi
if [[ -n "$LEADER_PID" && -d "/proc/$LEADER_PID" ]]; then
  pass "Leader PID is a real process on host"
else
  fail "Leader PID is a real process on host"
fi
# Network: the container should have been handed an IP; if so, it ought to
# answer a ping from the host (soft check — bridge setup may be absent).
sleep 2 # give the network a moment to come up
CON1_IP=$(get_container_ip "$CON1")
if [[ -z "$CON1_IP" ]]; then
  skip "Container has IP address" "no IP assigned (bridge may not exist)"
else
  pass "Container has IP address ($CON1_IP)"
  if ping -c 1 -W 3 "$CON1_IP" &>/dev/null; then
    pass "Container is network-reachable (ping)"
  else
    skip "Container is network-reachable (ping)" "bridge may not be configured"
  fi
fi

# volt's own listing must include the container. Try the hybrid backend filter
# first; fall back to the unfiltered list (short-circuit keeps it one check).
if sudo "$VOLT" container list --backend hybrid 2>/dev/null | grep -q "$CON1" \
  || sudo "$VOLT" container list 2>/dev/null | grep -q "$CON1"; then
  pass "Container visible in 'volt container list'"
else
  fail "Container visible in 'volt container list'"
fi
# ── 3. Exec command inside container ────────────────────────────────────────
section "🔧 3. Execute Command Inside Container"

# Smoke test: exec'ing `hostname` must produce some output.
hostname_out=$(exec_in "$CON1" hostname 2>&1) || true
if [[ -z "$hostname_out" ]]; then
  fail "exec hostname returns output" "empty output"
else
  pass "exec hostname returns output ('$hostname_out')"
fi

# The rootfs should identify itself as the Ubuntu base image.
os_release=$(exec_in "$CON1" cat /etc/os-release 2>/dev/null)
if grep -qi "ubuntu" <<<"$os_release"; then
  pass "exec cat /etc/os-release shows Ubuntu"
else
  fail "exec cat /etc/os-release shows Ubuntu"
fi

# Round-trip: write a marker file via exec, then read it back via exec.
exec_in "$CON1" sh -c "echo 'volt-test-marker' > /tmp/test-exec-file" 2>/dev/null || true
marker=$(exec_in "$CON1" cat /tmp/test-exec-file 2>/dev/null)
if grep -q "volt-test-marker" <<<"$marker"; then
  pass "exec can create and read files inside container"
else
  fail "exec can create and read files inside container"
fi

# Runtime-injected env vars — soft checks, skipped if injection isn't wired yet.
if exec_in "$CON1" env 2>/dev/null | grep -q "VOLT_CONTAINER=$CON1"; then
  pass "VOLT_CONTAINER env var is set inside container"
else
  skip "VOLT_CONTAINER env var is set inside container" "may not be injected yet"
fi
if exec_in "$CON1" env 2>/dev/null | grep -q "VOLT_RUNTIME=hybrid"; then
  pass "VOLT_RUNTIME=hybrid env var is set"
else
  skip "VOLT_RUNTIME=hybrid env var is set" "may not be injected yet"
fi
# ── 4. Stop gracefully ──────────────────────────────────────────────────────
section "⏹️ 4. Stop Container"

output=$(stop_workload "$CON1" 2>&1)
rc=$?
assert_ok "Stop container '$CON1'" test "$rc" -eq 0

# Give machined a moment to settle, then confirm the machine left "running".
sleep 2
state=$(sudo machinectl show "$CON1" --property=State 2>/dev/null)
if grep -q "running" <<<"$state"; then
  fail "Container is no longer running after stop"
else
  pass "Container is no longer running after stop"
fi

# The leader PID recorded at start time (if any) must no longer exist.
if [[ -z "$LEADER_PID" ]]; then
  skip "Leader PID is gone after stop" "no PID was recorded"
elif [[ ! -d "/proc/$LEADER_PID" ]]; then
  pass "Leader PID ($LEADER_PID) is gone after stop"
else
  fail "Leader PID ($LEADER_PID) is gone after stop" "process still exists"
fi

# Stop must preserve data — only destroy is allowed to remove the rootfs.
assert_dir_exists "Rootfs still exists after stop" "/var/lib/volt/containers/$CON1"
# ── 5. Destroy and verify cleanup ───────────────────────────────────────────
section "🗑️ 5. Destroy Container"
output=$(destroy_workload "$CON1" 2>&1)
assert_ok "Destroy container '$CON1'" test $? -eq 0
# Destroy must remove all on-disk state: rootfs, unit file, nspawn config.
if [[ ! -d "/var/lib/volt/containers/$CON1" ]]; then
  pass "Rootfs removed after destroy"
else
  fail "Rootfs removed after destroy" "directory still exists"
fi
if [[ ! -f "/etc/systemd/system/volt-hybrid@${CON1}.service" ]]; then
  pass "Unit file removed after destroy"
else
  fail "Unit file removed after destroy"
fi
if [[ ! -f "/etc/systemd/nspawn/${CON1}.nspawn" ]]; then
  pass "Nspawn config removed after destroy"
else
  fail "Nspawn config removed after destroy"
fi
# ...and the machine must be deregistered from machined.
if ! sudo machinectl list --no-legend --no-pager 2>/dev/null | grep -q "$CON1"; then
  pass "Container gone from machinectl list"
else
  fail "Container gone from machinectl list"
fi
# Remove from the EXIT-trap cleanup list since we destroyed manually.
# BUGFIX: the previous `("${CLEANUP_WORKLOADS[@]/$CON1/}")` performed a
# *substring* substitution — it left an empty element behind and would mangle
# any other entry that merely contains "$CON1" (e.g. a "-2" sibling). Rebuild
# the array keeping everything except exact matches. The `${arr[@]+...}` form
# keeps `set -u` happy when the array is empty on older bash.
_remaining=()
for _w in ${CLEANUP_WORKLOADS[@]+"${CLEANUP_WORKLOADS[@]}"}; do
  [[ "$_w" == "$CON1" ]] || _remaining+=("$_w")
done
CLEANUP_WORKLOADS=(${_remaining[@]+"${_remaining[@]}"})
unset _remaining _w
# ── 6. CAS Dedup — Two containers from same image ───────────────────────────
section "🔗 6. CAS Dedup Verification"
CON_A=$(test_name "dedup-a")
CON_B=$(test_name "dedup-b")
# BUGFIX: was `create_container ... 2>&1 >/dev/null`, which points stderr at
# the *original* stdout (the terminal) and only then discards stdout — so
# error output leaked to the console. `>/dev/null 2>&1` silences both streams
# as intended; the exit status is unaffected either way.
create_container "$CON_A" "$BASE_IMAGE" >/dev/null 2>&1
assert_ok "Create first container for dedup test" test $? -eq 0
create_container "$CON_B" "$BASE_IMAGE" >/dev/null 2>&1
assert_ok "Create second container for dedup test" test $? -eq 0
# Both containers must have their own rootfs directories.
assert_dir_exists "Container A rootfs exists" "/var/lib/volt/containers/$CON_A"
assert_dir_exists "Container B rootfs exists" "/var/lib/volt/containers/$CON_B"
# If CAS is active, two containers built from the same image should share
# content-addressed objects; otherwise (direct copy) these checks are skipped.
CAS_DIR="/var/lib/volt/cas/objects"
if [[ -d "$CAS_DIR" ]]; then
  CAS_COUNT=$(find "$CAS_DIR" -type f 2>/dev/null | wc -l)
  if [[ $CAS_COUNT -gt 0 ]]; then
    pass "CAS objects exist ($CAS_COUNT objects)"
    # Each container should hold refs pinning its CAS objects against GC.
    if [[ -d "/var/lib/volt/cas/refs" ]]; then
      REFS_A=$(find /var/lib/volt/cas/refs -name "*$CON_A*" 2>/dev/null | wc -l)
      REFS_B=$(find /var/lib/volt/cas/refs -name "*$CON_B*" 2>/dev/null | wc -l)
      if [[ $REFS_A -gt 0 && $REFS_B -gt 0 ]]; then
        pass "Both containers have CAS refs"
      else
        skip "Both containers have CAS refs" "CAS refs not found (may use direct copy)"
      fi
    else
      skip "CAS refs directory check" "no refs dir"
    fi
  else
    skip "CAS dedup objects" "CAS store empty — may use direct copy instead"
  fi
else
  skip "CAS dedup verification" "CAS not active (containers use direct rootfs copy)"
fi
# Dedup must not alias the two writable rootfs paths (paths differ iff the
# generated names differ — this guards against test_name collisions too).
if [[ "/var/lib/volt/containers/$CON_A" != "/var/lib/volt/containers/$CON_B" ]]; then
  pass "Containers have independent rootfs paths"
else
  fail "Containers have independent rootfs paths"
fi
# Same image → identical content; spot-check a couple of representative files.
for f in "etc/os-release" "usr/bin/env"; do
  if [[ -f "/var/lib/volt/containers/$CON_A/$f" ]] && [[ -f "/var/lib/volt/containers/$CON_B/$f" ]]; then
    if diff -q "/var/lib/volt/containers/$CON_A/$f" "/var/lib/volt/containers/$CON_B/$f" &>/dev/null; then
      pass "Identical content: $f"
    else
      fail "Identical content: $f" "files differ"
    fi
  fi
done
# Cleanup dedup containers and deregister them from the EXIT-trap list.
destroy_workload "$CON_A"
destroy_workload "$CON_B"
# BUGFIX: the old `${arr[@]/name/}` removal did substring replacement, leaving
# empty elements and corrupting entries containing the name. Filter by exact
# match instead (set -u safe on empty arrays via `${arr[@]+...}`).
_remaining=()
for _w in ${CLEANUP_WORKLOADS[@]+"${CLEANUP_WORKLOADS[@]}"}; do
  [[ "$_w" == "$CON_A" || "$_w" == "$CON_B" ]] || _remaining+=("$_w")
done
CLEANUP_WORKLOADS=(${_remaining[@]+"${_remaining[@]}"})
unset _remaining _w
# ── Results ──────────────────────────────────────────────────────────────────
# Print the summary and propagate its pass/fail status as the script's exit code.
print_results "Container Mode Lifecycle"
rc=$?
exit "$rc"