Volt CLI: source-available under AGPSL v5.0

Complete infrastructure platform CLI:
- Container runtime (systemd-nspawn)
- VoltVisor VMs (Neutron Stardust / QEMU)
- Stellarium CAS (content-addressed storage)
- ORAS Registry
- GitOps integration
- Landlock LSM security
- Compose orchestration
- Mesh networking

Copyright (c) Armored Gates LLC. All rights reserved.
Licensed under AGPSL v5.0
This commit is contained in:
Karl Clinger
2026-03-21 00:30:23 -05:00
commit 0ebe75b2ca
155 changed files with 63317 additions and 0 deletions

1084
pkg/storage/cas.go Normal file

File diff suppressed because it is too large. Load Diff

View File

@@ -0,0 +1,503 @@
package storage
import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
"os"
"path/filepath"
"testing"
"time"
)
// helper: create a blob with known content, return its digest
func createTestBlob(t *testing.T, objectsDir string, content []byte) string {
t.Helper()
h := sha256.Sum256(content)
digest := hex.EncodeToString(h[:])
if err := os.WriteFile(filepath.Join(objectsDir, digest), content, 0644); err != nil {
t.Fatalf("create blob: %v", err)
}
return digest
}
// createTestManifest writes a BlobManifest named name into refsDir. The ref
// file is named "<name>-<first 12 hex chars of the manifest's sha256>.json".
func createTestManifest(t *testing.T, refsDir, name string, objects map[string]string) {
	t.Helper()
	manifest := BlobManifest{
		Name:      name,
		CreatedAt: time.Now().Format(time.RFC3339),
		Objects:   objects,
	}
	payload, err := json.MarshalIndent(manifest, "", " ")
	if err != nil {
		t.Fatalf("marshal manifest: %v", err)
	}
	sum := sha256.Sum256(payload)
	manifestDigest := hex.EncodeToString(sum[:])
	target := filepath.Join(refsDir, name+"-"+manifestDigest[:12]+".json")
	if err := os.WriteFile(target, payload, 0644); err != nil {
		t.Fatalf("write manifest: %v", err)
	}
}
// setupTestCAS initializes a CASStore rooted in a fresh temp directory that
// the testing framework removes automatically when the test finishes.
func setupTestCAS(t *testing.T) *CASStore {
	t.Helper()
	store := NewCASStore(t.TempDir())
	if err := store.Init(); err != nil {
		t.Fatalf("init CAS: %v", err)
	}
	return store
}
// TestDedupAnalytics exercises CASStore.Analytics over a store with three
// distinct blobs and two manifests that share one blob (digestA). It checks
// blob/reference counts, the dedup ratio, storage savings, per-manifest
// stats, and the top-blob ranking.
func TestDedupAnalytics(t *testing.T) {
	store := setupTestCAS(t)
	// Create 3 distinct blobs
	digestA := createTestBlob(t, store.ObjectsDir(), []byte("file-content-alpha"))
	digestB := createTestBlob(t, store.ObjectsDir(), []byte("file-content-bravo"))
	digestC := createTestBlob(t, store.ObjectsDir(), []byte("file-content-charlie"))
	// Manifest 1: references A and B
	createTestManifest(t, store.refsDir, "manifest1", map[string]string{
		"bin/alpha": digestA,
		"bin/bravo": digestB,
	})
	// Manifest 2: references A and C (A is shared/deduped)
	createTestManifest(t, store.refsDir, "manifest2", map[string]string{
		"bin/alpha":   digestA,
		"lib/charlie": digestC,
	})
	report, err := store.Analytics()
	if err != nil {
		t.Fatalf("Analytics: %v", err)
	}
	// 3 distinct blobs exist on disk
	if report.TotalBlobs != 3 {
		t.Errorf("TotalBlobs = %d, want 3", report.TotalBlobs)
	}
	// 4 total references across both manifests (A counted once per manifest)
	if report.TotalReferences != 4 {
		t.Errorf("TotalReferences = %d, want 4", report.TotalReferences)
	}
	// 3 unique blobs are referenced
	if report.UniqueBlobs != 3 {
		t.Errorf("UniqueBlobs = %d, want 3", report.UniqueBlobs)
	}
	// Dedup ratio = total references / unique blobs = 4/3 ≈ 1.33.
	// Compared against a small window rather than an exact float.
	if report.DedupRatio < 1.3 || report.DedupRatio > 1.4 {
		t.Errorf("DedupRatio = %.2f, want ~1.33", report.DedupRatio)
	}
	// Storage savings: blob A (18 bytes) is referenced 2 times, saving 1 copy
	sizeA := int64(len("file-content-alpha"))
	if report.StorageSavings != sizeA {
		t.Errorf("StorageSavings = %d, want %d", report.StorageSavings, sizeA)
	}
	// 2 manifests were scanned
	if len(report.ManifestStats) != 2 {
		t.Errorf("ManifestStats count = %d, want 2", len(report.ManifestStats))
	}
	// Top blobs: A should be #1 with 2 refs
	if len(report.TopBlobs) == 0 {
		t.Fatal("expected TopBlobs to be non-empty")
	}
	if report.TopBlobs[0].Digest != digestA {
		t.Errorf("TopBlobs[0].Digest = %s, want %s", report.TopBlobs[0].Digest, digestA)
	}
	if report.TopBlobs[0].RefCount != 2 {
		t.Errorf("TopBlobs[0].RefCount = %d, want 2", report.TopBlobs[0].RefCount)
	}
}
// TestAnalyticsEmptyStore checks that Analytics on a freshly initialized,
// empty store succeeds and reports zero blobs and zero references.
func TestAnalyticsEmptyStore(t *testing.T) {
	report, err := setupTestCAS(t).Analytics()
	if err != nil {
		t.Fatalf("Analytics: %v", err)
	}
	if got := report.TotalBlobs; got != 0 {
		t.Errorf("TotalBlobs = %d, want 0", got)
	}
	if got := report.TotalReferences; got != 0 {
		t.Errorf("TotalReferences = %d, want 0", got)
	}
}
// TestAnalyticsSizeDistribution verifies that Analytics buckets blobs into
// size classes: one blob is created in each of the Tiny, Small, and Medium
// buckets, and each bucket must report exactly one entry.
func TestAnalyticsSizeDistribution(t *testing.T) {
	store := setupTestCAS(t)
	// Tiny: < 1 KiB
	createTestBlob(t, store.ObjectsDir(), []byte("tiny"))
	// Small: 1 KiB – 64 KiB (create a 2 KiB blob)
	smallContent := make([]byte, 2048)
	for i := range smallContent {
		smallContent[i] = byte(i % 256)
	}
	createTestBlob(t, store.ObjectsDir(), smallContent)
	// Medium: 64 KiB – 1 MiB (create a 100 KiB blob)
	// The +1 offset keeps this content distinct from the 2 KiB blob's
	// byte pattern, so the two blobs get different digests.
	mediumContent := make([]byte, 100*1024)
	for i := range mediumContent {
		mediumContent[i] = byte((i + 1) % 256)
	}
	createTestBlob(t, store.ObjectsDir(), mediumContent)
	report, err := store.Analytics()
	if err != nil {
		t.Fatalf("Analytics: %v", err)
	}
	if report.SizeDistribution.Tiny != 1 {
		t.Errorf("Tiny = %d, want 1", report.SizeDistribution.Tiny)
	}
	if report.SizeDistribution.Small != 1 {
		t.Errorf("Small = %d, want 1", report.SizeDistribution.Small)
	}
	if report.SizeDistribution.Medium != 1 {
		t.Errorf("Medium = %d, want 1", report.SizeDistribution.Medium)
	}
}
// TestRetentionMaxAge verifies that a MaxAge policy flags only blobs older
// than the cutoff, and that a dry run does not delete anything.
func TestRetentionMaxAge(t *testing.T) {
	store := setupTestCAS(t)
	// Create blobs — one "old", one "new"
	oldDigest := createTestBlob(t, store.ObjectsDir(), []byte("old-blob-content"))
	newDigest := createTestBlob(t, store.ObjectsDir(), []byte("new-blob-content"))
	// Make the "old" blob look 45 days old. Fail fast if Chtimes errors —
	// the original silently ignored this, which would invalidate the test's
	// age premise without any signal.
	oldTime := time.Now().Add(-45 * 24 * time.Hour)
	if err := os.Chtimes(filepath.Join(store.ObjectsDir(), oldDigest), oldTime, oldTime); err != nil {
		t.Fatalf("chtimes: %v", err)
	}
	// Neither blob is referenced by any manifest → both are unreferenced
	policy := RetentionPolicy{
		MaxAge:    "30d",
		MinCopies: 1,
	}
	result, err := store.ApplyRetention(policy, true) // dry run
	if err != nil {
		t.Fatalf("ApplyRetention: %v", err)
	}
	// Only the old blob should be a candidate
	if len(result.Candidates) != 1 {
		t.Fatalf("Candidates = %d, want 1", len(result.Candidates))
	}
	if result.Candidates[0].Digest != oldDigest {
		t.Errorf("Candidate digest = %s, want %s", result.Candidates[0].Digest, oldDigest)
	}
	// New blob should NOT be a candidate
	for _, c := range result.Candidates {
		if c.Digest == newDigest {
			t.Errorf("new blob should not be a candidate")
		}
	}
	// Verify dry run didn't delete anything
	if _, err := os.Stat(filepath.Join(store.ObjectsDir(), oldDigest)); err != nil {
		t.Errorf("dry run should not have deleted old blob")
	}
}
// TestRetentionMaxAgeExecute verifies that a non-dry-run MaxAge retention
// pass actually deletes an expired, unreferenced blob from disk.
func TestRetentionMaxAgeExecute(t *testing.T) {
	store := setupTestCAS(t)
	oldDigest := createTestBlob(t, store.ObjectsDir(), []byte("old-blob-for-deletion"))
	// Backdate the blob 45 days; a failed Chtimes must abort the test
	// (the original ignored the error, risking a false pass/fail).
	oldTime := time.Now().Add(-45 * 24 * time.Hour)
	if err := os.Chtimes(filepath.Join(store.ObjectsDir(), oldDigest), oldTime, oldTime); err != nil {
		t.Fatalf("chtimes: %v", err)
	}
	policy := RetentionPolicy{
		MaxAge:    "30d",
		MinCopies: 1,
	}
	result, err := store.ApplyRetention(policy, false) // actually delete
	if err != nil {
		t.Fatalf("ApplyRetention: %v", err)
	}
	if result.TotalDeleted != 1 {
		t.Errorf("TotalDeleted = %d, want 1", result.TotalDeleted)
	}
	// Blob should be gone
	if _, err := os.Stat(filepath.Join(store.ObjectsDir(), oldDigest)); !os.IsNotExist(err) {
		t.Errorf("old blob should have been deleted")
	}
}
// TestRetentionMaxSize verifies that a MaxSize policy selects the oldest
// unreferenced blobs first until the store fits under the byte limit.
func TestRetentionMaxSize(t *testing.T) {
	store := setupTestCAS(t)
	// Create several blobs totaling more than our limit
	blobs := []struct {
		content []byte
		age     time.Duration
	}{
		{make([]byte, 500), -10 * 24 * time.Hour}, // 500 bytes, 10 days old
		{make([]byte, 600), -20 * 24 * time.Hour}, // 600 bytes, 20 days old (oldest)
		{make([]byte, 400), -5 * 24 * time.Hour},  // 400 bytes, 5 days old
	}
	// Fill with distinct content so each blob hashes to a unique digest
	for i := range blobs {
		for j := range blobs[i].content {
			blobs[i].content[j] = byte(i*100 + j%256)
		}
	}
	var digests []string
	for _, b := range blobs {
		d := createTestBlob(t, store.ObjectsDir(), b.content)
		digests = append(digests, d)
		ts := time.Now().Add(b.age)
		// The age-based eviction ordering below is meaningless if the
		// mtime can't be set, so fail fast instead of ignoring the error.
		if err := os.Chtimes(filepath.Join(store.ObjectsDir(), d), ts, ts); err != nil {
			t.Fatalf("chtimes %s: %v", d, err)
		}
	}
	// Total: 1500 bytes. Set max to 1000 bytes.
	policy := RetentionPolicy{
		MaxSize:   "1000",
		MinCopies: 1,
	}
	result, err := store.ApplyRetention(policy, true)
	if err != nil {
		t.Fatalf("ApplyRetention: %v", err)
	}
	// Should identify enough blobs to get under 1000 bytes
	var freedTotal int64
	for _, c := range result.Candidates {
		freedTotal += c.Size
	}
	remaining := int64(1500) - freedTotal
	if remaining > 1000 {
		t.Errorf("remaining %d bytes still over 1000 limit after retention", remaining)
	}
	// The oldest blob (20 days) should be deleted first
	if len(result.Candidates) == 0 {
		t.Fatal("expected at least one candidate")
	}
	// First candidate should be the oldest unreferenced blob
	if result.Candidates[0].Digest != digests[1] { // 20 days old
		t.Errorf("expected oldest blob to be first candidate, got %s", result.Candidates[0].Digest[:16])
	}
}
// TestRetentionProtectsReferenced verifies that a blob referenced by a
// manifest (refs >= MinCopies) is never a deletion candidate under MaxAge,
// while an equally old unreferenced blob is.
func TestRetentionProtectsReferenced(t *testing.T) {
	store := setupTestCAS(t)
	// Create blobs
	referencedDigest := createTestBlob(t, store.ObjectsDir(), []byte("referenced-blob"))
	unreferencedDigest := createTestBlob(t, store.ObjectsDir(), []byte("unreferenced-blob"))
	// Make both blobs old. Check Chtimes errors — the test's premise
	// depends on both blobs actually being past the MaxAge cutoff.
	oldTime := time.Now().Add(-60 * 24 * time.Hour)
	for _, d := range []string{referencedDigest, unreferencedDigest} {
		if err := os.Chtimes(filepath.Join(store.ObjectsDir(), d), oldTime, oldTime); err != nil {
			t.Fatalf("chtimes %s: %v", d, err)
		}
	}
	// Create a manifest referencing only the first blob
	createTestManifest(t, store.refsDir, "keep-manifest", map[string]string{
		"important/file": referencedDigest,
	})
	policy := RetentionPolicy{
		MaxAge:    "30d",
		MinCopies: 1, // blob has 1 ref, so it's protected
	}
	result, err := store.ApplyRetention(policy, true)
	if err != nil {
		t.Fatalf("ApplyRetention: %v", err)
	}
	// Only unreferenced blob should be a candidate
	for _, c := range result.Candidates {
		if c.Digest == referencedDigest {
			t.Errorf("referenced blob %s should be protected, but was marked for deletion", referencedDigest[:16])
		}
	}
	// Unreferenced blob should be a candidate
	found := false
	for _, c := range result.Candidates {
		if c.Digest == unreferencedDigest {
			found = true
			break
		}
	}
	if !found {
		t.Errorf("unreferenced blob should be a candidate for deletion")
	}
}
// TestRetentionProtectsReferencedMaxSize verifies that a referenced blob
// survives a non-dry-run MaxSize pass even when keeping it leaves the store
// over the size limit, while the unreferenced blob is deleted.
func TestRetentionProtectsReferencedMaxSize(t *testing.T) {
	store := setupTestCAS(t)
	// Referenced blob: 800 bytes of a distinct pattern.
	refContent := make([]byte, 800)
	for i := range refContent {
		refContent[i] = byte(i % 256)
	}
	referencedDigest := createTestBlob(t, store.ObjectsDir(), refContent)
	// Unreferenced blob: 500 bytes, offset pattern so the digest differs.
	unrefContent := make([]byte, 500)
	for i := range unrefContent {
		unrefContent[i] = byte((i + 50) % 256)
	}
	unreferencedDigest := createTestBlob(t, store.ObjectsDir(), unrefContent)
	// Reference the 800-byte blob
	createTestManifest(t, store.refsDir, "protect-me", map[string]string{
		"big/file": referencedDigest,
	})
	// Total: 1300 bytes. Limit: 500 bytes.
	// Even though we're over limit, the referenced blob must be kept.
	policy := RetentionPolicy{
		MaxSize:   "500",
		MinCopies: 1,
	}
	// The returned result is not inspected; deletion effects are asserted
	// directly against the filesystem below. (The original bound it to a
	// variable and then discarded it with `_ = result`.)
	if _, err := store.ApplyRetention(policy, false); err != nil { // actually delete
		t.Fatalf("ApplyRetention: %v", err)
	}
	// Referenced blob must still exist
	if _, err := os.Stat(filepath.Join(store.ObjectsDir(), referencedDigest)); err != nil {
		t.Errorf("referenced blob was deleted despite having refs >= min_copies")
	}
	// Unreferenced blob should be deleted
	if _, err := os.Stat(filepath.Join(store.ObjectsDir(), unreferencedDigest)); !os.IsNotExist(err) {
		t.Errorf("unreferenced blob should have been deleted")
	}
}
// TestGCWithRetention verifies the combined GC + retention pass: GC should
// report the unreferenced blob, and the retention result should flag the
// same blob as an age-based candidate.
func TestGCWithRetention(t *testing.T) {
	store := setupTestCAS(t)
	// Create blobs
	digestA := createTestBlob(t, store.ObjectsDir(), []byte("blob-a-content"))
	digestB := createTestBlob(t, store.ObjectsDir(), []byte("blob-b-content"))
	// A is referenced, B is not
	createTestManifest(t, store.refsDir, "gc-test", map[string]string{
		"file/a": digestA,
	})
	// Make B old — and fail fast if the mtime can't be set, since the
	// retention assertion below depends on it (the original ignored this).
	oldTime := time.Now().Add(-90 * 24 * time.Hour)
	if err := os.Chtimes(filepath.Join(store.ObjectsDir(), digestB), oldTime, oldTime); err != nil {
		t.Fatalf("chtimes: %v", err)
	}
	policy := RetentionPolicy{
		MaxAge:    "30d",
		MinCopies: 1,
	}
	gcResult, retResult, err := store.GCWithRetention(&policy, true) // dry run
	if err != nil {
		t.Fatalf("GCWithRetention: %v", err)
	}
	// GC should find B as unreferenced
	if len(gcResult.Unreferenced) != 1 {
		t.Errorf("GC Unreferenced = %d, want 1", len(gcResult.Unreferenced))
	}
	// Retention should also flag B
	if retResult == nil {
		t.Fatal("expected retention result")
	}
	if len(retResult.Candidates) != 1 {
		t.Errorf("Retention Candidates = %d, want 1", len(retResult.Candidates))
	}
}
// TestParseDuration table-tests ParseDuration across day/week/hour suffixes,
// zero and empty inputs, and one malformed string.
func TestParseDuration(t *testing.T) {
	cases := []struct {
		in      string
		want    time.Duration
		wantErr bool
	}{
		{"30d", 30 * 24 * time.Hour, false},
		{"7d", 7 * 24 * time.Hour, false},
		{"2w", 14 * 24 * time.Hour, false},
		{"12h", 12 * time.Hour, false},
		{"0", 0, false},
		{"", 0, false},
		{"xyz", 0, true},
	}
	for _, c := range cases {
		got, err := ParseDuration(c.in)
		switch {
		case c.wantErr:
			if err == nil {
				t.Errorf("ParseDuration(%q) expected error", c.in)
			}
		case err != nil:
			t.Errorf("ParseDuration(%q) error: %v", c.in, err)
		case got != c.want:
			t.Errorf("ParseDuration(%q) = %v, want %v", c.in, got, c.want)
		}
	}
}
// TestParseSize table-tests ParseSize across K/M/G/T suffixes (binary
// multiples), bare byte counts, zero/empty inputs, and a malformed string.
func TestParseSize(t *testing.T) {
	cases := []struct {
		in      string
		want    int64
		wantErr bool
	}{
		{"10G", 10 * 1024 * 1024 * 1024, false},
		{"500M", 500 * 1024 * 1024, false},
		{"1T", 1024 * 1024 * 1024 * 1024, false},
		{"1024K", 1024 * 1024, false},
		{"1024", 1024, false},
		{"0", 0, false},
		{"", 0, false},
		{"abc", 0, true},
	}
	for _, c := range cases {
		got, err := ParseSize(c.in)
		switch {
		case c.wantErr:
			if err == nil {
				t.Errorf("ParseSize(%q) expected error", c.in)
			}
		case err != nil:
			t.Errorf("ParseSize(%q) error: %v", c.in, err)
		case got != c.want:
			t.Errorf("ParseSize(%q) = %d, want %d", c.in, got, c.want)
		}
	}
}

301
pkg/storage/storage.go Normal file
View File

@@ -0,0 +1,301 @@
/*
Volt Storage - Git-attached persistent storage
Features:
- Git repositories for persistence
- Shared storage across VMs
- Copy-on-write overlays
- Snapshot/restore via git
- Multi-developer collaboration
*/
package storage
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
// AttachedStorage represents storage attached to a VM: where the data lives
// on the host, where it is mounted inside the guest, and by what mechanism.
type AttachedStorage struct {
	Name      string // display name, derived from the source path or repo URL
	Source    string // Host path or git URL
	Target    string // Mount point inside VM
	Type      string // git, bind, overlay
	ReadOnly  bool   // mount read-only (reflected in MountEntry's options)
	GitBranch string // checked-out branch (set for Type == "git")
	GitRemote string // git remote name (set for Type == "git")
}
// Manager handles storage operations: git-backed attachments, bind mounts,
// copy-on-write overlays, and git-based snapshot/restore.
type Manager struct {
	baseDir    string // root of all managed storage state
	cacheDir   string // <baseDir>/cache — cached git checkouts
	overlayDir string // <baseDir>/overlays — per-VM overlay upper/work/merged dirs
}
// NewManager constructs a storage Manager rooted at baseDir; the cache and
// overlay directories are derived from it. Call Setup to create them on disk.
func NewManager(baseDir string) *Manager {
	m := &Manager{baseDir: baseDir}
	m.cacheDir = filepath.Join(baseDir, "cache")
	m.overlayDir = filepath.Join(baseDir, "overlays")
	return m
}
// Setup creates the base, cache, and overlay directories (including any
// missing parents); it is a no-op for directories that already exist.
func (m *Manager) Setup() error {
	for _, dir := range []string{m.baseDir, m.cacheDir, m.overlayDir} {
		if err := os.MkdirAll(dir, 0755); err != nil {
			return fmt.Errorf("failed to create %s: %w", dir, err)
		}
	}
	return nil
}
// AttachGit clones or updates a git repository for VM use.
//
// The repo is shallow-cloned (or shallow-fetched if already cached) into
// <cacheDir>/git/<repo>, then an overlayfs mount over that checkout is
// attempted so the VM gets a copy-on-write view. Fetch/checkout errors on
// the update path are deliberately ignored to keep offline operation working.
//
// NOTE(review): mergedDir (the overlay mount point, or localPath if the
// mount fails) is computed but never recorded in the returned
// AttachedStorage — Source stays the git URL and Target is /mnt/<repo>.
// Confirm whether callers need the merged path surfaced.
func (m *Manager) AttachGit(vmName string, gitURL string, branch string) (*AttachedStorage, error) {
	// Determine local path for this repo (cache/git/<name>, derived from URL)
	repoName := filepath.Base(strings.TrimSuffix(gitURL, ".git"))
	localPath := filepath.Join(m.cacheDir, "git", repoName)
	// Clone or fetch
	if _, err := os.Stat(filepath.Join(localPath, ".git")); os.IsNotExist(err) {
		// Clone (shallow, single branch)
		fmt.Printf("Cloning %s...\n", gitURL)
		cmd := exec.Command("git", "clone", "--depth=1", "-b", branch, gitURL, localPath)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		if err := cmd.Run(); err != nil {
			return nil, fmt.Errorf("git clone failed: %w", err)
		}
	} else {
		// Fetch latest
		fmt.Printf("Fetching latest from %s...\n", gitURL)
		cmd := exec.Command("git", "-C", localPath, "fetch", "--depth=1", "origin", branch)
		cmd.Run() // Ignore errors for offline operation
		cmd = exec.Command("git", "-C", localPath, "checkout", branch)
		cmd.Run() // best-effort; errors intentionally ignored
	}
	// Create overlay for this VM (copy-on-write)
	overlayPath := filepath.Join(m.overlayDir, vmName, repoName)
	upperDir := filepath.Join(overlayPath, "upper")
	workDir := filepath.Join(overlayPath, "work")
	mergedDir := filepath.Join(overlayPath, "merged")
	for _, dir := range []string{upperDir, workDir, mergedDir} {
		os.MkdirAll(dir, 0755) // best-effort; the mount below fails if these are missing
	}
	// Mount overlay (requires root and kernel overlayfs support)
	mountCmd := exec.Command("mount", "-t", "overlay", "overlay",
		"-o", fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", localPath, upperDir, workDir),
		mergedDir)
	if err := mountCmd.Run(); err != nil {
		// Fallback: just use the local path directly
		mergedDir = localPath
	}
	return &AttachedStorage{
		Name:      repoName,
		Source:    gitURL,
		Target:    filepath.Join("/mnt", repoName),
		Type:      "git",
		GitBranch: branch,
		GitRemote: "origin",
	}, nil
}
// AttachBind creates a bind-mount descriptor mapping hostPath onto vmPath.
// The mount itself is not performed here; see MountEntry.
//
// Returns an error if hostPath cannot be stat'd. The underlying error is
// wrapped (%w) so callers can distinguish "does not exist" from e.g.
// permission problems — the original discarded it entirely.
func (m *Manager) AttachBind(vmName, hostPath, vmPath string, readOnly bool) (*AttachedStorage, error) {
	// Verify source exists (or is at least reachable).
	if _, err := os.Stat(hostPath); err != nil {
		return nil, fmt.Errorf("source path does not exist: %s: %w", hostPath, err)
	}
	return &AttachedStorage{
		Name:     filepath.Base(hostPath),
		Source:   hostPath,
		Target:   vmPath,
		Type:     "bind",
		ReadOnly: readOnly,
	}, nil
}
// CreateOverlay prepares the upper/work/merged directory trio for a
// copy-on-write overlay of basePath dedicated to vmName and returns its
// descriptor. The overlay filesystem is not mounted here.
func (m *Manager) CreateOverlay(vmName, basePath, vmPath string) (*AttachedStorage, error) {
	root := filepath.Join(m.overlayDir, vmName, filepath.Base(basePath))
	subdirs := []string{
		filepath.Join(root, "upper"),
		filepath.Join(root, "work"),
		filepath.Join(root, "merged"),
	}
	for _, d := range subdirs {
		if err := os.MkdirAll(d, 0755); err != nil {
			return nil, fmt.Errorf("failed to create overlay dir: %w", err)
		}
	}
	return &AttachedStorage{
		Name:   filepath.Base(basePath),
		Source: basePath,
		Target: vmPath,
		Type:   "overlay",
	}, nil
}
// Snapshot creates a git commit of VM changes.
//
// It mirrors the overlay's "upper" directory (the copy-on-write delta) into
// <baseDir>/snapshots/<vm>/<storage> with rsync, then commits the result.
// All git/rsync invocations are deliberately best-effort: their errors are
// ignored, so a failed commit is silent. Requires `git` and `rsync` on PATH
// — NOTE(review): confirm these are guaranteed by the install environment.
func (m *Manager) Snapshot(vmName, storageName, message string) error {
	overlayPath := filepath.Join(m.overlayDir, vmName, storageName, "upper")
	// Check if there are changes (i.e. the overlay upper dir exists at all)
	if _, err := os.Stat(overlayPath); os.IsNotExist(err) {
		return fmt.Errorf("no overlay found for %s/%s", vmName, storageName)
	}
	// Create snapshot directory
	snapshotDir := filepath.Join(m.baseDir, "snapshots", vmName, storageName)
	os.MkdirAll(snapshotDir, 0755)
	// Initialize git if needed (first snapshot for this storage)
	gitDir := filepath.Join(snapshotDir, ".git")
	if _, err := os.Stat(gitDir); os.IsNotExist(err) {
		exec.Command("git", "-C", snapshotDir, "init").Run()
		exec.Command("git", "-C", snapshotDir, "config", "user.email", "volt@localhost").Run()
		exec.Command("git", "-C", snapshotDir, "config", "user.name", "Volt").Run()
	}
	// Copy changes to snapshot dir; --delete mirrors removals too
	exec.Command("rsync", "-a", "--delete", overlayPath+"/", snapshotDir+"/").Run()
	// Commit, defaulting the message to a timestamp
	timestamp := time.Now().Format("2006-01-02 15:04:05")
	if message == "" {
		message = fmt.Sprintf("Snapshot at %s", timestamp)
	}
	exec.Command("git", "-C", snapshotDir, "add", "-A").Run()
	exec.Command("git", "-C", snapshotDir, "commit", "-m", message).Run()
	return nil
}
// Restore restores VM storage from a snapshot.
//
// If commitHash is non-empty, the snapshot repo is first checked out at
// that commit; otherwise whatever is currently checked out is used. The
// overlay "upper" directory is wiped and repopulated from the snapshot via
// rsync. As in Snapshot, git/rsync errors are deliberately ignored — a bad
// hash or missing snapshot dir fails silently. NOTE(review): confirm this
// best-effort behavior is intended for the restore path as well.
func (m *Manager) Restore(vmName, storageName, commitHash string) error {
	snapshotDir := filepath.Join(m.baseDir, "snapshots", vmName, storageName)
	overlayUpper := filepath.Join(m.overlayDir, vmName, storageName, "upper")
	// Checkout specific commit (optional)
	if commitHash != "" {
		exec.Command("git", "-C", snapshotDir, "checkout", commitHash).Run()
	}
	// Restore to overlay upper: clear it, then mirror the snapshot in
	os.RemoveAll(overlayUpper)
	os.MkdirAll(overlayUpper, 0755)
	exec.Command("rsync", "-a", snapshotDir+"/", overlayUpper+"/").Run()
	return nil
}
// ListSnapshots returns up to the 20 most recent snapshots for a storage,
// parsed from `git log --oneline` (hash, then message).
func (m *Manager) ListSnapshots(vmName, storageName string) ([]Snapshot, error) {
	snapshotDir := filepath.Join(m.baseDir, "snapshots", vmName, storageName)
	// Ask git for the recent history, one "<hash> <message>" line each.
	out, err := exec.Command("git", "-C", snapshotDir, "log", "--oneline", "-20").Output()
	if err != nil {
		return nil, fmt.Errorf("failed to list snapshots: %w", err)
	}
	var snapshots []Snapshot
	for _, line := range strings.Split(string(out), "\n") {
		if line == "" {
			continue
		}
		fields := strings.SplitN(line, " ", 2)
		if len(fields) != 2 {
			continue
		}
		snapshots = append(snapshots, Snapshot{Hash: fields[0], Message: fields[1]})
	}
	return snapshots, nil
}
// Unmount best-effort unmounts every overlay "merged" directory belonging
// to vmName. A missing overlay directory and individual umount failures are
// not treated as errors.
func (m *Manager) Unmount(vmName string) error {
	root := filepath.Join(m.overlayDir, vmName)
	entries, err := os.ReadDir(root)
	if err != nil {
		return nil // Nothing to unmount
	}
	for _, e := range entries {
		if !e.IsDir() {
			continue
		}
		exec.Command("umount", filepath.Join(root, e.Name(), "merged")).Run()
	}
	return nil
}
// Cleanup unmounts and removes all overlay storage for a VM. Snapshots are
// deliberately preserved and can be cleaned up manually.
func (m *Manager) Cleanup(vmName string) error {
	m.Unmount(vmName)
	os.RemoveAll(filepath.Join(m.overlayDir, vmName))
	return nil
}
// Snapshot represents a storage snapshot: one commit in the per-storage
// snapshot git repository.
type Snapshot struct {
	Hash    string    // abbreviated git commit hash
	Message string    // commit message
	Time    time.Time // NOTE(review): never populated by ListSnapshots — confirm if intended
}
// MountEntry generates an fstab-style line for this storage, appending
// ",ro" to the options when ReadOnly is set.
//
// NOTE(review): the "overlay" case emits no lowerdir/upperdir/workdir
// options, so the resulting line is not directly mountable as written —
// confirm whether a caller fills those in.
func (s *AttachedStorage) MountEntry() string {
	opts := "defaults"
	if s.ReadOnly {
		opts += ",ro"
	}
	switch s.Type {
	case "bind":
		return fmt.Sprintf("%s %s none bind,%s 0 0", s.Source, s.Target, opts)
	case "overlay":
		return fmt.Sprintf("overlay %s overlay %s 0 0", s.Target, opts)
	default:
		return fmt.Sprintf("%s %s auto %s 0 0", s.Source, s.Target, opts)
	}
}
// SyncToRemote pushes the snapshot history for vmName/storageName to the
// "origin" git remote. The error (if any) comes straight from `git push`.
func (m *Manager) SyncToRemote(vmName, storageName string) error {
	repo := filepath.Join(m.baseDir, "snapshots", vmName, storageName)
	cmd := exec.Command("git", "-C", repo, "push", "origin", "HEAD")
	return cmd.Run()
}
// SyncFromRemote pulls snapshot history for vmName/storageName from the
// "origin" git remote. The error (if any) comes straight from `git pull`.
func (m *Manager) SyncFromRemote(vmName, storageName string) error {
	repo := filepath.Join(m.baseDir, "snapshots", vmName, storageName)
	cmd := exec.Command("git", "-C", repo, "pull", "origin", "HEAD")
	return cmd.Run()
}

337
pkg/storage/tinyvol.go Normal file
View File

@@ -0,0 +1,337 @@
/*
TinyVol Assembly — Assemble directory trees from CAS blobs via hard-links.
TinyVol is the mechanism that turns a CAS blob manifest into a usable rootfs
directory tree. Instead of copying files, TinyVol creates hard-links from the
assembled tree into the CAS objects directory. This gives each workload its
own directory layout while sharing the actual file data on disk.
Features:
- Manifest-driven: reads a BlobManifest and creates the directory tree
- Hard-link based: no data duplication, instant assembly
- Assembly timing metrics
- Cleanup / disassembly
- Integrity verification of assembled trees
Copyright (c) Armored Gates LLC. All rights reserved.
*/
package storage
import (
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strings"
"time"
)
// ── TinyVol Assembler ────────────────────────────────────────────────────────
// TinyVol assembles and manages CAS-backed directory trees.
type TinyVol struct {
	cas     *CASStore // backing content-addressed store (source of blobs)
	baseDir string    // root directory for assembled trees
}
// NewTinyVol creates a TinyVol assembler backed by the given CAS store.
// An empty baseDir selects the default, /var/lib/volt/tinyvol.
func NewTinyVol(cas *CASStore, baseDir string) *TinyVol {
	dir := baseDir
	if dir == "" {
		dir = "/var/lib/volt/tinyvol"
	}
	return &TinyVol{cas: cas, baseDir: dir}
}
// ── Assembly ─────────────────────────────────────────────────────────────────
// AssemblyResult holds metrics from a TinyVol assembly operation.
type AssemblyResult struct {
	TargetDir   string        // where the tree was assembled
	FilesLinked int           // number of files hard-linked (also counts copy fallbacks)
	DirsCreated int           // number of directories created
	TotalBytes  int64         // sum of all file sizes (logical, not on-disk)
	Duration    time.Duration // wall-clock time for assembly
	Errors      []string      // non-fatal errors encountered
}
// Assemble creates a directory tree at targetDir from the given BlobManifest.
// Each file is hard-linked from the CAS objects directory — no data is copied.
//
// If targetDir is empty, a directory is created under the TinyVol base dir
// using the manifest name.
//
// The CAS objects directory and the target directory must be on the same
// filesystem for hard-links to work. If hard-linking fails (e.g. cross-device),
// Assemble falls back to a regular file copy with a warning.
//
// Per-file failures are recorded in result.Errors and skipped; only a
// manifest-resolution failure returns a non-nil error.
func (tv *TinyVol) Assemble(bm *BlobManifest, targetDir string) (*AssemblyResult, error) {
	start := time.Now()
	if targetDir == "" {
		targetDir = filepath.Join(tv.baseDir, bm.Name)
	}
	result := &AssemblyResult{TargetDir: targetDir}
	// Resolve blob list from manifest.
	entries, err := tv.cas.ResolveBlobList(bm)
	if err != nil {
		return nil, fmt.Errorf("tinyvol assemble: %w", err)
	}
	// Sort entries so directories are created in order.
	sort.Slice(entries, func(i, j int) bool {
		return entries[i].RelPath < entries[j].RelPath
	})
	// Track which directories we've created.
	createdDirs := make(map[string]bool)
	for _, entry := range entries {
		destPath := filepath.Join(targetDir, entry.RelPath)
		destDir := filepath.Dir(destPath)
		// Create parent directories.
		if !createdDirs[destDir] {
			if err := os.MkdirAll(destDir, 0755); err != nil {
				result.Errors = append(result.Errors,
					fmt.Sprintf("mkdir %s: %v", destDir, err))
				continue
			}
			// Count newly created directories: walk every ancestor path of
			// RelPath (parts[:i] excludes the final path element, the file
			// itself) and count each one seen for the first time.
			// NOTE(review): ancestors that already existed on disk before
			// this call are counted too — DirsCreated is "directories this
			// assembly touched", not strictly "created"; confirm intent.
			parts := strings.Split(entry.RelPath, string(filepath.Separator))
			for i := 1; i < len(parts); i++ {
				partial := filepath.Join(targetDir, strings.Join(parts[:i], string(filepath.Separator)))
				if !createdDirs[partial] {
					createdDirs[partial] = true
					result.DirsCreated++
				}
			}
			createdDirs[destDir] = true
		}
		// Try hard-link first.
		if err := os.Link(entry.BlobPath, destPath); err != nil {
			// Cross-device or other error — fall back to copy.
			if copyErr := copyFileForAssembly(entry.BlobPath, destPath); copyErr != nil {
				result.Errors = append(result.Errors,
					fmt.Sprintf("link/copy %s: %v / %v", entry.RelPath, err, copyErr))
				continue
			}
			// The fallback succeeded; record it as a warning, not a failure.
			result.Errors = append(result.Errors,
				fmt.Sprintf("hard-link failed for %s, fell back to copy", entry.RelPath))
		}
		// Accumulate size from blob (stat errors silently skip the size).
		if info, err := os.Stat(entry.BlobPath); err == nil {
			result.TotalBytes += info.Size()
		}
		result.FilesLinked++
	}
	result.Duration = time.Since(start)
	return result, nil
}
// AssembleFromRef loads the manifest stored under refName (a filename in
// the CAS refs directory) and assembles it into targetDir.
func (tv *TinyVol) AssembleFromRef(refName, targetDir string) (*AssemblyResult, error) {
	manifest, err := tv.cas.LoadManifest(refName)
	if err != nil {
		return nil, fmt.Errorf("tinyvol assemble from ref: %w", err)
	}
	return tv.Assemble(manifest, targetDir)
}
// ── Disassembly / Cleanup ────────────────────────────────────────────────────
// Disassemble removes an assembled directory tree. Only the hard-links and
// directories are removed — the underlying CAS blobs are untouched.
// Relative paths are resolved under the TinyVol base directory; absolute
// paths are removed exactly as given.
func (tv *TinyVol) Disassemble(targetDir string) error {
	if targetDir == "" {
		return fmt.Errorf("tinyvol disassemble: empty target directory")
	}
	resolved := targetDir
	if !filepath.IsAbs(resolved) {
		resolved = filepath.Join(tv.baseDir, resolved)
	}
	if err := os.RemoveAll(resolved); err != nil {
		return fmt.Errorf("tinyvol disassemble %s: %w", resolved, err)
	}
	return nil
}
// CleanupAll removes every assembled tree (each subdirectory) under the
// TinyVol base directory. A missing base directory means nothing to do.
func (tv *TinyVol) CleanupAll() error {
	entries, err := os.ReadDir(tv.baseDir)
	switch {
	case os.IsNotExist(err):
		return nil
	case err != nil:
		return fmt.Errorf("tinyvol cleanup all: %w", err)
	}
	for _, e := range entries {
		if !e.IsDir() {
			continue
		}
		path := filepath.Join(tv.baseDir, e.Name())
		if err := os.RemoveAll(path); err != nil {
			return fmt.Errorf("tinyvol cleanup %s: %w", path, err)
		}
	}
	return nil
}
// ── Verification ─────────────────────────────────────────────────────────────
// VerifyResult holds the outcome of verifying an assembled tree.
type VerifyResult struct {
	TotalFiles int      // files listed in the manifest
	Verified   int      // same inode as the CAS blob, or same size (copy fallback)
	Mismatched int      // CAS blob missing, or size differs from the blob
	Missing    int      // file absent from the assembled tree
	Errors     []string // human-readable description of each problem
}
// Verify checks that an assembled tree matches its manifest. For each file
// in the manifest, it verifies the hard-link points to the correct CAS blob
// by comparing inode numbers.
//
// NOTE(review): for non-hard-linked files the check degrades to a size
// comparison only — a same-size file with different content passes. A
// content hash would be definitive but slower; confirm this trade-off.
func (tv *TinyVol) Verify(bm *BlobManifest, targetDir string) (*VerifyResult, error) {
	result := &VerifyResult{}
	for relPath, digest := range bm.Objects {
		result.TotalFiles++
		destPath := filepath.Join(targetDir, relPath)
		blobPath := tv.cas.GetPath(digest)
		// Check destination exists.
		destInfo, err := os.Stat(destPath)
		if err != nil {
			result.Missing++
			result.Errors = append(result.Errors,
				fmt.Sprintf("missing: %s", relPath))
			continue
		}
		// Check CAS blob exists.
		blobInfo, err := os.Stat(blobPath)
		if err != nil {
			result.Mismatched++
			result.Errors = append(result.Errors,
				fmt.Sprintf("cas blob missing for %s: %s", relPath, digest))
			continue
		}
		// Compare by checking if they are the same file (same inode).
		if os.SameFile(destInfo, blobInfo) {
			result.Verified++
		} else {
			// Not the same inode — could be a copy or different file.
			// Check size as a quick heuristic.
			if destInfo.Size() != blobInfo.Size() {
				result.Mismatched++
				result.Errors = append(result.Errors,
					fmt.Sprintf("size mismatch for %s: assembled=%d cas=%d",
						relPath, destInfo.Size(), blobInfo.Size()))
			} else {
				// Same size, probably a copy (cross-device assembly).
				result.Verified++
			}
		}
	}
	return result, nil
}
// ── List ─────────────────────────────────────────────────────────────────────
// AssembledTree describes a currently assembled directory tree.
type AssembledTree struct {
	Name    string    // directory name under the TinyVol base dir
	Path    string    // absolute path of the assembled tree
	Size    int64     // total logical size (sum of file sizes, not on-disk usage)
	Files   int       // number of regular files in the tree
	Created time.Time // directory mtime, used as the creation timestamp
}
// List returns all currently assembled trees under the TinyVol base dir,
// with file counts and total logical sizes gathered by walking each tree.
// A missing base directory yields an empty result, not an error.
func (tv *TinyVol) List() ([]AssembledTree, error) {
	dirEntries, err := os.ReadDir(tv.baseDir)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil
		}
		return nil, fmt.Errorf("tinyvol list: %w", err)
	}
	var trees []AssembledTree
	for _, de := range dirEntries {
		if !de.IsDir() {
			continue
		}
		info, err := de.Info()
		if err != nil {
			// Entry vanished between ReadDir and Info; skip it.
			continue
		}
		treePath := filepath.Join(tv.baseDir, de.Name())
		tree := AssembledTree{
			Name:    de.Name(),
			Path:    treePath,
			Created: info.ModTime(),
		}
		// Walk the tree to accumulate file count and logical size; walk
		// errors and directories are skipped, matching best-effort listing.
		filepath.Walk(treePath, func(_ string, fi os.FileInfo, walkErr error) error {
			if walkErr != nil || fi.IsDir() {
				return nil
			}
			tree.Files++
			tree.Size += fi.Size()
			return nil
		})
		trees = append(trees, tree)
	}
	return trees, nil
}
// ── Helpers ──────────────────────────────────────────────────────────────────
// copyFileForAssembly copies a single file (fallback when hard-linking fails).
func copyFileForAssembly(src, dst string) error {
sf, err := os.Open(src)
if err != nil {
return err
}
defer sf.Close()
// Preserve permissions from source.
srcInfo, err := sf.Stat()
if err != nil {
return err
}
df, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, srcInfo.Mode())
if err != nil {
return err
}
defer df.Close()
_, err = copyBuffer(df, sf)
return err
}
// copyBuffer copies from src to dst using io.Copy.
func copyBuffer(dst *os.File, src *os.File) (int64, error) {
return io.Copy(dst, src)
}