Files
volt/pkg/cluster/scheduler.go.bak
Karl Clinger 81ad0b597c Volt CLI: source-available under AGPSL v5.0
Complete infrastructure platform CLI:
- Container runtime (systemd-nspawn)
- VoltVisor VMs (Neutron Stardust / QEMU)
- Stellarium CAS (content-addressed storage)
- ORAS Registry
- GitOps integration
- Landlock LSM security
- Compose orchestration
- Mesh networking

Copyright (c) Armored Gates LLC. All rights reserved.
Licensed under AGPSL v5.0
2026-03-21 00:31:12 -05:00

196 lines
5.7 KiB
Go

/*
Volt Cluster — Workload Scheduler.
Implements scheduling strategies for assigning workloads to cluster nodes.
The scheduler considers:
- Resource availability (CPU, memory, disk)
- Label selectors and affinity rules
- Node health status
- Current workload distribution (spread/pack strategies)
Strategies:
- BinPack: Pack workloads onto fewest nodes (maximize density)
- Spread: Distribute evenly across nodes (maximize availability)
- Manual: Explicit node selection by name/label
Copyright (c) Armored Gates LLC. All rights reserved.
AGPSL v5 — Source-available. Anti-competition clauses apply.
*/
package cluster
import (
"fmt"
"sort"
)
// ── Strategy ─────────────────────────────────────────────────────────────────
// ScheduleStrategy defines how workloads are assigned to nodes.
// The zero value ("") is treated as StrategyBinPack by NewScheduler.
type ScheduleStrategy string

const (
// StrategyBinPack packs workloads onto the fewest nodes (maximize density).
StrategyBinPack ScheduleStrategy = "binpack"
// StrategySpread distributes workloads evenly across nodes (maximize availability).
StrategySpread ScheduleStrategy = "spread"
// StrategyManual selects the first eligible node matching the label selector.
StrategyManual ScheduleStrategy = "manual"
)
// ── Scheduler ────────────────────────────────────────────────────────────────
// Scheduler assigns workloads to nodes based on a configurable strategy.
// Construct with NewScheduler; the zero value has an empty strategy and
// falls through to the bin-pack default in SelectNode's switch.
type Scheduler struct {
// strategy selects the placement algorithm used by SelectNode.
strategy ScheduleStrategy
}
// NewScheduler creates a scheduler with the given strategy.
// An empty strategy defaults to StrategyBinPack.
func NewScheduler(strategy ScheduleStrategy) *Scheduler {
	s := &Scheduler{strategy: strategy}
	if s.strategy == "" {
		s.strategy = StrategyBinPack
	}
	return s
}
// SelectNode chooses the best node for a workload based on the current strategy.
// Returns the selected NodeInfo or an error if no suitable node exists.
func (s *Scheduler) SelectNode(
	nodes []*NodeInfo,
	required WorkloadResources,
	selector map[string]string,
	existingSchedule []*ScheduledWorkload,
) (*NodeInfo, error) {
	// Narrow down to nodes that are ready, match the selector, and have room.
	candidates := s.filterEligible(nodes, required, selector)
	if len(candidates) == 0 {
		return nil, fmt.Errorf("no eligible nodes: checked %d nodes, none meet resource/label requirements", len(nodes))
	}

	if s.strategy == StrategySpread {
		return s.selectSpread(candidates, existingSchedule), nil
	}
	if s.strategy == StrategyManual {
		// Manual strategy returns the first eligible node matching the selector.
		return candidates[0], nil
	}
	// StrategyBinPack — also the fallback for any unrecognized strategy value.
	return s.selectBinPack(candidates), nil
}
// filterEligible returns nodes that are healthy, match labels, and have sufficient resources.
// A zero (or negative) requirement for memory/CPU/disk means "don't care".
func (s *Scheduler) filterEligible(nodes []*NodeInfo, required WorkloadResources, selector map[string]string) []*NodeInfo {
	var eligible []*NodeInfo
	for _, node := range nodes {
		// Only ready nodes that satisfy the label selector are considered.
		if node.Status != NodeStatusReady || !matchLabels(node.Labels, selector) {
			continue
		}

		freeMem := node.Resources.MemoryTotalMB - node.Resources.MemoryUsedMB
		freeDiskMB := (node.Resources.DiskTotalGB - node.Resources.DiskUsedGB) * 1024 // GB -> MB

		memOK := required.MemoryMB <= 0 || freeMem >= required.MemoryMB
		// CPU check is basic — compares raw core count, not current utilization.
		cpuOK := required.CPUCores <= 0 || node.Resources.CPUCores >= required.CPUCores
		diskOK := required.DiskMB <= 0 || freeDiskMB >= required.DiskMB

		if memOK && cpuOK && diskOK {
			eligible = append(eligible, node)
		}
	}
	return eligible
}
// selectBinPack picks the node with the LEAST available memory (pack tight).
//
// Callers must pass a non-empty slice (SelectNode guarantees this).
// A single linear scan replaces the previous in-place sort.Slice: it is
// O(n) instead of O(n log n) and, more importantly, no longer mutates the
// caller's slice as a side effect just to select one element.
func (s *Scheduler) selectBinPack(nodes []*NodeInfo) *NodeInfo {
	best := nodes[0]
	bestAvail := best.Resources.MemoryTotalMB - best.Resources.MemoryUsedMB
	for _, node := range nodes[1:] {
		avail := node.Resources.MemoryTotalMB - node.Resources.MemoryUsedMB
		if avail < bestAvail {
			best, bestAvail = node, avail
		}
	}
	return best
}
// selectSpread picks the node with the fewest currently scheduled workloads.
// Only workloads in "running" or "pending" status count toward a node's load.
//
// Callers must pass a non-empty slice (SelectNode guarantees this).
// A linear min-scan replaces the previous in-place sort.Slice, avoiding
// both the O(n log n) cost and the observable mutation of the input slice.
func (s *Scheduler) selectSpread(nodes []*NodeInfo, schedule []*ScheduledWorkload) *NodeInfo {
	// Count active workloads per node.
	counts := make(map[string]int, len(nodes))
	for _, sw := range schedule {
		if sw.Status == "running" || sw.Status == "pending" {
			counts[sw.NodeID]++
		}
	}
	// Pick the least-loaded node (first one wins on ties).
	best := nodes[0]
	bestCount := counts[best.NodeID]
	for _, node := range nodes[1:] {
		if c := counts[node.NodeID]; c < bestCount {
			best, bestCount = node, c
		}
	}
	return best
}
// ── Scoring (for future extensibility) ───────────────────────────────────────
// NodeScore represents a scored node for scheduling decisions.
// Produced by ScoreNodes; higher Score values indicate better candidates.
type NodeScore struct {
// Node is the scored cluster node.
Node *NodeInfo
// Score is the ranking value; see ScoreNodes for the point breakdown.
Score float64
}
// ScoreNodes evaluates and ranks all eligible nodes for a workload.
// Higher scores are better. Non-ready nodes are skipped entirely.
//
// Point breakdown:
//   - 0-50: fraction of memory still free
//   - 0-25: CPU headroom (all-or-nothing: node has more cores than required)
//   - 0-25: health, reduced by 5 points per missed heartbeat
//
// Bug fixed: the health penalty was previously applied to (and clamped on)
// the TOTAL score, so a node with many missed heartbeats could have its
// memory/CPU points erased. The health component is now clamped to [0, 25]
// on its own, as the comment always intended.
func ScoreNodes(nodes []*NodeInfo, required WorkloadResources) []NodeScore {
	scores := make([]NodeScore, 0, len(nodes))
	for _, node := range nodes {
		if node.Status != NodeStatusReady {
			continue
		}
		score := 0.0
		// Resource availability score (0-50 points).
		if node.Resources.MemoryTotalMB > 0 {
			memPct := float64(node.Resources.MemoryTotalMB-node.Resources.MemoryUsedMB) / float64(node.Resources.MemoryTotalMB)
			score += memPct * 50
		}
		// CPU headroom score (0-25 points).
		if node.Resources.CPUCores > required.CPUCores {
			score += 25
		}
		// Health score (0-25 points): lose 5 points per missed heartbeat,
		// clamped so health never drags the other components below zero.
		health := 25 - node.MissedBeats*5
		if health < 0 {
			health = 0
		}
		score += float64(health)
		scores = append(scores, NodeScore{Node: node, Score: score})
	}
	// Best candidates first.
	sort.Slice(scores, func(i, j int) bool {
		return scores[i].Score > scores[j].Score
	})
	return scores
}