Volt CLI: source-available under AGPSL v5.0
Complete infrastructure platform CLI: - Container runtime (systemd-nspawn) - VoltVisor VMs (Neutron Stardust / QEMU) - Stellarium CAS (content-addressed storage) - ORAS Registry - GitOps integration - Landlock LSM security - Compose orchestration - Mesh networking Copyright (c) Armored Gates LLC. All rights reserved. Licensed under AGPSL v5.0
This commit is contained in:
891
pkg/security/scanner.go
Normal file
891
pkg/security/scanner.go
Normal file
@@ -0,0 +1,891 @@
|
||||
/*
|
||||
Vulnerability Scanner — Scan container rootfs and CAS references for known
|
||||
vulnerabilities using the OSV (Open Source Vulnerabilities) API.
|
||||
|
||||
Supports:
|
||||
- Debian/Ubuntu (dpkg status file)
|
||||
- Alpine (apk installed db)
|
||||
- RHEL/Fedora/Rocky (rpm query via librpm or rpm binary)
|
||||
|
||||
Copyright (c) Armored Gates LLC. All rights reserved.
|
||||
*/
|
||||
package security
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/armoredgate/volt/pkg/storage"
|
||||
)
|
||||
|
||||
// ── Types ────────────────────────────────────────────────────────────────────
|
||||
|
||||
// Package represents an installed package detected in a rootfs.
type Package struct {
	Name    string // package name as reported by the package manager
	Version string // installed version string, in the manager's own format
	Source  string // "dpkg", "apk", "rpm"
}
|
||||
|
||||
// VulnResult represents a single vulnerability finding.
// Instances are produced by convertOSVVulns from OSV API records.
type VulnResult struct {
	ID         string   // CVE ID or OSV ID (e.g., "CVE-2024-1234" or "GHSA-xxxx")
	Package    string   // Affected package name
	Version    string   // Installed version
	FixedIn    string   // Version that fixes it, or "" if no fix available
	Severity   string   // CRITICAL, HIGH, MEDIUM, LOW, UNKNOWN
	Summary    string   // Short description
	References []string // URLs for more info
}
|
||||
|
||||
// ScanReport is the result of scanning a rootfs for vulnerabilities.
// Vulns is sorted most-severe-first (ties broken by ID) when produced by
// ScanRootfsWithTarget.
type ScanReport struct {
	Target       string        // Image or container name
	OS           string        // Detected OS (e.g., "Alpine Linux 3.19")
	Ecosystem    string        // OSV ecosystem (e.g., "Alpine", "Debian")
	PackageCount int           // Total packages scanned
	Vulns        []VulnResult  // Found vulnerabilities
	ScanTime     time.Duration // Wall-clock time for the scan
}
|
||||
|
||||
// ── Severity Helpers ─────────────────────────────────────────────────────────
|
||||
|
||||
// severityRank maps severity strings to an integer for sorting/filtering.
// Higher is more severe; labels not present in the map rank as 0.
var severityRank = map[string]int{
	"CRITICAL": 4,
	"HIGH":     3,
	"MEDIUM":   2,
	"LOW":      1,
	"UNKNOWN":  0,
}

// SeverityAtLeast returns true if sev is at or above the given threshold.
// The comparison is case-insensitive; unrecognized labels rank lowest.
func SeverityAtLeast(sev, threshold string) bool {
	have := severityRank[strings.ToUpper(sev)]
	want := severityRank[strings.ToUpper(threshold)]
	return have >= want
}
|
||||
|
||||
// ── Counts ───────────────────────────────────────────────────────────────────
|
||||
|
||||
// VulnCounts holds per-severity counts for a ScanReport.
type VulnCounts struct {
	Critical int // findings labeled CRITICAL
	High     int // findings labeled HIGH
	Medium   int // findings labeled MEDIUM
	Low      int // findings labeled LOW
	Unknown  int // findings with any other severity label
	Total    int // total findings across all buckets
}
|
||||
|
||||
// CountBySeverity tallies vulnerabilities by severity level.
|
||||
func (r *ScanReport) CountBySeverity() VulnCounts {
|
||||
var c VulnCounts
|
||||
for _, v := range r.Vulns {
|
||||
switch strings.ToUpper(v.Severity) {
|
||||
case "CRITICAL":
|
||||
c.Critical++
|
||||
case "HIGH":
|
||||
c.High++
|
||||
case "MEDIUM":
|
||||
c.Medium++
|
||||
case "LOW":
|
||||
c.Low++
|
||||
default:
|
||||
c.Unknown++
|
||||
}
|
||||
}
|
||||
c.Total = len(r.Vulns)
|
||||
return c
|
||||
}
|
||||
|
||||
// ── OS Detection ─────────────────────────────────────────────────────────────
|
||||
|
||||
// DetectOS reads /etc/os-release from rootfsPath and returns (prettyName, ecosystem, error).
|
||||
// The ecosystem is mapped to the OSV ecosystem name.
|
||||
func DetectOS(rootfsPath string) (string, string, error) {
|
||||
osRelPath := filepath.Join(rootfsPath, "etc", "os-release")
|
||||
f, err := os.Open(osRelPath)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("detect OS: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
return parseOSRelease(f)
|
||||
}
|
||||
|
||||
// parseOSRelease parses an os-release formatted reader.
|
||||
func parseOSRelease(r io.Reader) (string, string, error) {
|
||||
var prettyName, id, versionID string
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
if line == "" || strings.HasPrefix(line, "#") {
|
||||
continue
|
||||
}
|
||||
|
||||
parts := strings.SplitN(line, "=", 2)
|
||||
if len(parts) != 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
key := parts[0]
|
||||
val := strings.Trim(parts[1], `"'`)
|
||||
|
||||
switch key {
|
||||
case "PRETTY_NAME":
|
||||
prettyName = val
|
||||
case "ID":
|
||||
id = val
|
||||
case "VERSION_ID":
|
||||
versionID = val
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return "", "", fmt.Errorf("parse os-release: %w", err)
|
||||
}
|
||||
|
||||
if prettyName == "" {
|
||||
if id != "" {
|
||||
prettyName = id
|
||||
if versionID != "" {
|
||||
prettyName += " " + versionID
|
||||
}
|
||||
} else {
|
||||
return "", "", fmt.Errorf("detect OS: no PRETTY_NAME or ID found in os-release")
|
||||
}
|
||||
}
|
||||
|
||||
ecosystem := mapIDToEcosystem(id, versionID)
|
||||
return prettyName, ecosystem, nil
|
||||
}
|
||||
|
||||
// mapIDToEcosystem maps an /etc/os-release ID to the OSV ecosystem name.
// versionID is accepted for future release-qualified ecosystems but is
// not consulted today.
func mapIDToEcosystem(id, versionID string) string {
	_ = versionID // reserved; OSV ecosystems here are not release-qualified

	known := map[string]string{
		"alpine":              "Alpine",
		"debian":              "Debian",
		"ubuntu":              "Ubuntu",
		"rocky":               "Rocky Linux",
		"rhel":                "Rocky Linux", // best-effort mapping
		"centos":              "Rocky Linux", // best-effort mapping
		"fedora":              "Rocky Linux", // best-effort mapping
		"sles":                "SUSE",
		"suse":                "SUSE",
		"opensuse-leap":       "SUSE",
		"opensuse-tumbleweed": "SUSE",
	}
	if eco, ok := known[strings.ToLower(id)]; ok {
		return eco
	}
	return "Linux" // fallback for unrecognized distributions
}
|
||||
|
||||
// ── Package Listing ──────────────────────────────────────────────────────────
|
||||
|
||||
// ListPackages detects the package manager and extracts installed packages
|
||||
// from the rootfs at rootfsPath.
|
||||
func ListPackages(rootfsPath string) ([]Package, error) {
|
||||
var pkgs []Package
|
||||
var err error
|
||||
|
||||
// Try dpkg (Debian/Ubuntu)
|
||||
dpkgStatus := filepath.Join(rootfsPath, "var", "lib", "dpkg", "status")
|
||||
if fileExists(dpkgStatus) {
|
||||
pkgs, err = parseDpkgStatus(dpkgStatus)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list packages (dpkg): %w", err)
|
||||
}
|
||||
return pkgs, nil
|
||||
}
|
||||
|
||||
// Try apk (Alpine)
|
||||
apkInstalled := filepath.Join(rootfsPath, "lib", "apk", "db", "installed")
|
||||
if fileExists(apkInstalled) {
|
||||
pkgs, err = parseApkInstalled(apkInstalled)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list packages (apk): %w", err)
|
||||
}
|
||||
return pkgs, nil
|
||||
}
|
||||
|
||||
// Try rpm (RHEL/Rocky/Fedora)
|
||||
rpmDB := filepath.Join(rootfsPath, "var", "lib", "rpm")
|
||||
if dirExists(rpmDB) {
|
||||
pkgs, err = parseRpmDB(rootfsPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list packages (rpm): %w", err)
|
||||
}
|
||||
return pkgs, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("no supported package manager found in rootfs (checked dpkg, apk, rpm)")
|
||||
}
|
||||
|
||||
// ── dpkg parser ──────────────────────────────────────────────────────────────
|
||||
|
||||
// parseDpkgStatus parses /var/lib/dpkg/status to extract installed packages.
// It opens the file and delegates the actual parsing to
// parseDpkgStatusReader; the open error is returned unwrapped (the caller
// adds context).
func parseDpkgStatus(path string) ([]Package, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return parseDpkgStatusReader(f)
}
|
||||
|
||||
// parseDpkgStatusReader parses a dpkg status file from a reader.
|
||||
func parseDpkgStatusReader(r io.Reader) ([]Package, error) {
|
||||
var pkgs []Package
|
||||
var current Package
|
||||
inPackage := false
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
// Increase buffer for potentially long Description fields
|
||||
scanner.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
|
||||
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
|
||||
// Empty line separates package entries
|
||||
if strings.TrimSpace(line) == "" {
|
||||
if inPackage && current.Name != "" && current.Version != "" {
|
||||
current.Source = "dpkg"
|
||||
pkgs = append(pkgs, current)
|
||||
}
|
||||
current = Package{}
|
||||
inPackage = false
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip continuation lines (start with space/tab)
|
||||
if len(line) > 0 && (line[0] == ' ' || line[0] == '\t') {
|
||||
continue
|
||||
}
|
||||
|
||||
parts := strings.SplitN(line, ": ", 2)
|
||||
if len(parts) != 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
key := parts[0]
|
||||
val := parts[1]
|
||||
|
||||
switch key {
|
||||
case "Package":
|
||||
current.Name = val
|
||||
inPackage = true
|
||||
case "Version":
|
||||
current.Version = val
|
||||
case "Status":
|
||||
// Only include installed packages
|
||||
if !strings.Contains(val, "installed") || strings.Contains(val, "not-installed") {
|
||||
inPackage = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Don't forget the last entry if file doesn't end with blank line
|
||||
if inPackage && current.Name != "" && current.Version != "" {
|
||||
current.Source = "dpkg"
|
||||
pkgs = append(pkgs, current)
|
||||
}
|
||||
|
||||
return pkgs, scanner.Err()
|
||||
}
|
||||
|
||||
// ── apk parser ───────────────────────────────────────────────────────────────
|
||||
|
||||
// parseApkInstalled parses /lib/apk/db/installed to extract installed
// packages. It opens the file and delegates to parseApkInstalledReader;
// the open error is returned unwrapped (the caller adds context).
func parseApkInstalled(path string) ([]Package, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return parseApkInstalledReader(f)
}
|
||||
|
||||
// parseApkInstalledReader parses an Alpine apk installed DB from a reader.
|
||||
// Format: blocks separated by blank lines. P = package name, V = version.
|
||||
func parseApkInstalledReader(r io.Reader) ([]Package, error) {
|
||||
var pkgs []Package
|
||||
var current Package
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
|
||||
if strings.TrimSpace(line) == "" {
|
||||
if current.Name != "" && current.Version != "" {
|
||||
current.Source = "apk"
|
||||
pkgs = append(pkgs, current)
|
||||
}
|
||||
current = Package{}
|
||||
continue
|
||||
}
|
||||
|
||||
if len(line) < 2 || line[1] != ':' {
|
||||
continue
|
||||
}
|
||||
|
||||
key := line[0]
|
||||
val := line[2:]
|
||||
|
||||
switch key {
|
||||
case 'P':
|
||||
current.Name = val
|
||||
case 'V':
|
||||
current.Version = val
|
||||
}
|
||||
}
|
||||
|
||||
// Last entry
|
||||
if current.Name != "" && current.Version != "" {
|
||||
current.Source = "apk"
|
||||
pkgs = append(pkgs, current)
|
||||
}
|
||||
|
||||
return pkgs, scanner.Err()
|
||||
}
|
||||
|
||||
// ── rpm parser ───────────────────────────────────────────────────────────────
|
||||
|
||||
// parseRpmDB queries the RPM database in the rootfs using the rpm binary.
|
||||
func parseRpmDB(rootfsPath string) ([]Package, error) {
|
||||
// Try using rpm command with --root
|
||||
rpmBin, err := exec.LookPath("rpm")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("rpm binary not found (needed to query RPM database): %w", err)
|
||||
}
|
||||
|
||||
cmd := exec.Command(rpmBin, "--root", rootfsPath, "-qa", "--queryformat", "%{NAME}\\t%{VERSION}-%{RELEASE}\\n")
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("rpm query failed: %w", err)
|
||||
}
|
||||
|
||||
return parseRpmOutput(out)
|
||||
}
|
||||
|
||||
// parseRpmOutput parses tab-separated name\tversion output from rpm -qa.
|
||||
func parseRpmOutput(data []byte) ([]Package, error) {
|
||||
var pkgs []Package
|
||||
scanner := bufio.NewScanner(bytes.NewReader(data))
|
||||
for scanner.Scan() {
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
parts := strings.SplitN(line, "\t", 2)
|
||||
if len(parts) != 2 {
|
||||
continue
|
||||
}
|
||||
pkgs = append(pkgs, Package{
|
||||
Name: parts[0],
|
||||
Version: parts[1],
|
||||
Source: "rpm",
|
||||
})
|
||||
}
|
||||
return pkgs, scanner.Err()
|
||||
}
|
||||
|
||||
// ── OSV API ──────────────────────────────────────────────────────────────────
|
||||
|
||||
// OSV API endpoints and request limits.
const (
	osvQueryURL      = "https://api.osv.dev/v1/query"      // single-package queries
	osvQueryBatchURL = "https://api.osv.dev/v1/querybatch" // batched queries
	osvBatchLimit    = 1000                                // max queries per batch
	osvHTTPTimeout   = 30 * time.Second                    // per-request HTTP client timeout
)
|
||||
|
||||
// osvQueryRequest is a single OSV query.
type osvQueryRequest struct {
	Package *osvPackage `json:"package"`
	Version string      `json:"version"`
}

// osvPackage identifies a package within an OSV ecosystem.
type osvPackage struct {
	Name      string `json:"name"`
	Ecosystem string `json:"ecosystem"`
}

// osvBatchRequest wraps multiple queries for the batch endpoint.
type osvBatchRequest struct {
	Queries []osvQueryRequest `json:"queries"`
}

// osvBatchResponse contains results for a batch query; results are
// positional relative to the submitted queries.
type osvBatchResponse struct {
	Results []osvQueryResponse `json:"results"`
}

// osvQueryResponse is the response for a single query.
type osvQueryResponse struct {
	Vulns []osvVuln `json:"vulns"`
}

// osvVuln represents a vulnerability from the OSV API. Only the fields
// this scanner consumes are modeled.
type osvVuln struct {
	ID      string `json:"id"`
	Summary string `json:"summary"`
	Details string `json:"details"`
	// Severity carries typed score entries (e.g. CVSS vectors or scores).
	Severity []struct {
		Type  string `json:"type"`
		Score string `json:"score"`
	} `json:"severity"`
	// DatabaseSpecific is kept raw; extractSeverity probes it for a plain
	// "severity" label when no CVSS entry is present.
	DatabaseSpecific json.RawMessage `json:"database_specific"`
	// Affected lists per-package version ranges; "fixed" events feed
	// VulnResult.FixedIn.
	Affected []struct {
		Package struct {
			Name      string `json:"name"`
			Ecosystem string `json:"ecosystem"`
		} `json:"package"`
		Ranges []struct {
			Type   string `json:"type"`
			Events []struct {
				Introduced string `json:"introduced,omitempty"`
				Fixed      string `json:"fixed,omitempty"`
			} `json:"events"`
		} `json:"ranges"`
	} `json:"affected"`
	// References are external advisory URLs.
	References []struct {
		Type string `json:"type"`
		URL  string `json:"url"`
	} `json:"references"`
}
|
||||
|
||||
// QueryOSV queries the OSV API for vulnerabilities affecting the given package.
|
||||
func QueryOSV(ecosystem, pkg, version string) ([]VulnResult, error) {
|
||||
return queryOSVWithClient(http.DefaultClient, ecosystem, pkg, version)
|
||||
}
|
||||
|
||||
func queryOSVWithClient(client *http.Client, ecosystem, pkg, version string) ([]VulnResult, error) {
|
||||
reqBody := osvQueryRequest{
|
||||
Package: &osvPackage{
|
||||
Name: pkg,
|
||||
Ecosystem: ecosystem,
|
||||
},
|
||||
Version: version,
|
||||
}
|
||||
|
||||
data, err := json.Marshal(reqBody)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("osv query marshal: %w", err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", osvQueryURL, bytes.NewReader(data))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("osv query: %w", err)
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("osv query: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return nil, fmt.Errorf("osv query: HTTP %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var osvResp osvQueryResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&osvResp); err != nil {
|
||||
return nil, fmt.Errorf("osv query decode: %w", err)
|
||||
}
|
||||
|
||||
return convertOSVVulns(osvResp.Vulns, pkg, version), nil
|
||||
}
|
||||
|
||||
// QueryOSVBatch queries the OSV batch endpoint for multiple packages at once.
|
||||
func QueryOSVBatch(ecosystem string, pkgs []Package) (map[string][]VulnResult, error) {
|
||||
return queryOSVBatchWithClient(&http.Client{Timeout: osvHTTPTimeout}, ecosystem, pkgs)
|
||||
}
|
||||
|
||||
func queryOSVBatchWithClient(client *http.Client, ecosystem string, pkgs []Package) (map[string][]VulnResult, error) {
|
||||
return queryOSVBatchWithURL(client, ecosystem, pkgs, osvQueryBatchURL)
|
||||
}
|
||||
|
||||
// queryOSVBatchWithURL is the internal implementation that accepts a custom URL (for testing).
|
||||
func queryOSVBatchWithURL(client *http.Client, ecosystem string, pkgs []Package, batchURL string) (map[string][]VulnResult, error) {
|
||||
results := make(map[string][]VulnResult)
|
||||
|
||||
// Process in batches of osvBatchLimit
|
||||
for i := 0; i < len(pkgs); i += osvBatchLimit {
|
||||
end := i + osvBatchLimit
|
||||
if end > len(pkgs) {
|
||||
end = len(pkgs)
|
||||
}
|
||||
batch := pkgs[i:end]
|
||||
|
||||
var queries []osvQueryRequest
|
||||
for _, p := range batch {
|
||||
queries = append(queries, osvQueryRequest{
|
||||
Package: &osvPackage{
|
||||
Name: p.Name,
|
||||
Ecosystem: ecosystem,
|
||||
},
|
||||
Version: p.Version,
|
||||
})
|
||||
}
|
||||
|
||||
batchReq := osvBatchRequest{Queries: queries}
|
||||
data, err := json.Marshal(batchReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("osv batch marshal: %w", err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", batchURL, bytes.NewReader(data))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("osv batch: %w", err)
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("osv batch: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return nil, fmt.Errorf("osv batch: HTTP %d: %s", resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
var batchResp osvBatchResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&batchResp); err != nil {
|
||||
return nil, fmt.Errorf("osv batch decode: %w", err)
|
||||
}
|
||||
|
||||
// Map results back to packages
|
||||
for j, qr := range batchResp.Results {
|
||||
if j >= len(batch) {
|
||||
break
|
||||
}
|
||||
pkg := batch[j]
|
||||
vulns := convertOSVVulns(qr.Vulns, pkg.Name, pkg.Version)
|
||||
if len(vulns) > 0 {
|
||||
key := pkg.Name + "@" + pkg.Version
|
||||
results[key] = append(results[key], vulns...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// convertOSVVulns converts OSV API vulnerability objects to our VulnResult type.
|
||||
func convertOSVVulns(vulns []osvVuln, pkgName, pkgVersion string) []VulnResult {
|
||||
var results []VulnResult
|
||||
seen := make(map[string]bool)
|
||||
|
||||
for _, v := range vulns {
|
||||
if seen[v.ID] {
|
||||
continue
|
||||
}
|
||||
seen[v.ID] = true
|
||||
|
||||
result := VulnResult{
|
||||
ID: v.ID,
|
||||
Package: pkgName,
|
||||
Version: pkgVersion,
|
||||
Summary: v.Summary,
|
||||
}
|
||||
|
||||
// Extract severity
|
||||
result.Severity = extractSeverity(v)
|
||||
|
||||
// Extract fixed version
|
||||
result.FixedIn = extractFixedVersion(v, pkgName)
|
||||
|
||||
// Extract references
|
||||
for _, ref := range v.References {
|
||||
result.References = append(result.References, ref.URL)
|
||||
}
|
||||
|
||||
results = append(results, result)
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
// extractSeverity tries to determine severity from OSV data.
|
||||
func extractSeverity(v osvVuln) string {
|
||||
// Try CVSS score from severity array
|
||||
for _, s := range v.Severity {
|
||||
if s.Type == "CVSS_V3" || s.Type == "CVSS_V2" {
|
||||
return cvssToSeverity(s.Score)
|
||||
}
|
||||
}
|
||||
|
||||
// Try database_specific.severity
|
||||
if len(v.DatabaseSpecific) > 0 {
|
||||
var dbSpec map[string]interface{}
|
||||
if json.Unmarshal(v.DatabaseSpecific, &dbSpec) == nil {
|
||||
if sev, ok := dbSpec["severity"].(string); ok {
|
||||
return normalizeSeverity(sev)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Heuristic from ID prefix
|
||||
id := strings.ToUpper(v.ID)
|
||||
if strings.HasPrefix(id, "CVE-") {
|
||||
return "UNKNOWN" // Can't determine from ID alone
|
||||
}
|
||||
|
||||
return "UNKNOWN"
|
||||
}
|
||||
|
||||
// cvssToSeverity converts a CVSS score or vector string to a severity
// category. Numeric scores map through the standard CVSS v3 bands; vector
// strings (which do not carry the numeric base score) fall back to a
// coarse heuristic on attack vector and complexity.
func cvssToSeverity(cvss string) string {
	// Some feeds supply a plain numeric score (e.g. "9.8") rather than a
	// vector; Sscanf also tolerates a numeric prefix.
	var score float64
	if _, err := fmt.Sscanf(cvss, "%f", &score); err == nil {
		switch {
		case score >= 9.0:
			return "CRITICAL"
		case score >= 7.0:
			return "HIGH"
		case score >= 4.0:
			return "MEDIUM"
		case score > 0:
			return "LOW"
		}
	}

	// Vector heuristic: network-reachable + low complexity is treated as
	// at least HIGH; full C/I/A impact bumps it to CRITICAL.
	vec := strings.ToUpper(cvss)
	if strings.Contains(vec, "AV:N") && strings.Contains(vec, "AC:L") {
		if strings.Contains(vec, "/C:H/I:H/A:H") {
			return "CRITICAL"
		}
		return "HIGH"
	}

	return "UNKNOWN"
}
|
||||
|
||||
// normalizeSeverity normalizes vendor severity labels (e.g. Red Hat's
// "Important", GitHub's "Moderate", Debian's "unimportant") to the
// standard CRITICAL/HIGH/MEDIUM/LOW/UNKNOWN set.
func normalizeSeverity(sev string) string {
	label := strings.ToUpper(strings.TrimSpace(sev))
	switch label {
	case "CRITICAL":
		return "CRITICAL"
	case "HIGH", "IMPORTANT":
		return "HIGH"
	case "MEDIUM", "MODERATE":
		return "MEDIUM"
	case "LOW", "NEGLIGIBLE", "UNIMPORTANT":
		return "LOW"
	}
	return "UNKNOWN"
}
|
||||
|
||||
// extractFixedVersion finds the fixed version from affected ranges.
|
||||
func extractFixedVersion(v osvVuln, pkgName string) string {
|
||||
for _, affected := range v.Affected {
|
||||
if affected.Package.Name != pkgName {
|
||||
continue
|
||||
}
|
||||
for _, r := range affected.Ranges {
|
||||
for _, event := range r.Events {
|
||||
if event.Fixed != "" {
|
||||
return event.Fixed
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Try any affected entry if package name didn't match exactly
|
||||
for _, affected := range v.Affected {
|
||||
for _, r := range affected.Ranges {
|
||||
for _, event := range r.Events {
|
||||
if event.Fixed != "" {
|
||||
return event.Fixed
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// ── Main Scan Functions ──────────────────────────────────────────────────────
|
||||
|
||||
// ScanRootfs scans a rootfs directory for vulnerabilities by detecting the OS,
|
||||
// listing installed packages, and querying the OSV API.
|
||||
func ScanRootfs(rootfsPath string) (*ScanReport, error) {
|
||||
return ScanRootfsWithTarget(rootfsPath, filepath.Base(rootfsPath))
|
||||
}
|
||||
|
||||
// ScanRootfsWithTarget scans a rootfs with a custom target name for the report.
|
||||
func ScanRootfsWithTarget(rootfsPath, targetName string) (*ScanReport, error) {
|
||||
start := time.Now()
|
||||
|
||||
report := &ScanReport{
|
||||
Target: targetName,
|
||||
}
|
||||
|
||||
// Verify rootfs exists
|
||||
if !dirExists(rootfsPath) {
|
||||
return nil, fmt.Errorf("rootfs path does not exist: %s", rootfsPath)
|
||||
}
|
||||
|
||||
// Detect OS
|
||||
osName, ecosystem, err := DetectOS(rootfsPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("scan: %w", err)
|
||||
}
|
||||
report.OS = osName
|
||||
report.Ecosystem = ecosystem
|
||||
|
||||
// List installed packages
|
||||
pkgs, err := ListPackages(rootfsPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("scan: %w", err)
|
||||
}
|
||||
report.PackageCount = len(pkgs)
|
||||
|
||||
if len(pkgs) == 0 {
|
||||
report.ScanTime = time.Since(start)
|
||||
return report, nil
|
||||
}
|
||||
|
||||
// Query OSV batch API
|
||||
vulnMap, err := QueryOSVBatch(ecosystem, pkgs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("scan: osv query failed: %w", err)
|
||||
}
|
||||
|
||||
// Collect all vulnerabilities
|
||||
for _, vulns := range vulnMap {
|
||||
report.Vulns = append(report.Vulns, vulns...)
|
||||
}
|
||||
|
||||
// Sort by severity (critical first)
|
||||
sort.Slice(report.Vulns, func(i, j int) bool {
|
||||
ri := severityRank[report.Vulns[i].Severity]
|
||||
rj := severityRank[report.Vulns[j].Severity]
|
||||
if ri != rj {
|
||||
return ri > rj
|
||||
}
|
||||
return report.Vulns[i].ID < report.Vulns[j].ID
|
||||
})
|
||||
|
||||
report.ScanTime = time.Since(start)
|
||||
return report, nil
|
||||
}
|
||||
|
||||
// ScanCASRef scans a CAS reference by assembling it to a temporary directory,
|
||||
// scanning, and cleaning up.
|
||||
func ScanCASRef(casStore *storage.CASStore, ref string) (*ScanReport, error) {
|
||||
tv := storage.NewTinyVol(casStore, "")
|
||||
|
||||
// Load the manifest
|
||||
bm, err := casStore.LoadManifest(ref)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("scan cas ref: %w", err)
|
||||
}
|
||||
|
||||
// Assemble to a temp directory
|
||||
tmpDir, err := os.MkdirTemp("", "volt-scan-*")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("scan cas ref: create temp dir: %w", err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
_, err = tv.Assemble(bm, tmpDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("scan cas ref: assemble: %w", err)
|
||||
}
|
||||
|
||||
// Scan the assembled rootfs
|
||||
report, err := ScanRootfsWithTarget(tmpDir, ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return report, nil
|
||||
}
|
||||
|
||||
// ── Formatting ───────────────────────────────────────────────────────────────
|
||||
|
||||
// FormatReport formats a ScanReport as a human-readable string.
|
||||
func FormatReport(r *ScanReport, minSeverity string) string {
|
||||
var b strings.Builder
|
||||
|
||||
fmt.Fprintf(&b, "🔍 Scanning: %s\n", r.Target)
|
||||
fmt.Fprintf(&b, " OS: %s\n", r.OS)
|
||||
fmt.Fprintf(&b, " Packages: %d detected\n", r.PackageCount)
|
||||
fmt.Fprintln(&b)
|
||||
|
||||
filtered := r.Vulns
|
||||
if minSeverity != "" {
|
||||
filtered = nil
|
||||
for _, v := range r.Vulns {
|
||||
if SeverityAtLeast(v.Severity, minSeverity) {
|
||||
filtered = append(filtered, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(filtered) == 0 {
|
||||
if minSeverity != "" {
|
||||
fmt.Fprintf(&b, " No vulnerabilities found at %s severity or above.\n", strings.ToUpper(minSeverity))
|
||||
} else {
|
||||
fmt.Fprintln(&b, " ✅ No vulnerabilities found.")
|
||||
}
|
||||
} else {
|
||||
for _, v := range filtered {
|
||||
fixInfo := fmt.Sprintf("(fixed in %s)", v.FixedIn)
|
||||
if v.FixedIn == "" {
|
||||
fixInfo = "(no fix available)"
|
||||
}
|
||||
fmt.Fprintf(&b, " %-10s %-20s %s %s %s\n",
|
||||
v.Severity, v.ID, v.Package, v.Version, fixInfo)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintln(&b)
|
||||
counts := r.CountBySeverity()
|
||||
fmt.Fprintf(&b, " Summary: %d critical, %d high, %d medium, %d low (%d total)\n",
|
||||
counts.Critical, counts.High, counts.Medium, counts.Low, counts.Total)
|
||||
fmt.Fprintf(&b, " Scan time: %.1fs\n", r.ScanTime.Seconds())
|
||||
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// FormatReportJSON formats a ScanReport as JSON.
|
||||
func FormatReportJSON(r *ScanReport) (string, error) {
|
||||
data, err := json.MarshalIndent(r, "", " ")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(data), nil
|
||||
}
|
||||
|
||||
// ── Helpers ──────────────────────────────────────────────────────────────────
|
||||
|
||||
// fileExists reports whether path can be stat'ed. Note it returns true for
// directories too — it is an existence check, not a file-type check.
func fileExists(path string) bool {
	if _, err := os.Stat(path); err != nil {
		return false
	}
	return true
}

// dirExists reports whether path exists and is a directory.
func dirExists(path string) bool {
	info, err := os.Stat(path)
	if err != nil {
		return false
	}
	return info.IsDir()
}
|
||||
Reference in New Issue
Block a user