From 798308d2bf52deb53d90114e43e99fd5506c75b9 Mon Sep 17 00:00:00 2001 From: adam Date: Sat, 7 Mar 2026 05:16:30 +0000 Subject: [PATCH] Add host drives section for non-enclosure drives --- frontend/src/App.jsx | 108 +++++++++++++++++++++++++++++++++++++++++ models/schemas.py | 22 +++++++++ routers/overview.py | 16 +++++++ services/host.py | 111 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 257 insertions(+) create mode 100644 services/host.py diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx index a8360f6..60f9539 100644 --- a/frontend/src/App.jsx +++ b/frontend/src/App.jsx @@ -580,6 +580,107 @@ function DriveDetail({ slot, onClose, t }) { ); } +const driveTypeBadge = (type, t) => { + const labels = { nvme: "NVMe", raid: "RAID", ssd: "SSD", hdd: "HDD", disk: "Disk" }; + return ( + + {labels[type] || type} + + ); +}; + +function HostDrivesCard({ drives, onSelect, t }) { + if (!drives || drives.length === 0) return null; + return ( +
+
+
+
Host Drives
+
+ {drives.length} drive{drives.length !== 1 ? "s" : ""} · non-enclosure +
+
+
+
+
+ {drives.map((d) => { + const healthStatus = d.health_status || "healthy"; + const c = t.health[healthStatus] || t.health.healthy; + return ( + + ); + })} +
+
+
+ ); +} + function EnclosureCard({ enclosure, view, onSelect, selectedSerial, t }) { return (
))}
+ + {/* Host Drives */} + {data.host_drives && data.host_drives.length > 0 && ( +
+ +
+ )} )} diff --git a/models/schemas.py b/models/schemas.py index 4928632..618473c 100644 --- a/models/schemas.py +++ b/models/schemas.py @@ -76,12 +76,34 @@ class EnclosureWithDrives(BaseModel): slots: list[SlotWithDrive] +class HostDrive(BaseModel): + device: str + drive_type: str = "disk" + model: str | None = None + serial: str | None = None + wwn: str | None = None + firmware: str | None = None + capacity_bytes: int | None = None + smart_healthy: bool | None = None + smart_supported: bool = True + temperature_c: int | None = None + power_on_hours: int | None = None + reallocated_sectors: int | None = None + pending_sectors: int | None = None + uncorrectable_errors: int | None = None + zfs_pool: str | None = None + zfs_vdev: str | None = None + zfs_state: str | None = None + health_status: str = "healthy" + + class Overview(BaseModel): healthy: bool drive_count: int warning_count: int error_count: int enclosures: list[EnclosureWithDrives] + host_drives: list[HostDrive] = [] class HealthCheck(BaseModel): diff --git a/routers/overview.py b/routers/overview.py index ef4debf..c640796 100644 --- a/routers/overview.py +++ b/routers/overview.py @@ -6,10 +6,12 @@ from fastapi import APIRouter from models.schemas import ( DriveHealthSummary, EnclosureWithDrives, + HostDrive, Overview, SlotWithDrive, ) from services.enclosure import discover_enclosures, list_slots +from services.host import get_host_drives from services.smart import get_smart_data from services.zfs import get_zfs_pool_map @@ -119,10 +121,24 @@ async def get_overview(): slots=slots_out, )) + # Host drives (non-enclosure) + host_drives_raw = await get_host_drives() + host_drives_out: list[HostDrive] = [] + for hd in host_drives_raw: + total_drives += 1 + hs = hd.get("health_status", "healthy") + if hs == "error": + errors += 1 + all_healthy = False + elif hs == "warning": + warnings += 1 + host_drives_out.append(HostDrive(**hd)) + return Overview( healthy=all_healthy and errors == 0, 
import asyncio
import json
import logging

from services.enclosure import discover_enclosures, list_slots
from services.smart import get_smart_data
from services.zfs import get_zfs_pool_map

logger = logging.getLogger(__name__)


async def get_host_drives() -> list[dict]:
    """Discover non-enclosure block devices and return SMART data for each.

    Enumerates top-level disks via ``lsblk``, drops any device that is
    already mapped to an enclosure slot, classifies the rest
    (nvme / raid / ssd / hdd), queries SMART for all of them concurrently,
    cross-references the ZFS pool map, and derives a ``health_status`` of
    ``"healthy"`` / ``"warning"`` / ``"error"`` using the same rules the
    overview endpoint applies to enclosure drives.

    Returns:
        A list of dicts whose keys match the ``HostDrive`` schema fields.
        Returns ``[]`` when ``lsblk`` is unavailable, fails, or produces
        unparseable output (best-effort: never raises for discovery issues).
    """
    # Enumerate top-level block devices. -d skips partitions, -J emits JSON.
    try:
        proc = await asyncio.create_subprocess_exec(
            "lsblk", "-d", "-o", "NAME,SIZE,TYPE,MODEL,ROTA,TRAN", "-J",
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        stdout, stderr = await proc.communicate()
        # FIX: check the exit status — a failing lsblk previously surfaced
        # only as an opaque JSONDecodeError on its (empty) stdout.
        if proc.returncode != 0:
            logger.warning(
                "lsblk exited %s: %s",
                proc.returncode,
                stderr.decode(errors="replace").strip(),
            )
            return []
        lsblk_data = json.loads(stdout)
    except (OSError, json.JSONDecodeError) as e:
        # FIX: OSError covers PermissionError and other spawn failures,
        # not just a missing binary (FileNotFoundError is a subclass).
        logger.warning("lsblk failed: %s", e)
        return []

    # Devices already surfaced through an enclosure slot must not be
    # double-reported as host drives.
    enclosure_devices: set[str] = set()
    for enc in discover_enclosures():
        for slot in list_slots(enc["id"]):
            if slot["device"]:
                enclosure_devices.add(slot["device"])

    # Keep only plain disks that are not enclosure-backed, and classify them.
    host_devices: list[dict] = []
    for dev in lsblk_data.get("blockdevices", []):
        name = dev.get("name", "")
        if dev.get("type", "") != "disk":
            continue
        if name in enclosure_devices:
            continue

        tran = (dev.get("tran") or "").lower()
        model = (dev.get("model") or "").lower()
        # ROTA is a bool in newer lsblk JSON, a "0"/"1" string in older ones.
        rota = dev.get("rota")

        if tran == "nvme" or name.startswith("nvme"):
            drive_type = "nvme"
        elif "perc" in model or "raid" in model or "megaraid" in model:
            # Hardware-RAID virtual disks (e.g. Dell PERC / LSI MegaRAID).
            drive_type = "raid"
        elif rota is False or rota == "0" or rota == 0:
            drive_type = "ssd"
        else:
            drive_type = "hdd"

        host_devices.append({"name": name, "drive_type": drive_type})

    # Fetch the ZFS membership map once, and SMART for all drives in parallel.
    pool_map = await get_zfs_pool_map()
    smart_results = await asyncio.gather(
        *(get_smart_data(d["name"]) for d in host_devices),
        return_exceptions=True,
    )

    results: list[dict] = []
    for dev_info, smart in zip(host_devices, smart_results):
        name = dev_info["name"]

        if isinstance(smart, Exception):
            # Best-effort: a drive whose SMART query blew up is still listed,
            # just flagged as unsupported rather than failing the whole call.
            logger.warning("SMART query failed for host drive %s: %s", name, smart)
            smart = {"device": name, "smart_supported": False}

        # Health classification mirrors the enclosure logic in overview.py:
        # failed SMART verdict -> error; any reallocated/pending/uncorrectable
        # sectors, or an unknown verdict on a drive that should support SMART,
        # -> warning; otherwise healthy.
        healthy = smart.get("smart_healthy")
        realloc = smart.get("reallocated_sectors") or 0
        pending = smart.get("pending_sectors") or 0
        unc = smart.get("uncorrectable_errors") or 0

        if healthy is False:
            health_status = "error"
        elif realloc > 0 or pending > 0 or unc > 0 or (
            healthy is None and smart.get("smart_supported", True)
        ):
            health_status = "warning"
        else:
            health_status = "healthy"

        zfs_info = pool_map.get(name, {})

        results.append({
            "device": name,
            "drive_type": dev_info["drive_type"],
            "model": smart.get("model"),
            "serial": smart.get("serial"),
            "wwn": smart.get("wwn"),
            "firmware": smart.get("firmware"),
            "capacity_bytes": smart.get("capacity_bytes"),
            "smart_healthy": healthy,
            "smart_supported": smart.get("smart_supported", True),
            "temperature_c": smart.get("temperature_c"),
            "power_on_hours": smart.get("power_on_hours"),
            "reallocated_sectors": smart.get("reallocated_sectors"),
            "pending_sectors": smart.get("pending_sectors"),
            "uncorrectable_errors": smart.get("uncorrectable_errors"),
            "zfs_pool": zfs_info.get("pool"),
            "zfs_vdev": zfs_info.get("vdev"),
            "zfs_state": zfs_info.get("state"),
            "health_status": health_status,
        })

    return results