k8s-widget: add Kubernetes.qml singleton
This commit is contained in:
120
dot_config/quickshell/shared/Kubernetes.qml
Normal file
120
dot_config/quickshell/shared/Kubernetes.qml
Normal file
@@ -0,0 +1,120 @@
|
||||
pragma Singleton
|
||||
|
||||
import Quickshell
|
||||
import Quickshell.Io
|
||||
import QtQuick
|
||||
|
||||
Singleton {
    id: root

    // Cluster health as seen by the bar widget.
    property string status: "loading" // "loading" | "ok" | "degraded" | "error" | "stale"
    property var pods: [] // Array of {app, ready, cpuM, memMi}
    property int readyCount: 0
    property int totalCount: 0
    property var quota: null // quota object from metrics script
    property int lastUpdatedSecs: 0 // seconds since last successful status fetch

    // Fetches pod readiness; merges in the last-known metrics so CPU/mem
    // readings don't flash back to -1 between metrics polls.
    Process {
        id: statusFetch
        command: ["bash", Config.scriptsDir + "/k8s-status.sh", Config.kubeNamespace]
        stdout: StdioCollector {
            onStreamFinished: {
                const raw = this.text.trim();
                if (raw === "") {
                    // Empty output: keep showing data as "stale" if we ever had any.
                    root.status = (root.status === "ok" || root.status === "degraded") ? "stale" : "error";
                    return;
                }
                try {
                    const parsed = JSON.parse(raw);

                    // Index the previous pod list by app name to carry metrics over.
                    const previous = {};
                    root.pods.forEach(p => { previous[p.app] = p; });

                    const merged = [];
                    for (const pod of parsed.pods) {
                        merged.push({
                            app: pod.app,
                            ready: pod.ready,
                            cpuM: previous[pod.app]?.cpuM ?? -1,
                            memMi: previous[pod.app]?.memMi ?? -1
                        });
                    }
                    root.pods = merged;
                    root.readyCount = parsed.readyCount;
                    root.totalCount = parsed.totalCount;

                    let allReady = true;
                    for (const pod of parsed.pods) {
                        if (!pod.ready) {
                            allReady = false;
                            break;
                        }
                    }
                    root.status = allReady ? "ok" : "degraded";
                    root.lastUpdatedSecs = 0;
                } catch(e) {
                    console.warn("Kubernetes: status parse error:", e);
                    root.status = (root.status === "ok" || root.status === "degraded") ? "stale" : "error";
                }
            }
        }
    }

    // Fetches per-pod CPU/memory and the namespace quota; readiness is
    // left untouched (that's the status fetch's job).
    Process {
        id: metricsFetch
        command: ["bash", Config.scriptsDir + "/k8s-metrics.sh", Config.kubeNamespace]
        stdout: StdioCollector {
            onStreamFinished: {
                const raw = this.text.trim();
                if (raw === "")
                    return;
                try {
                    const parsed = JSON.parse(raw);

                    const latest = {};
                    for (const m of parsed.podMetrics)
                        latest[m.app] = m;

                    root.pods = root.pods.map(p => ({
                        app: p.app,
                        ready: p.ready,
                        cpuM: latest[p.app]?.cpuM ?? -1,
                        memMi: latest[p.app]?.memMi ?? -1
                    }));
                    root.quota = parsed.quota;
                } catch(e) {
                    console.warn("Kubernetes: metrics parse error:", e);
                }
            }
        }
    }

    // Ticker: increments lastUpdatedSecs every second (only once status is known)
    Timer {
        repeat: true
        interval: 1000
        running: root.status !== "loading"
        onTriggered: root.lastUpdatedSecs++
    }

    // Status poller
    Timer {
        repeat: true
        running: true
        interval: Config.kubeStatusRefreshMs
        onTriggered: statusFetch.running = true
    }

    // Metrics poller
    Timer {
        repeat: true
        running: true
        interval: Config.kubeMetricsRefreshMs
        onTriggered: metricsFetch.running = true
    }

    // Stagger: fire metricsFetch once at startup (500ms after status)
    Timer {
        id: metricsKickoff
        repeat: false
        interval: 500
        onTriggered: metricsFetch.running = true
    }

    // Staleness check: flip to "stale" if no successful fetch for >60s
    Timer {
        repeat: true
        running: true
        interval: 10000
        onTriggered: {
            const hasLiveData = root.status === "ok" || root.status === "degraded";
            if (root.lastUpdatedSecs > 60 && hasLiveData)
                root.status = "stale";
        }
    }

    Component.onCompleted: {
        statusFetch.running = true;
        metricsKickoff.running = true;
    }
}
|
||||
Reference in New Issue
Block a user