Skip to content

Commit cea59e4

Browse files
committed
refactor(core): use typed clientsets in health check diagnostics
Replace unstructured resource access with typed CoreV1 and AppsV1 clientsets for improved type safety and code clarity.

Signed-off-by: Rohit Patil <ropatil@redhat.com>
1 parent 60c6270 commit cea59e4

File tree

1 file changed

+88
-124
lines changed

1 file changed

+88
-124
lines changed

pkg/toolsets/core/health_check.go

Lines changed: 88 additions & 124 deletions
Original file line numberDiff line numberDiff line change
@@ -247,84 +247,59 @@ func gatherNodeDiagnostics(params api.PromptHandlerParams) (string, error) {
247247
return sb.String(), nil
248248
}
249249

250-
// gatherPodDiagnostics collects pod status using existing methods
250+
// gatherPodDiagnostics collects pod status using CoreV1 clientset
251251
func gatherPodDiagnostics(params api.PromptHandlerParams, namespace string) (string, error) {
252-
var podList interface{ UnstructuredContent() map[string]interface{} }
253-
var err error
254-
255-
if namespace != "" {
256-
podList, err = kubernetes.NewCore(params).PodsListInNamespace(params, namespace, api.ListOptions{})
257-
} else {
258-
podList, err = kubernetes.NewCore(params).PodsListInAllNamespaces(params, api.ListOptions{})
259-
}
260-
252+
podList, err := params.CoreV1().Pods(namespace).List(params.Context, metav1.ListOptions{})
261253
if err != nil {
262254
return "", err
263255
}
264256

265-
items, ok := podList.UnstructuredContent()["items"].([]interface{})
266-
if !ok {
257+
if len(podList.Items) == 0 {
267258
return "No pods found", nil
268259
}
269260

270-
totalPods := len(items)
261+
totalPods := len(podList.Items)
271262
problemPods := []string{}
272263

273-
for _, item := range items {
274-
podMap, ok := item.(map[string]interface{})
275-
if !ok {
276-
continue
277-
}
278-
279-
metadata, _ := podMap["metadata"].(map[string]interface{})
280-
name, _ := metadata["name"].(string)
281-
ns, _ := metadata["namespace"].(string)
282-
283-
status, _ := podMap["status"].(map[string]interface{})
284-
phase, _ := status["phase"].(string)
285-
containerStatuses, _ := status["containerStatuses"].([]interface{})
286-
264+
for _, pod := range podList.Items {
287265
issues := []string{}
288266
restarts := int32(0)
289267
readyCount := 0
290-
totalContainers := len(containerStatuses)
268+
totalContainers := len(pod.Status.ContainerStatuses)
291269

292270
// Check container statuses
293-
for _, cs := range containerStatuses {
294-
csMap, _ := cs.(map[string]interface{})
295-
ready, _ := csMap["ready"].(bool)
296-
restartCount, _ := csMap["restartCount"].(float64)
297-
restarts += int32(restartCount)
298-
299-
if ready {
271+
for _, cs := range pod.Status.ContainerStatuses {
272+
if cs.Ready {
300273
readyCount++
301274
}
275+
restarts += cs.RestartCount
302276

303-
state, _ := csMap["state"].(map[string]interface{})
304-
if waiting, ok := state["waiting"].(map[string]interface{}); ok {
305-
reason, _ := waiting["reason"].(string)
306-
message, _ := waiting["message"].(string)
277+
// Check waiting state
278+
if cs.State.Waiting != nil {
279+
reason := cs.State.Waiting.Reason
307280
if reason == "CrashLoopBackOff" || reason == "ImagePullBackOff" || reason == "ErrImagePull" {
308-
issues = append(issues, fmt.Sprintf("Container waiting: %s - %s", reason, message))
281+
issues = append(issues, fmt.Sprintf("Container waiting: %s - %s", reason, cs.State.Waiting.Message))
309282
}
310283
}
311-
if terminated, ok := state["terminated"].(map[string]interface{}); ok {
312-
reason, _ := terminated["reason"].(string)
284+
285+
// Check terminated state
286+
if cs.State.Terminated != nil {
287+
reason := cs.State.Terminated.Reason
313288
if reason == "Error" || reason == "OOMKilled" {
314289
issues = append(issues, fmt.Sprintf("Container terminated: %s", reason))
315290
}
316291
}
317292
}
318293

319294
// Check pod phase
320-
if phase != "Running" && phase != "Succeeded" {
321-
issues = append(issues, fmt.Sprintf("Pod in %s phase", phase))
295+
if pod.Status.Phase != v1.PodRunning && pod.Status.Phase != v1.PodSucceeded {
296+
issues = append(issues, fmt.Sprintf("Pod in %s phase", pod.Status.Phase))
322297
}
323298

324299
// Report pods with issues or high restart count
325300
if len(issues) > 0 || restarts > 5 {
326301
problemPods = append(problemPods, fmt.Sprintf("- **%s/%s** (Phase: %s, Ready: %d/%d, Restarts: %d)\n - %s",
327-
ns, name, phase, readyCount, totalContainers, restarts, strings.Join(issues, "\n - ")))
302+
pod.Namespace, pod.Name, pod.Status.Phase, readyCount, totalContainers, restarts, strings.Join(issues, "\n - ")))
328303
}
329304
}
330305

@@ -339,79 +314,86 @@ func gatherPodDiagnostics(params api.PromptHandlerParams, namespace string) (str
339314
return sb.String(), nil
340315
}
341316

342-
// gatherWorkloadDiagnostics collects workload controller status
317+
// gatherWorkloadDiagnostics collects workload controller status using AppsV1 clientset
343318
func gatherWorkloadDiagnostics(params api.PromptHandlerParams, kind string, namespace string) (string, error) {
344-
gvk := &schema.GroupVersionKind{
345-
Group: "apps",
346-
Version: "v1",
347-
Kind: kind,
348-
}
349-
350-
workloadList, err := kubernetes.NewCore(params).ResourcesList(params, gvk, namespace, api.ListOptions{})
351-
if err != nil {
352-
return "", err
353-
}
354-
355-
items, ok := workloadList.UnstructuredContent()["items"].([]interface{})
356-
if !ok || len(items) == 0 {
357-
return fmt.Sprintf("No %ss found", kind), nil
358-
}
359-
360319
workloadsWithIssues := []string{}
361320

362-
for _, item := range items {
363-
workloadMap, ok := item.(map[string]interface{})
364-
if !ok {
365-
continue
321+
switch kind {
322+
case "Deployment":
323+
deploymentList, err := params.AppsV1().Deployments(namespace).List(params.Context, metav1.ListOptions{})
324+
if err != nil {
325+
return "", err
326+
}
327+
if len(deploymentList.Items) == 0 {
328+
return "No Deployments found", nil
366329
}
367330

368-
metadata, _ := workloadMap["metadata"].(map[string]interface{})
369-
name, _ := metadata["name"].(string)
370-
ns, _ := metadata["namespace"].(string)
331+
for _, deployment := range deploymentList.Items {
332+
issues := []string{}
333+
ready := fmt.Sprintf("%d/%d", deployment.Status.ReadyReplicas, deployment.Status.Replicas)
371334

372-
status, _ := workloadMap["status"].(map[string]interface{})
373-
spec, _ := workloadMap["spec"].(map[string]interface{})
374-
issues := []string{}
375-
ready := "Unknown"
335+
if deployment.Status.UnavailableReplicas > 0 {
336+
issues = append(issues, fmt.Sprintf("%d replicas unavailable", deployment.Status.UnavailableReplicas))
337+
}
376338

377-
switch kind {
378-
case "Deployment":
379-
replicas, _ := status["replicas"].(float64)
380-
readyReplicas, _ := status["readyReplicas"].(float64)
381-
unavailableReplicas, _ := status["unavailableReplicas"].(float64)
339+
if len(issues) > 0 {
340+
workloadsWithIssues = append(workloadsWithIssues, fmt.Sprintf("- **%s/%s** (Ready: %s)\n - %s",
341+
deployment.Namespace, deployment.Name, ready, strings.Join(issues, "\n - ")))
342+
}
343+
}
382344

383-
ready = fmt.Sprintf("%d/%d", int(readyReplicas), int(replicas))
345+
case "StatefulSet":
346+
statefulSetList, err := params.AppsV1().StatefulSets(namespace).List(params.Context, metav1.ListOptions{})
347+
if err != nil {
348+
return "", err
349+
}
350+
if len(statefulSetList.Items) == 0 {
351+
return "No StatefulSets found", nil
352+
}
384353

385-
if unavailableReplicas > 0 {
386-
issues = append(issues, fmt.Sprintf("%d replicas unavailable", int(unavailableReplicas)))
354+
for _, sts := range statefulSetList.Items {
355+
issues := []string{}
356+
specReplicas := int32(1)
357+
if sts.Spec.Replicas != nil {
358+
specReplicas = *sts.Spec.Replicas
387359
}
360+
ready := fmt.Sprintf("%d/%d", sts.Status.ReadyReplicas, specReplicas)
388361

389-
case "StatefulSet":
390-
specReplicas, _ := spec["replicas"].(float64)
391-
readyReplicas, _ := status["readyReplicas"].(float64)
392-
393-
ready = fmt.Sprintf("%d/%d", int(readyReplicas), int(specReplicas))
362+
if sts.Status.ReadyReplicas < specReplicas {
363+
issues = append(issues, fmt.Sprintf("Only %d/%d replicas ready", sts.Status.ReadyReplicas, specReplicas))
364+
}
394365

395-
if readyReplicas < specReplicas {
396-
issues = append(issues, fmt.Sprintf("Only %d/%d replicas ready", int(readyReplicas), int(specReplicas)))
366+
if len(issues) > 0 {
367+
workloadsWithIssues = append(workloadsWithIssues, fmt.Sprintf("- **%s/%s** (Ready: %s)\n - %s",
368+
sts.Namespace, sts.Name, ready, strings.Join(issues, "\n - ")))
397369
}
370+
}
398371

399-
case "DaemonSet":
400-
desiredNumberScheduled, _ := status["desiredNumberScheduled"].(float64)
401-
numberReady, _ := status["numberReady"].(float64)
402-
numberUnavailable, _ := status["numberUnavailable"].(float64)
372+
case "DaemonSet":
373+
daemonSetList, err := params.AppsV1().DaemonSets(namespace).List(params.Context, metav1.ListOptions{})
374+
if err != nil {
375+
return "", err
376+
}
377+
if len(daemonSetList.Items) == 0 {
378+
return "No DaemonSets found", nil
379+
}
403380

404-
ready = fmt.Sprintf("%d/%d", int(numberReady), int(desiredNumberScheduled))
381+
for _, ds := range daemonSetList.Items {
382+
issues := []string{}
383+
ready := fmt.Sprintf("%d/%d", ds.Status.NumberReady, ds.Status.DesiredNumberScheduled)
405384

406-
if numberUnavailable > 0 {
407-
issues = append(issues, fmt.Sprintf("%d pods unavailable", int(numberUnavailable)))
385+
if ds.Status.NumberUnavailable > 0 {
386+
issues = append(issues, fmt.Sprintf("%d pods unavailable", ds.Status.NumberUnavailable))
408387
}
409-
}
410388

411-
if len(issues) > 0 {
412-
workloadsWithIssues = append(workloadsWithIssues, fmt.Sprintf("- **%s/%s** (Ready: %s)\n - %s",
413-
ns, name, ready, strings.Join(issues, "\n - ")))
389+
if len(issues) > 0 {
390+
workloadsWithIssues = append(workloadsWithIssues, fmt.Sprintf("- **%s/%s** (Ready: %s)\n - %s",
391+
ds.Namespace, ds.Name, ready, strings.Join(issues, "\n - ")))
392+
}
414393
}
394+
395+
default:
396+
return "", fmt.Errorf("unsupported workload kind: %s", kind)
415397
}
416398

417399
var sb strings.Builder
@@ -425,41 +407,23 @@ func gatherWorkloadDiagnostics(params api.PromptHandlerParams, kind string, name
425407
return sb.String(), nil
426408
}
427409

428-
// gatherPVCDiagnostics collects PVC status
410+
// gatherPVCDiagnostics collects PVC status using CoreV1 clientset
429411
func gatherPVCDiagnostics(params api.PromptHandlerParams, namespace string) (string, error) {
430-
gvk := &schema.GroupVersionKind{
431-
Group: "",
432-
Version: "v1",
433-
Kind: "PersistentVolumeClaim",
434-
}
435-
436-
pvcList, err := kubernetes.NewCore(params).ResourcesList(params, gvk, namespace, api.ListOptions{})
412+
pvcList, err := params.CoreV1().PersistentVolumeClaims(namespace).List(params.Context, metav1.ListOptions{})
437413
if err != nil {
438414
return "", err
439415
}
440416

441-
items, ok := pvcList.UnstructuredContent()["items"].([]interface{})
442-
if !ok || len(items) == 0 {
417+
if len(pvcList.Items) == 0 {
443418
return "No PVCs found", nil
444419
}
445420

446421
pvcsWithIssues := []string{}
447422

448-
for _, item := range items {
449-
pvcMap, ok := item.(map[string]interface{})
450-
if !ok {
451-
continue
452-
}
453-
454-
metadata, _ := pvcMap["metadata"].(map[string]interface{})
455-
name, _ := metadata["name"].(string)
456-
ns, _ := metadata["namespace"].(string)
457-
458-
status, _ := pvcMap["status"].(map[string]interface{})
459-
phase, _ := status["phase"].(string)
460-
461-
if phase != "Bound" {
462-
pvcsWithIssues = append(pvcsWithIssues, fmt.Sprintf("- **%s/%s** (Status: %s)\n - PVC not bound", ns, name, phase))
423+
for _, pvc := range pvcList.Items {
424+
if pvc.Status.Phase != v1.ClaimBound {
425+
pvcsWithIssues = append(pvcsWithIssues, fmt.Sprintf("- **%s/%s** (Status: %s)\n - PVC not bound",
426+
pvc.Namespace, pvc.Name, pvc.Status.Phase))
463427
}
464428
}
465429

0 commit comments

Comments (0)