mirror of https://gitee.com/XM-GO/PandaX.git — synced 2026-04-24 03:18:35 +08:00
233 lines · 6.7 KiB · Go
package pods
|
|
|
|
import (
|
|
"fmt"
|
|
v1 "k8s.io/api/core/v1"
|
|
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
"pandax/apps/devops/services/k8s/common"
|
|
"pandax/apps/devops/services/k8s/dataselect"
|
|
"pandax/apps/devops/services/k8s/event"
|
|
)
|
|
|
|
// getPodStatusPhase returns one of four pod status phases (Pending, Running, Succeeded, Failed, Unknown, Terminating)
|
|
func getPodStatusPhase(pod v1.Pod, warnings []common.Event) v1.PodPhase {
|
|
// For terminated pods that failed
|
|
if pod.Status.Phase == v1.PodFailed {
|
|
return v1.PodFailed
|
|
}
|
|
|
|
// For successfully terminated pods
|
|
if pod.Status.Phase == v1.PodSucceeded {
|
|
return v1.PodSucceeded
|
|
}
|
|
|
|
ready := false
|
|
initialized := false
|
|
for _, c := range pod.Status.Conditions {
|
|
if c.Type == v1.PodReady {
|
|
ready = c.Status == v1.ConditionTrue
|
|
}
|
|
if c.Type == v1.PodInitialized {
|
|
initialized = c.Status == v1.ConditionTrue
|
|
}
|
|
}
|
|
|
|
if initialized && ready && pod.Status.Phase == v1.PodRunning {
|
|
return v1.PodRunning
|
|
}
|
|
|
|
// If the pod would otherwise be pending but has warning then label it as
|
|
// failed and show and error to the user.
|
|
if len(warnings) > 0 {
|
|
return v1.PodFailed
|
|
}
|
|
|
|
if pod.DeletionTimestamp != nil && pod.Status.Reason == "NodeLost" {
|
|
return v1.PodUnknown
|
|
} else if pod.DeletionTimestamp != nil {
|
|
return "Terminating"
|
|
}
|
|
|
|
// pending
|
|
return v1.PodPending
|
|
}
|
|
|
|
// PodCell wraps v1.Pod so it can be sorted and filtered through the dataselect framework.
type PodCell v1.Pod
|
|
|
|
func (self PodCell) GetProperty(name dataselect.PropertyName) dataselect.ComparableValue {
|
|
switch name {
|
|
case dataselect.NameProperty:
|
|
return dataselect.StdComparableString(self.ObjectMeta.Name)
|
|
case dataselect.StatusProperty:
|
|
return dataselect.StdComparableString(getPodStatus(v1.Pod(self)))
|
|
case dataselect.CreationTimestampProperty:
|
|
return dataselect.StdComparableTime(self.ObjectMeta.CreationTimestamp.Time)
|
|
case dataselect.NamespaceProperty:
|
|
return dataselect.StdComparableString(self.ObjectMeta.Namespace)
|
|
default:
|
|
// if name is not supported then just return a constant dummy value, sort will have no effect.
|
|
return nil
|
|
}
|
|
}
|
|
|
|
// getPodStatus returns status string calculated based on the same logic as kubectl
|
|
// Base code: https://github.com/kubernetes/kubernetes/blob/master/pkg/printers/internalversion/printers.go#L734
|
|
func getPodStatus(pod v1.Pod) string {
|
|
restarts := 0
|
|
readyContainers := 0
|
|
|
|
reason := string(pod.Status.Phase)
|
|
if pod.Status.Reason != "" {
|
|
reason = pod.Status.Reason
|
|
}
|
|
|
|
initializing := false
|
|
for i := range pod.Status.InitContainerStatuses {
|
|
container := pod.Status.InitContainerStatuses[i]
|
|
restarts += int(container.RestartCount)
|
|
switch {
|
|
case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
|
|
continue
|
|
case container.State.Terminated != nil:
|
|
// initialization is failed
|
|
if len(container.State.Terminated.Reason) == 0 {
|
|
if container.State.Terminated.Signal != 0 {
|
|
reason = fmt.Sprintf("Init: Signal %d", container.State.Terminated.Signal)
|
|
} else {
|
|
reason = fmt.Sprintf("Init: ExitCode %d", container.State.Terminated.ExitCode)
|
|
}
|
|
} else {
|
|
reason = "Init:" + container.State.Terminated.Reason
|
|
}
|
|
initializing = true
|
|
case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing":
|
|
reason = fmt.Sprintf("Init: %s", container.State.Waiting.Reason)
|
|
initializing = true
|
|
default:
|
|
reason = fmt.Sprintf("Init: %d/%d", i, len(pod.Spec.InitContainers))
|
|
initializing = true
|
|
}
|
|
break
|
|
}
|
|
if !initializing {
|
|
restarts = 0
|
|
hasRunning := false
|
|
for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
|
|
container := pod.Status.ContainerStatuses[i]
|
|
|
|
restarts += int(container.RestartCount)
|
|
if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
|
|
reason = container.State.Waiting.Reason
|
|
} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
|
|
reason = container.State.Terminated.Reason
|
|
} else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
|
|
if container.State.Terminated.Signal != 0 {
|
|
reason = fmt.Sprintf("Signal: %d", container.State.Terminated.Signal)
|
|
} else {
|
|
reason = fmt.Sprintf("ExitCode: %d", container.State.Terminated.ExitCode)
|
|
}
|
|
} else if container.Ready && container.State.Running != nil {
|
|
hasRunning = true
|
|
readyContainers++
|
|
}
|
|
}
|
|
|
|
// change pod status back to "Running" if there is at least one container still reporting as "Running" status
|
|
if reason == "Completed" && hasRunning {
|
|
if hasPodReadyCondition(pod.Status.Conditions) {
|
|
reason = string(v1.PodRunning)
|
|
} else {
|
|
reason = "NotReady"
|
|
}
|
|
}
|
|
}
|
|
|
|
if pod.DeletionTimestamp != nil && pod.Status.Reason == "NodeLost" {
|
|
reason = string(v1.PodUnknown)
|
|
} else if pod.DeletionTimestamp != nil {
|
|
reason = "Terminating"
|
|
}
|
|
|
|
if len(reason) == 0 {
|
|
reason = string(v1.PodUnknown)
|
|
}
|
|
|
|
return reason
|
|
}
|
|
|
|
// getRestartCount return the restart count of given pod (total number of its containers restarts).
|
|
func getRestartCount(pod v1.Pod) int32 {
|
|
var restartCount int32 = 0
|
|
for _, containerStatus := range pod.Status.ContainerStatuses {
|
|
restartCount += containerStatus.RestartCount
|
|
}
|
|
return restartCount
|
|
}
|
|
|
|
func toCells(std []v1.Pod) []dataselect.DataCell {
|
|
cells := make([]dataselect.DataCell, len(std))
|
|
for i := range std {
|
|
cells[i] = PodCell(std[i])
|
|
}
|
|
return cells
|
|
}
|
|
|
|
func fromCells(cells []dataselect.DataCell) []v1.Pod {
|
|
std := make([]v1.Pod, len(cells))
|
|
for i := range std {
|
|
std[i] = v1.Pod(cells[i].(PodCell))
|
|
}
|
|
return std
|
|
}
|
|
|
|
func getStatus(list *v1.PodList, events []v1.Event) common.ResourceStatus {
|
|
info := common.ResourceStatus{}
|
|
if list == nil {
|
|
return info
|
|
}
|
|
|
|
for _, pod := range list.Items {
|
|
warnings := event.GetPodsEventWarnings(events, []v1.Pod{pod})
|
|
switch getPodStatusPhase(pod, warnings) {
|
|
case v1.PodFailed:
|
|
info.Failed++
|
|
case v1.PodSucceeded:
|
|
info.Succeeded++
|
|
case v1.PodRunning:
|
|
info.Running++
|
|
case v1.PodPending:
|
|
info.Pending++
|
|
case v1.PodUnknown:
|
|
info.Unknown++
|
|
case "Terminating":
|
|
info.Terminating++
|
|
}
|
|
}
|
|
|
|
return info
|
|
}
|
|
|
|
func hasPodReadyCondition(conditions []v1.PodCondition) bool {
|
|
for _, condition := range conditions {
|
|
if condition.Type == v1.PodReady && condition.Status == v1.ConditionTrue {
|
|
return true
|
|
}
|
|
}
|
|
return false
|
|
}
|
|
|
|
func getPodConditions(pod v1.Pod) []common.Condition {
|
|
var conditions []common.Condition
|
|
for _, condition := range pod.Status.Conditions {
|
|
conditions = append(conditions, common.Condition{
|
|
Type: string(condition.Type),
|
|
Status: meta.ConditionStatus(condition.Status),
|
|
LastProbeTime: condition.LastProbeTime,
|
|
LastTransitionTime: condition.LastTransitionTime,
|
|
Reason: condition.Reason,
|
|
Message: condition.Message,
|
|
})
|
|
}
|
|
return conditions
|
|
}
|