【修改】k8s 配置

This commit is contained in:
PandaGoAdmin
2022-01-22 17:07:04 +08:00
parent c6ebe89865
commit 33cc74711d
439 changed files with 9936 additions and 21687 deletions

View File

@@ -0,0 +1,13 @@
package pods
import (
"k8s.io/client-go/kubernetes"
"pandax/apps/devops/services/k8s/common"
"pandax/apps/devops/services/k8s/dataselect"
"pandax/apps/devops/services/k8s/event"
)
// GetEventsForPod returns the events associated with the given pod in the
// given namespace, selected/paged according to dsQuery.
func GetEventsForPod(client *kubernetes.Clientset, dsQuery *dataselect.DataSelectQuery, namespace, podName string) (*common.EventList, error) {
	eventList, err := event.GetResourceEvents(client, dsQuery, namespace, podName)
	return eventList, err
}

View File

@@ -0,0 +1,41 @@
package pods
import (
"fmt"
"strings"
"k8s.io/apimachinery/pkg/api/meta"
)
// FormatMap renders m as newline-separated `key="value"` pairs (values are
// quoted with %q). Returns "" for a nil or empty map.
//
// Note: Go map iteration order is unspecified, so the relative order of the
// emitted lines is not deterministic.
func FormatMap(m map[string]string) string {
	// Use strings.Builder instead of string += in a loop, which is quadratic.
	var b strings.Builder
	for key, value := range m {
		fmt.Fprintf(&b, "%v=%q\n", key, value)
	}
	return strings.TrimSuffix(b.String(), "\n")
}
// ExtractFieldPathAsString extracts the field from the given object and
// returns it as a string. The object must be a pointer to an API type.
//
// Supported field paths are metadata.annotations, metadata.labels,
// metadata.name and metadata.namespace; any other path yields an error.
func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		// BUG FIX: the original returned `"", nil` here, silently swallowing
		// the accessor error; propagate it to the caller instead.
		return "", err
	}
	switch fieldPath {
	case "metadata.annotations":
		return FormatMap(accessor.GetAnnotations()), nil
	case "metadata.labels":
		return FormatMap(accessor.GetLabels()), nil
	case "metadata.name":
		return accessor.GetName(), nil
	case "metadata.namespace":
		return accessor.GetNamespace(), nil
	}
	return "", fmt.Errorf("unsupported fieldPath: %v", fieldPath)
}

View File

@@ -0,0 +1,147 @@
package pods
import (
"context"
"io"
v1 "k8s.io/api/core/v1"
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"pandax/apps/devops/services/pkg/k8s/logs"
)
// lineReadLimit caps the number of log lines requested from the apiserver
// when tailing from the end of the log.
var lineReadLimit int64 = 5000

// byteReadLimit caps the number of log bytes requested from the apiserver
// when reading from the beginning of the log.
var byteReadLimit int64 = 500000

// PodContainerList is a list of containers of a pod.
type PodContainerList struct {
	Containers []string `json:"containers"`
}
// GetPodContainers lists the names of all containers declared in the spec
// of the pod identified by namespace/podID.
func GetPodContainers(client kubernetes.Interface, namespace, podID string) (*PodContainerList, error) {
	pod, err := client.CoreV1().Pods(namespace).Get(context.TODO(), podID, metaV1.GetOptions{})
	if err != nil {
		return nil, err
	}
	list := &PodContainerList{Containers: make([]string, 0, len(pod.Spec.Containers))}
	for i := range pod.Spec.Containers {
		list.Containers = append(list.Containers, pod.Spec.Containers[i].Name)
	}
	return list, nil
}
// GetLogDetails returns logs for a particular pod and container. When
// container is empty, logs for the pod's first container are returned.
// usePreviousLogs selects archived logs created by log rotation or a
// container crash.
func GetLogDetails(client kubernetes.Interface, namespace, podID string, container string,
	logSelector *logs.Selection, usePreviousLogs bool) (*logs.LogDetails, error) {
	pod, err := client.CoreV1().Pods(namespace).Get(context.TODO(), podID, metaV1.GetOptions{})
	if err != nil {
		return nil, err
	}
	if container == "" {
		// Default to the first container of the pod.
		container = pod.Spec.Containers[0].Name
	}
	opts := mapToLogOptions(container, logSelector, usePreviousLogs)
	raw, err := readRawLogs(client, namespace, podID, opts)
	if err != nil {
		return nil, err
	}
	return ConstructLogDetails(podID, raw, container, logSelector), nil
}
// mapToLogOptions translates a log selection into the corresponding API
// object. A read limit is always set to avoid out-of-memory issues: a byte
// limit when reading from the beginning, a line limit when tailing the end.
func mapToLogOptions(container string, logSelector *logs.Selection, previous bool) *v1.PodLogOptions {
	opts := &v1.PodLogOptions{
		Container:  container,
		Follow:     false,
		Previous:   previous,
		Timestamps: true,
	}
	if logSelector.LogFilePosition == logs.Beginning {
		opts.LimitBytes = &byteReadLimit
		return opts
	}
	opts.TailLines = &lineReadLimit
	return opts
}
// readRawLogs opens a log request for the pod and reads the whole stream
// into a string. A failure to open the stream is deliberately reported as
// the "log" text itself (with a nil error) so it is surfaced to the user.
func readRawLogs(client kubernetes.Interface, namespace, podID string, logOptions *v1.PodLogOptions) (string, error) {
	stream, err := openStream(client, namespace, podID, logOptions)
	if err != nil {
		return err.Error(), nil
	}
	defer stream.Close()

	data, readErr := io.ReadAll(stream)
	if readErr != nil {
		return "", readErr
	}
	return string(data), nil
}
// GetLogFile returns a stream of the log file which can be piped directly
// to the response, avoiding out-of-memory issues. opts supplies Previous
// (archived logs after rotation/crash) and Timestamps.
func GetLogFile(client kubernetes.Interface, namespace, podID string, container string, opts *v1.PodLogOptions) (io.ReadCloser, error) {
	return openStream(client, namespace, podID, &v1.PodLogOptions{
		Container:  container,
		Follow:     false,
		Previous:   opts.Previous,
		Timestamps: opts.Timestamps,
	})
}
// openStream issues a GET against the pod's "log" subresource and returns
// the response body as a stream.
func openStream(client kubernetes.Interface, namespace, podID string, logOptions *v1.PodLogOptions) (io.ReadCloser, error) {
	req := client.CoreV1().RESTClient().Get().
		Resource("pods").
		Namespace(namespace).
		Name(podID).
		SubResource("log").
		VersionedParams(logOptions, scheme.ParameterCodec)
	return req.Stream(context.TODO())
}
// ConstructLogDetails builds the log details structure from raw log text,
// applying the given selection and flagging truncation when the apiserver
// read limit was hit on the last page.
func ConstructLogDetails(podID string, rawLogs string, container string, logSelector *logs.Selection) *logs.LogDetails {
	lines := logs.ToLogLines(rawLogs)
	selected, fromDate, toDate, selection, lastPage := lines.SelectLogs(logSelector)
	limitHit := isReadLimitReached(int64(len(rawLogs)), int64(len(lines)), logSelector.LogFilePosition)
	return &logs.LogDetails{
		Info: logs.LogInfo{
			PodName:       podID,
			ContainerName: container,
			FromDate:      fromDate,
			ToDate:        toDate,
			Truncated:     limitHit && lastPage,
		},
		Selection: selection,
		LogLines:  selected,
	}
}
// isReadLimitReached reports whether the amount of log data returned from
// the apiserver reached the configured read limit: bytes when reading from
// the beginning, lines when tailing the end.
func isReadLimitReached(bytesLoaded int64, linesLoaded int64, logFilePosition string) bool {
	switch logFilePosition {
	case logs.Beginning:
		return bytesLoaded >= byteReadLimit
	case logs.End:
		return linesLoaded >= lineReadLimit
	}
	return false
}

View File

@@ -0,0 +1,232 @@
package pods
import (
"fmt"
v1 "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"pandax/apps/devops/services/k8s/common"
"pandax/apps/devops/services/k8s/dataselect"
"pandax/apps/devops/services/k8s/event"
)
// getPodStatusPhase maps a pod (plus its warning events) to a single display
// phase: Failed, Succeeded, Running, Pending, Unknown, or the pseudo-phase
// "Terminating".
func getPodStatusPhase(pod v1.Pod, warnings []common.Event) v1.PodPhase {
	// Terminated pods keep their terminal phase.
	if pod.Status.Phase == v1.PodFailed {
		return v1.PodFailed
	}
	if pod.Status.Phase == v1.PodSucceeded {
		return v1.PodSucceeded
	}

	var ready, initialized bool
	for _, c := range pod.Status.Conditions {
		switch c.Type {
		case v1.PodReady:
			ready = c.Status == v1.ConditionTrue
		case v1.PodInitialized:
			initialized = c.Status == v1.ConditionTrue
		}
	}
	if initialized && ready && pod.Status.Phase == v1.PodRunning {
		return v1.PodRunning
	}

	// A pod that would otherwise be Pending but has warning events is
	// surfaced as Failed so the user sees the error.
	if len(warnings) > 0 {
		return v1.PodFailed
	}

	if pod.DeletionTimestamp != nil {
		if pod.Status.Reason == "NodeLost" {
			return v1.PodUnknown
		}
		return "Terminating"
	}

	return v1.PodPending
}
// PodCell wraps v1.Pod so pods can participate in the generic dataselect
// machinery (sorting/filtering).
type PodCell v1.Pod

// GetProperty exposes the sortable/filterable properties of a pod.
// Unsupported property names return nil, which makes sorting a no-op.
func (c PodCell) GetProperty(name dataselect.PropertyName) dataselect.ComparableValue {
	switch name {
	case dataselect.NameProperty:
		return dataselect.StdComparableString(c.ObjectMeta.Name)
	case dataselect.StatusProperty:
		return dataselect.StdComparableString(getPodStatus(v1.Pod(c)))
	case dataselect.CreationTimestampProperty:
		return dataselect.StdComparableTime(c.ObjectMeta.CreationTimestamp.Time)
	case dataselect.NamespaceProperty:
		return dataselect.StdComparableString(c.ObjectMeta.Namespace)
	}
	return nil
}
// getPodStatus returns status string calculated based on the same logic as kubectl.
// Base code: https://github.com/kubernetes/kubernetes/blob/master/pkg/printers/internalversion/printers.go#L734
// The result is a human-readable reason such as "Running", "Completed",
// "Init: 0/2", "CrashLoopBackOff" or "Terminating".
func getPodStatus(pod v1.Pod) string {
	// NOTE(review): restarts and readyContainers are tallied below but never
	// used in the returned string — confirm whether they were meant to be
	// surfaced or can be removed.
	restarts := 0
	readyContainers := 0
	// Start from the phase/reason reported in the pod status.
	reason := string(pod.Status.Phase)
	if pod.Status.Reason != "" {
		reason = pod.Status.Reason
	}
	initializing := false
	// Init containers run sequentially; report on the first one that has not
	// yet completed successfully, then stop (break at the end of the loop).
	for i := range pod.Status.InitContainerStatuses {
		container := pod.Status.InitContainerStatuses[i]
		restarts += int(container.RestartCount)
		switch {
		case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
			// This init container finished successfully; look at the next one.
			continue
		case container.State.Terminated != nil:
			// initialization is failed
			if len(container.State.Terminated.Reason) == 0 {
				// No textual reason: fall back to signal / exit code.
				if container.State.Terminated.Signal != 0 {
					reason = fmt.Sprintf("Init: Signal %d", container.State.Terminated.Signal)
				} else {
					reason = fmt.Sprintf("Init: ExitCode %d", container.State.Terminated.ExitCode)
				}
			} else {
				reason = "Init:" + container.State.Terminated.Reason
			}
			initializing = true
		case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing":
			reason = fmt.Sprintf("Init: %s", container.State.Waiting.Reason)
			initializing = true
		default:
			// Still initializing: show progress as current/total init containers.
			reason = fmt.Sprintf("Init: %d/%d", i, len(pod.Spec.InitContainers))
			initializing = true
		}
		break
	}
	if !initializing {
		// Initialization done: derive the status from the app containers,
		// scanning last-to-first so earlier containers win the final reason.
		restarts = 0
		hasRunning := false
		for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
			container := pod.Status.ContainerStatuses[i]
			restarts += int(container.RestartCount)
			if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
				reason = container.State.Waiting.Reason
			} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
				reason = container.State.Terminated.Reason
			} else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
				// Terminated without a reason: fall back to signal / exit code.
				if container.State.Terminated.Signal != 0 {
					reason = fmt.Sprintf("Signal: %d", container.State.Terminated.Signal)
				} else {
					reason = fmt.Sprintf("ExitCode: %d", container.State.Terminated.ExitCode)
				}
			} else if container.Ready && container.State.Running != nil {
				hasRunning = true
				readyContainers++
			}
		}
		// change pod status back to "Running" if there is at least one container still reporting as "Running" status
		if reason == "Completed" && hasRunning {
			if hasPodReadyCondition(pod.Status.Conditions) {
				reason = string(v1.PodRunning)
			} else {
				reason = "NotReady"
			}
		}
	}
	// Deletion overrides everything: lost-node pods show Unknown, all other
	// deleting pods show Terminating.
	if pod.DeletionTimestamp != nil && pod.Status.Reason == "NodeLost" {
		reason = string(v1.PodUnknown)
	} else if pod.DeletionTimestamp != nil {
		reason = "Terminating"
	}
	if len(reason) == 0 {
		reason = string(v1.PodUnknown)
	}
	return reason
}
// getRestartCount returns the restart count of the given pod: the sum of
// the restart counts of its (non-init) containers.
func getRestartCount(pod v1.Pod) int32 {
	var total int32
	for _, cs := range pod.Status.ContainerStatuses {
		total += cs.RestartCount
	}
	return total
}
// toCells wraps each pod in a PodCell so the generic dataselect machinery
// can operate on the list.
func toCells(std []v1.Pod) []dataselect.DataCell {
	cells := make([]dataselect.DataCell, len(std))
	for i, p := range std {
		cells[i] = PodCell(p)
	}
	return cells
}
// fromCells unwraps a list of DataCells (as produced by toCells) back into
// plain v1.Pod values.
func fromCells(cells []dataselect.DataCell) []v1.Pod {
	pods := make([]v1.Pod, len(cells))
	for i, cell := range cells {
		pods[i] = v1.Pod(cell.(PodCell))
	}
	return pods
}
// getStatus tallies how many pods in the list fall into each display phase
// (Failed/Succeeded/Running/Pending/Unknown/Terminating), using each pod's
// warning events for classification. A nil list yields a zero status.
func getStatus(list *v1.PodList, events []v1.Event) common.ResourceStatus {
	var info common.ResourceStatus
	if list == nil {
		return info
	}
	for _, pod := range list.Items {
		warnings := event.GetPodsEventWarnings(events, []v1.Pod{pod})
		phase := getPodStatusPhase(pod, warnings)
		switch phase {
		case v1.PodFailed:
			info.Failed++
		case v1.PodSucceeded:
			info.Succeeded++
		case v1.PodRunning:
			info.Running++
		case v1.PodPending:
			info.Pending++
		case v1.PodUnknown:
			info.Unknown++
		case "Terminating":
			info.Terminating++
		}
	}
	return info
}
// hasPodReadyCondition reports whether the PodReady condition is present
// and true among the given conditions.
func hasPodReadyCondition(conditions []v1.PodCondition) bool {
	for i := range conditions {
		if conditions[i].Type == v1.PodReady && conditions[i].Status == v1.ConditionTrue {
			return true
		}
	}
	return false
}
// getPodConditions converts the pod's status conditions into the
// presentation-layer common.Condition form. Returns a nil slice when the
// pod has no conditions (preserves JSON `null` semantics).
func getPodConditions(pod v1.Pod) []common.Condition {
	var out []common.Condition
	for _, c := range pod.Status.Conditions {
		converted := common.Condition{
			Type:               string(c.Type),
			Status:             meta.ConditionStatus(c.Status),
			LastProbeTime:      c.LastProbeTime,
			LastTransitionTime: c.LastTransitionTime,
			Reason:             c.Reason,
			Message:            c.Message,
		}
		out = append(out, converted)
	}
	return out
}

View File

@@ -0,0 +1,419 @@
package pods
import (
"context"
"encoding/base64"
"fmt"
v1 "k8s.io/api/core/v1"
res "k8s.io/apimachinery/pkg/api/resource"
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
"math"
"pandax/apps/devops/entity/k8s"
k8scommon "pandax/apps/devops/services/k8s/common"
"pandax/apps/devops/services/k8s/controller"
"pandax/apps/devops/services/k8s/dataselect"
"pandax/apps/devops/services/pkg/k8s/pvc"
"strconv"
)
// PodDetail is a presentation layer view of a Kubernetes Pod resource:
// the pod itself plus related data assembled from other sources (owner
// controller, events, persistent volume claims).
type PodDetail struct {
	ObjectMeta         k8s.ObjectMeta            `json:"objectMeta"`
	TypeMeta           k8s.TypeMeta              `json:"typeMeta"`
	PodPhase           string                    `json:"podPhase"`
	PodIP              string                    `json:"podIP"`
	NodeName           string                    `json:"nodeName"`
	ServiceAccountName string                    `json:"serviceAccountName"`
	// RestartCount is the sum of all container restart counts.
	RestartCount       int32                     `json:"restartCount"`
	QOSClass           string                    `json:"qosClass"`
	// Controller is the pod's owning resource, if any.
	Controller         *controller.ResourceOwner `json:"controller,omitempty"`
	Containers         []Container               `json:"containers"`
	InitContainers     []Container               `json:"initContainers"`
	Conditions         []k8scommon.Condition     `json:"conditions"`
	ImagePullSecrets   []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
	EventList          k8scommon.EventList       `json:"eventList"`
	PersistentvolumeclaimList pvc.PersistentVolumeClaimList `json:"persistentVolumeClaimList"`
	SecurityContext    *v1.PodSecurityContext    `json:"securityContext"`
}

// Container represents a docker/rkt/etc. container that lives in a pod.
type Container struct {
	// Name of the container.
	Name string `json:"name"`
	// Image URI of the container.
	Image string `json:"image"`
	// Ports of the container
	Ports []v1.ContainerPort `json:"ports"`
	// List of environment variables (including expanded envFrom entries).
	Env []EnvVar `json:"env"`
	// Commands of the container
	Commands []string `json:"commands"`
	// Command arguments
	Args []string `json:"args"`
	// Information about mounted volumes
	VolumeMounts []VolumeMount `json:"volumeMounts"`
	// Security configuration that will be applied to a container.
	SecurityContext *v1.SecurityContext `json:"securityContext"`
	// Status of a pod container; nil when no matching status exists.
	Status *v1.ContainerStatus `json:"status"`
	// Resource limits/requests (cpu, memory) of the container.
	Resources v1.ResourceRequirements `json:"resource"`
	// Probes
	LivenessProbe  *v1.Probe     `json:"livenessProbe"`
	ReadinessProbe *v1.Probe     `json:"readinessProbe"`
	StartupProbe   *v1.Probe     `json:"startupProbe"`
	Lifecycle      *v1.Lifecycle `json:"lifecycle"`
	// ImagePullPolicy of a pod
	ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy"`
}

// EnvVar represents an environment variable of a container.
type EnvVar struct {
	// Name of the variable.
	Name string `json:"name"`
	// Value of the variable. May be empty if value from is defined.
	Value string `json:"value"`
	// Defined for derived variables. If non-null, the value is get from the reference.
	// Note that this is an API struct. This is intentional, as EnvVarSources are plain struct
	// references.
	ValueFrom *v1.EnvVarSource `json:"valueFrom"`
}

// VolumeMount pairs a container's mount point with the pod volume backing it.
type VolumeMount struct {
	// Name of the variable.
	Name string `json:"name"`
	// Is the volume read only ?
	ReadOnly bool `json:"readOnly"`
	// Path within the container at which the volume should be mounted. Must not contain ':'.
	MountPath string `json:"mountPath"`
	// Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
	SubPath string `json:"subPath"`
	// Information about the Volume itself
	Volume v1.Volume `json:"volume"`
}
// GetPodDetail returns the details of a named Pod from a particular namespace,
// combining the pod spec/status with its owning controller, events and
// persistent volume claims.
// NOTE(review): `global` does not appear in this file's visible import block —
// confirm it is imported (e.g. a shared logging package) in the full file.
func GetPodDetail(client *kubernetes.Clientset, namespace, name string) (*PodDetail, error) {
	global.Log.Info(fmt.Sprintf("Getting details of %s pod in %s namespace", name, namespace))
	// Kick off the config map and secret list fetches concurrently; their
	// results are drained below after the pod itself is retrieved.
	channels := &k8scommon.ResourceChannels{
		ConfigMapList: k8scommon.GetConfigMapListChannel(client, k8scommon.NewSameNamespaceQuery(namespace), 1),
		SecretList:    k8scommon.GetSecretListChannel(client, k8scommon.NewSameNamespaceQuery(namespace), 1),
	}
	pod, err := client.CoreV1().Pods(namespace).Get(context.TODO(), name, metaV1.GetOptions{})
	if err != nil {
		return nil, err
	}
	// Resolve the owning controller (replicaset/deployment/...), if any.
	podController, err := getPodController(client, k8scommon.NewSameNamespaceQuery(namespace), pod)
	if err != nil {
		return nil, err
	}
	// Drain each channel exactly once: the list first, then its error.
	configMapList := <-channels.ConfigMapList.List
	err = <-channels.ConfigMapList.Error
	if err != nil {
		return nil, err
	}
	secretList := <-channels.SecretList.List
	err = <-channels.SecretList.Error
	if err != nil {
		return nil, err
	}
	eventList, err := GetEventsForPod(client, dataselect.DefaultDataSelect, pod.Namespace, pod.Name)
	if err != nil {
		return nil, err
	}
	persistentVolumeClaimList, err := pvc.GetPodPersistentVolumeClaims(client, namespace, name, dataselect.DefaultDataSelect)
	if err != nil {
		return nil, err
	}
	podDetail := toPodDetail(pod, configMapList, secretList, podController, eventList, persistentVolumeClaimList)
	return &podDetail, nil
}
// getPodController resolves the resource that owns the given pod via its
// controller owner reference. Returns a pointer to a zero-value
// ResourceOwner when the pod has no controller or the owner cannot be
// resolved; only a pod-list fetch failure is returned as an error.
func getPodController(client *kubernetes.Clientset, nsQuery *k8scommon.NamespaceQuery, pod *v1.Pod) (*controller.ResourceOwner, error) {
	channels := &k8scommon.ResourceChannels{
		PodList:   k8scommon.GetPodListChannel(client, nsQuery, 1),
		EventList: k8scommon.GetEventListChannel(client, nsQuery, 1),
	}
	pods := <-channels.PodList.List
	err := <-channels.PodList.Error
	if err != nil {
		return nil, err
	}
	events := <-channels.EventList.List
	// Events are best-effort: on error, fall back to an empty list rather
	// than failing the whole lookup.
	if err := <-channels.EventList.Error; err != nil {
		events = &v1.EventList{}
	}
	var ctrl controller.ResourceOwner
	ownerRef := metaV1.GetControllerOf(pod)
	if ownerRef != nil {
		var rc controller.ResourceController
		rc, err = controller.NewResourceController(*ownerRef, pod.Namespace, client)
		// Resolution errors are deliberately swallowed; ctrl stays zero-valued.
		if err == nil {
			ctrl = rc.Get(pods.Items, events.Items)
		}
	}
	return &ctrl, nil
}
// toPodDetail assembles the PodDetail presentation view from the pod itself
// plus the related resources fetched alongside it.
func toPodDetail(pod *v1.Pod, configMaps *v1.ConfigMapList, secrets *v1.SecretList, owner *controller.ResourceOwner,
	events *k8scommon.EventList, persistentVolumeClaimList *pvc.PersistentVolumeClaimList) PodDetail {
	detail := PodDetail{
		ObjectMeta:                k8s.NewObjectMeta(pod.ObjectMeta),
		TypeMeta:                  k8s.NewTypeMeta(k8s.ResourceKindPod),
		PodPhase:                  getPodStatus(*pod),
		PodIP:                     pod.Status.PodIP,
		RestartCount:              getRestartCount(*pod),
		QOSClass:                  string(pod.Status.QOSClass),
		NodeName:                  pod.Spec.NodeName,
		ServiceAccountName:        pod.Spec.ServiceAccountName,
		Controller:                owner,
		Containers:                extractContainerInfo(pod.Spec.Containers, pod, configMaps, secrets),
		InitContainers:            extractContainerInfo(pod.Spec.InitContainers, pod, configMaps, secrets),
		Conditions:                getPodConditions(*pod),
		ImagePullSecrets:          pod.Spec.ImagePullSecrets,
		EventList:                 *events,
		PersistentvolumeclaimList: *persistentVolumeClaimList,
		SecurityContext:           pod.Spec.SecurityContext,
	}
	return detail
}
// extractContainerInfo builds the presentation Container list for the given
// spec containers, resolving environment variables (including valueFrom and
// envFrom references against the supplied config maps and secrets) and
// pairing volume mounts with their volumes.
func extractContainerInfo(containerList []v1.Container, pod *v1.Pod, configMaps *v1.ConfigMapList, secrets *v1.SecretList) []Container {
	result := make([]Container, 0, len(containerList))
	for _, container := range containerList {
		envVars := make([]EnvVar, 0, len(container.Env))
		for _, src := range container.Env {
			ev := EnvVar{
				Name:      src.Name,
				Value:     src.Value,
				ValueFrom: src.ValueFrom,
			}
			if ev.ValueFrom != nil {
				// Resolve the referenced value into a concrete string.
				ev.Value = evalValueFrom(ev.ValueFrom, &container, pod, configMaps, secrets)
			}
			envVars = append(envVars, ev)
		}
		// Append variables expanded from envFrom references.
		envVars = append(envVars, evalEnvFrom(container, configMaps, secrets)...)
		result = append(result, Container{
			Name:            container.Name,
			Image:           container.Image,
			Ports:           container.Ports,
			Resources:       container.Resources,
			Env:             envVars,
			Commands:        container.Command,
			Args:            container.Args,
			VolumeMounts:    extractContainerMounts(container, pod),
			SecurityContext: container.SecurityContext,
			Status:          extractContainerStatus(pod, &container),
			LivenessProbe:   container.LivenessProbe,
			ReadinessProbe:  container.ReadinessProbe,
			StartupProbe:    container.StartupProbe,
			Lifecycle:       container.Lifecycle,
			ImagePullPolicy: container.ImagePullPolicy,
		})
	}
	return result
}
// evalEnvFrom expands a container's envFrom references into concrete EnvVar
// entries: every key of a referenced config map or secret becomes one
// variable, with the optional prefix prepended to its name. Secret values
// are surfaced base64-encoded rather than in clear text.
func evalEnvFrom(container v1.Container, configMaps *v1.ConfigMapList, secrets *v1.SecretList) []EnvVar {
	vars := make([]EnvVar, 0)
	for _, envFromVar := range container.EnvFrom {
		switch {
		case envFromVar.ConfigMapRef != nil:
			name := envFromVar.ConfigMapRef.LocalObjectReference.Name
			for _, configMap := range configMaps.Items {
				if configMap.ObjectMeta.Name == name {
					for key, value := range configMap.Data {
						// Synthesize a ValueFrom pointing back at the source key.
						valueFrom := &v1.EnvVarSource{
							ConfigMapKeyRef: &v1.ConfigMapKeySelector{
								LocalObjectReference: v1.LocalObjectReference{
									Name: name,
								},
								Key: key,
							},
						}
						variable := EnvVar{
							Name:      envFromVar.Prefix + key,
							Value:     value,
							ValueFrom: valueFrom,
						}
						vars = append(vars, variable)
					}
					// Stop at the first config map with a matching name.
					break
				}
			}
		case envFromVar.SecretRef != nil:
			name := envFromVar.SecretRef.LocalObjectReference.Name
			for _, secret := range secrets.Items {
				if secret.ObjectMeta.Name == name {
					for key, value := range secret.Data {
						valueFrom := &v1.EnvVarSource{
							SecretKeyRef: &v1.SecretKeySelector{
								LocalObjectReference: v1.LocalObjectReference{
									Name: name,
								},
								Key: key,
							},
						}
						variable := EnvVar{
							Name: envFromVar.Prefix + key,
							// Secret bytes are base64-encoded, not clear text.
							Value:     base64.StdEncoding.EncodeToString(value),
							ValueFrom: valueFrom,
						}
						vars = append(vars, variable)
					}
					// Stop at the first secret with a matching name.
					break
				}
			}
		}
	}
	return vars
}
// evalValueFrom evaluates environment value from given source. For more details check:
// https://github.com/kubernetes/kubernetes/blob/d82e51edc5f02bff39661203c9b503d054c3493b/pkg/kubectl/describe.go#L1056
// Unresolvable or erroring references yield "".
// NOTE(review): `global` does not appear in this file's visible import block —
// confirm it is provided in the full file.
func evalValueFrom(src *v1.EnvVarSource, container *v1.Container, pod *v1.Pod, configMaps *v1.ConfigMapList, secrets *v1.SecretList) string {
	switch {
	case src.ConfigMapKeyRef != nil:
		name := src.ConfigMapKeyRef.LocalObjectReference.Name
		for _, configMap := range configMaps.Items {
			if configMap.ObjectMeta.Name == name {
				return configMap.Data[src.ConfigMapKeyRef.Key]
			}
		}
	case src.SecretKeyRef != nil:
		name := src.SecretKeyRef.LocalObjectReference.Name
		for _, secret := range secrets.Items {
			if secret.ObjectMeta.Name == name {
				// Secret bytes are surfaced base64-encoded, not in clear text.
				return base64.StdEncoding.EncodeToString([]byte(
					secret.Data[src.SecretKeyRef.Key]))
			}
		}
	case src.ResourceFieldRef != nil:
		valueFrom, err := extractContainerResourceValue(src.ResourceFieldRef, container)
		if err != nil {
			valueFrom = ""
		}
		resource := src.ResourceFieldRef.Resource
		// A limit of 0 means "unbounded"; present it as node allocatable.
		if valueFrom == "0" && (resource == "limits.cpu" || resource == "limits.memory") {
			valueFrom = "node allocatable"
		}
		return valueFrom
	case src.FieldRef != nil:
		// Translate the (possibly versioned) field path to its internal form
		// before extracting it from the pod object.
		gv, err := schema.ParseGroupVersion(src.FieldRef.APIVersion)
		if err != nil {
			global.Log.Warn(err.Error())
			return ""
		}
		gvk := gv.WithKind("Pod")
		internalFieldPath, _, err := runtime.NewScheme().ConvertFieldLabel(gvk, src.FieldRef.FieldPath, "")
		if err != nil {
			global.Log.Warn(err.Error())
			return ""
		}
		valueFrom, err := ExtractFieldPathAsString(pod, internalFieldPath)
		if err != nil {
			global.Log.Warn(err.Error())
			return ""
		}
		return valueFrom
	}
	return ""
}
// extractContainerMounts pairs each of the container's volume mounts with
// the matching volume from the pod spec.
func extractContainerMounts(container v1.Container, pod *v1.Pod) []VolumeMount {
	mounts := make([]VolumeMount, 0, len(container.VolumeMounts))
	for _, vm := range container.VolumeMounts {
		mounts = append(mounts, VolumeMount{
			Name:      vm.Name,
			ReadOnly:  vm.ReadOnly,
			MountPath: vm.MountPath,
			SubPath:   vm.SubPath,
			Volume:    getVolume(pod.Spec.Volumes, vm.Name),
		})
	}
	return mounts
}
// extractContainerStatus returns a pointer to a copy of the status entry
// whose name matches the container, or nil when none exists.
func extractContainerStatus(pod *v1.Pod, container *v1.Container) *v1.ContainerStatus {
	for i := range pod.Status.ContainerStatuses {
		if pod.Status.ContainerStatuses[i].Name == container.Name {
			// Return a copy so callers cannot mutate the pod's status slice.
			status := pod.Status.ContainerStatuses[i]
			return &status
		}
	}
	return nil
}
// extractContainerResourceValue extracts the value of a resource limit or
// request from an already known container, divided by fs.Divisor (treated
// as 1 when unset/zero) and rounded up. CPU is computed in millicores,
// memory in bytes.
func extractContainerResourceValue(fs *v1.ResourceFieldSelector, container *v1.Container) (string,
	error) {
	divisor := res.Quantity{}
	// A zero-valued divisor means "use 1".
	if divisor.Cmp(fs.Divisor) == 0 {
		divisor = res.MustParse("1")
	} else {
		divisor = fs.Divisor
	}
	switch fs.Resource {
	case "limits.cpu":
		return strconv.FormatInt(int64(math.Ceil(float64(container.Resources.Limits.
			Cpu().MilliValue())/float64(divisor.MilliValue()))), 10), nil
	case "limits.memory":
		return strconv.FormatInt(int64(math.Ceil(float64(container.Resources.Limits.
			Memory().Value())/float64(divisor.Value()))), 10), nil
	case "requests.cpu":
		return strconv.FormatInt(int64(math.Ceil(float64(container.Resources.Requests.
			Cpu().MilliValue())/float64(divisor.MilliValue()))), 10), nil
	case "requests.memory":
		return strconv.FormatInt(int64(math.Ceil(float64(container.Resources.Requests.
			Memory().Value())/float64(divisor.Value()))), 10), nil
	}
	// Error strings are lowercase, no stray spacing (staticcheck ST1005);
	// the original read "Unsupported container resource : %v".
	return "", fmt.Errorf("unsupported container resource: %v", fs.Resource)
}
// getVolume returns the pod volume with the given name, or a zero-value
// v1.Volume when no volume matches. A linear scan is fine here: a pod's
// volume list is small.
func getVolume(volumes []v1.Volume, volumeName string) v1.Volume {
	for i := range volumes {
		if volumes[i].Name == volumeName {
			return volumes[i]
		}
	}
	return v1.Volume{}
}

View File

@@ -0,0 +1,154 @@
package pods
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"pandax/apps/devops/entity/k8s"
k8scommon "pandax/apps/devops/services/k8s/common"
"pandax/apps/devops/services/k8s/dataselect"
"pandax/apps/devops/services/k8s/event"
)
// PodList contains a list of Pods in the cluster.
type PodList struct {
	ListMeta k8s.ListMeta `json:"listMeta"`

	// Basic information about resources status on the list.
	Status k8scommon.ResourceStatus `json:"status"`

	// Unordered list of Pods.
	Pods []Pod `json:"pods"`
}

// PodStatus groups the display status, phase and per-container states of a pod.
type PodStatus struct {
	Status          string              `json:"status"`
	PodPhase        v1.PodPhase         `json:"podPhase"`
	ContainerStates []v1.ContainerState `json:"containerStates"`
}

// Pod is a presentation layer view of Kubernetes Pod resource. This means it is Pod plus additional augmented data
// we can get from other sources (like services that target it).
type Pod struct {
	ObjectMeta k8s.ObjectMeta `json:"objectMeta"`
	TypeMeta   k8s.TypeMeta   `json:"typeMeta"`

	// Status determined based on the same logic as kubectl.
	Status string `json:"status"`

	// RestartCount of containers restarts.
	RestartCount int32 `json:"restartCount"`

	// Pod warning events
	Warnings []k8scommon.Event `json:"warnings"`

	// NodeName of the Node this Pod runs on.
	NodeName string `json:"nodeName"`

	// ContainerImages holds a list of the Pod images.
	ContainerImages []string `json:"containerImages"`

	// Pod ip address
	PodIP string `json:"podIP"`
}

// EmptyPodList is a reusable, zero-item pod list (non-nil Pods slice so it
// serializes as [] rather than null).
var EmptyPodList = &PodList{
	Pods: make([]Pod, 0),
	ListMeta: k8s.ListMeta{
		TotalItems: 0,
	},
}
// GetPodsList returns the pods matched by nsQuery, filtered/sorted/paged
// according to dsQuery, with per-pod warnings and aggregate status counts.
// NOTE(review): `global` does not appear in this file's visible import block —
// confirm it is provided in the full file.
func GetPodsList(client *kubernetes.Clientset, nsQuery *k8scommon.NamespaceQuery, dsQuery *dataselect.DataSelectQuery) (*PodList, error) {
	global.Log.Info("Getting list of all pods in the cluster")
	// Kick off the pod and event list requests concurrently; they are
	// consumed by GetPodListFromChannels.
	channels := &k8scommon.ResourceChannels{
		PodList:   k8scommon.GetPodListChannelWithOptions(client, nsQuery, metav1.ListOptions{}, 1),
		EventList: k8scommon.GetEventListChannel(client, nsQuery, 1),
	}
	return GetPodListFromChannels(channels, dsQuery)
}
// GetPodListFromChannels returns a list of all Pods in the cluster, reading
// each required resource list exactly once from the prepared channels
// (list first, then its error).
func GetPodListFromChannels(channels *k8scommon.ResourceChannels, dsQuery *dataselect.DataSelectQuery) (*PodList, error) {
	pods := <-channels.PodList.List
	if err := <-channels.PodList.Error; err != nil {
		return nil, err
	}
	eventList := <-channels.EventList.List
	if err := <-channels.EventList.Error; err != nil {
		return nil, err
	}
	podList := ToPodList(pods.Items, eventList.Items, dsQuery)
	podList.Status = getStatus(pods, eventList.Items)
	return &podList, nil
}
// ToPodList applies dsQuery (filter/sort/paginate) to the pods and converts
// the surviving entries into presentation Pods, attaching each pod's
// warning events. ListMeta.TotalItems reflects the post-filter count.
func ToPodList(pods []v1.Pod, events []v1.Event, dsQuery *dataselect.DataSelectQuery) PodList {
	cells, filteredTotal := dataselect.GenericDataSelectWithFilter(toCells(pods), dsQuery)
	selected := fromCells(cells)

	result := PodList{
		Pods:     make([]Pod, 0, len(selected)),
		ListMeta: k8s.ListMeta{TotalItems: filteredTotal},
	}
	for i := range selected {
		warnings := event.GetPodsEventWarnings(events, []v1.Pod{selected[i]})
		result.Pods = append(result.Pods, ToPod(&selected[i], warnings))
	}
	return result
}
// ToPod converts an API pod (plus its precomputed warning events) into the
// presentation-layer Pod.
func ToPod(pod *v1.Pod, warnings []k8scommon.Event) Pod {
	return Pod{
		ObjectMeta:      k8s.NewObjectMeta(pod.ObjectMeta),
		TypeMeta:        k8s.NewTypeMeta(k8s.ResourceKindPod),
		Warnings:        warnings,
		Status:          getPodStatus(*pod),
		RestartCount:    getRestartCount(*pod),
		NodeName:        pod.Spec.NodeName,
		ContainerImages: k8scommon.GetContainerImages(&pod.Spec),
		PodIP:           pod.Status.PodIP,
	}
}
// DeleteCollectionPods deletes every pod named in podList, logging each
// deletion and stopping at the first failure.
func DeleteCollectionPods(client *kubernetes.Clientset, podList []k8s.RemovePodsData) error {
	global.Log.Info("批量删除容器组开始")
	for _, item := range podList {
		global.Log.Info(fmt.Sprintf("delete pods%v, ns: %v", item.PodName, item.Namespace))
		delErr := client.CoreV1().Pods(item.Namespace).Delete(
			context.TODO(),
			item.PodName,
			metav1.DeleteOptions{},
		)
		if delErr != nil {
			global.Log.Error(delErr.Error())
			return delErr
		}
	}
	global.Log.Info("删除容器组已完成")
	return nil
}
// DeletePod deletes the named pod in the given namespace.
func DeletePod(client *kubernetes.Clientset, namespace string, name string) error {
	global.Log.Info(fmt.Sprintf("请求删除单个pod%v, namespace: %v", name, namespace))
	opts := metav1.DeleteOptions{}
	return client.CoreV1().Pods(namespace).Delete(context.TODO(), name, opts)
}