【修改】k8s 配置

This commit is contained in:
PandaGoAdmin
2022-01-22 17:07:04 +08:00
parent c6ebe89865
commit 33cc74711d
439 changed files with 9936 additions and 21687 deletions

View File

@@ -0,0 +1,376 @@
package deployment
import (
"context"
"pandax/base/global"
"pandax/base/utils"
"fmt"
"go.uber.org/zap"
apps "k8s.io/api/apps/v1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
deploymentutil "k8s.io/kubectl/pkg/util/deployment"
"pandax/apps/devops/entity/k8s"
k8scommon "pandax/apps/devops/services/k8s/common"
"pandax/apps/devops/services/k8s/dataselect"
"pandax/apps/devops/services/k8s/event"
"time"
)
// DeploymentList contains a list of Deployments in the cluster.
type DeploymentList struct {
	// List-level metadata; TotalItems reflects the filtered count.
	ListMeta k8s.ListMeta `json:"listMeta"`

	// Basic information about resources status on the list.
	Status k8scommon.ResourceStatus `json:"status"`

	// Unordered list of Deployments.
	Deployments []Deployment `json:"deployments"`
}
// Deployment is a presentation layer view of Kubernetes Deployment resource. This means
// it is Deployment plus additional augmented data we can get from other sources
// (like services that target the same pods).
type Deployment struct {
	ObjectMeta k8s.ObjectMeta `json:"objectMeta"`
	TypeMeta   k8s.TypeMeta   `json:"typeMeta"`

	// Aggregate information about pods belonging to this Deployment.
	Pods k8scommon.PodInfo `json:"pods"`

	// Container images of the Deployment.
	ContainerImages []string `json:"containerImages"`

	// Init Container images of the Deployment.
	InitContainerImages []string `json:"initContainerImages"`

	// Deployment replicas ready
	DeploymentStatus DeploymentStatus `json:"deploymentStatus"`
}
// DeploymentStatus mirrors the replica counters of apps/v1
// DeploymentStatus in a JSON-friendly shape for the presentation layer.
type DeploymentStatus struct {
	// Total number of non-terminated pods targeted by this deployment (their labels match the selector).
	// +optional
	Replicas int32 `json:"replicas"`

	// Total number of non-terminated pods targeted by this deployment that have the desired template spec.
	// +optional
	UpdatedReplicas int32 `json:"updatedReplicas"`

	// Total number of ready pods targeted by this deployment.
	// +optional
	ReadyReplicas int32 `json:"readyReplicas"`

	// Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
	// +optional
	AvailableReplicas int32 `json:"availableReplicas"`

	// Total number of unavailable pods targeted by this deployment. This is the total number of
	// pods that are still required for the deployment to have 100% available capacity. They may
	// either be pods that are running but not yet available or pods that still have not been created.
	// +optional
	UnavailableReplicas int32 `json:"unavailableReplicas"`
}
// GetDeploymentList 返回集群中所有deployment的列表 (returns the list of all
// deployments in the namespaces selected by nsQuery, filtered/sorted by dsQuery).
func GetDeploymentList(client *kubernetes.Clientset, nsQuery *k8scommon.NamespaceQuery, dsQuery *dataselect.DataSelectQuery) (*DeploymentList, error) {
	global.Log.Info("Getting list of all deployments in the cluster")

	// Kick off one asynchronous list request per resource kind; each
	// channel is drained exactly once downstream.
	return GetDeploymentListFromChannels(&k8scommon.ResourceChannels{
		DeploymentList: k8scommon.GetDeploymentListChannel(client, nsQuery, 1),
		PodList:        k8scommon.GetPodListChannel(client, nsQuery, 1),
		EventList:      k8scommon.GetEventListChannel(client, nsQuery, 1),
		ReplicaSetList: k8scommon.GetReplicaSetListChannel(client, nsQuery, 1),
	}, dsQuery)
}
// GetDeploymentListFromChannels returns a list of all Deployments in the cluster
// reading required resource list once from the channels.
func GetDeploymentListFromChannels(channels *k8scommon.ResourceChannels, dsQuery *dataselect.DataSelectQuery) (*DeploymentList, error) {
	// Drain each channel pair (value first, then its error) in the same
	// order the producers were started.
	deployments := <-channels.DeploymentList.List
	if err := <-channels.DeploymentList.Error; err != nil {
		return nil, err
	}

	pods := <-channels.PodList.List
	if err := <-channels.PodList.Error; err != nil {
		return nil, err
	}

	events := <-channels.EventList.List
	if err := <-channels.EventList.Error; err != nil {
		return nil, err
	}

	rs := <-channels.ReplicaSetList.List
	if err := <-channels.ReplicaSetList.Error; err != nil {
		return nil, err
	}

	result := toDeploymentList(deployments.Items, pods.Items, events.Items, rs.Items, dsQuery)
	result.Status = getStatus(deployments, rs.Items, pods.Items, events.Items)
	return result, nil
}
// toDeploymentList applies dsQuery (filter/sort/paginate) to the raw
// deployments and converts the surviving items to presentation objects.
func toDeploymentList(deployments []apps.Deployment, pods []v1.Pod, events []v1.Event, rs []apps.ReplicaSet, dsQuery *dataselect.DataSelectQuery) *DeploymentList {
	cells, filteredTotal := dataselect.GenericDataSelectWithFilter(toCells(deployments), dsQuery)
	filtered := fromCells(cells)

	result := &DeploymentList{
		Deployments: make([]Deployment, 0),
		// TotalItems reports the post-filter count, not the raw count.
		ListMeta: k8s.ListMeta{TotalItems: filteredTotal},
	}
	for i := range filtered {
		result.Deployments = append(result.Deployments, toDeployment(&filtered[i], rs, pods, events))
	}
	return result
}
// toDeployment builds the presentation-layer Deployment for a single
// apps/v1 Deployment, aggregating its pod info and event warnings.
func toDeployment(deployment *apps.Deployment, rs []apps.ReplicaSet, pods []v1.Pod, events []v1.Event) Deployment {
	// Pods owned (via the deployment's ReplicaSets) by this deployment.
	matchingPods := k8scommon.FilterDeploymentPodsByOwnerReference(*deployment, rs, pods)
	podInfo := k8scommon.GetPodInfo(deployment.Status.Replicas, deployment.Spec.Replicas, matchingPods)
	podInfo.Warnings = event.GetPodsEventWarnings(events, matchingPods)

	podSpec := &deployment.Spec.Template.Spec
	return Deployment{
		ObjectMeta:          k8s.NewObjectMeta(deployment.ObjectMeta),
		TypeMeta:            k8s.NewTypeMeta(k8s.ResourceKindDeployment),
		Pods:                podInfo,
		ContainerImages:     k8scommon.GetContainerImages(podSpec),
		InitContainerImages: k8scommon.GetInitContainerImages(podSpec),
		DeploymentStatus:    getDeploymentStatus(deployment),
	}
}
// getDeploymentStatus copies the replica counters out of the deployment's
// status into the presentation-layer DeploymentStatus.
func getDeploymentStatus(deployment *apps.Deployment) DeploymentStatus {
	status := deployment.Status
	return DeploymentStatus{
		Replicas:            status.Replicas,
		UpdatedReplicas:     status.UpdatedReplicas,
		ReadyReplicas:       status.ReadyReplicas,
		AvailableReplicas:   status.AvailableReplicas,
		UnavailableReplicas: status.UnavailableReplicas,
	}
}
// DeleteCollectionDeployment deletes every deployment named in
// deploymentList, stopping at (and returning) the first failure.
func DeleteCollectionDeployment(client *kubernetes.Clientset, deploymentList []k8s.RemoveDeploymentData) (err error) {
	global.Log.Info("批量删除deployment开始")
	for _, item := range deploymentList {
		global.Log.Info(fmt.Sprintf("delete deployment%v, ns: %v", item.DeploymentName, item.Namespace))
		if delErr := client.AppsV1().Deployments(item.Namespace).Delete(
			context.TODO(),
			item.DeploymentName,
			metav1.DeleteOptions{},
		); delErr != nil {
			global.Log.Error(delErr.Error())
			return delErr
		}
	}
	global.Log.Info("删除deployment已完成")
	return nil
}
// DeleteDeployment deletes a single deployment in the given namespace and
// propagates any API error to the caller.
func DeleteDeployment(client *kubernetes.Clientset, ns string, deploymentName string) (err error) {
	global.Log.Info(fmt.Sprintf("请求删除单个deployment%v, namespace: %v", deploymentName, ns))
	err = client.AppsV1().Deployments(ns).Delete(context.TODO(), deploymentName, metav1.DeleteOptions{})
	return err
}
// ScaleDeployment sets the replica count of the named deployment to
// scaleNumber via the scale subresource.
func ScaleDeployment(client *kubernetes.Clientset, ns string, deploymentName string, scaleNumber int32) (err error) {
	global.Log.Info(fmt.Sprintf("start scale of %v deployment in %v namespace", deploymentName, ns))
	scaleData, err := client.AppsV1().Deployments(ns).GetScale(
		context.TODO(),
		deploymentName,
		metav1.GetOptions{},
	)
	if err != nil {
		// BUG FIX: this error used to be silently ignored, and scaleData
		// (nil on failure) was dereferenced below, causing a panic when the
		// deployment did not exist or the API call failed.
		global.Log.Error("获取Scale信息出现异常", zap.Any("err: ", err))
		return err
	}
	global.Log.Info(fmt.Sprintf("The deployment has changed from %v to %v", scaleData.Spec.Replicas, scaleNumber))

	// Reuse the metadata from the current scale object so the update is
	// applied against the resourceVersion we just read.
	scale := autoscalingv1.Scale{
		TypeMeta:   scaleData.TypeMeta,
		ObjectMeta: scaleData.ObjectMeta,
		Spec:       autoscalingv1.ScaleSpec{Replicas: scaleNumber},
		Status:     scaleData.Status,
	}
	_, err = client.AppsV1().Deployments(ns).UpdateScale(
		context.TODO(),
		deploymentName,
		&scale,
		metav1.UpdateOptions{},
	)
	if err != nil {
		global.Log.Error("扩缩容出现异常", zap.Any("err: ", err))
		return err
	}
	return nil
}
// RestartDeployment triggers a rolling restart of the deployment by
// patching the pod template's restartedAt annotation, the same mechanism
// `kubectl rollout restart` uses.
func RestartDeployment(client *kubernetes.Clientset, deploymentName string, namespace string) (err error) {
	global.Log.Info(fmt.Sprintf("下发应用重启指令, 名称空间:%v, 无状态应用:%v", namespace, deploymentName))

	// Changing any pod-template annotation makes the controller roll out
	// new pods; the value only needs to be unique per restart.
	patch := fmt.Sprintf(`{"spec":{"template":{"metadata":{"annotations":{"kubectl.kubernetes.io/restartedAt":"%s"}}}}}`, time.Now().String())
	if _, err = client.AppsV1().Deployments(namespace).Patch(
		context.Background(),
		deploymentName,
		types.StrategicMergePatchType,
		[]byte(patch),
		metav1.PatchOptions{FieldManager: "kubectl-rollout"},
	); err != nil {
		global.Log.Error("应用重启失败", zap.Any("err: ", err))
		return err
	}
	return nil
}
// RollbackDeployment rolls the deployment back to revision reVersion.
// reVersion == 0 means "roll back to the previous revision".
//
// The /rollback endpoint was removed from apps/v1 Deployments
// (https://github.com/kubernetes/kubernetes/pull/59970), so the rollback is
// performed client-side, mirroring the old controller logic:
//  1. List all ReplicaSets the Deployment owns.
//  2. Find the ReplicaSet of the requested revision.
//  3. Copy that ReplicaSet's pod template back into the Deployment.
//
// Historical reference (removed API):
// client.ExtensionsV1beta1().Deployments(namespace).Rollback(v1beta1.DeploymentRollback{})
// https://github.com/kubernetes/kubernetes/blob/ecc5eb67d965295db95ba2df5f3d3ff43a258a05/pkg/controller/deployment/rollback.go#L30-L69
func RollbackDeployment(client *kubernetes.Clientset, deploymentName string, namespace string, reVersion int64) (err error) {
	global.Log.Info(fmt.Sprintf("应用:%v, 所属空间:%v, 版本回滚到%v", deploymentName, namespace, reVersion))
	if reVersion < 0 {
		return revisionNotFoundErr(reVersion)
	}
	deployment, err := client.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("failed to retrieve Deployment %s: %v", deploymentName, err)
	}
	if deployment.Spec.Paused {
		return fmt.Errorf("skipped rollback (deployment \"%s\" is paused)", deployment.Name)
	}

	// If rollback revision is 0, rollback to the last revision.
	if reVersion == 0 {
		global.Log.Warn("传递回滚版本号是0, 默认回退上一次版本!")
		rsForRevision, err := deploymentRevision(deployment, client, reVersion)
		if err != nil {
			return err
		}
		// Only roll back when the candidate ReplicaSet carries a revision
		// annotation. (BUG FIX: `for k, _ := range` -> `for k := range`.)
		for k := range rsForRevision.Annotations {
			if k == "deployment.kubernetes.io/revision" {
				deployment.Spec.Template = rsForRevision.Spec.Template
				if _, rollbackErr := client.AppsV1().Deployments(namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{}); rollbackErr != nil {
					// BUG FIX: log the actual update error; previously the
					// stale (nil) `err` from the earlier Get was logged.
					global.Log.Error("版本回退失败", zap.Any("err: ", rollbackErr))
					return rollbackErr
				}
				global.Log.Info("The rollback task was executed successfully")
				return nil
			}
		}
	}

	// Explicit revision requested: scan the deployment's ReplicaSets for a
	// matching "deployment.kubernetes.io/revision" annotation.
	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
	if err != nil {
		return err
	}
	options := metav1.ListOptions{LabelSelector: selector.String()}
	replicaSetList, err := client.AppsV1().ReplicaSets(namespace).List(context.TODO(), options)
	if err != nil {
		return err
	}
	// With a single ReplicaSet there is no older revision to return to.
	if len(replicaSetList.Items) <= 1 {
		return revisionNotFoundErr(reVersion)
	}
	for _, v := range replicaSetList.Items {
		currentVersion := utils.ParseStringToInt64(v.Annotations["deployment.kubernetes.io/revision"])
		if currentVersion == reVersion {
			deployment.Spec.Template = v.Spec.Template
			if _, rollbackErr := client.AppsV1().Deployments(namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{}); rollbackErr != nil {
				// BUG FIX: as above, log rollbackErr instead of the nil err.
				global.Log.Error("版本回退失败", zap.Any("err: ", rollbackErr))
				return rollbackErr
			}
			global.Log.Info("The rollback task was executed successfully")
			return nil
		}
	}
	// NOTE(review): reaching here means no ReplicaSet matched reVersion and
	// nil is returned — the caller sees success although nothing changed.
	// Kept for backward compatibility; consider revisionNotFoundErr here.
	return nil
}
// deploymentRevision finds the ReplicaSet of the deployment that matches
// toRevision. toRevision > 0 selects that exact revision; toRevision == 0
// selects the previous (second-newest) revision. The selection logic
// mirrors kubectl's `rollout undo` implementation.
func deploymentRevision(deployment *apps.Deployment, c kubernetes.Interface, toRevision int64) (revision *apps.ReplicaSet, err error) {
	_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.AppsV1())
	if err != nil {
		return nil, fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", deployment.Name, err)
	}
	// Consider the old ReplicaSets plus the current one (when present).
	allRSs := allOldRSs
	if newRS != nil {
		allRSs = append(allRSs, newRS)
	}

	var (
		latestReplicaSet   *apps.ReplicaSet // highest revision seen so far
		latestRevision     = int64(-1)
		previousReplicaSet *apps.ReplicaSet // second-highest revision seen so far
		previousRevision   = int64(-1)
	)
	for _, rs := range allRSs {
		// ReplicaSets without a parseable revision annotation are skipped.
		if v, err := deploymentutil.Revision(rs); err == nil {
			if toRevision == 0 {
				if latestRevision < v {
					// newest one we've seen so far
					previousRevision = latestRevision
					previousReplicaSet = latestReplicaSet
					latestRevision = v
					latestReplicaSet = rs
				} else if previousRevision < v {
					// second newest one we've seen so far
					previousRevision = v
					previousReplicaSet = rs
				}
			} else if toRevision == v {
				// Exact revision requested and found.
				return rs, nil
			}
		}
	}

	if toRevision > 0 {
		// Requested revision was never seen in the loop above.
		return nil, revisionNotFoundErr(toRevision)
	}

	if previousReplicaSet == nil {
		return nil, fmt.Errorf("no rollout history found for deployment %q", deployment.Name)
	}
	return previousReplicaSet, nil
}
// revisionNotFoundErr logs a warning and returns the standard "revision not
// found in history" error for revision r.
func revisionNotFoundErr(r int64) error {
	global.Log.Warn("没有找到可回滚的版本!")
	err := fmt.Errorf("unable to find specified revision %v in history", r)
	return err
}

View File

@@ -0,0 +1,85 @@
package deployment
import (
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8scommon "pandax/apps/devops/services/k8s/common"
"pandax/apps/devops/services/k8s/dataselect"
"pandax/apps/devops/services/k8s/event"
)
// The code below allows to perform complex data section on Deployment

// DeploymentCell wraps apps.Deployment so it can take part in generic data
// selection (sorting / filtering / pagination).
type DeploymentCell apps.Deployment

// GetProperty is used to get property of the deployment
func (cell DeploymentCell) GetProperty(name dataselect.PropertyName) dataselect.ComparableValue {
	switch name {
	case dataselect.NameProperty:
		return dataselect.StdComparableString(cell.ObjectMeta.Name)
	case dataselect.CreationTimestampProperty:
		return dataselect.StdComparableTime(cell.ObjectMeta.CreationTimestamp.Time)
	case dataselect.NamespaceProperty:
		return dataselect.StdComparableString(cell.ObjectMeta.Namespace)
	default:
		// if name is not supported then just return a constant dummy value, sort will have no effect.
		return nil
	}
}
// toCells converts raw deployments into generic DataCells for selection.
func toCells(std []apps.Deployment) []dataselect.DataCell {
	cells := make([]dataselect.DataCell, len(std))
	for i, d := range std {
		cells[i] = DeploymentCell(d)
	}
	return cells
}
// fromCells converts selected DataCells back into apps.Deployment values.
func fromCells(cells []dataselect.DataCell) []apps.Deployment {
	std := make([]apps.Deployment, len(cells))
	for i, c := range cells {
		std[i] = apps.Deployment(c.(DeploymentCell))
	}
	return std
}
// getStatus classifies each deployment in the list as Failed (any pod event
// warnings), Pending (pods still pending) or Running, and returns the
// aggregated counters.
func getStatus(list *apps.DeploymentList, rs []apps.ReplicaSet, pods []v1.Pod, events []v1.Event) k8scommon.ResourceStatus {
	var info k8scommon.ResourceStatus
	if list == nil {
		return info
	}

	for i := range list.Items {
		deployment := list.Items[i]
		matchingPods := k8scommon.FilterDeploymentPodsByOwnerReference(deployment, rs, pods)
		podInfo := k8scommon.GetPodInfo(deployment.Status.Replicas, deployment.Spec.Replicas, matchingPods)
		switch {
		case len(event.GetPodsEventWarnings(events, matchingPods)) > 0:
			info.Failed++
		case podInfo.Pending > 0:
			info.Pending++
		default:
			info.Running++
		}
	}
	return info
}
// getConditions maps apps/v1 DeploymentConditions onto the generic
// k8scommon.Condition presentation type.
func getConditions(deploymentConditions []apps.DeploymentCondition) []k8scommon.Condition {
	conditions := make([]k8scommon.Condition, 0, len(deploymentConditions))
	for i := range deploymentConditions {
		c := deploymentConditions[i]
		conditions = append(conditions, k8scommon.Condition{
			Type:               string(c.Type),
			Status:             metaV1.ConditionStatus(c.Status),
			Reason:             c.Reason,
			Message:            c.Message,
			LastTransitionTime: c.LastTransitionTime,
			// Deployment conditions expose LastUpdateTime; it is mapped onto
			// the generic condition's LastProbeTime slot.
			LastProbeTime: c.LastUpdateTime,
		})
	}
	return conditions
}

View File

@@ -0,0 +1,205 @@
package deployment
import (
"context"
"fmt"
"pandax/base/global"
"pandax/base/utils"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
k8scommon "pandax/apps/devops/services/k8s/common"
"pandax/apps/devops/services/k8s/event"
"pandax/apps/devops/services/k8s/service"
"sort"
)
// RollingUpdateStrategy is behavior of a rolling update. See RollingUpdateDeployment K8s object.
type RollingUpdateStrategy struct {
	// Maximum number of pods that can be created above the desired count.
	MaxSurge *intstr.IntOrString `json:"maxSurge"`
	// Maximum number of pods that can be unavailable during the update.
	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable"`
}
// StatusInfo is the status information of the deployment
type StatusInfo struct {
	// Total number of desired replicas on the deployment
	Replicas int32 `json:"replicas"`

	// Number of non-terminated pods that have the desired template spec
	Updated int32 `json:"updated"`

	// Number of available pods (ready for at least minReadySeconds)
	// targeted by this deployment
	Available int32 `json:"available"`

	// Total number of unavailable pods targeted by this deployment.
	Unavailable int32 `json:"unavailable"`
}
// DeploymentDetail is a presentation layer view of Kubernetes Deployment resource.
type DeploymentDetail struct {
	// Extends list item structure.
	Deployment `json:",inline"`

	// Label selector of the service.
	Selector map[string]string `json:"selector"`

	// Status information on the deployment
	StatusInfo `json:"statusInfo"`

	// Conditions describe the state of a deployment at a certain point.
	Conditions []k8scommon.Condition `json:"conditions"`

	// The deployment strategy to use to replace existing pods with new ones.
	// Valid options: Recreate, RollingUpdate
	Strategy apps.DeploymentStrategyType `json:"strategy"`

	// Min ready seconds
	MinReadySeconds int32 `json:"minReadySeconds"`

	// Rolling update strategy containing maxSurge and maxUnavailable
	RollingUpdateStrategy *RollingUpdateStrategy `json:"rollingUpdateStrategy,omitempty"`

	// Optional field that specifies the number of old Replica Sets to retain to allow rollback.
	RevisionHistoryLimit *int32 `json:"revisionHistoryLimit"`

	// Events Info
	Events []v1.Event `json:"events"`

	// Deployment history image version
	HistoryVersion []HistoryVersion `json:"historyVersion"`

	// Pods selected by this deployment's label selector.
	PodList *PodList `json:"podList"`

	// Services that appear to target this deployment.
	SvcList *service.ServiceList `json:"svcList"`
}
// GetDeploymentDetail returns model object of deployment and error, if any.
// It fetches the deployment, then lists its ReplicaSets, Pods and Events
// (scoped by the deployment's label selector) concurrently via channels.
func GetDeploymentDetail(client *kubernetes.Clientset, namespace string, deploymentName string) (*DeploymentDetail, error) {
	global.Log.Info(fmt.Sprintf("Getting details of %s deployment in %s namespace", deploymentName, namespace))

	deployment, err := client.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metaV1.GetOptions{})
	if err != nil {
		return nil, err
	}

	// All related resources are listed using the deployment's own selector.
	selector, err := metaV1.LabelSelectorAsSelector(deployment.Spec.Selector)
	if err != nil {
		return nil, err
	}
	options := metaV1.ListOptions{LabelSelector: selector.String()}

	// Start the three list requests in parallel; each channel pair is
	// drained exactly once below (value, then error).
	channels := &k8scommon.ResourceChannels{
		ReplicaSetList: k8scommon.GetReplicaSetListChannelWithOptions(client,
			k8scommon.NewSameNamespaceQuery(namespace), options, 1),
		PodList: k8scommon.GetPodListChannelWithOptions(client,
			k8scommon.NewSameNamespaceQuery(namespace), options, 1),
		EventList: k8scommon.GetEventListChannelWithOptions(client,
			k8scommon.NewSameNamespaceQuery(namespace), options, 1),
	}

	rawRs := <-channels.ReplicaSetList.List
	err = <-channels.ReplicaSetList.Error
	if err != nil {
		return nil, err
	}
	rawPods := <-channels.PodList.List
	err = <-channels.PodList.Error
	if err != nil {
		return nil, err
	}
	rawEvents := <-channels.EventList.List
	err = <-channels.EventList.Error
	if err != nil {
		return nil, err
	}

	// Extra Info
	var rollingUpdateStrategy *RollingUpdateStrategy
	if deployment.Spec.Strategy.RollingUpdate != nil {
		rollingUpdateStrategy = &RollingUpdateStrategy{
			MaxSurge:       deployment.Spec.Strategy.RollingUpdate.MaxSurge,
			MaxUnavailable: deployment.Spec.Strategy.RollingUpdate.MaxUnavailable,
		}
	}

	// Best-effort lookups: errors are deliberately discarded so a failure
	// here does not fail the whole detail view.
	events, _ := event.GetEvents(client, namespace, fmt.Sprintf("involvedObject.name=%v", deploymentName))
	serviceList, _ := service.GetToService(client, namespace, deploymentName)

	return &DeploymentDetail{
		Deployment:            toDeployment(deployment, rawRs.Items, rawPods.Items, rawEvents.Items),
		Selector:              deployment.Spec.Selector.MatchLabels,
		StatusInfo:            GetStatusInfo(&deployment.Status),
		Conditions:            getConditions(deployment.Status.Conditions),
		Strategy:              deployment.Spec.Strategy.Type,
		MinReadySeconds:       deployment.Spec.MinReadySeconds,
		RollingUpdateStrategy: rollingUpdateStrategy,
		RevisionHistoryLimit:  deployment.Spec.RevisionHistoryLimit,
		Events:                events,
		PodList:               getDeploymentToPod(client, deployment),
		SvcList:               serviceList,
		HistoryVersion:        getDeploymentHistory(namespace, deploymentName, rawRs.Items),
	}, nil
}
// GetStatusInfo is used to get the status information from the *apps.DeploymentStatus
func GetStatusInfo(deploymentStatus *apps.DeploymentStatus) StatusInfo {
	s := deploymentStatus
	return StatusInfo{
		Replicas:    s.Replicas,
		Updated:     s.UpdatedReplicas,
		Available:   s.AvailableReplicas,
		Unavailable: s.UnavailableReplicas,
	}
}
// HistoryVersion is one entry of a deployment's rollout history: the image
// and revision recorded on one of its ReplicaSets.
type HistoryVersion struct {
	CreateTime metaV1.Time `json:"create_time"`
	// Image of the first container in the ReplicaSet's pod template.
	Image string `json:"image"`
	// Revision parsed from "deployment.kubernetes.io/revision".
	Version   int64  `json:"version"`
	Namespace string `json:"namespace"`
	// Name of the owning deployment.
	Name string `json:"name"`
}
// getDeploymentHistory builds the rollout history of the deployment from
// its ReplicaSets, sorted by revision number descending (newest first).
func getDeploymentHistory(namespace string, deploymentName string, rs []apps.ReplicaSet) []HistoryVersion {
	var historyVersion []HistoryVersion
	for _, v := range rs {
		// BUG FIX: guard against ReplicaSets without owner references or
		// without containers; v.OwnerReferences[0] / Containers[0] used to
		// be indexed unconditionally and could panic.
		if len(v.OwnerReferences) == 0 || len(v.Spec.Template.Spec.Containers) == 0 {
			continue
		}
		if namespace == v.Namespace && deploymentName == v.OwnerReferences[0].Name {
			history := HistoryVersion{
				CreateTime: v.CreationTimestamp,
				Image:      v.Spec.Template.Spec.Containers[0].Image,
				Version:    utils.ParseStringToInt64(v.Annotations["deployment.kubernetes.io/revision"]),
				Namespace:  v.Namespace,
				Name:       v.OwnerReferences[0].Name,
			}
			historyVersion = append(historyVersion, history)
		}
	}
	// Sort by revision (descending); sorting by creation date was considered
	// but revision order is authoritative for rollout history.
	sort.Sort(historiesByRevision(historyVersion))
	return historyVersion
}
// historiesByRevision implements sort.Interface, ordering HistoryVersion
// entries by revision number in descending order (newest first).
type historiesByRevision []HistoryVersion

func (h historiesByRevision) Len() int {
	return len(h)
}
func (h historiesByRevision) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
}

// Less places higher versions first (note the reversed comparison).
func (h historiesByRevision) Less(i, j int) bool {
	return h[j].Version < h[i].Version
}

View File

@@ -0,0 +1,61 @@
package deployment
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"log"
"testing"
)
// TestGetDeploymentToPod is an integration-style test: it builds a client
// from the local kubeconfig (TLS verification disabled) and lists the pods
// selected by the "nginx" deployment in the "default" namespace.
// NOTE(review): requires a reachable cluster; log.Fatalf aborts the whole
// test binary on failure.
func TestGetDeploymentToPod(t *testing.T) {
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	overrides := &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{InsecureSkipTLSVerify: true}}
	config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).ClientConfig()
	if err != nil {
		log.Fatalf("Couldn't get Kubernetes default config: %s", err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatalln(err)
	}
	// Fixed fixtures for the manual test run.
	namespace := "default"
	name := "nginx"
	selector := getDeployment(client, namespace, name)
	pod, err := getPod(client, namespace, selector)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Printf("podList: %v\n", pod)
}
// getDeployment fetches the named deployment and returns its label selector.
// Test helper: failures abort the test binary.
func getDeployment(client *kubernetes.Clientset, namespace, name string) (selector labels.Selector) {
	fmt.Println("开始获取deployment")
	deployment, err := client.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		// BUG FIX: the error used to be only printed, after which the nil
		// deployment was dereferenced below, panicking. Fail fast instead,
		// consistent with the other helpers in this test file.
		log.Fatalln(err)
	}
	selector, err = metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
	if err != nil {
		// BUG FIX: this error was previously discarded with `_`.
		log.Fatalln(err)
	}
	return selector
}
// getPod lists the pods in namespace matching the given label selector.
func getPod(client *kubernetes.Clientset, namespace string, selector labels.Selector) (*v1.PodList, error) {
	fmt.Println("根据deployment过滤pod")
	opts := metav1.ListOptions{LabelSelector: selector.String()}
	podList, err := client.CoreV1().Pods(namespace).List(context.TODO(), opts)
	if err != nil {
		return nil, err
	}
	return podList, nil
}

View File

@@ -0,0 +1,50 @@
package deployment
import (
"context"
"pandax/base/global"
"go.uber.org/zap"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"pandax/apps/devops/entity/k8s"
k8scommon "pandax/apps/devops/services/k8s/common"
"pandax/apps/devops/services/k8s/event"
"pandax/apps/devops/services/k8s/pods"
)
// PodList is the presentation-layer list of pods owned by a deployment.
type PodList struct {
	// List-level metadata; TotalItems is the number of pods matched.
	ListMeta k8s.ListMeta `json:"listMeta"`

	// Basic information about resources status on the list.
	Status k8scommon.ResourceStatus `json:"status"`

	// Unordered list of Pods.
	Pods []pods.Pod `json:"pods"`
}
// getDeploymentToPod lists the pods matched by the deployment's label
// selector and converts them to the presentation-layer PodList.
// Returns nil when the selector is invalid or the pod list request fails.
func getDeploymentToPod(client *kubernetes.Clientset, deployment *apps.Deployment) (po *PodList) {
	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
	if err != nil {
		return nil
	}
	options := metav1.ListOptions{LabelSelector: selector.String()}
	podData, err := client.CoreV1().Pods(deployment.Namespace).List(context.TODO(), options)
	if err != nil {
		global.Log.Error("Get a pod exception from the deployment", zap.Any("err", err))
		// BUG FIX: execution previously continued past this error and
		// dereferenced podData (nil on failure), panicking. Return nil like
		// the selector-error path above.
		return nil
	}

	podList := PodList{
		Pods: make([]pods.Pod, 0),
	}
	podList.ListMeta = k8s.ListMeta{TotalItems: len(podData.Items)}
	for i := range podData.Items {
		pod := podData.Items[i]
		warnings := event.GetPodsEventWarnings(nil, []v1.Pod{pod})
		podList.Pods = append(podList.Pods, pods.ToPod(&pod, warnings))
	}
	return &podList
}

View File

@@ -0,0 +1,57 @@
package deployment
import (
"context"
"encoding/json"
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"log"
"os"
"strings"
"testing"
)
// TestGetDeploymentToSVC is an integration-style test: it builds a client
// from the local kubeconfig (TLS verification disabled) and looks up a
// service whose name contains "service" in the "develop" namespace.
// NOTE(review): requires a reachable cluster; log.Fatal aborts the whole
// test binary on failure.
func TestGetDeploymentToSVC(t *testing.T) {
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	overrides := &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{InsecureSkipTLSVerify: true}}
	config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).ClientConfig()
	if err != nil {
		log.Fatalf("Couldn't get Kubernetes default config: %s", err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatalln(err)
	}
	// Fixed fixtures for the manual test run.
	namespace := "develop"
	name := "service"
	//selector := getDeployment(client, namespace, name)
	svcData, err := getSvc(client, namespace, name)
	if err != nil {
		log.Fatalln(err)
	}
	svcJSON, _ := json.Marshal(svcData)
	fmt.Printf("svcList: %s\n", svcJSON)
}
// getSvc returns the first service in namespace whose name contains name,
// or (nil, nil) when none matches.
func getSvc(client *kubernetes.Clientset, namespace string, name string) (svc *v1.Service, err error) {
	svcList, err := client.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		// BUG FIX: the list used to be printed before this check, so on
		// failure a nil svcList was formatted first; check the error before
		// touching the result.
		fmt.Println(err)
		return nil, err
	}
	fmt.Printf("开始获取svc: %v\n", svcList)
	for i := range svcList.Items {
		// Copy the element before taking its address; returning &svc of the
		// range variable aliases the shared iteration slot (pre-Go-1.22
		// hazard, gosec G601).
		item := svcList.Items[i]
		if strings.Contains(item.Name, name) {
			fmt.Fprintf(os.Stdout, "service name: %v\n", item.Name)
			return &item, nil
		}
	}
	return svc, nil
}