/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// ### ATTENTION ###
//
// This code implements both ReplicaSet and ReplicationController.
//
// For RC, the objects are converted on the way in and out (see ../replication/),
// as if ReplicationController were just an older API version of ReplicaSet.
// However, RC and RS still have separate storage and separate instantiations
// of the ReplicaSetController object.
//
// Use rsc.Kind in log messages rather than hard-coding "ReplicaSet".

package replicaset

import (
	"context"
	"fmt"
	"reflect"
	"sort"
	"strings"
	"sync"
	"time"

	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	appsinformers "k8s.io/client-go/informers/apps/v1"
	coreinformers "k8s.io/client-go/informers/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	appslisters "k8s.io/client-go/listers/apps/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/component-base/metrics/legacyregistry"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/replicaset/metrics"
)

const (
	// Realistic value of the burstReplica field for the replica set manager, based on
	// the performance requirements for Kubernetes 1.0.
	BurstReplicas = 500

	// The number of times we retry updating a ReplicaSet's status.
	statusUpdateRetries = 1

	// controllerUIDIndex is the name of the ReplicaSet store's index function,
	// which indexes ReplicaSets by the UID of their controller.
	controllerUIDIndex = "controllerUID"
)

// This block defines three constants:
// 1. BurstReplicas: the realistic value of the burstReplica field for the replica set manager, based on the performance requirements for Kubernetes 1.0.
// 2. statusUpdateRetries: the number of times a ReplicaSet status update is retried.
// 3. controllerUIDIndex: the name of the ReplicaSet store's index function, which indexes ReplicaSets by the UID of their controller.

// ReplicaSetController is responsible for synchronizing ReplicaSet objects stored
// in the system with actual running pods.
type ReplicaSetController struct {
	// GroupVersionKind indicates the controller type.
	// Different instances of this struct may handle different GVKs.
	// For example, this struct can be used (with adapters) to handle ReplicationController.
	schema.GroupVersionKind

	kubeClient clientset.Interface
	podControl controller.PodControlInterface

	eventBroadcaster record.EventBroadcaster

	// A ReplicaSet is temporarily suspended after creating/deleting this many replicas.
	// It resumes normal action after observing the watch events for them.
	burstReplicas int
	// To allow injection of syncReplicaSet for testing.
	syncHandler func(ctx context.Context, rsKey string) error

	// A TTLCache of pod creates/deletes each rc expects to see.
	expectations *controller.UIDTrackingControllerExpectations

	// A store of ReplicaSets, populated by the shared informer passed to NewReplicaSetController
	rsLister appslisters.ReplicaSetLister
	// rsListerSynced returns true if the ReplicaSet store has been synced at least once.
	// Added as a member to the struct to allow injection for testing.
	rsListerSynced cache.InformerSynced
	rsIndexer      cache.Indexer

	// A store of pods, populated by the shared informer passed to NewReplicaSetController
	podLister corelisters.PodLister
	// podListerSynced returns true if the pod store has been synced at least once.
	// Added as a member to the struct to allow injection for testing.
	podListerSynced cache.InformerSynced

	// Controllers that need to be synced
	queue workqueue.RateLimitingInterface
}

// The ReplicaSetController struct synchronizes the ReplicaSet objects stored in the system with the pods that are
// actually running. Its fields: GroupVersionKind identifies the controller type; kubeClient talks to the
// Kubernetes API; podControl creates and deletes pods; eventBroadcaster records events; burstReplicas is the
// number of replicas after whose creation/deletion the ReplicaSet is temporarily suspended; syncHandler is the
// function that syncs a ReplicaSet; expectations tracks expected pod creations and deletions; rsLister and
// podLister list ReplicaSets and Pods; rsListerSynced and podListerSynced report whether the corresponding stores
// have synced; and queue holds the controllers that still need to be synced.

// NewReplicaSetController configures a replica set controller with the specified event recorder
func NewReplicaSetController(ctx context.Context, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int) *ReplicaSetController {
	logger := klog.FromContext(ctx)
	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
	if err := metrics.Register(legacyregistry.Register); err != nil {
		logger.Error(err, "unable to register metrics")
	}
	return NewBaseController(logger, rsInformer, podInformer, kubeClient, burstReplicas,
		apps.SchemeGroupVersion.WithKind("ReplicaSet"),
		"replicaset_controller",
		"replicaset",
		controller.RealPodControl{
			KubeClient: kubeClient,
			Recorder:   eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "replicaset-controller"}),
		},
		eventBroadcaster,
	)
}

// NewReplicaSetController creates and configures a ReplicaSet controller. It takes the context, the ReplicaSet
// and Pod informers, the Kubernetes client, and the burst replica count, and it:
// 1. Obtains a logger from the context.
// 2. Creates an event broadcaster bound to that context.
// 3. Attempts to register metrics, logging an error on failure.
// 4. Calls NewBaseController with the logger, the informers, the client, the burst replica count, the
//    GroupVersionKind, the metric owner name, the queue name, the pod control object, and the event broadcaster.
// 5. Returns the resulting ReplicaSetController instance.
// In short, it initializes the controller so it can manage ReplicaSet objects and handle the related events.
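
// A minimal wiring sketch (illustrative only, not part of this package): it shows how a caller such as the
// controller manager might construct and start this controller from a client-go shared informer factory.
// The factory setup, the resync period of 0, and the worker count of 5 are assumptions.
//
//	factory := informers.NewSharedInformerFactory(kubeClient, 0)
//	rsc := NewReplicaSetController(ctx,
//		factory.Apps().V1().ReplicaSets(),
//		factory.Core().V1().Pods(),
//		kubeClient,
//		BurstReplicas)
//	factory.Start(ctx.Done())
//	go rsc.Run(ctx, 5)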

// NewBaseController is the implementation of NewReplicaSetController with additional injected
// parameters so that it can also serve as the implementation of NewReplicationController.
func NewBaseController(logger klog.Logger, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int,
	gvk schema.GroupVersionKind, metricOwnerName, queueName string, podControl controller.PodControlInterface, eventBroadcaster record.EventBroadcaster) *ReplicaSetController {

	rsc := &ReplicaSetController{
		GroupVersionKind: gvk,
		kubeClient:       kubeClient,
		podControl:       podControl,
		eventBroadcaster: eventBroadcaster,
		burstReplicas:    burstReplicas,
		expectations:     controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
		queue:            workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), queueName),
	}

	rsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			rsc.addRS(logger, obj)
		},
		UpdateFunc: func(oldObj, newObj interface{}) {
			rsc.updateRS(logger, oldObj, newObj)
		},
		DeleteFunc: func(obj interface{}) {
			rsc.deleteRS(logger, obj)
		},
	})
	rsInformer.Informer().AddIndexers(cache.Indexers{
		controllerUIDIndex: func(obj interface{}) ([]string, error) {
			rs, ok := obj.(*apps.ReplicaSet)
			if !ok {
				return []string{}, nil
			}
			controllerRef := metav1.GetControllerOf(rs)
			if controllerRef == nil {
				return []string{}, nil
			}
			return []string{string(controllerRef.UID)}, nil
		},
	})
	rsc.rsIndexer = rsInformer.Informer().GetIndexer()
	rsc.rsLister = rsInformer.Lister()
	rsc.rsListerSynced = rsInformer.Informer().HasSynced

	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			rsc.addPod(logger, obj)
		},
		// This invokes the ReplicaSet controller for every pod change, e.g. host assignment. Though this might
		// seem like overkill, the most frequent pod update is a status update, and the associated ReplicaSet
		// will only list from local storage, so it should be OK.
		UpdateFunc: func(oldObj, newObj interface{}) {
			rsc.updatePod(logger, oldObj, newObj)
		},
		DeleteFunc: func(obj interface{}) {
			rsc.deletePod(logger, obj)
		},
	})
	rsc.podLister = podInformer.Lister()
	rsc.podListerSynced = podInformer.Informer().HasSynced

	rsc.syncHandler = rsc.syncReplicaSet

	return rsc
}

// NewBaseController builds a ReplicaSetController from injected parameters and wires up its informers:
// - It takes a logger, the ReplicaSet and Pod informers, the Kubernetes client, the burst replica count, a
//   GroupVersionKind, a metric owner name, a queue name, a pod control interface, and an event broadcaster.
// - It constructs the ReplicaSetController and sets its GroupVersionKind, client, pod control, event broadcaster,
//   burst replica count, expectations, and work queue.
// - It registers add/update/delete handlers and the controllerUID index on the ReplicaSet informer.
// - It registers add/update/delete handlers on the Pod informer.
// - It returns the constructed ReplicaSetController.
// This shared constructor lets the same type manage and sync both ReplicaSets and, via adapters,
// ReplicationControllers.
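
// A hedged sketch of how a ReplicationController-flavored instance could be built through NewBaseController
// (illustrative only; the real wiring lives in ../replication/ and wraps the RC informer, client, and pod
// control in conversion adapters, so the "adapted*" names below are placeholders):
//
//	rm := NewBaseController(logger, adaptedRCInformer, podInformer, adaptedKubeClient, BurstReplicas,
//		v1.SchemeGroupVersion.WithKind("ReplicationController"),
//		"replication_controller",
//		"replicationmanager",
//		adaptedPodControl,
//		eventBroadcaster,
//	)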

// Run begins watching and syncing.
func (rsc *ReplicaSetController) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()

	// Start events processing pipeline.
	rsc.eventBroadcaster.StartStructuredLogging(3)
	rsc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: rsc.kubeClient.CoreV1().Events("")})
	defer rsc.eventBroadcaster.Shutdown()

	defer rsc.queue.ShutDown()

	controllerName := strings.ToLower(rsc.Kind)
	logger := klog.FromContext(ctx)
	logger.Info("Starting controller", "name", controllerName)
	defer logger.Info("Shutting down controller", "name", controllerName)

	if !cache.WaitForNamedCacheSync(rsc.Kind, ctx.Done(), rsc.podListerSynced, rsc.rsListerSynced) {
		return
	}

	for i := 0; i < workers; i++ {
		go wait.UntilWithContext(ctx, rsc.worker, time.Second)
	}

	<-ctx.Done()
}

// Run starts the ReplicaSet controller's watching and syncing. It starts the event processing pipeline, waits for
// the informer caches to sync, launches the requested number of worker goroutines, and then blocks until the
// context is cancelled. On cancellation, the deferred calls shut down the event broadcaster and the work queue
// and log the shutdown.

// getReplicaSetsWithSameController returns a list of ReplicaSets with the same
// owner as the given ReplicaSet.
func (rsc *ReplicaSetController) getReplicaSetsWithSameController(logger klog.Logger, rs *apps.ReplicaSet) []*apps.ReplicaSet {
	controllerRef := metav1.GetControllerOf(rs)
	if controllerRef == nil {
		utilruntime.HandleError(fmt.Errorf("ReplicaSet has no controller: %v", rs))
		return nil
	}

	objects, err := rsc.rsIndexer.ByIndex(controllerUIDIndex, string(controllerRef.UID))
	if err != nil {
		utilruntime.HandleError(err)
		return nil
	}
	relatedRSs := make([]*apps.ReplicaSet, 0, len(objects))
	for _, obj := range objects {
		relatedRSs = append(relatedRSs, obj.(*apps.ReplicaSet))
	}

	if klogV := logger.V(2); klogV.Enabled() {
		klogV.Info("Found related ReplicaSets", "replicaSet", klog.KObj(rs), "relatedReplicaSets", klog.KObjSlice(relatedRSs))
	}

	return relatedRSs
}

// getReplicaSetsWithSameController returns all ReplicaSets that share a controller (owner) with the given
// ReplicaSet. It resolves the ReplicaSet's controller reference, looks up the owner's UID in rsIndexer via the
// controllerUID index, converts the results to a slice of *apps.ReplicaSet, logs them if verbosity 2 is enabled,
// and returns them.
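
// A worked example with hypothetical objects: if a Deployment with UID "d-123" owns ReplicaSets "web-1" and
// "web-2", both are stored under the index key "d-123", so
//
//	objects, _ := rsc.rsIndexer.ByIndex(controllerUIDIndex, "d-123")
//
// returns both of them, and getReplicaSetsWithSameController called with "web-1" yields "web-1" and "web-2".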

// getPodReplicaSets returns a list of ReplicaSets matching the given pod.
func (rsc *ReplicaSetController) getPodReplicaSets(pod *v1.Pod) []*apps.ReplicaSet {
	rss, err := rsc.rsLister.GetPodReplicaSets(pod)
	if err != nil {
		return nil
	}
	if len(rss) > 1 {
		// ControllerRef will ensure we don't do anything crazy, but more than one
		// item in this list nevertheless constitutes user error.
		utilruntime.HandleError(fmt.Errorf("user error! more than one %v is selecting pods with labels: %+v", rsc.Kind, pod.Labels))
	}
	return rss
}

// getPodReplicaSets returns the ReplicaSets matching the given Pod. It calls rsLister.GetPodReplicaSets and
// returns nil on error; if more than one ReplicaSet matches, it reports a user error and still returns all of
// them.

// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (rsc *ReplicaSetController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *apps.ReplicaSet {
	// We can't look up by UID, so look up by Name and then verify UID.
	// Don't even try to look up by Name if it's the wrong Kind.
	if controllerRef.Kind != rsc.Kind {
		return nil
	}
	rs, err := rsc.rsLister.ReplicaSets(namespace).Get(controllerRef.Name)
	if err != nil {
		return nil
	}
	if rs.UID != controllerRef.UID {
		// The controller we found with this Name is not the same one that the
		// ControllerRef points to.
		return nil
	}
	return rs
}

// resolveControllerRef resolves the controller referenced by a ControllerRef, returning nil if it cannot be
// resolved to a matching controller of the correct Kind.
// Parameters:
// - namespace: the namespace to look in.
// - controllerRef: a *metav1.OwnerReference naming the controller.
// It returns the *apps.ReplicaSet the reference points to, using the following logic:
// 1. If controllerRef.Kind does not match rsc.Kind, return nil.
// 2. Fetch the ReplicaSet with that name via rsc.rsLister.ReplicaSets(namespace).Get(controllerRef.Name).
// 3. If the lookup fails, return nil.
// 4. If the fetched ReplicaSet's UID differs from controllerRef.UID, the name has been reused by a different
//    object, so return nil; otherwise return the ReplicaSet.

func (rsc *ReplicaSetController) enqueueRS(rs *apps.ReplicaSet) {
	key, err := controller.KeyFunc(rs)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", rs, err))
		return
	}

	rsc.queue.Add(key)
}

// enqueueRS puts the given ReplicaSet on the work queue for later processing. It derives the ReplicaSet's key via
// controller.KeyFunc; on failure it reports the error and returns, otherwise it adds the key to rsc.queue.

func (rsc *ReplicaSetController) enqueueRSAfter(rs *apps.ReplicaSet, duration time.Duration) {
	key, err := controller.KeyFunc(rs)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", rs, err))
		return
	}

	rsc.queue.AddAfter(key, duration)
}

// enqueueRSAfter enqueues the given ReplicaSet after the specified delay. It derives the key via
// controller.KeyFunc; on failure it reports the error and returns, otherwise it calls rsc.queue.AddAfter with the
// key and the duration.

func (rsc *ReplicaSetController) addRS(logger klog.Logger, obj interface{}) {
	rs := obj.(*apps.ReplicaSet)
	logger.V(4).Info("Adding", "replicaSet", klog.KObj(rs))
	rsc.enqueueRS(rs)
}

// addRS is the add handler for ReplicaSets. It takes a klog.Logger and an arbitrary object, asserts the object to
// *apps.ReplicaSet, logs the addition at verbosity 4, and enqueues the ReplicaSet via rsc.enqueueRS for later
// processing.

// callback when RS is updated
func (rsc *ReplicaSetController) updateRS(logger klog.Logger, old, cur interface{}) {
	oldRS := old.(*apps.ReplicaSet)
	curRS := cur.(*apps.ReplicaSet)

	// TODO: make a KEP and fix informers to always call the delete event handler on re-create
	if curRS.UID != oldRS.UID {
		key, err := controller.KeyFunc(oldRS)
		if err != nil {
			utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", oldRS, err))
			return
		}
		rsc.deleteRS(logger, cache.DeletedFinalStateUnknown{
			Key: key,
			Obj: oldRS,
		})
	}

	// You might imagine that we only really need to enqueue the
	// replica set when Spec changes, but it is safer to sync any
	// time this function is triggered. That way a full informer
	// resync can requeue any replica sets that don't yet have pods
	// but whose last attempts at creating a pod have failed (since
	// we don't block on creation of pods) instead of those
	// replica sets stalling indefinitely. Enqueueing every time
	// does result in some spurious syncs (like when Status.Replicas
	// is updated and the watch notification from it retriggers
	// this function), but in general extra resyncs shouldn't be
	// that bad as ReplicaSets that haven't met expectations yet won't
	// sync, and all the listing is done using local stores.
	if *(oldRS.Spec.Replicas) != *(curRS.Spec.Replicas) {
		logger.V(4).Info("replicaSet updated. Desired pod count change.", "replicaSet", klog.KObj(oldRS), "oldReplicas", *(oldRS.Spec.Replicas), "newReplicas", *(curRS.Spec.Replicas))
	}
	rsc.enqueueRS(curRS)
}

// updateRS is the callback invoked when a ReplicaSet is updated. It:
// 1. Asserts old and cur to *apps.ReplicaSet.
// 2. If the UID changed (the object was deleted and re-created under the same key), treats the old ReplicaSet as
//    deleted by calling deleteRS with a cache.DeletedFinalStateUnknown carrying the old key and object.
// 3. Logs a message if the desired replica count (Spec.Replicas) changed, and then always enqueues the current
//    ReplicaSet so its state is re-synced.

func (rsc *ReplicaSetController) deleteRS(logger klog.Logger, obj interface{}) {
	rs, ok := obj.(*apps.ReplicaSet)
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj))
			return
		}
		rs, ok = tombstone.Obj.(*apps.ReplicaSet)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a ReplicaSet %#v", obj))
			return
		}
	}

	key, err := controller.KeyFunc(rs)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", rs, err))
		return
	}

	logger.V(4).Info("Deleting", "replicaSet", klog.KObj(rs))

	// Delete expectations for the ReplicaSet so if we create a new one with the same name it starts clean
	rsc.expectations.DeleteExpectations(logger, key)

	rsc.queue.Add(key)
}

// deleteRS is the delete handler for ReplicaSets; it processes the deletion event and keeps the controller's
// bookkeeping consistent. It first tries to assert obj to *apps.ReplicaSet; if that fails it tries
// cache.DeletedFinalStateUnknown and extracts the ReplicaSet from the tombstone, reporting an error and returning
// if either assertion fails. It then derives the ReplicaSet's key via controller.KeyFunc (reporting an error and
// returning on failure), logs the deletion, deletes the ReplicaSet's expectations so a re-created ReplicaSet with
// the same name starts clean, and finally adds the key to the work queue for further processing.

// When a pod is created, enqueue the replica set that manages it and update its expectations.
func (rsc *ReplicaSetController) addPod(logger klog.Logger, obj interface{}) {
	pod := obj.(*v1.Pod)

	if pod.DeletionTimestamp != nil {
		// on a restart of the controller manager, it's possible a new pod shows up in a state that
		// is already pending deletion. Prevent the pod from being a creation observation.
		rsc.deletePod(logger, pod)
		return
	}

	// If it has a ControllerRef, that's all that matters.
	if controllerRef := metav1.GetControllerOf(pod); controllerRef != nil {
		rs := rsc.resolveControllerRef(pod.Namespace, controllerRef)
		if rs == nil {
			return
		}
		rsKey, err := controller.KeyFunc(rs)
		if err != nil {
			return
		}
		logger.V(4).Info("Pod created", "pod", klog.KObj(pod), "detail", pod)
		rsc.expectations.CreationObserved(logger, rsKey)
		rsc.queue.Add(rsKey)
		return
	}

	// Otherwise, it's an orphan. Get a list of all matching ReplicaSets and sync
	// them to see if anyone wants to adopt it.
	// DO NOT observe creation because no controller should be waiting for an
	// orphan.
	rss := rsc.getPodReplicaSets(pod)
	if len(rss) == 0 {
		return
	}
	logger.V(4).Info("Orphan Pod created", "pod", klog.KObj(pod), "detail", pod)
	for _, rs := range rss {
		rsc.enqueueRS(rs)
	}
}

// addPod is the handler for Pod creation events. If the Pod already carries a deletion timestamp (possible after
// a controller-manager restart), it is handled as a deletion and the handler returns. If the Pod has a
// ControllerRef, the referenced ReplicaSet is resolved; if resolution fails or the key cannot be computed, the
// handler returns, otherwise the creation is recorded against the ReplicaSet's expectations and the ReplicaSet is
// enqueued. If the Pod has no ControllerRef it is an orphan: every ReplicaSet whose selector matches it is
// enqueued so one of them may adopt it, and no creation is observed because no controller is waiting for an
// orphan. See the sketch of the expectations handshake below.
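
// A small sketch of the expectations handshake this handler participates in (illustrative; rsKey is the owning
// ReplicaSet's namespace/name key and the count of 3 is an assumption):
//
//	rsc.expectations.ExpectCreations(logger, rsKey, 3)    // manageReplicas plans 3 pod creations
//	rsc.expectations.CreationObserved(logger, rsKey)      // addPod observes one of them arriving
//	rsc.expectations.SatisfiedExpectations(logger, rsKey) // true only once all 3 creations are observed
//	                                                      // (or the expectations expire)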

// When a pod is updated, figure out what replica set/s manage it and wake them
// up. If the labels of the pod have changed we need to awaken both the old
// and new replica set. old and cur must be *v1.Pod types.
func (rsc *ReplicaSetController) updatePod(logger klog.Logger, old, cur interface{}) {
	curPod := cur.(*v1.Pod)
	oldPod := old.(*v1.Pod)
	if curPod.ResourceVersion == oldPod.ResourceVersion {
		// Periodic resync will send update events for all known pods.
		// Two different versions of the same pod will always have different RVs.
		return
	}

	labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels)
	if curPod.DeletionTimestamp != nil {
		// when a pod is deleted gracefully its deletion timestamp is first modified to reflect a grace period,
		// and after such time has passed, the kubelet actually deletes it from the store. We receive an update
		// for modification of the deletion timestamp and expect an rs to create more replicas asap, not wait
		// until the kubelet actually deletes the pod. This is different from the Phase of a pod changing, because
		// an rs never initiates a phase change, and so is never asleep waiting for the same.
		rsc.deletePod(logger, curPod)
		if labelChanged {
			// we don't need to check the oldPod.DeletionTimestamp because DeletionTimestamp cannot be unset.
			rsc.deletePod(logger, oldPod)
		}
		return
	}

	curControllerRef := metav1.GetControllerOf(curPod)
	oldControllerRef := metav1.GetControllerOf(oldPod)
	controllerRefChanged := !reflect.DeepEqual(curControllerRef, oldControllerRef)
	if controllerRefChanged && oldControllerRef != nil {
		// The ControllerRef was changed. Sync the old controller, if any.
		if rs := rsc.resolveControllerRef(oldPod.Namespace, oldControllerRef); rs != nil {
			rsc.enqueueRS(rs)
		}
	}

	// If it has a ControllerRef, that's all that matters.
	if curControllerRef != nil {
		rs := rsc.resolveControllerRef(curPod.Namespace, curControllerRef)
		if rs == nil {
			return
		}
		logger.V(4).Info("Pod objectMeta updated.", "pod", klog.KObj(oldPod), "oldObjectMeta", oldPod.ObjectMeta, "curObjectMeta", curPod.ObjectMeta)
		rsc.enqueueRS(rs)
		// TODO: MinReadySeconds in the Pod will generate an Available condition to be added in
		// the Pod status which in turn will trigger a requeue of the owning replica set thus
		// having its status updated with the newly available replica. For now, we can fake the
		// update by resyncing the controller MinReadySeconds after it is requeued because
		// a Pod transitioned to Ready.
		// Note that this still suffers from #29229, we are just moving the problem one level
		// "closer" to kubelet (from the deployment to the replica set controller).
		if !podutil.IsPodReady(oldPod) && podutil.IsPodReady(curPod) && rs.Spec.MinReadySeconds > 0 {
			logger.V(2).Info("pod will be enqueued after a while for availability check", "duration", rs.Spec.MinReadySeconds, "kind", rsc.Kind, "pod", klog.KObj(oldPod))
			// Add a second to avoid milliseconds skew in AddAfter.
			// See https://github.com/kubernetes/kubernetes/issues/39785#issuecomment-279959133 for more info.
			rsc.enqueueRSAfter(rs, (time.Duration(rs.Spec.MinReadySeconds)*time.Second)+time.Second)
		}
		return
	}

	// Otherwise, it's an orphan. If anything changed, sync matching controllers
	// to see if anyone wants to adopt it now.
	if labelChanged || controllerRefChanged {
		rss := rsc.getPodReplicaSets(curPod)
		if len(rss) == 0 {
			return
		}
		logger.V(4).Info("Orphan Pod objectMeta updated.", "pod", klog.KObj(oldPod), "oldObjectMeta", oldPod.ObjectMeta, "curObjectMeta", curPod.ObjectMeta)
		for _, rs := range rss {
			rsc.enqueueRS(rs)
		}
	}
}

// updatePod determines which ReplicaSet(s) manage an updated Pod and wakes them up; if the Pod's labels changed,
// both the old and the new ReplicaSets must be woken. old and cur must be *v1.Pod. The function compares the old
// and new Pods' resource versions, labels, and controller references to decide whether to treat the update as a
// deletion, sync the previous owner, enqueue the current owner (possibly scheduling a delayed availability
// check), or offer an orphan for adoption.
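
// Worked example of the delayed availability check above (hypothetical value): with rs.Spec.MinReadySeconds = 30,
// a Pod that just became Ready causes the owning ReplicaSet to be re-enqueued after 30s + 1s = 31s, i.e.
//
//	rsc.enqueueRSAfter(rs, time.Duration(30)*time.Second+time.Second)
//
// where the extra second absorbs millisecond skew in AddAfter.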

// When a pod is deleted, enqueue the replica set that manages the pod and update its expectations.
// obj could be a *v1.Pod, or a cache.DeletedFinalStateUnknown marker item.
func (rsc *ReplicaSetController) deletePod(logger klog.Logger, obj interface{}) {
	pod, ok := obj.(*v1.Pod)

	// When a delete is dropped, the relist will notice a pod in the store not
	// in the list, leading to the insertion of a tombstone object which contains
	// the deleted key/value. Note that this value might be stale. If the pod
	// changed labels the new ReplicaSet will not be woken up till the periodic resync.
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %+v", obj))
			return
		}
		pod, ok = tombstone.Obj.(*v1.Pod)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a pod %#v", obj))
			return
		}
	}

	controllerRef := metav1.GetControllerOf(pod)
	if controllerRef == nil {
		// No controller should care about orphans being deleted.
		return
	}
	rs := rsc.resolveControllerRef(pod.Namespace, controllerRef)
	if rs == nil {
		return
	}
	rsKey, err := controller.KeyFunc(rs)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", rs, err))
		return
	}
	logger.V(4).Info("Pod deleted", "delete_by", utilruntime.GetCaller(), "deletion_timestamp", pod.DeletionTimestamp, "pod", klog.KObj(pod))
	rsc.expectations.DeletionObserved(logger, rsKey, controller.PodKey(pod))
	rsc.queue.Add(rsKey)
}

// deletePod handles Pod deletion events: it enqueues the ReplicaSet that manages the Pod and updates that
// ReplicaSet's expectations. If the delete event was dropped, the relist notices a Pod in the store that is not
// in the list and inserts a tombstone (cache.DeletedFinalStateUnknown) holding the possibly stale object; if the
// tombstone does not contain a Pod, an error is reported and the handler returns. Pods without a controller
// reference are ignored, as are controller references that cannot be resolved. If deriving the ReplicaSet key
// fails, an error is reported and the handler returns. Otherwise the deletion is logged, recorded via
// expectations.DeletionObserved, and the ReplicaSet key is added to the work queue.

// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (rsc *ReplicaSetController) worker(ctx context.Context) {
	for rsc.processNextWorkItem(ctx) {
	}
}

// worker is a worker goroutine that dequeues items, processes them, and marks them done. It repeatedly calls
// processNextWorkItem until that returns false (i.e. the queue has been shut down); the work queue guarantees
// that syncHandler is never invoked concurrently with the same key.

func (rsc *ReplicaSetController) processNextWorkItem(ctx context.Context) bool {
	key, quit := rsc.queue.Get()
	if quit {
		return false
	}
	defer rsc.queue.Done(key)

	err := rsc.syncHandler(ctx, key.(string))
	if err == nil {
		rsc.queue.Forget(key)
		return true
	}

	utilruntime.HandleError(fmt.Errorf("sync %q failed with %v", key, err))
	rsc.queue.AddRateLimited(key)

	return true
}

// manageReplicas checks and updates replicas for the given ReplicaSet.
// Does NOT modify <filteredPods>.
// It will requeue the replica set in case of an error while creating/deleting pods.
func (rsc *ReplicaSetController) manageReplicas(ctx context.Context, filteredPods []*v1.Pod, rs *apps.ReplicaSet) error {
	diff := len(filteredPods) - int(*(rs.Spec.Replicas))
	rsKey, err := controller.KeyFunc(rs)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("couldn't get key for %v %#v: %v", rsc.Kind, rs, err))
		return nil
	}
	logger := klog.FromContext(ctx)
	if diff < 0 {
		diff *= -1
		if diff > rsc.burstReplicas {
			diff = rsc.burstReplicas
		}
		// TODO: Track UIDs of creates just like deletes. The problem currently
		// is we'd need to wait on the result of a create to record the pod's
		// UID, which would require locking *across* the create, which will turn
		// into a performance bottleneck. We should generate a UID for the pod
		// beforehand and store it via ExpectCreations.
		rsc.expectations.ExpectCreations(logger, rsKey, diff)
		logger.V(2).Info("Too few replicas", "replicaSet", klog.KObj(rs), "need", *(rs.Spec.Replicas), "creating", diff)
		// Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize
		// and double with each successful iteration in a kind of "slow start".
		// This handles attempts to start large numbers of pods that would
		// likely all fail with the same error. For example a project with a
		// low quota that attempts to create a large number of pods will be
		// prevented from spamming the API service with the pod create requests
		// after one of its pods fails.  Conveniently, this also prevents the
		// event spam that those failures would generate.
		successfulCreations, err := slowStartBatch(diff, controller.SlowStartInitialBatchSize, func() error {
			err := rsc.podControl.CreatePods(ctx, rs.Namespace, &rs.Spec.Template, rs, metav1.NewControllerRef(rs, rsc.GroupVersionKind))
			if err != nil {
				if apierrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
					// if the namespace is being terminated, we don't have to do
					// anything because any creation will fail
					return nil
				}
			}
			return err
		})

		// Any skipped pods that we never attempted to start shouldn't be expected.
		// The skipped pods will be retried later. The next controller resync will
		// retry the slow start process.
		if skippedPods := diff - successfulCreations; skippedPods > 0 {
			logger.V(2).Info("Slow-start failure. Skipping creation of pods, decrementing expectations", "podsSkipped", skippedPods, "kind", rsc.Kind, "replicaSet", klog.KObj(rs))
			for i := 0; i < skippedPods; i++ {
				// Decrement the expected number of creates because the informer won't observe this pod
				rsc.expectations.CreationObserved(logger, rsKey)
			}
		}
		return err
	} else if diff > 0 {
		if diff > rsc.burstReplicas {
			diff = rsc.burstReplicas
		}
		logger.V(2).Info("Too many replicas", "replicaSet", klog.KObj(rs), "need", *(rs.Spec.Replicas), "deleting", diff)

		relatedPods, err := rsc.getIndirectlyRelatedPods(logger, rs)
		utilruntime.HandleError(err)

		// Choose which Pods to delete, preferring those in earlier phases of startup.
		podsToDelete := getPodsToDelete(filteredPods, relatedPods, diff)

		// Snapshot the UIDs (ns/name) of the pods we're expecting to see
		// deleted, so we know to record their expectations exactly once either
		// when we see it as an update of the deletion timestamp, or as a delete.
		// Note that if the labels on a pod/rs change in a way that the pod gets
		// orphaned, the rs will only wake up after the expectations have
		// expired even if other pods are deleted.
		rsc.expectations.ExpectDeletions(logger, rsKey, getPodKeys(podsToDelete))

		errCh := make(chan error, diff)
		var wg sync.WaitGroup
		wg.Add(diff)
		for _, pod := range podsToDelete {
			go func(targetPod *v1.Pod) {
				defer wg.Done()
				if err := rsc.podControl.DeletePod(ctx, rs.Namespace, targetPod.Name, rs); err != nil {
					// Decrement the expected number of deletes because the informer won't observe this deletion
					podKey := controller.PodKey(targetPod)
					rsc.expectations.DeletionObserved(logger, rsKey, podKey)
					if !apierrors.IsNotFound(err) {
						logger.V(2).Info("Failed to delete pod, decremented expectations", "pod", podKey, "kind", rsc.Kind, "replicaSet", klog.KObj(rs))
						errCh <- err
					}
				}
			}(pod)
		}
		wg.Wait()

		select {
		case err := <-errCh:
			// all errors have been reported before and they're likely to be the same, so we'll only return the first one we hit.
			if err != nil {
				return err
			}
		default:
		}
	}

	return nil
}

// manageReplicas checks and updates the replica count for the given ReplicaSet. It computes the difference
// between the number of filtered Pods and the ReplicaSet's desired replica count and acts on it, clamping the
// difference to burstReplicas. When there are too few replicas, it records the planned creations via
// expectations.ExpectCreations and creates Pods in batches via slowStartBatch; creations that were never
// attempted are compensated for with expectations.CreationObserved. When there are too many replicas, it records
// the planned deletions via expectations.ExpectDeletions, selects victims with getPodsToDelete, and deletes them
// concurrently via podControl.DeletePod, calling expectations.DeletionObserved for deletions the informer will
// never observe.
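
// A worked example of the scale-up path (hypothetical numbers): with *rs.Spec.Replicas = 1200 and
// len(filteredPods) = 0, diff is -1200, which becomes 1200 and is clamped to rsc.burstReplicas (500 by default).
// ExpectCreations(logger, rsKey, 500) is recorded and slowStartBatch attempts 500 creations in this sync; the
// remaining 700 are created on later syncs once the expectations are satisfied.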

// syncReplicaSet will sync the ReplicaSet with the given key if it has had its expectations fulfilled,
// meaning it did not expect to see any more of its pods created or deleted. This function is not meant to be
// invoked concurrently with the same key.
func (rsc *ReplicaSetController) syncReplicaSet(ctx context.Context, key string) error {
	logger := klog.FromContext(ctx)
	startTime := time.Now()
	defer func() {
		logger.Info("Finished syncing", "kind", rsc.Kind, "key", key, "duration", time.Since(startTime))
	}()

	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return err
	}
	rs, err := rsc.rsLister.ReplicaSets(namespace).Get(name)
	if apierrors.IsNotFound(err) {
		logger.V(4).Info("deleted", "kind", rsc.Kind, "key", key)
		rsc.expectations.DeleteExpectations(logger, key)
		return nil
	}
	if err != nil {
		return err
	}

	rsNeedsSync := rsc.expectations.SatisfiedExpectations(logger, key)
	selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("error converting pod selector to selector for rs %v/%v: %v", namespace, name, err))
		return nil
	}

	// List all pods to include the pods that don't match the rs's selector
	// anymore but have a stale controller ref.
	// TODO: Do the List and Filter in a single pass, or use an index.
	allPods, err := rsc.podLister.Pods(rs.Namespace).List(labels.Everything())
	if err != nil {
		return err
	}
	// Ignore inactive pods.
	filteredPods := controller.FilterActivePods(logger, allPods)

	// NOTE: filteredPods point to objects in the cache - if you need to
	// modify them, copy them first.
	filteredPods, err = rsc.claimPods(ctx, rs, selector, filteredPods)
	if err != nil {
		return err
	}

	var manageReplicasErr error
	if rsNeedsSync && rs.DeletionTimestamp == nil {
		manageReplicasErr = rsc.manageReplicas(ctx, filteredPods, rs)
	}
	rs = rs.DeepCopy()
	newStatus := calculateStatus(rs, filteredPods, manageReplicasErr)

	// Always updates status as pods come up or die.
	updatedRS, err := updateReplicaSetStatus(logger, rsc.kubeClient.AppsV1().ReplicaSets(rs.Namespace), rs, newStatus)
	if err != nil {
		// Multiple things could lead to this update failing. Returning an error causes a requeue
		// without forcing a hotloop.
		return err
	}
	// Resync the ReplicaSet after MinReadySeconds as a last line of defense to guard against clock-skew.
	if manageReplicasErr == nil && updatedRS.Spec.MinReadySeconds > 0 &&
		updatedRS.Status.ReadyReplicas == *(updatedRS.Spec.Replicas) &&
		updatedRS.Status.AvailableReplicas != *(updatedRS.Spec.Replicas) {
		rsc.queue.AddAfter(key, time.Duration(updatedRS.Spec.MinReadySeconds)*time.Second)
	}
	return manageReplicasErr
}

// syncReplicaSet syncs the ReplicaSet identified by the given key. It fetches the ReplicaSet, checks whether its
// expectations are satisfied, lists all Pods in the namespace, filters out inactive ones, claims the Pods that
// should belong to this ReplicaSet, manages the replica count if a sync is needed and the ReplicaSet is not being
// deleted, and finally updates the ReplicaSet's status. If the status update fails, the error is returned so the
// key is requeued and retried.
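
// The key handled here is the standard "namespace/name" cache key; for example (hypothetical object):
//
//	ns, name, _ := cache.SplitMetaNamespaceKey("default/frontend-5d4f8")
//	// ns == "default", name == "frontend-5d4f8"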

func (rsc *ReplicaSetController) claimPods(ctx context.Context, rs *apps.ReplicaSet, selector labels.Selector, filteredPods []*v1.Pod) ([]*v1.Pod, error) {
	// If any adoptions are attempted, we should first recheck for deletion with
	// an uncached quorum read sometime after listing Pods (see #42639).
	canAdoptFunc := controller.RecheckDeletionTimestamp(func(ctx context.Context) (metav1.Object, error) {
		fresh, err := rsc.kubeClient.AppsV1().ReplicaSets(rs.Namespace).Get(ctx, rs.Name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		if fresh.UID != rs.UID {
			return nil, fmt.Errorf("original %v %v/%v is gone: got uid %v, wanted %v", rsc.Kind, rs.Namespace, rs.Name, fresh.UID, rs.UID)
		}
		return fresh, nil
	})
	cm := controller.NewPodControllerRefManager(rsc.podControl, rs, selector, rsc.GroupVersionKind, canAdoptFunc)
	return cm.ClaimPods(ctx, filteredPods)
}

// claimPods claims the Pods that should belong to the ReplicaSet. Using the given label selector and a
// PodControllerRefManager, it adopts matching orphans and releases Pods that no longer match; before any adoption
// it re-checks, with an uncached quorum read, that the ReplicaSet still exists with the same UID and has not been
// marked for deletion. It returns the list of successfully claimed Pods and any error encountered.

// slowStartBatch tries to call the provided function a total of 'count' times,
// starting slow to check for errors, then speeding up if calls succeed.
//
// It groups the calls into batches, starting with a group of initialBatchSize.
// Within each batch, it may call the function multiple times concurrently.
//
// If a whole batch succeeds, the next batch may get exponentially larger.
// If there are any failures in a batch, all remaining batches are skipped
// after waiting for the current batch to complete.
//
// It returns the number of successful calls to the function.
func slowStartBatch(count int, initialBatchSize int, fn func() error) (int, error) {
	remaining := count
	successes := 0
	for batchSize := min(remaining, initialBatchSize); batchSize > 0; batchSize = min(2*batchSize, remaining) {
		errCh := make(chan error, batchSize)
		var wg sync.WaitGroup
		wg.Add(batchSize)
		for i := 0; i < batchSize; i++ {
			go func() {
				defer wg.Done()
				if err := fn(); err != nil {
					errCh <- err
				}
			}()
		}
		wg.Wait()
		curSuccesses := batchSize - len(errCh)
		successes += curSuccesses
		if len(errCh) > 0 {
			return successes, <-errCh
		}
		remaining -= batchSize
	}
	return successes, nil
}

// slowStartBatch attempts a total of count calls to fn, starting slowly to check for errors and speeding up as
// calls succeed; it returns the number of successful calls. In detail:
// - count is the total number of calls to make, initialBatchSize the size of the first batch, and fn the function
//   to call, which returns an error.
// - remaining tracks the calls still to be made and successes the calls that have succeeded.
// - The for loop issues one batch per iteration; the batch size starts at initialBatchSize, doubles after each
//   fully successful batch, and is capped at the remaining count.
// - Within a batch, fn is invoked concurrently in goroutines, errors are collected on the errCh channel, and a
//   sync.WaitGroup waits for the whole batch to finish.
// - The successes of each batch are added to the running total.
// - If any call in a batch failed, the function returns the current success count and the first error received.
// - Otherwise, once all batches are done, it returns the total success count and a nil error.
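
// A worked example of the batch progression (fn is a hypothetical helper): with count = 13 and
// initialBatchSize = 1, and every call succeeding, the batch sizes are 1, 2, 4, and then min(8, 6) = 6, for a
// total of 13 calls and a return value of (13, nil). If a call in the 4-call batch fails, the final batch is
// skipped and the function returns the successes so far together with the first error received.
//
//	created, err := slowStartBatch(13, controller.SlowStartInitialBatchSize, func() error {
//		return tryCreateOnePod() // hypothetical helper
//	})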

// getIndirectlyRelatedPods returns all pods that are owned by any ReplicaSet
// that is owned by the given ReplicaSet's owner.
func (rsc *ReplicaSetController) getIndirectlyRelatedPods(logger klog.Logger, rs *apps.ReplicaSet) ([]*v1.Pod, error) {
	var relatedPods []*v1.Pod
	seen := make(map[types.UID]*apps.ReplicaSet)
	for _, relatedRS := range rsc.getReplicaSetsWithSameController(logger, rs) {
		selector, err := metav1.LabelSelectorAsSelector(relatedRS.Spec.Selector)
		if err != nil {
			// This object has an invalid selector, it does not match any pods
			continue
		}
		pods, err := rsc.podLister.Pods(relatedRS.Namespace).List(selector)
		if err != nil {
			return nil, err
		}
		for _, pod := range pods {
			if otherRS, found := seen[pod.UID]; found {
				logger.V(5).Info("Pod is owned by both", "pod", klog.KObj(pod), "kind", rsc.Kind, "replicaSets", klog.KObjSlice([]klog.KMetadata{otherRS, relatedRS}))
				continue
			}
			seen[pod.UID] = relatedRS
			relatedPods = append(relatedPods, pod)
		}
	}
	logger.V(4).Info("Found related pods", "kind", rsc.Kind, "replicaSet", klog.KObj(rs), "pods", klog.KObjSlice(relatedPods))
	return relatedPods, nil
}

// getIndirectlyRelatedPods returns every Pod owned by any ReplicaSet that shares an owner with the given
// ReplicaSet. Steps:
// 1. Initialize an empty Pod slice and a map recording which ReplicaSet each seen Pod belongs to.
// 2. Iterate over the ReplicaSets that have the same controller as the given ReplicaSet.
// 3. Convert each ReplicaSet's label selector into a selector object, skipping invalid selectors because they
//    match no Pods.
// 4. List the Pods matching that selector.
// 5. Skip Pods that were already seen; otherwise record them in the map and append them to the result.
// 6. Return the accumulated list.

func getPodsToDelete(filteredPods, relatedPods []*v1.Pod, diff int) []*v1.Pod {
	// No need to sort pods if we are about to delete all of them.
	// diff will always be <= len(filteredPods), so there is no need to handle the > case.
	if diff < len(filteredPods) {
		podsWithRanks := getPodsRankedByRelatedPodsOnSameNode(filteredPods, relatedPods)
		sort.Sort(podsWithRanks)
		reportSortingDeletionAgeRatioMetric(filteredPods, diff)
	}
	return filteredPods[:diff]
}

// getPodsToDelete returns the Pods to delete, given the filtered Pods, the related Pods, and the number of Pods
// to remove (diff). If diff is smaller than the number of filtered Pods, it ranks the filtered Pods with
// getPodsRankedByRelatedPodsOnSameNode, sorts them so the preferred victims come first, and reports the
// sorting-deletion age ratio metric. It then returns the first diff entries of filteredPods.

func reportSortingDeletionAgeRatioMetric(filteredPods []*v1.Pod, diff int) {
	now := time.Now()
	youngestTime := time.Time{}
	// first we need to check all of the ready pods to get the youngest, as they may not necessarily be sorted by timestamp alone
	for _, pod := range filteredPods {
		if pod.CreationTimestamp.Time.After(youngestTime) && podutil.IsPodReady(pod) {
			youngestTime = pod.CreationTimestamp.Time
		}
	}

	// for each pod chosen for deletion, report the ratio of its age to the youngest pod's age
	for _, pod := range filteredPods[:diff] {
		if !podutil.IsPodReady(pod) {
			continue
		}
		// Use floating-point division so that sub-integer ratios are not truncated.
		ratio := float64(now.Sub(pod.CreationTimestamp.Time).Milliseconds()) / float64(now.Sub(youngestTime).Milliseconds())
		metrics.SortingDeletionAgeRatio.Observe(ratio)
	}
}

// reportSortingDeletionAgeRatioMetric reports, for each ready Pod chosen for deletion, the ratio of its age to
// the age of the youngest ready Pod. It first scans all ready Pods to find the youngest one (they are not
// necessarily sorted by timestamp alone), then observes the ratio for every ready Pod in the deletion slice.
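
// Worked example (hypothetical pods): if the youngest ready Pod is 1 minute old and a ready Pod chosen for
// deletion is 5 minutes old, the observed ratio is 300000ms / 60000ms = 5.0.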

// getPodsRankedByRelatedPodsOnSameNode returns an ActivePodsWithRanks value
// that wraps podsToRank and assigns each pod a rank equal to the number of
// active pods in relatedPods that are colocated on the same node with the pod.
// relatedPods generally should be a superset of podsToRank.
func getPodsRankedByRelatedPodsOnSameNode(podsToRank, relatedPods []*v1.Pod) controller.ActivePodsWithRanks {
	podsOnNode := make(map[string]int)
	for _, pod := range relatedPods {
		if controller.IsPodActive(pod) {
			podsOnNode[pod.Spec.NodeName]++
		}
	}
	ranks := make([]int, len(podsToRank))
	for i, pod := range podsToRank {
		ranks[i] = podsOnNode[pod.Spec.NodeName]
	}
	return controller.ActivePodsWithRanks{Pods: podsToRank, Rank: ranks, Now: metav1.Now()}
}

func getPodKeys(pods []*v1.Pod) []string {
	podKeys := make([]string, 0, len(pods))
	for _, pod := range pods {
		podKeys = append(podKeys, controller.PodKey(pod))
	}
	return podKeys
}

// getPodsRankedByRelatedPodsOnSameNode assigns each Pod in podsToRank a rank equal to the number of active
// related Pods colocated on the same node. It takes podsToRank and relatedPods, counts the active Pods in
// relatedPods per node, looks up that count for each Pod to rank, and returns the result wrapped in a
// controller.ActivePodsWithRanks.
// getPodKeys takes a slice of Pods and returns the slice of their keys, computed with controller.PodKey.
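
// A worked example of the ranking (hypothetical pods): if relatedPods contains three active Pods on "node-a" and
// one on "node-b", podsOnNode becomes {"node-a": 3, "node-b": 1}, so every Pod in podsToRank scheduled on
// "node-a" gets rank 3 and every Pod on "node-b" gets rank 1. All else being equal, ActivePodsWithRanks sorts the
// higher-ranked (more crowded) Pods earlier, so getPodsToDelete prefers them as deletion victims.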