CKA (Certified Kubernetes Administrator)/Kode Kloud

02.Scheduling - Node Affinity

seulseul 2022. 1. 19. 15:08


01. How many Labels exist on node node01?

Answer: 5

# List the cluster's nodes along with their labels.
kubectl get nodes --show-labels

root@controlplane:~# kubectl describe node node01 | grep -i label
Labels:             beta.kubernetes.io/arch=amd64

root@controlplane:~# kubectl describe node node01                
Name:               node01
Roles:              <none>
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=node01
                    kubernetes.io/os=linux
Annotations:        flannel.alpha.coreos.com/backend-data: {"VNI":1,"VtepMAC":"d6:38:97:18:d9:80"}
                    flannel.alpha.coreos.com/backend-type: vxlan
                    flannel.alpha.coreos.com/kube-subnet-manager: true
                    flannel.alpha.coreos.com/public-ip: 10.9.6.3
                    kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Wed, 19 Jan 2022 05:27:27 +0000
Taints:             <none>
Unschedulable:      false
Lease:
  HolderIdentity:  node01
  AcquireTime:     <unset>
  RenewTime:       Wed, 19 Jan 2022 05:39:28 +0000
Conditions:
  Type                 Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----                 ------  -----------------                 ------------------                ------                       -------
  NetworkUnavailable   False   Wed, 19 Jan 2022 05:27:34 +0000   Wed, 19 Jan 2022 05:27:34 +0000   FlannelIsUp                  Flannel is running on this node
  MemoryPressure       False   Wed, 19 Jan 2022 05:37:42 +0000   Wed, 19 Jan 2022 05:27:27 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure         False   Wed, 19 Jan 2022 05:37:42 +0000   Wed, 19 Jan 2022 05:27:27 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure          False   Wed, 19 Jan 2022 05:37:42 +0000   Wed, 19 Jan 2022 05:27:27 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready                True    Wed, 19 Jan 2022 05:37:42 +0000   Wed, 19 Jan 2022 05:27:38 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
  InternalIP:  10.9.6.3
  Hostname:    node01
Capacity:
  cpu:                36
  ephemeral-storage:  507944172Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             214588000Ki
  pods:               110
Allocatable:
  cpu:                36
  ephemeral-storage:  468121348141
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             214485600Ki
  pods:               110
System Info:
  Machine ID:                 e8574b7bc3784a8dbca351f47f17972f
  System UUID:                df4f7349-5604-cb04-432c-f7c9a46f5ce0
  Boot ID:                    06683fbc-cc7d-4b3c-873c-79494ee99344
  Kernel Version:             5.4.0-1060-gcp
  OS Image:                   Ubuntu 18.04.5 LTS
  Operating System:           linux
  Architecture:               amd64
  Container Runtime Version:  docker://19.3.0
  Kubelet Version:            v1.20.0
  Kube-Proxy Version:         v1.20.0
PodCIDR:                      10.244.1.0/24
PodCIDRs:                     10.244.1.0/24
Non-terminated Pods:          (2 in total)
  Namespace                   Name                     CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
  ---------                   ----                     ------------  ----------  ---------------  -------------  ---
  kube-system                 kube-flannel-ds-ft4fz    100m (0%)     100m (0%)   50Mi (0%)        300Mi (0%)     12m
  kube-system                 kube-proxy-h6v5f         0 (0%)        0 (0%)      0 (0%)           0 (0%)         12m
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests   Limits
  --------           --------   ------
  cpu                100m (0%)  100m (0%)
  memory             50Mi (0%)  300Mi (0%)
  ephemeral-storage  0 (0%)     0 (0%)
  hugepages-1Gi      0 (0%)     0 (0%)
  hugepages-2Mi      0 (0%)     0 (0%)
Events:
  Type    Reason                   Age   From        Message
  ----    ------                   ----  ----        -------
  Normal  Starting                 12m   kubelet     Starting kubelet.
  Normal  NodeHasSufficientMemory  12m   kubelet     Node node01 status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    12m   kubelet     Node node01 status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     12m   kubelet     Node node01 status is now: NodeHasSufficientPID
  Normal  NodeAllocatableEnforced  12m   kubelet     Updated Node Allocatable limit across pods
  Normal  Starting                 11m   kube-proxy  Starting kube-proxy.
  Normal  NodeReady                11m   kubelet     Node node01 status is now: NodeReady
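
A quicker way to count them: kubectl label with the --list flag prints one label per line, so piping through wc -l gives the count directly.

# List node01's labels one per line, then count them
kubectl label node node01 --list
kubectl label node node01 --list | wc -l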

02. What is the value set to the label beta.kubernetes.io/arch on node01?

Answer: amd64

-- beta.kubernetes.io/arch=amd64
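
The same value can also be read directly with jsonpath (a convenience one-liner; the dots inside the label key must be escaped):

kubectl get node node01 -o jsonpath='{.metadata.labels.beta\.kubernetes\.io/arch}'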

 

03. Apply a label color=blue to node node01

  • color=blue

# Pick one node and add the label to it.
kubectl label nodes node01 color=blue

# kubectl label nodes <your-node-name> color=blue
# <your-node-name> is the name of the node you chose.
# Verify that the chosen node now carries the color=blue label.
# kubectl get nodes --show-labels
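# If the label ever needs to be removed again, a trailing minus on the key deletes it:
# kubectl label nodes node01 color-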

04. Create a new deployment named blue with the nginx image and 3 replicas.

  • Name: blue
  • Replicas: 3
  • Image: nginx
kubectl create deployment blue --image=nginx --replicas=3
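
To confirm the deployment came up, check it and its pods (kubectl create deployment labels the pods app=blue by default):

kubectl get deployment blue
kubectl get pods -l app=blue -o wide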

05. Which nodes can the pods for the blue deployment be placed on?

Make sure to check taints on both nodes!

Answer: controlplane & node01

Check whether controlplane and node01 have any taints on them that would prevent the pods from being scheduled there. If there are no taints, the pods can be scheduled on either node.

So run the following commands to check the taints on both nodes:

kubectl describe node controlplane | grep -i taints
kubectl describe node node01 | grep -i taints

root@controlplane:~# kubectl describe node controlplane | grep -i taints
Taints:             <none>
root@controlplane:~# kubectl describe node node01 | grep -i taints
Taints:             <none>
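
An alternative that shows the taints of every node in one command, using kubectl's custom-columns output:

kubectl get nodes -o custom-columns='NODE:.metadata.name,TAINTS:.spec.taints'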

06. Set Node Affinity to the deployment to place the pods on node01 only.

 

  • Name: blue
  • Replicas: 3
  • Image: nginx
  • NodeAffinity: requiredDuringSchedulingIgnoredDuringExecution
  • Key: color
  • values: blue
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: blue
spec:
  replicas: 3
  selector:
    matchLabels:
      run: nginx
  template:
    metadata:
      labels:
        run: nginx
    spec:
      containers:
      - image: nginx
        imagePullPolicy: Always
        name: nginx
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: color
                operator: In
                values:
                - blue

If you run kubectl apply here, you get the error below: spec.selector is immutable, and this manifest's selector (run: nginx) does not match the selector the existing blue deployment was created with (app: blue). Delete the existing deployment first, then recreate it with kubectl create -f sample.yaml!

root@controlplane:~# kubectl apply -f sample.yaml 
Warning: resource deployments/blue is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
The Deployment "blue" is invalid: spec.selector: Invalid value: v1.LabelSelector{MatchLabels:map[string]string{"app":"blue", "run":"nginx"}, MatchExpressions:[]v1.LabelSelectorRequirement(nil)}: field is immutable

07. Which nodes are the pods placed on now?

 

Answer: node01

root@controlplane:~# kubectl get pods -o wide
NAME                    READY   STATUS    RESTARTS   AGE     IP           NODE     NOMINATED NODE   READINESS GATES
blue-77978c84c6-bfgtk   1/1     Running   0          3m41s   10.244.1.8   node01   <none>           <none>
blue-77978c84c6-j2ss8   1/1     Running   0          3m41s   10.244.1.7   node01   <none>           <none>
blue-77978c84c6-pc7km   1/1     Running   0          3m41s   10.244.1.6   node01   <none>           <none>

 

08. Create a new deployment named red with the nginx image and 2 replicas,

and ensure it gets placed on the controlplane node only.


Use the label - node-role.kubernetes.io/master - set on the controlplane node.

  • Name: red
  • Replicas: 2
  • Image: nginx
  • NodeAffinity: requiredDuringSchedulingIgnoredDuringExecution
  • Key: node-role.kubernetes.io/master
  • Use the right operator
---
# Wrong answer... the label key belongs under 'key', not 'values',
# and setting nodeName bypasses the scheduler (and thus the affinity rule) entirely.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: red
spec:
  replicas: 2
  selector:
    matchLabels:
      run: nginx
  template:
    metadata:
      labels:
        run: nginx
    spec:
      containers:
      - image: nginx
        imagePullPolicy: Always
        name: nginx
      nodeName: controlplane
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: label
                operator: In
                values:
                - node-role.kubernetes.io/master
# Correct answer... the node-role.kubernetes.io/master label is set with an empty value,
# so match on the key alone with the Exists operator.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: red
spec:
  replicas: 2
  selector:
    matchLabels:
      run: nginx
  template:
    metadata:
      labels:
        run: nginx
    spec:
      containers:
      - image: nginx
        imagePullPolicy: Always
        name: nginx
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: node-role.kubernetes.io/master
                operator: Exists
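
To create the deployment and verify placement (red.yaml is a hypothetical file name for the manifest above):

kubectl create -f red.yaml
kubectl get pods -o wide     # the red-* pods should all land on controlplane
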
# tip
# With the --dry-run=client flag, no resource is actually created in the cluster;
# kubectl only renders the manifest, which is redirected into nginx-pod.yaml

kubectl run red --image=nginx --dry-run=client -o yaml > nginx-pod.yaml
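
Since the lab resource is a Deployment rather than a bare Pod, a closer starting point would be (the output file name is arbitrary):

kubectl create deployment red --image=nginx --replicas=2 --dry-run=client -o yaml > red-deployment.yaml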
 

# Bookmark

https://kubernetes.io/ko/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity/

 

Assign Pods to Nodes using Node Affinity
