Practice Test - Application failure
### 1 ###
$ k get svc,deploy -n alpha
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/mysql ClusterIP 10.43.220.175 <none> 3306/TCP 6m6s
service/web-service NodePort 10.43.220.159 <none> 8080:30081/TCP 6m6s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/webapp-mysql 1/1 1 1 6m6s
$ k get svc mysql -n alpha -oyaml > service.yaml
$ vi service.yaml
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: "2023-06-24T06:23:43Z"
  name: mysql-service  # renamed from mysql
  namespace: alpha
  resourceVersion: "849"
  uid: 76969195-fbc1-4833-800d-4e9d6759f3e7
spec:
  internalTrafficPolicy: Cluster
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - port: 3306
    protocol: TCP
    targetPort: 3306
  selector:
    name: mysql
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
$ k delete svc mysql -n alpha
$ k apply -f service.yaml
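To double-check, the recreated service should now pick up the mysql pod as an endpoint (the pod IP and age below are illustrative):
$ k get ep mysql-service -n alpha
NAME            ENDPOINTS         AGE
mysql-service   10.42.0.12:3306   5s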
### 2 ###
$ k describe svc mysql-service -n beta
Name: mysql-service
Namespace: beta
Labels: <none>
Annotations: <none>
Selector: name=mysql
Type: ClusterIP
IP Family Policy: SingleStack
IP Families: IPv4
IP: 10.43.152.188
IPs: 10.43.152.188
Port: <unset> 3306/TCP
TargetPort: 8080/TCP
Endpoints: 10.42.0.13:8080
Session Affinity: None
Events: <none>
$ k get svc mysql-service -n beta -oyaml > service.yaml
$ vi service.yaml
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: "2023-06-24T06:43:04Z"
  name: mysql-service
  namespace: beta
  resourceVersion: "1385"
  uid: 66fa34e4-8556-4899-b51a-2b0f510d9894
spec:
  internalTrafficPolicy: Cluster
  ipFamilies:
  - IPv4
  ipFamilyPolicy: SingleStack
  ports:
  - port: 3306
    protocol: TCP
    targetPort: 3306  # was 8080
  selector:
    name: mysql
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
$ k delete svc mysql-service -n beta
$ k apply -f service.yaml
$ k get ep -n beta
NAME ENDPOINTS AGE
web-service 10.42.0.14:8080 6m10s
mysql-service 10.42.0.13:3306 14s
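As a side note, deleting and re-applying is not strictly required for a port fix; the targetPort can also be patched in place (an equivalent one-liner, not part of the lab solution):
$ k patch svc mysql-service -n beta --type=json -p='[{"op":"replace","path":"/spec/ports/0/targetPort","value":3306}]'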
### 3 ###
$ k get svc -n gamma
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
mysql-service ClusterIP 10.43.115.113 <none> 3306/TCP 40s
web-service NodePort 10.43.195.147 <none> 8080:30081/TCP 40s
$ k get ep -n gamma
NAME ENDPOINTS AGE
mysql-service <none> 46s
web-service 10.42.0.16:8080 46s
$ k describe svc mysql-service -n gamma
Name: mysql-service
Namespace: gamma
Labels: <none>
Annotations: <none>
Selector: name=sql00001
Type: ClusterIP
IP Family Policy: SingleStack
IP Families: IPv4
IP: 10.43.115.113
IPs: 10.43.115.113
Port: <unset> 3306/TCP
TargetPort: 3306/TCP
Endpoints: <none>
Session Affinity: None
Events: <none>
$ k describe po mysql -n gamma
Name: mysql
Namespace: gamma
Priority: 0
Service Account: default
Node: controlplane/192.8.181.6
Start Time: Sat, 24 Jun 2023 06:50:15 +0000
Labels: name=mysql
...
$ k get svc mysql-service -n gamma -oyaml > service.yaml
$ vi service.yaml
...
  ports:
  - port: 3306
    protocol: TCP
    targetPort: 3306
  selector:
    name: mysql  # was sql00001; must match the pod label
...
$ k delete svc mysql-service -n gamma
$ k apply -f service.yaml
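With the selector now matching the pod label name=mysql, the endpoints list should populate (pod IP and age illustrative):
$ k get ep mysql-service -n gamma
NAME            ENDPOINTS         AGE
mysql-service   10.42.0.15:3306   5s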
### 4 ###
$ k get deploy webapp-mysql -n delta -oyaml > deploy.yaml
$ vi deploy.yaml
...
    spec:
      containers:
      - env:
        - name: DB_Host
          value: mysql-service
        - name: DB_User
          value: root
        - name: DB_Password
          value: paswrd
...
$ k apply -f deploy.yaml
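The same env fix can be made without opening the YAML at all; kubectl set env patches the deployment and triggers a rollout (shown for DB_User as an example):
$ k set env deploy/webapp-mysql -n delta DB_User=root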
### 5 ###
$ k get deploy webapp-mysql -n epsilon -oyaml > deploy.yaml
$ vi deploy.yaml
...
      containers:
      - env:
        - name: DB_Host
          value: mysql-service
        - name: DB_User
          value: root
        - name: DB_Password
          value: paswrd
...
$ k get po mysql -n epsilon -oyaml > pod.yaml
$ vi pod.yaml
...
spec:
  containers:
  - env:
    - name: MYSQL_ROOT_PASSWORD
      value: paswrd
...
$ k apply -f pod.yaml --force
$ k apply -f deploy.yaml
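A pod's env cannot be changed in place, which is why --force is needed: kubectl deletes the pod and recreates it from the file. kubectl replace does the same thing explicitly:
$ k replace -f pod.yaml --force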
### 6 ###
$ k edit po mysql -n zeta
...
spec:
  containers:
  - env:
    - name: MYSQL_ROOT_PASSWORD
      value: paswrd
    image: mysql:5.6
    imagePullPolicy: IfNotPresent
...
# kubectl edit rejects the change (pod env is immutable) and saves it to a temp file, which is then force-applied
$ k apply -f /tmp/xxx.yaml --force
$ k edit deploy webapp-mysql -n zeta
...
    spec:
      containers:
      - env:
        - name: DB_Host
          value: mysql-service
        - name: DB_User
          value: root
        - name: DB_Password
          value: paswrd
...
$ k get svc web-service -n zeta -oyaml > service.yaml
$ vi service.yaml
...
  - nodePort: 30081
    port: 8080
    protocol: TCP
    targetPort: 8080
...
$ k delete svc web-service -n zeta
$ k apply -f service.yaml
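To verify end to end, hit the web service on its NodePort (substitute a real node IP):
$ curl http://<node-ip>:30081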
Practice Test - ControlPlane failure
### 1 ###
$ k get all -A
...
$ k get po -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-5d78c9869d-s5jvq 1/1 Running 0 8m42s
coredns-5d78c9869d-zrvwj 1/1 Running 0 8m43s
etcd-controlplane 1/1 Running 0 8m52s
kube-apiserver-controlplane 1/1 Running 0 8m58s
kube-controller-manager-controlplane 1/1 Running 0 8m52s
kube-proxy-qb29g 1/1 Running 0 8m43s
kube-scheduler-controlplane 0/1 CrashLoopBackOff 3 (16s ago) 61s
$ k describe po kube-scheduler-controlplane -n kube-system
Warning BackOff 25s (x10 over 99s) kubelet Back-off restarting failed container kube-scheduler in pod kube-scheduler-controlplane_kube-system(615fa42cfd7659b933451bebf94c8d18)
Normal Pulled 12s (x5 over 101s) kubelet Container image "registry.k8s.io/kube-scheduler:v1.27.0" already present on machine
Normal Created 12s (x5 over 101s) kubelet Created container kube-scheduler
Warning Failed 12s (x5 over 100s) kubelet Error: failed to create containerd task: failed to create shim task: OCI runtime create failed: runc create failed: unable to start container process: exec: "kube-schedulerrrr": executable file not found in $PATH: unknown
$ cd /etc/kubernetes/manifests/
$ ls
etcd.yaml kube-apiserver.yaml kube-controller-manager.yaml kube-scheduler.yaml
$ cat kube-scheduler.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-scheduler
    tier: control-plane
  name: kube-scheduler
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-schedulerrrr
    - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf
    - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf
    - --bind-address=127.0.0.1
    - --kubeconfig=/etc/kubernetes/scheduler.conf
    - --leader-elect=true
    image: registry.k8s.io/kube-scheduler:v1.27.0
...
$ vi kube-scheduler.yaml
...
spec:
  containers:
  - command:
    - kube-scheduler
    - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf
...
$ k get po -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-5d78c9869d-s5jvq 1/1 Running 0 12m
coredns-5d78c9869d-zrvwj 1/1 Running 0 12m
etcd-controlplane 1/1 Running 0 12m
kube-apiserver-controlplane 1/1 Running 0 12m
kube-controller-manager-controlplane 1/1 Running 0 12m
kube-proxy-qb29g 1/1 Running 0 12m
kube-scheduler-controlplane 0/1 CrashLoopBackOff 5 (107s ago) 4m45s
$ k delete po kube-scheduler-controlplane -n kube-system
pod "kube-scheduler-controlplane" deleted
### 2, 3 ###
$ k get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
app 1/1 1 1 7m9s
$ kubectl scale deploy app --replicas=2
deployment.apps/app scaled
$ k get po
NAME READY STATUS RESTARTS AGE
app-55d5fc5fcc-hxvlx 1/1 Running 0 9m24s
$ k get all -A
NAMESPACE NAME READY STATUS RESTARTS AGE
default pod/app-55d5fc5fcc-hxvlx 1/1 Running 0 9m51s
kube-flannel pod/kube-flannel-ds-f6kdb 1/1 Running 0 17m
kube-system pod/coredns-5d78c9869d-s5jvq 1/1 Running 0 17m
kube-system pod/coredns-5d78c9869d-zrvwj 1/1 Running 0 17m
kube-system pod/etcd-controlplane 1/1 Running 0 17m
kube-system pod/kube-apiserver-controlplane 1/1 Running 0 17m
kube-system pod/kube-controller-manager-controlplane 0/1 Error 5 (87s ago) 3m5s
kube-system pod/kube-proxy-qb29g 1/1 Running 0 17m
kube-system pod/kube-scheduler-controlplane 1/1 Running 0 4m59s
...
$ k logs kube-controller-manager-controlplane -n kube-system
I0624 07:22:35.167271 1 serving.go:348] Generated self-signed cert in-memory
E0624 07:22:35.167536 1 run.go:74] "command failed" err="stat /etc/kubernetes/controller-manager-XXXX.conf: no such file or directory"
$ cd /etc/kubernetes/manifests
$ cat kube-controller-manager.yaml | grep XXX
    - --kubeconfig=/etc/kubernetes/controller-manager-XXXX.conf
$ ls /etc/kubernetes/
admin.conf controller-manager.conf kubelet.conf manifests pki scheduler.conf
$ vi kube-controller-manager.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-controller-manager
    tier: control-plane
  name: kube-controller-manager
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-controller-manager
    - --allocate-node-cidrs=true
    - --authentication-kubeconfig=/etc/kubernetes/controller-manager.conf
    - --authorization-kubeconfig=/etc/kubernetes/controller-manager.conf
    - --bind-address=127.0.0.1
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --cluster-cidr=10.244.0.0/16
    - --cluster-name=kubernetes
    - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
    - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
    - --controllers=*,bootstrapsigner,tokencleaner
    - --kubeconfig=/etc/kubernetes/controller-manager.conf  # was controller-manager-XXXX.conf
...
$ k get po
NAME READY STATUS RESTARTS AGE
app-55d5fc5fcc-hxvlx 1/1 Running 0 14m
app-55d5fc5fcc-mr6fs 1/1 Running 0 8s
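The earlier scale command only took effect once the controller manager recovered: Deployment/ReplicaSet reconciliation runs inside kube-controller-manager, so no new pod could be created while it was down. The rollout can be watched with:
$ k get deploy app -w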
### 4 ###
$ k get po -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-5d78c9869d-s5jvq 1/1 Running 0 23m
coredns-5d78c9869d-zrvwj 1/1 Running 0 23m
etcd-controlplane 1/1 Running 0 24m
kube-apiserver-controlplane 1/1 Running 0 24m
kube-controller-manager-controlplane 0/1 CrashLoopBackOff 3 (8s ago) 66s
kube-proxy-qb29g 1/1 Running 0 23m
kube-scheduler-controlplane 1/1 Running 0 11m
$ k logs kube-controller-manager-controlplane -n kube-system
I0624 07:31:05.146466 1 serving.go:348] Generated self-signed cert in-memory
E0624 07:31:05.629807 1 run.go:74] "command failed" err="unable to load client CA provider: open /etc/kubernetes/pki/ca.crt: no such file or directory"
$ cd /etc/kubernetes/manifests
$ cat kube-controller-manager.yaml | grep -i crt
    - --client-ca-file=/etc/kubernetes/pki/apiserver.crt
    - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --root-ca-file=/etc/kubernetes/pki/ca.crt
$ cat kube-controller-manager.yaml | grep -i pki
    - --client-ca-file=/etc/kubernetes/pki/apiserver.crt
    - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
    - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --root-ca-file=/etc/kubernetes/pki/ca.crt
    - --service-account-private-key-file=/etc/kubernetes/pki/sa.key
    - mountPath: /etc/kubernetes/pki
      path: /etc/kubernetes/WRONG-PKI-DIRECTORY
$ vi kube-controller-manager.yaml
...
  - hostPath:
      path: /etc/kubernetes/pki  # was /etc/kubernetes/WRONG-PKI-DIRECTORY
      type: DirectoryOrCreate
    name: k8s-certs
...
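Once the hostPath points at the real certificate directory, the static pod comes back on its own (restart count and age illustrative):
$ k get po kube-controller-manager-controlplane -n kube-system
NAME                                   READY   STATUS    RESTARTS      AGE
kube-controller-manager-controlplane   1/1     Running   5 (40s ago)   9m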
Practice Test - Worker Node failure
### 1 ###
$ k get no
$ ssh node01
# node01
$ ps -ef | grep kubelet
root 4895 4827 0 03:55 pts/0 00:00:00 grep kubelet
$ service kubelet start
$ ps -ef | grep kubelet
root 5048 1 0 03:56 ? 00:00:00 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime-endpoint=unix:///var/run/containerd/containerd.sock --pod-infra-container-image=registry.k8s.io/pause:3.9
root 5186 4827 0 03:56 pts/0 00:00:00 grep kubelet
$ exit
# control plane
$ k get no
NAME STATUS ROLES AGE VERSION
controlplane Ready control-plane 14m v1.27.0
node01 NotReady <none> 14m v1.27.0
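Right after the kubelet starts, the node can still show NotReady for a few seconds while it re-registers; watching it should show the flip to Ready (output illustrative):
$ k get no -w
NAME     STATUS   ROLES    AGE   VERSION
node01   Ready    <none>   15m   v1.27.0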
### 2 ###
$ k get no
$ ssh node01
# node01
$ ps -ef | grep kubelet
$ journalctl -u kubelet -f
Jun 24 04:24:52 node01 kubelet[5747]: E0624 04:24:52.663591 5747 run.go:74] "command failed" err="failed to construct kubelet dependencies: unable to load client CA file /etc/kubernetes/pki/WRONG-CA-FILE.crt: open /etc/kubernetes/pki/WRONG-CA-FILE.crt: no such file or directory"
$ cd /etc/kubernetes
$ cat kubelet.conf
...
- name: default-auth
  user:
    client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem
    client-key: /var/lib/kubelet/pki/kubelet-client-current.pem
...
$ cd /var/lib/kubelet
$ cat config.yaml
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/WRONG-CA-FILE.crt
...
$ ls /etc/kubernetes/pki/
ca.crt
$ vi config.yaml
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt  # was WRONG-CA-FILE.crt
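After saving the config, restart the kubelet so it re-reads the file (systemd would eventually retry on its own, but the restart is immediate):
$ service kubelet restart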
$ ps -ef | grep kubelet
root 10752 1 0 04:08 ? 00:00:00 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime-endpoint=unix:///var/run/containerd/containerd.sock --pod-infra-container-image=registry.k8s.io/pause:3.9
root 11422 9656 0 04:08 pts/0 00:00:00 grep kubelet
### 3 ###
$ k get no
$ ssh node01
# node01
$ journalctl -u kubelet -f
...
Jun 24 04:14:22 node01 kubelet[12603]: E0624 04:14:22.042937 12603 kubelet_node_status.go:92] "Unable to register node with API server" err="Post \"https://controlplane:6553/api/v1/nodes\": dial tcp 192.10.191.8:6553: connect: connection refused" node="node01"
...
$ ps -ef | grep kubelet
root 12603 1 0 04:12 ? 00:00:11 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime-endpoint=unix:///var/run/containerd/containerd.sock --pod-infra-container-image=registry.k8s.io/pause:3.9
root 13889 13200 0 04:17 pts/0 00:00:00 grep kubelet
$ cat /etc/kubernetes/kubelet.conf | grep 6553
server: https://controlplane:6553
$ vi /etc/kubernetes/kubelet.conf
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0t... (snipped)
    server: https://controlplane:6443  # was 6553
  name: default-cluster
...
$ service kubelet restart
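With the port corrected to 6443, the node registers again; back on the control plane (ages illustrative):
$ k get no
NAME           STATUS   ROLES           AGE   VERSION
controlplane   Ready    control-plane   30m   v1.27.0
node01         Ready    <none>          30m   v1.27.0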
Practice Test - Troubleshoot Network
### 1 ###
$ k get po -n triton
NAME READY STATUS RESTARTS AGE
mysql 0/1 ContainerCreating 0 39s
webapp-mysql-898f7cd74-vxb4p 0/1 ContainerCreating 0 38s
$ k get po -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-5d78c9869d-sxkwf 1/1 Running 0 27m
kube-system coredns-5d78c9869d-xhvd5 1/1 Running 0 27m
kube-system etcd-controlplane 1/1 Running 0 27m
kube-system kube-apiserver-controlplane 1/1 Running 0 27m
kube-system kube-controller-manager-controlplane 1/1 Running 0 27m
kube-system kube-proxy-hdb9d 1/1 Running 0 27m
kube-system kube-scheduler-controlplane 1/1 Running 0 27m
triton mysql 0/1 ContainerCreating 0 66s
triton webapp-mysql-898f7cd74-vxb4p 0/1 ContainerCreating 0 65s
$ kubectl apply -f https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s.yaml
$ k get po -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-5d78c9869d-sxkwf 1/1 Running 0 29m
kube-system coredns-5d78c9869d-xhvd5 1/1 Running 0 29m
kube-system etcd-controlplane 1/1 Running 0 29m
kube-system kube-apiserver-controlplane 1/1 Running 0 29m
kube-system kube-controller-manager-controlplane 1/1 Running 0 29m
kube-system kube-proxy-hdb9d 1/1 Running 0 29m
kube-system kube-scheduler-controlplane 1/1 Running 0 29m
kube-system weave-net-vnvcm 2/2 Running 0 24s
triton mysql 1/1 Running 0 3m8s
triton webapp-mysql-898f7cd74-vxb4p 1/1 Running 0 3m7s
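Pods stuck in ContainerCreating with no CNI pods in kube-system was the tell here: without a network plugin the kubelet cannot set up pod sandboxes. After installing weave, a CNI config appears on the node (filename may vary by version):
$ ls /etc/cni/net.d/
10-weave.conflist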
### 2 ###
$ k get po -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-5d78c9869d-sxkwf 1/1 Running 0 30m
kube-system coredns-5d78c9869d-xhvd5 1/1 Running 0 30m
kube-system etcd-controlplane 1/1 Running 0 30m
kube-system kube-apiserver-controlplane 1/1 Running 0 30m
kube-system kube-controller-manager-controlplane 1/1 Running 0 30m
kube-system kube-proxy-lb2ml 0/1 CrashLoopBackOff 1 (6s ago) 10s
kube-system kube-scheduler-controlplane 1/1 Running 0 30m
kube-system weave-net-vnvcm 2/2 Running 0 55s
triton mysql 1/1 Running 0 9s
triton webapp-mysql-898f7cd74-928wd 1/1 Running 0 8s
triton webapp-mysql-898f7cd74-vxb4p 1/1 Terminating 0 3m38s
$ k logs kube-proxy-lb2ml -n kube-system
E0624 13:26:36.637568 1 run.go:74] "command failed" err="failed complete: open /var/lib/kube-proxy/configuration.conf: no such file or directory"
$ k get ds kube-proxy -n kube-system -oyaml
...
    spec:
      containers:
      - command:
        - /usr/local/bin/kube-proxy
        - --config=/var/lib/kube-proxy/configuration.conf
        - --hostname-override=$(NODE_NAME)
...
        volumeMounts:
        - mountPath: /var/lib/kube-proxy
          name: kube-proxy
        - mountPath: /run/xtables.lock
          name: xtables-lock
        - mountPath: /lib/modules
          name: lib-modules
          readOnly: true
...
      volumes:
      - configMap:
          defaultMode: 420
          name: kube-proxy
        name: kube-proxy
      - hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
        name: xtables-lock
      - hostPath:
          path: /lib/modules
          type: ""
        name: lib-modules
...
$ k get cm -n kube-system
NAME DATA AGE
coredns 1 34m
extension-apiserver-authentication 6 34m
kube-apiserver-legacy-service-account-token-tracking 1 34m
kube-proxy 2 34m
kube-root-ca.crt 1 33m
kubeadm-config 1 34m
kubelet-config 1 34m
weave-net 0 33m
$ k describe cm kube-proxy -n kube-system
Name: kube-proxy
Namespace: kube-system
Labels: app=kube-proxy
Annotations: kubeadm.kubernetes.io/component-config.hash: sha256:c15650807a67e3988e859d6c4e9d56e3a39f279034149529187be619e5647ea0
Data
====
config.conf:
----
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
bindAddressHardFail: false
clientConnection:
  acceptContentTypes: ""
  burst: 0
  contentType: ""
  kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
...
$ k edit ds kube-proxy -n kube-system
...
    spec:
      containers:
      - command:
        - /usr/local/bin/kube-proxy
        - --config=/var/lib/kube-proxy/config.conf  # was configuration.conf
        - --hostname-override=$(NODE_NAME)
...
$ k get po -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-5d78c9869d-sxkwf 1/1 Running 0 36m
kube-system coredns-5d78c9869d-xhvd5 1/1 Running 0 36m
kube-system etcd-controlplane 1/1 Running 0 36m
kube-system kube-apiserver-controlplane 1/1 Running 0 36m
kube-system kube-controller-manager-controlplane 1/1 Running 0 36m
kube-system kube-proxy-s6qrc 1/1 Running 0 50s
kube-system kube-scheduler-controlplane 1/1 Running 0 36m
kube-system weave-net-vnvcm 2/2 Running 0 7m22s
triton mysql 1/1 Running 0 6m36s
triton webapp-mysql-898f7cd74-928wd 1/1 Running 0 6m35s
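Editing the DaemonSet rolled out a replacement kube-proxy pod (note the new pod name); the rollout can be confirmed with:
$ k rollout status ds/kube-proxy -n kube-system
daemon set "kube-proxy" successfully rolled out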