Hi guys
I have a problem with the CoreDNS service in my Kubernetes cluster.
All pods running on the cluster get an APIPA (link-local) address as their DNS nameserver:
Name server 169.254.25.10
I would appreciate any help
mahmoudi@master1:~$ sudo kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
master1 Ready control-plane 414d v1.26.2 192.168.33.101 <none> Ubuntu 20.04.6 LTS 5.4.0-182-generic containerd://1.6.19
master2 Ready control-plane 414d v1.26.2 192.168.33.102 <none> Ubuntu 20.04.6 LTS 5.4.0-176-generic containerd://1.6.19
master3 NotReady,SchedulingDisabled control-plane 414d v1.26.2 192.168.33.103 <none> Ubuntu 20.04.6 LTS 5.4.0-176-generic containerd://1.6.19
worker1 Ready <none> 414d v1.26.2 192.168.33.111 <none> Ubuntu 20.04.6 LTS 5.4.0-167-generic containerd://1.6.19
worker2 Ready <none> 414d v1.26.2 192.168.33.112 <none> Ubuntu 20.04.6 LTS 5.4.0-182-generic containerd://1.6.19
worker3 Ready <none> 414d v1.26.2 192.168.33.113 <none> Ubuntu 20.04.6 LTS 5.4.0-182-generic containerd://1.6.19
worker4 NotReady,SchedulingDisabled <none> 235d v1.26.2 192.168.33.114 <none> Ubuntu 20.04.5 LTS 5.4.0-167-generic containerd://1.6.19
worker5 Ready <none> 235d v1.26.2 192.168.33.115 <none> Ubuntu 20.04.5 LTS 5.4.0-165-generic containerd://1.6.19
worker6 Ready,SchedulingDisabled <none> 235d v1.26.2 192.168.33.116 <none> Ubuntu 20.04.5 LTS 5.4.0-167-generic containerd://1.6.19
--------------------------------------------------------------------------------------------------------------------------------------------------------------------
mahmoudi@master1:~$ sudo kubectl get pods -n kube-system | grep coredns
coredns-5b85949d7b-k6jpj 0/1 CrashLoopBackOff 259 (81s ago) 21h
coredns-6666b4f8bc-7f5ww 1/1 Running 0 23h
coredns-6666b4f8bc-n9q26 1/1 Running 0 23h
---------------------------------------------------------------------------------------------------------------------------------------------------------------
exec kubectl exec -i -t -n kube-system coredns-6666b4f8bc-7f5ww -c coredns -- sh -c "clear; (bash || ash || sh)"
error: Internal error occurred: error executing command in container: failed to exec in container: failed to start exec "8c0e56eb386d6d8eacd42a10595be2fd065cacb20d9966bb2940179294433b3f": OCI runtime exec failed: exec failed: unable to start container process: exec: "sh": executable file not found in $PATH: unknown
Terminal will auto-close in 15 seconds ...
------------------------------------------------------------------------------------------------------------------------------------------------------------------
argocd@argocd-server-6f7dd665d9-qvmtx:~$ cat /etc/resolv.conf
search argocd.svc.cluster.local svc.cluster.local cluster.local eniac-bank.local
nameserver 169.254.25.10
options ndots:5
------------------------------------------------------------------------------------------------------------------------------------------------------------------
CoreDNS ConfigMap:
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
uid: 47c9d51b-5ce1-43d7-9a47-627f72510b37
resourceVersion: '147723944'
creationTimestamp: '2023-04-11T07:09:05Z'
labels:
addonmanager.kubernetes.io/mode: EnsureExists
k8slens-edit-resource-version: v1
annotations:
kubectl.kubernetes.io/last-applied-configuration: >
{"apiVersion":"v1","data":{"Corefile":".:53 {\n errors\n health
{\n lameduck 5s\n }\n ready\n kubernetes cluster.local
in-addr.arpa ip6.arpa {\n pods insecure\n fallthrough
in-addr.arpa ip6.arpa\n ttl 30\n }\n prometheus :9153\n
forward . /etc/resolve.conf {\n # prefer_udp\n #
max_concurrent 1000\n }\n cache 30\n loop\n reload\n
loadbalance\n}\n"},"kind":"ConfigMap","metadata":{"annotations":{},"creationTimestamp":"2023-04-11T07:09:05Z","labels":{"addonmanager.kubernetes.io/mode":"EnsureExists"},"name":"coredns","namespace":"kube-system","resourceVersion":"147603749","uid":"47c9d51b-5ce1-43d7-9a47-627f72510b37"}}
managedFields:
- manager: kubectl-client-side-apply
operation: Update
apiVersion: v1
time: '2024-05-28T05:01:38Z'
fieldsType: FieldsV1
fieldsV1:
f:data: {}
f:metadata:
f:annotations:
.: {}
f:kubectl.kubernetes.io/last-applied-configuration: {}
f:labels:
.: {}
f:addonmanager.kubernetes.io/mode: {}
- manager: node-fetch
operation: Update
apiVersion: v1
time: '2024-05-28T14:14:29Z'
fieldsType: FieldsV1
fieldsV1:
f:data:
f:Corefile: {}
f:metadata:
f:labels:
f:k8slens-edit-resource-version: {}
selfLink: /api/v1/namespaces/kube-system/configmaps/coredns
data:
Corefile: |
.:53 {
debug
health
ready
kubernetes cluster.local in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . 192.168.33.101{
cache 30
loop
reload
loadbalance
}
--------------------------------------------------------------------------------------------------------------------------------------------
CoreDNS Deployment:
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
uid: cac9bf78-6fc2-466d-82f6-7878a0c3dbd4
resourceVersion: '147726201'
generation: 32
creationTimestamp: '2023-04-11T07:09:06Z'
labels:
addonmanager.kubernetes.io/mode: Reconcile
k8s-app: kube-dns
k8slens-edit-resource-version: v1
kubernetes.io/name: coredns
annotations:
deployment.kubernetes.io/revision: '29'
kubectl.kubernetes.io/last-applied-configuration: >
{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"addonmanager.kubernetes.io/mode":"Reconcile","k8s-app":"kube-dns","kubernetes.io/name":"coredns"},"name":"coredns","namespace":"kube-system"},"spec":{"selector":{"matchLabels":{"k8s-app":"kube-dns"}},"strategy":{"rollingUpdate":{"maxSurge":"10%","maxUnavailable":0},"type":"RollingUpdate"},"template":{"metadata":{"annotations":{"createdby":"kubespray","seccomp.security.alpha.kubernetes.io/pod":"runtime/default"},"labels":{"k8s-app":"kube-dns"}},"spec":{"affinity":{"nodeAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"preference":{"matchExpressions":[{"key":"node-role.kubernetes.io/control-plane","operator":"In","values":[""]}]},"weight":100}]},"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchLabels":{"k8s-app":"kube-dns"}},"topologyKey":"kubernetes.io/hostname"}]}},"containers":[{"args":["-conf","/etc/coredns/Corefile"],"image":"registry.k8s.io/coredns/coredns:v1.9.3","imagePullPolicy":"IfNotPresent","livenessProbe":{"failureThreshold":10,"httpGet":{"path":"/health","port":8080,"scheme":"HTTP"},"successThreshold":1,"timeoutSeconds":5},"name":"coredns","ports":[{"containerPort":53,"name":"dns","protocol":"UDP"},{"containerPort":53,"name":"dns-tcp","protocol":"TCP"},{"containerPort":9153,"name":"metrics","protocol":"TCP"}],"readinessProbe":{"failureThreshold":10,"httpGet":{"path":"/ready","port":8181,"scheme":"HTTP"},"successThreshold":1,"timeoutSeconds":5},"resources":{"limits":{"memory":"300Mi"},"requests":{"cpu":"100m","memory":"70Mi"}},"securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["all"]},"readOnlyRootFilesystem":true},"volumeMounts":[{"mountPath":"/etc/coredns","name":"config-volume"}]}],"dnsPolicy":"Default","nodeSelector":{"kubernetes.io/os":"linux"},"priorityClassName":"system-cluster-critical","serviceAccountName":"coredns","tolerations":[{"effect":"NoSchedule","k
ey":"node-role.kubernetes.io/master"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/control-plane"}],"volumes":[{"configMap":{"items":[{"key":"Corefile","path":"Corefile"}],"name":"coredns"},"name":"config-volume"}]}}}}
managedFields:
- manager: cluster-proportional-autoscaler
operation: Update
apiVersion: apps/v1
fieldsType: FieldsV1
fieldsV1:
f:spec:
f:replicas: {}
subresource: scale
- manager: kubectl-client-side-apply
operation: Update
apiVersion: apps/v1
time: '2023-04-11T07:09:06Z'
fieldsType: FieldsV1
fieldsV1:
f:metadata:
f:annotations: {}
f:labels:
.: {}
f:addonmanager.kubernetes.io/mode: {}
f:k8s-app: {}
f:kubernetes.io/name: {}
f:spec:
f:progressDeadlineSeconds: {}
f:revisionHistoryLimit: {}
f:selector: {}
f:strategy:
f:rollingUpdate:
.: {}
f:maxSurge: {}
f:maxUnavailable: {}
f:type: {}
f:template:
f:metadata:
f:annotations:
.: {}
f:createdby: {}
f:seccomp.security.alpha.kubernetes.io/pod: {}
f:labels:
.: {}
f:k8s-app: {}
f:spec:
f:affinity:
.: {}
f:nodeAffinity:
.: {}
f:preferredDuringSchedulingIgnoredDuringExecution: {}
f:podAntiAffinity:
.: {}
f:requiredDuringSchedulingIgnoredDuringExecution: {}
f:containers:
k:{"name":"coredns"}:
.: {}
f:args: {}
f:imagePullPolicy: {}
f:livenessProbe:
.: {}
f:failureThreshold: {}
f:httpGet:
.: {}
f:path: {}
f:port: {}
f:scheme: {}
f:periodSeconds: {}
f:successThreshold: {}
f:timeoutSeconds: {}
f:name: {}
f:ports:
.: {}
k:{"containerPort":53,"protocol":"TCP"}:
.: {}
f:containerPort: {}
f:name: {}
f:protocol: {}
k:{"containerPort":53,"protocol":"UDP"}:
.: {}
f:containerPort: {}
f:name: {}
f:protocol: {}
k:{"containerPort":9153,"protocol":"TCP"}:
.: {}
f:containerPort: {}
f:name: {}
f:protocol: {}
f:readinessProbe:
.: {}
f:failureThreshold: {}
f:httpGet:
.: {}
f:path: {}
f:port: {}
f:scheme: {}
f:periodSeconds: {}
f:successThreshold: {}
f:timeoutSeconds: {}
f:resources:
.: {}
f:limits:
.: {}
f:memory: {}
f:requests:
.: {}
f:cpu: {}
f:memory: {}
f:securityContext:
.: {}
f:allowPrivilegeEscalation: {}
f:capabilities:
.: {}
f:add: {}
f:drop: {}
f:readOnlyRootFilesystem: {}
f:terminationMessagePath: {}
f:terminationMessagePolicy: {}
f:volumeMounts:
.: {}
k:{"mountPath":"/etc/coredns"}:
.: {}
f:mountPath: {}
f:name: {}
f:dnsPolicy: {}
f:nodeSelector: {}
f:priorityClassName: {}
f:restartPolicy: {}
f:schedulerName: {}
f:securityContext: {}
f:serviceAccount: {}
f:serviceAccountName: {}
f:terminationGracePeriodSeconds: {}
f:tolerations: {}
f:volumes:
.: {}
k:{"name":"config-volume"}:
.: {}
f:configMap:
.: {}
f:defaultMode: {}
f:items: {}
f:name: {}
f:name: {}
- manager: node-fetch
operation: Update
apiVersion: apps/v1
time: '2024-05-28T14:14:41Z'
fieldsType: FieldsV1
fieldsV1:
f:metadata:
f:annotations:
f:kubectl.kubernetes.io/last-applied-configuration: {}
f:labels:
f:k8slens-edit-resource-version: {}
f:spec:
f:template:
f:metadata:
f:annotations:
f:kubectl.kubernetes.io/restartedAt: {}
f:spec:
f:containers:
k:{"name":"coredns"}:
f:image: {}
- manager: kube-controller-manager
operation: Update
apiVersion: apps/v1
time: '2024-05-28T14:24:42Z'
fieldsType: FieldsV1
fieldsV1:
f:metadata:
f:annotations:
f:deployment.kubernetes.io/revision: {}
f:status:
f:availableReplicas: {}
f:conditions:
.: {}
k:{"type":"Available"}:
.: {}
f:lastTransitionTime: {}
f:lastUpdateTime: {}
f:message: {}
f:reason: {}
f:status: {}
f:type: {}
k:{"type":"Progressing"}:
.: {}
f:lastTransitionTime: {}
f:lastUpdateTime: {}
f:message: {}
f:reason: {}
f:status: {}
f:type: {}
f:observedGeneration: {}
f:readyReplicas: {}
f:replicas: {}
f:unavailableReplicas: {}
f:updatedReplicas: {}
subresource: status
selfLink: /apis/apps/v1/namespaces/kube-system/deployments/coredns
status:
observedGeneration: 32
replicas: 3
updatedReplicas: 1
readyReplicas: 2
availableReplicas: 2
unavailableReplicas: 1
conditions:
- type: Available
status: 'True'
lastUpdateTime: '2024-05-28T04:08:18Z'
lastTransitionTime: '2024-05-28T04:08:18Z'
reason: MinimumReplicasAvailable
message: Deployment has minimum availability.
- type: Progressing
status: 'False'
lastUpdateTime: '2024-05-28T14:24:42Z'
lastTransitionTime: '2024-05-28T14:24:42Z'
reason: ProgressDeadlineExceeded
message: ReplicaSet "coredns-5b85949d7b" has timed out progressing.
spec:
replicas: 2
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
creationTimestamp: null
labels:
k8s-app: kube-dns
annotations:
createdby: kubespray
kubectl.kubernetes.io/restartedAt: '2024-05-28T14:14:41Z'
seccomp.security.alpha.kubernetes.io/pod: runtime/default
spec:
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
defaultMode: 420
containers:
- name: coredns
image: registry.k8s.io/coredns/coredns:v1.9.3
args:
- '-conf'
- /etc/coredns/Corefile
ports:
- name: dns
containerPort: 53
protocol: UDP
- name: dns-tcp
containerPort: 53
protocol: TCP
- name: metrics
containerPort: 9153
protocol: TCP
resources:
limits:
memory: 300Mi
requests:
cpu: 100m
memory: 70Mi
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
timeoutSeconds: 5
periodSeconds: 10
successThreshold: 1
failureThreshold: 10
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
timeoutSeconds: 5
periodSeconds: 10
successThreshold: 1
failureThreshold: 10
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
imagePullPolicy: IfNotPresent
securityContext:
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
restartPolicy: Always
terminationGracePeriodSeconds: 30
dnsPolicy: Default
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: coredns
serviceAccount: coredns
securityContext: {}
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: In
values:
- ''
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: kube-dns
topologyKey: kubernetes.io/hostname
schedulerName: default-scheduler
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
priorityClassName: system-cluster-critical
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 0
maxSurge: 10%
revisionHistoryLimit: 10
progressDeadlineSeconds: 600
----------------------------------------------------------------------------------------------------------------------------------------------------------
describe pod coredns
mahmoudi@master1:~$ sudo kubectl get pods -n kube-system | grep coredns
coredns-598d4968c-95g2v 0/1 CrashLoopBackOff 4 (41s ago) 2m34s
coredns-6666b4f8bc-7f5ww 1/1 Running 0 23h
coredns-6666b4f8bc-n9q26 1/1 Running 0 23h
mahmoudi@master1:~$ sudo kubectl describe pod coredns-6666b4f8bc-7f5ww -n kube-system
Name: coredns-6666b4f8bc-7f5ww
Namespace: kube-system
Priority: 2000000000
Priority Class Name: system-cluster-critical
Service Account: coredns
Node: worker2/192.168.33.112
Start Time: Tue, 28 May 2024 12:35:16 +0000
Labels: k8s-app=kube-dns
pod-template-hash=6666b4f8bc
Annotations: cni.projectcalico.org/containerID: beb809e1ea810a64279ba20b5d7d3940273eaabf45bb7158a7274f46bd58b423
cni.projectcalico.org/podIP: 10.233.125.48/32
cni.projectcalico.org/podIPs: 10.233.125.48/32
createdby: kubespray
kubectl.kubernetes.io/restartedAt: 2024-05-28T12:35:09Z
seccomp.security.alpha.kubernetes.io/pod: runtime/default
Status: Running
IP: 10.233.125.48
IPs:
IP: 10.233.125.48
Controlled By: ReplicaSet/coredns-6666b4f8bc
Containers:
coredns:
Container ID: containerd://5c0f3429db9cd59d3bfeda7254bfcafacb57124995735a230a5be189621aad1e
Image: registry.k8s.io/coredns/coredns:v1.9.3
Image ID: registry.aliyuncs.com/google_containers/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a
Ports: 53/UDP, 53/TCP, 9153/TCP
Host Ports: 0/UDP, 0/TCP, 0/TCP
Args:
-conf
/etc/coredns/Corefile
State: Running
Started: Tue, 28 May 2024 12:35:17 +0000
Ready: True
Restart Count: 0
Limits:
memory: 300Mi
Requests:
cpu: 100m
memory: 70Mi
Liveness: http-get http://:8080/health delay=0s timeout=5s period=10s #success=1 #failure=10
Readiness: http-get http://:8181/ready delay=0s timeout=5s period=10s #success=1 #failure=10
Environment: <none>
Mounts:
/etc/coredns from config-volume (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-nmz2w (ro)
Conditions:
Type Status
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
config-volume:
Type: ConfigMap (a volume populated by a ConfigMap)
Name: coredns
Optional: false
kube-api-access-nmz2w:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
QoS Class: Burstable
Node-Selectors: kubernetes.io/os=linux
Tolerations: node-role.kubernetes.io/control-plane:NoSchedule
node-role.kubernetes.io/master:NoSchedule
node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events: <none>
mahmoudi@master1:~$
---------------------------------------------------------------------------------------------------------------------------------------------------------
nodelocaldns logs
[ERROR] plugin/errors: 2 metadata.google.internal. AAAA: read udp 192.168.36.113:47728->192.168.36.251:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. AAAA: read udp 192.168.36.113:46556->192.168.36.251:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. A: read udp 192.168.36.113:59384->192.168.36.251:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. A: read udp 192.168.36.113:51347->192.168.36.250:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. AAAA: read udp 192.168.36.113:41112->192.168.36.250:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. AAAA: read udp 192.168.36.113:45300->192.168.36.250:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. A: read udp 192.168.36.113:47077->192.168.36.251:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. AAAA: read udp 192.168.36.113:40354->192.168.36.251:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. AAAA: read udp 192.168.36.113:54457->192.168.36.251:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. AAAA: read udp 192.168.36.113:41556->192.168.36.250:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. A: read udp 169.254.25.10:55839->169.254.25.10:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. AAAA: read udp 192.168.36.113:60663->192.168.36.251:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. A: read udp 192.168.36.113:55421->192.168.36.251:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. A: read udp 192.168.36.113:45406->192.168.36.251:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. A: read udp 192.168.36.113:35593->192.168.36.250:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. AAAA: read udp 169.254.25.10:49137->169.254.25.10:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. A: read udp 169.254.25.10:49108->169.254.25.10:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. AAAA: read udp 192.168.36.113:43127->192.168.36.251:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. AAAA: read udp 169.254.25.10:48515->169.254.25.10:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. AAAA: read udp 169.254.25.10:42813->169.254.25.10:53: i/o timeout
[ERROR] plugin/errors: 2 metadata.google.internal. A: read udp 192.168.36.113:45496->192.168.36.250:53: i/o timeout
[ERROR]