In a MicroK8s cluster of 3 nodes, if I shut down one node, the pods move to another node after a minimum of 50 seconds. Is this time correct?

Hi all,
We created a cluster of 3 nodes, but when we shut down a node, we noticed that the pods move to one of the two remaining nodes only after at least 50 seconds. Are these times correct? Or did we misconfigure something? Or is there a way to speed up the failover? Below are the conf files: kube-apiserver, kube-controller-manager and kubelet.
Any help is appreciated. Thank you

[root@poanalyticsiaiopvm05 ~]# cluster_nodes
NAME STATUS ROLES AGE VERSION
poanalyticsiaiopvm04 Ready 104d v1.25.6
poanalyticsiaiopvm03 Ready 109d v1.25.6
poanalyticsiaiopvm05 Ready 104d v1.25.6
[root@poanalyticsiaiopvm05 ~]#

[root@poanalyticsiaiopvm03 args]# cat kube-apiserver
--cert-dir=${SNAP_DATA}/certs
--service-cluster-ip-range=10.152.183.0/24
--authorization-mode=AlwaysAllow
--service-account-key-file=${SNAP_DATA}/certs/serviceaccount.key
--client-ca-file=${SNAP_DATA}/certs/ca.crt
--tls-cert-file=${SNAP_DATA}/certs/server.crt
--tls-private-key-file=${SNAP_DATA}/certs/server.key
--kubelet-client-certificate=${SNAP_DATA}/certs/server.crt
--kubelet-client-key=${SNAP_DATA}/certs/server.key
--secure-port=16443
--token-auth-file=${SNAP_DATA}/credentials/known_tokens.csv
--etcd-servers="unix://${SNAP_DATA}/var/kubernetes/backend/kine.sock:12379"
--allow-privileged=true
--service-account-issuer=https://kubernetes.default.svc
--service-account-signing-key-file=${SNAP_DATA}/certs/serviceaccount.key
--event-ttl=1m
--default-not-ready-toleration-seconds=10 # set to 10
--default-unreachable-toleration-seconds=10 # set to 10
--profiling=false
--requestheader-client-ca-file=${SNAP_DATA}/certs/front-proxy-ca.crt
--requestheader-allowed-names=front-proxy-client
--requestheader-extra-headers-prefix=X-Remote-Extra-
--requestheader-group-headers=X-Remote-Group
--requestheader-username-headers=X-Remote-User
--proxy-client-cert-file=${SNAP_DATA}/certs/front-proxy-client.crt
--proxy-client-key-file=${SNAP_DATA}/certs/front-proxy-client.key

[root@poanalyticsiaiopvm03 args]# cat kube-controller-manager
--kubeconfig=${SNAP_DATA}/credentials/controller.config
--service-account-private-key-file=${SNAP_DATA}/certs/serviceaccount.key
--root-ca-file=${SNAP_DATA}/certs/ca.crt
--cluster-signing-cert-file=${SNAP_DATA}/certs/ca.crt
--cluster-signing-key-file=${SNAP_DATA}/certs/ca.key
--use-service-account-credentials
--leader-elect-lease-duration=40s
--leader-elect-renew-deadline=30s
--pod-eviction-timeout=10s # tried with 5, does not work
--node-monitor-grace-period=10s # tried with 5, does not work
--node-monitor-period=3s
--profiling=false

[root@poanalyticsiaiopvm03 args]# cat kubelet
--kubeconfig=${SNAP_DATA}/credentials/kubelet.config
--cert-dir=${SNAP_DATA}/certs
--client-ca-file=${SNAP_DATA}/certs/ca.crt
--anonymous-auth=false
--root-dir=${SNAP_COMMON}/var/lib/kubelet
--log-dir=${SNAP_COMMON}/var/log
--fail-swap-on=false
--feature-gates=DevicePlugins=true
--eviction-hard="memory.available<100Mi,nodefs.available<1Gi,imagefs.available<1Gi"
--container-runtime=remote
--container-runtime-endpoint=${SNAP_COMMON}/run/containerd.sock
--containerd=${SNAP_COMMON}/run/containerd.sock
--node-labels="microk8s.io/cluster=true,node.kubernetes.io/microk8s-controlplane=microk8s-controlplane"
--authentication-token-webhook=true
--read-only-port=0
--node-status-update-frequency=3s
--resolv-conf=""
--cluster-domain=cluster.local
--cluster-dns=10.152.183.10