Created a load balancer for kube-apiserver, but `nc -v VIP` returns: No route to host

I want to successfully deploy a k8s cluster on my droplet

The cluster nodes cannot connect to the LB VIP.
I'm having trouble following the official docs to install it, and I'm stuck at the load balancer step.

I have posted all my config below; I think my keepalived + haproxy config is correct, but the error still exists.

Cluster information:

Kubernetes version:v1.31.1
Cloud being used: (put bare-metal if not on a public cloud) DigitalOcean Droplet (not DOKS)
Installation method: kubeadm
Host OS: CentOS Stream 9
CNI and version: not install yet
CRI and version: not install yet

haproxy.cfg

global
        log /dev/log    local0
        log /dev/log    local1 notice
        chroot /var/lib/haproxy
        stats socket /run/haproxy/admin.sock mode 660 level admin
        stats timeout 30s
        user haproxy
        group haproxy
        daemon

        # Default SSL material locations
        ca-base /etc/ssl/certs
        crt-base /etc/ssl/private

        # See: https://ssl-config.mozilla.org/#server=haproxy&server-version=2.0.3&config=intermediate
        ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
        ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
        ssl-default-bind-options ssl-min-ver TLSv1.2 no-tls-tickets

defaults
        log     global
        mode    http
        option  httplog
        option  dontlognull
        option http-server-close
        option                  redispatch
        retries                 1
        timeout connect 5000
        timeout client  50000
        timeout server  50000
        errorfile 400 /etc/haproxy/errors/400.http
        errorfile 403 /etc/haproxy/errors/403.http
        errorfile 408 /etc/haproxy/errors/408.http
        errorfile 500 /etc/haproxy/errors/500.http
        errorfile 502 /etc/haproxy/errors/502.http
        errorfile 503 /etc/haproxy/errors/503.http
        errorfile 504 /etc/haproxy/errors/504.http

frontend apiserver
        bind *:5000
        mode tcp
        option tcplog
        default_backend apiserverbackend
backend apiserverbackend
        mode http
        option httpchk GET /healthz
        http-check expect status 200
        balance     roundrobin
        default-server check inter 3s fall 3 rise 2
        server master1 10.99.99.4:6443 check ssl verify none
        server master2 10.99.99.9:6443 check ssl verify none
        server master3 10.99.99.8:6443 check ssl verify none


keepalived.conf

! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script check_apiserver {
  script "/etc/keepalived/check_apiserver.sh"
  interval 3
  weight -2
  fall 10
  rise 2
}
vrrp_instance VI_1 {
  state MASTER
  interface eth1
  virtual_router_id 51
  priority 101
  advert_int 1
  authentication {
      auth_type PASS
      auth_pass 1111
  }
  unicast_src_ip 10.99.99.2
  unicast_peer {
    10.99.99.3
  }
  virtual_ipaddress {
    10.99.99.99
  }
  track_script {
    check_apiserver
  }
}

`ip a` on the LB is working — the VIP 10.99.99.99 is assigned to eth1:

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host noprefixroute 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 3a:f0:eb:13:ff:b9 brd ff:ff:ff:ff:ff:ff
    altname enp0s3
    altname ens3
    inet 167.71.95.103/20 brd 167.71.95.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet 10.17.0.5/16 brd 10.17.255.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::38f0:ebff:fe13:ffb9/64 scope link 
       valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 8e:99:21:72:51:83 brd ff:ff:ff:ff:ff:ff
    altname enp0s4
    altname ens4
    inet 10.99.99.2/24 brd 10.99.99.255 scope global eth1
       valid_lft forever preferred_lft forever
    inet 10.99.99.99/32 scope global eth1
       valid_lft forever preferred_lft forever
    inet6 fe80::8c99:21ff:fe72:5183/64 scope link 
       valid_lft forever preferred_lft forever
root@lb1:~# 

cluster node machine ip info

[root@centos-s-2vcpu-2gb-nyc3-02 ~]# ip route show
default via 138.197.96.1 dev eth0 proto static metric 100 
10.17.0.0/16 dev eth0 proto kernel scope link src 10.17.0.7 metric 100 
10.99.99.0/24 dev eth1 proto kernel scope link src 10.99.99.4 metric 101 
138.197.96.0/20 dev eth0 proto kernel scope link src 138.197.99.141 metric 100 
[root@centos-s-2vcpu-2gb-nyc3-02 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 6a:23:c4:67:3e:ed brd ff:ff:ff:ff:ff:ff
    altname enp0s3
    altname ens3
    inet 138.197.99.141/20 brd 138.197.111.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 10.17.0.7/16 brd 10.17.255.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::6823:c4ff:fe67:3eed/64 scope link 
       valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
    link/ether 22:da:6e:1a:8f:81 brd ff:ff:ff:ff:ff:ff
    altname enp0s4
    altname ens4
    inet 10.99.99.4/24 brd 10.99.99.255 scope global noprefixroute eth1
       valid_lft forever preferred_lft forever
    inet6 fe80::20da:6eff:fe1a:8f81/64 scope link 
       valid_lft forever preferred_lft forever

[root@centos-s-2vcpu-2gb-nyc3-02 ~]# curl http://169.254.169.254/metadata/v1/interfaces/public/0/anchor_ipv4/address
10.17.0.7
[root@centos-s-2vcpu-2gb-nyc3-02 ~]# iptables -L -v -n
Chain INPUT (policy ACCEPT 19M packets, 3237M bytes)
 pkts bytes target     prot opt in     out     source               destination         
  19M 3237M KUBE-FIREWALL  0    --  *      *       0.0.0.0/0            0.0.0.0/0           

Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
 pkts bytes target     prot opt in     out     source               destination         

Chain OUTPUT (policy ACCEPT 23M packets, 3165M bytes)
 pkts bytes target     prot opt in     out     source               destination         
  23M 3165M KUBE-FIREWALL  0    --  *      *       0.0.0.0/0            0.0.0.0/0           

Chain KUBE-FIREWALL (2 references)
 pkts bytes target     prot opt in     out     source               destination         
    0     0 DROP       0    --  *      *      !127.0.0.0/8          127.0.0.0/8          /* block incoming localnet connections */ ! ctstate RELATED,ESTABLISHED,DNAT

Chain KUBE-KUBELET-CANARY (0 references)
 pkts bytes target     prot opt in     out     source               destination        

I checked this Guestbook Example: Load Balancer,
but my problem is different from it, since I have not even set up the cluster successfully yet.