There is a simple way to bootstrap a node with automatically renewing TLS certificates.
API server
To support bootstrap-token based discovery and to join nodes to the cluster, we need to make sure the below flags are in place on the API server.
--client-ca-file=/var/lib/kubernetes/ca.pem
--enable-bootstrap-token-auth=true
If not present, then add these flags to the /etc/systemd/system/kube-apiserver.service unit file.
Controller
Make sure the below flags are in place on kube-controller-manager.
--controllers=*,bootstrapsigner,tokencleaner
--experimental-cluster-signing-duration=8760h0m0s   (note: renamed to --cluster-signing-duration in newer Kubernetes releases)
--cluster-signing-cert-file=/var/lib/kubernetes/ca.pem
--cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem
If not present, then add these flags to the /etc/systemd/system/kube-controller-manager.service unit file.
- Reload and restart API server and Controller unit files
{
# daemon-reload must run first so systemd picks up the flag changes made
# to the unit files above before the services are restarted.
sudo systemctl daemon-reload
sudo systemctl restart kube-apiserver.service
sudo systemctl restart kube-controller-manager.service
}
RBAC permission to enable certificate signing
- To allow kubelet to create CSR
cat <<EOF | kubectl create -f -
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: create-csrs-for-bootstrapping
subjects:
- kind: Group
name: system:bootstrappers
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: system:node-bootstrapper
apiGroup: rbac.authorization.k8s.io
EOF
- CSR auto signing for bootstrapper
cat <<EOF | kubectl create -f -
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: auto-approve-csrs-for-group
subjects:
- kind: Group
name: system:bootstrappers
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
apiGroup: rbac.authorization.k8s.io
EOF
- Certificates self renewal
cat <<EOF | kubectl create -f -
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: auto-approve-renewals-for-nodes
subjects:
- kind: Group
name: system:nodes
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
apiGroup: rbac.authorization.k8s.io
EOF
Create bootstrap token
$ echo $(openssl rand -hex 3).$(openssl rand -hex 8)
Output
80a6ee.fd219151288b08d8
$ vi bootstrap-token.yaml
apiVersion: v1
kind: Secret
metadata:
# Name MUST be of form "bootstrap-token-<token id>"
name: bootstrap-token-80a6ee
namespace: kube-system
# Type MUST be 'bootstrap.kubernetes.io/token'
type: bootstrap.kubernetes.io/token
stringData:
# Human readable description. Optional.
description: "The default bootstrap token."
# Token ID and secret. Required.
token-id: 80a6ee
token-secret: fd219151288b08d8
# Expiration. Optional.
expiration: 2019-12-05T12:00:00Z   # example only — set a date in the future, or the token will be rejected
# Allowed usages.
usage-bootstrap-authentication: "true"
usage-bootstrap-signing: "true"
# Extra groups to authenticate the token as. Must start with "system:bootstrappers:"
auth-extra-groups: system:bootstrappers:worker,system:bootstrappers:ingress
$ kubectl create -f bootstrap-token.yaml
Create cluster-info for clients which will be downloaded if needed by client
KUBERNETES_MASTER=$(awk '/master/{print $1;exit}' /etc/hosts)
$ kubectl config set-cluster bootstrap \
--kubeconfig=bootstrap-kubeconfig-public \
--server=https://${KUBERNETES_MASTER}:6443 \
--certificate-authority=ca.pem \
--embed-certs=true
$ kubectl -n kube-public create configmap cluster-info \
--from-file=kubeconfig=bootstrap-kubeconfig-public
$ kubectl -n kube-public get configmap cluster-info -o yaml
- RBAC to allow anonymous users to access the cluster-info ConfigMap
$ kubectl create role anonymous-for-cluster-info --resource=configmaps --resource-name=cluster-info --namespace=kube-public --verb=get,list,watch
$ kubectl create rolebinding anonymous-for-cluster-info-binding --role=anonymous-for-cluster-info --user=system:anonymous --namespace=kube-public
Create bootstrap-kubeconfig for worker nodes
$ kubectl config set-cluster bootstrap \
--kubeconfig=bootstrap-kubeconfig \
--server=https://${KUBERNETES_MASTER}:6443 \
--certificate-authority=ca.pem \
--embed-certs=true
$ kubectl config set-credentials kubelet-bootstrap \
--kubeconfig=bootstrap-kubeconfig \
--token=80a6ee.fd219151288b08d8
$ kubectl config set-context bootstrap \
--kubeconfig=bootstrap-kubeconfig \
--user=kubelet-bootstrap \
--cluster=bootstrap
$ kubectl config --kubeconfig=bootstrap-kubeconfig use-context bootstrap
Copy the bootstrap-kubeconfig to worker node and then execute below steps from worker node.
Kubelet configuration
$ sudo swapoff /dev/dm-1 ##<--- select appropriate swap device based on your OS config
- Install and start docker service
- Once docker is installed, execute the below steps to make docker ready for kubelet integration.
$ sudo vi /lib/systemd/system/docker.service
- Disable iptables, default bridge network and masquerading on docker
ExecStart=/usr/bin/dockerd -H fd:// --bridge=none --iptables=false --ip-masq=false
- Cleanup all docker specific networking from worker nodes
$ sudo iptables -t nat -F
$ sudo ip link set docker0 down
$ sudo ip link delete docker0
$ sudo systemctl restart docker
- Move the bootstrap config file to /var/lib/kubelet/
$ sudo mkdir -p /var/lib/kubelet/
$ sudo mv bootstrap-kubeconfig /var/lib/kubelet/
- Create a systemd unit file and add the necessary flags.
# The delimiter is quoted ('EOF') so the shell writes the unit file
# literally: with an unquoted delimiter, the backslash-newline pairs in
# ExecStart would be stripped before reaching tee.
$ cat <<'EOF' | sudo tee /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
# This guide installs and configures docker (not containerd) as the
# container runtime, so kubelet must be ordered after docker.service.
After=docker.service
Requires=docker.service
[Service]
ExecStart=/usr/local/bin/kubelet \
  --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig \
  --cert-dir=/var/lib/kubelet/ \
  --kubeconfig=/var/lib/kubelet/kubeconfig \
  --rotate-certificates=true \
  --runtime-cgroups=/systemd/system.slice \
  --kubelet-cgroups=/systemd/system.slice
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
- Reload and start the kubelet service
$ sudo systemctl daemon-reload
$ sudo systemctl start kubelet
Now execute kubectl get nodes and see if the node is listed there.