Onboarding a new node to the cluster

Set up DietPi by following the instructions

Set up the node

Log in to the node

ssh <node_ip_address>

Change the hostname for the node

dietpi-config

6: Security options

Hostname: Set it to cluster-n (where n is the node number)
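
If you prefer to skip the dietpi-config menu, a minimal non-interactive sketch (assuming the new node is cluster-6) would be:

hostnamectl set-hostname cluster-6
# Debian-based images usually map 127.0.1.1 to the hostname; keep it in sync
sed -i 's/^127.0.1.1.*/127.0.1.1 cluster-6/' /etc/hosts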

Install the packages Longhorn needs (open-iscsi, nfs-common, and util-linux)

apt-get install nfs-common
apt-get install -y open-iscsi
apt-get install -y util-linux
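
Longhorn also expects the iSCSI daemon to be running on every node. Assuming the iscsid service shipped with the Debian open-iscsi package, you can enable and verify it like this:

systemctl enable --now iscsid
systemctl status iscsid --no-pager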

Log in to the master node with SSH. Next we will set up the master node so it can SSH into the worker.

First, copy the SSH key to the worker node so the master node can log in without a password.

ssh-copy-id <node_ip_address>
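
To confirm the key was installed, run a command over SSH; if it returns the hostname without asking for a password, the copy worked:

ssh <node_ip_address> hostname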

Add the new worker's IP address and desired hostname to the list

vim /etc/hosts

127.0.0.1 localhost
127.0.1.1 cluster-master
#::1 localhost ip6-localhost ip6-loopback
#ff02::1 ip6-allnodes
#ff02::2 ip6-allrouters

192.168.86.100 cluster-master

192.168.86.101 cluster-1
192.168.86.102 cluster-2
192.168.86.103 cluster-3
192.168.86.104 cluster-4
192.168.86.105 cluster-5
192.168.86.106 cluster-6
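
You can check that the new entry resolves from the master node (assuming the new node is cluster-6):

getent hosts cluster-6
ping -c 1 cluster-6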

Add the node to the Ansible hosts file. I added the new node to the workers group, and also to a new group called new for setup purposes. You can remove this group again later.

vim /etc/ansible/hosts
[control]
cluster-master  ansible_connection=local

[workers]
cluster-1  ansible_connection=ssh var_disk=sda var_uuid=bde90210-e569-4c01-a562-b4136fc26082
cluster-2  ansible_connection=ssh var_disk=sda var_uuid=986d965a-8099-425c-8fbe-8cfa389ae939
cluster-3  ansible_connection=ssh var_disk=sda var_uuid=3c0da453-421d-4033-99eb-7f06a5cc3fd4
cluster-4  ansible_connection=ssh
cluster-5  ansible_connection=ssh
cluster-6  ansible_connection=ssh

[new]
cluster-6  ansible_connection=ssh

[cluster:children]
control
workers
new
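
Before running anything against the new group, you can confirm the inventory parses and contains the expected host:

ansible new --list-hosts
ansible-inventory --graph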

Let's test if Ansible can reach the new node(s)

ansible new -m ping

The node responds:

root@cluster-master:~# ansible new -m ping
cluster-6 | SUCCESS => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/bin/python3"
    },
    "changed": false,
    "ping": "pong"
}

Install K3s

We can now use Ansible to install K3s on the node

Retrieve the K3s Token:

Run the following command on the master node to retrieve the K3s token:

cat /var/lib/rancher/k3s/server/node-token

This command will display the token on your terminal. Use the part after ::server:
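
If you want to grab just that part directly, a small sketch using awk (the token has the form <id>::server:<secret>):

awk -F'::server:' '{print $2}' /var/lib/rancher/k3s/server/node-token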

ansible new -b -m shell -a "curl -sfL https://get.k3s.io | K3S_URL=https://<cluster_master_ip_address>:6443 K3S_TOKEN=<server_token> sh -s -"
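
Before checking the node list, you can confirm the k3s-agent service came up on the new node:

ansible new -b -m shell -a "systemctl status k3s-agent --no-pager"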

After this you should see the new node in the list of nodes

kubectl get nodes
NAME             STATUS   ROLES                  AGE    VERSION
cluster-master   Ready    control-plane,master   224d   v1.27.5+k3s1
cluster-3        Ready    worker                 224d   v1.27.5+k3s1
cluster-1        Ready    worker                 224d   v1.27.5+k3s1
cluster-5        Ready    worker                 43m    v1.29.3+k3s1
cluster-4        Ready    worker                 79m    v1.29.3+k3s1
cluster-2        Ready    worker                 224d   v1.27.5+k3s1
cluster-6        Ready    <none>                 7s     v1.29.3+k3s1

Now label the node

kubectl label nodes cluster-6 "node-role.kubernetes.io/worker=true"
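
To verify the label was applied, list the node's labels:

kubectl get node cluster-6 --show-labels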

Add the private registry for pulling private images

Make sure the /etc/rancher/k3s directory exists on the node (Ansible creates it if it is missing)

ansible new -b -m file -a "path=/etc/rancher/k3s state=directory"
cluster-6 | CHANGED => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/bin/python3"
    },
    "changed": true,
    "gid": 0,
    "group": "root",
    "mode": "0755",
    "owner": "root",
    "path": "/etc/rancher/k3s",
    "size": 4096,
    "state": "directory",
    "uid": 0
}

Copy the registries file from the master node

ansible new -b -m copy -a "src=/etc/rancher/k3s/registries.yaml dest=/etc/rancher/k3s/registries.yaml"
cluster-6 | CHANGED => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/bin/python3"
    },
    "changed": true,
    "checksum": "3bd94a6e004228d6c162b5416cc38cfd414ecbb1",
    "dest": "/etc/rancher/k3s/registries.yaml",
    "gid": 0,
    "group": "root",
    "md5sum": "14ab766dbc6dffb7ff63725829d1ad94",
    "mode": "0644",
    "owner": "root",
    "size": 515,
    "src": "/root/.ansible/tmp/ansible-tmp-1713712165.637154-2236082-36771667476167/source",
    "state": "file",
    "uid": 0
}
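
If you want to double-check what ended up on the node, you can read the file back through Ansible:

ansible new -b -m shell -a "cat /etc/rancher/k3s/registries.yaml"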

Restart the agent

ansible new -b -m shell -a "systemctl restart k3s-agent"
cluster-6 | CHANGED | rc=0 >>
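
Finally, confirm the agent came back up and the node is still Ready:

ansible new -b -m shell -a "systemctl is-active k3s-agent"
kubectl get nodes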