
Installing Kubernetes on Ubuntu 20.04

Introduction

This is an own/on-premises installation of Kubernetes on Ubuntu 20.04 using containerd as the container runtime (support for the Docker runtime in Kubernetes is ending soon). The addresses used throughout this guide:

  • POD CIDR : 192.168.5.0/24
  • SERVICE CIDR : 192.168.6.0/24
  • CALICO CIDR : 192.168.7.0/24
  • API : 192.168.1.108

Initialize Nodes and install container runtime

# Install container runtime (e.g. containerd)
#
# Do this in master and worker nodes
    $ sudo apt-get update
    $ sudo apt-get upgrade -y
    $ sudo apt-get install containerd -y
    $ sudo mkdir -p /etc/containerd
    $ containerd config default | sudo tee /etc/containerd/config.toml
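# Optional but recommended on Ubuntu 20.04: switch containerd's runc handler
# to the systemd cgroup driver, which matches what kubelet uses on a systemd
# host (a minimal sketch; verify the key exists in your generated config):
    $ sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
    $ sudo systemctl restart containerd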
#
# Install Kubernetes (master and worker nodes)
    $ curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    $ sudo apt-add-repository "deb http://apt.kubernetes.io/ kubernetes-xenial main"
    $ sudo apt-get update
    $ sudo apt-get install kubeadm kubelet kubectl -y
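# Optionally pin the Kubernetes packages so an unattended upgrade does not
# move the cluster to a new version unexpectedly:
    $ sudo apt-mark hold kubeadm kubelet kubectl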
# 
# Prepare containerd pre-requisite
    $ sudo nano /etc/sysctl.conf
    # With that file open, add the following at the bottom:
        net.bridge.bridge-nf-call-iptables = 1
        net.ipv4.ip_forward = 1
    # Save and close the file. 
        $ echo '1' | sudo tee /proc/sys/net/ipv4/ip_forward
    # Load the required kernel modules, then reload the configuration
    # (br_netfilter must be loaded before the bridge sysctl can apply):
        $ sudo modprobe overlay
        $ sudo modprobe br_netfilter
        $ sudo sysctl --system
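    # To make these modules load on every boot, persist them under
    # /etc/modules-load.d (the k8s.conf file name is our choice here):
        $ printf "overlay\nbr_netfilter\n" | sudo tee /etc/modules-load.d/k8s.conf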
#
# Disable swap
    $ sudo nano /etc/fstab
        Comment out (#) the lines that begin with /swap.img
        Save and close
    $ sudo swapoff -a
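    # Verify swap is now off (the Swap row should show 0B):
    $ free -h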
#    
# Initialize Kubernetes Cluster
    $ sudo kubeadm config images pull
    $ sudo kubeadm init --pod-network-cidr="192.168.5.0/24" --apiserver-advertise-address="192.168.1.108" --cri-socket="/run/containerd/containerd.sock" --service-cidr="192.168.6.0/24" --service-dns-domain="subok-tech.local"

#
# On success, kubeadm prints instructions similar to the following:

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.1.108:6443 --token ozwoml.ovgnrof41uv721p1 \
        --discovery-token-ca-cert-hash sha256:0802fa810554bc1c9786994796a2e6e79dd4ff001be31c04ac2572abf0061a8d
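
# Verify from the master that all nodes have registered. They will report
# NotReady until a pod network (Calico, below) is installed:
    $ kubectl get nodes -o wide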



Install Calico

We’ll be using Calico in our cluster as the pod network and for network policy (security).

# Download the Calico networking manifest for the Kubernetes API datastore
# Modify the CALICO_IPV4POOL_CIDR
# 
    $ curl https://docs.projectcalico.org/manifests/calico.yaml -O
    $ nano calico.yaml
    # set the Calico IPv4 pool to a CIDR of your choice (e.g. 192.168.7.0/24)
    ....
            - name: CALICO_IPV4POOL_CIDR
              value: "192.168.7.0/24"
    ....
    # Apply the manifests
    $ kubectl apply -f calico.yaml
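    # Watch the Calico pods come up before proceeding (calico-node and
    # calico-kube-controllers should all reach Running):
    $ kubectl get pods -n kube-system -w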
#
# Install and configure calicoctl
# In this example we will install calicoctl as a pod,
#   using the Kubernetes API datastore
#
    $ kubectl apply -f https://docs.projectcalico.org/manifests/calicoctl.yaml
    # Test
    $ kubectl exec -ti -n kube-system calicoctl -- /calicoctl get profiles -o wide
    # set an alias for convenience
    $ alias calicoctl="kubectl exec -i -n kube-system calicoctl -- /calicoctl"
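    # The alias can now be used like a local binary, e.g.:
    $ calicoctl get nodes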
    
# Additional Calico IPpool
# example : ippool2 cidr 192.168.20.0/24

$ calicoctl create -f - <<EOF
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: ippool2
spec:
  cidr: 192.168.20.0/24
  blockSize: 26
  ipipMode: Always
  natOutgoing: true
EOF
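    # Confirm the new pool exists alongside the default pool:
    $ calicoctl get ippools -o wide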



Install Istio

Istio is a service mesh that offers security, connectivity, and monitoring services. We’ll be exploring these features in later posts.

# Install Istio (latest version) using the default profile
    $ curl -L https://istio.io/downloadIstio | sh -
    # The download lands in an istio-<version> directory; add istioctl to PATH
    $ cd istio-*
    $ export PATH=$PWD/bin:$PATH
    $ istioctl install
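
# Verify the installation; all istio-system pods should reach Running
# (istioctl also ships its own checker):
    $ kubectl get pods -n istio-system
    $ istioctl verify-install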

# Delete Istio
    $  istioctl manifest generate | kubectl delete --ignore-not-found=true -f -

# Show namespace label
    $ kubectl get ns subokf5gc --show-labels

# set istio injection. This will instruct Istio to automatically inject Envoy sidecar 
# proxies when you deploy your application 
   $ kubectl label namespace default istio-injection=enabled
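
# Confirm which namespaces have injection enabled:
   $ kubectl get namespace -L istio-injection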

# Set up istio external IP using NodePort
# Set the ingress port
    $ kubectl -n istio-system get service istio-ingressgateway
    $ export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}')
    $ export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].nodePort}')
    $ export TCP_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="tcp")].nodePort}')

# Get istio-ingressgateway host ip 
    $ export INGRESS_HOST=$(kubectl get po -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].status.hostIP}')
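
# With these variables set you can compose the ingress URL and probe it.
# Expect an HTTP 404 until a Gateway/VirtualService has been configured:
    $ echo "http://$INGRESS_HOST:$INGRESS_PORT"
    $ curl -s -o /dev/null -w "%{http_code}\n" "http://$INGRESS_HOST:$INGRESS_PORT"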

Test Networking (pod-to-pod pings)

# Create three busybox instances
    $ kubectl create deployment pingtest --image=busybox --replicas=3 -- sleep infinity
    # Check their IP addresses
      $ kubectl get pods --selector=app=pingtest --output=wide
      
    NAME                        READY   STATUS    RESTARTS   AGE   IP              NODE       NOMINATED NODE   READINESS GATES
    pingtest-64f9cb6b84-m8dx2   1/1     Running   0          8h    192.168.7.66    k8s5gcn3   <none>           <none>
    pingtest-64f9cb6b84-nqrqx   1/1     Running   0          8h    192.168.7.65    k8s5gcn3   <none>           <none>
    pingtest-64f9cb6b84-wp6k8   1/1     Running   0          8h    192.168.7.194   k8s5gcn2   <none>           <none>

    # Connect to the first pod
    $ kubectl exec -ti pingtest-64f9cb6b84-m8dx2 -- sh

        / # ping 192.168.7.65 -c 4
        PING 192.168.7.65 (192.168.7.65): 56 data bytes
        64 bytes from 192.168.7.65: seq=0 ttl=63 time=0.128 ms
        64 bytes from 192.168.7.65: seq=1 ttl=63 time=0.106 ms
        64 bytes from 192.168.7.65: seq=2 ttl=63 time=0.087 ms
        64 bytes from 192.168.7.65: seq=3 ttl=63 time=0.118 ms

        --- 192.168.7.65 ping statistics ---
        4 packets transmitted, 4 packets received, 0% packet loss
        round-trip min/avg/max = 0.087/0.109/0.128 ms

        # Check routes (pod traffic is routed via the 169.254.1.1
        # link-local next hop that Calico installs on the host)
        / # ip route get 192.168.7.65
        192.168.7.65 via 169.254.1.1 dev eth0  src 192.168.7.66
$ calicoctl get ippools -o wide
NAME                  CIDR             NAT    IPIPMODE   VXLANMODE   DISABLED   SELECTOR
default-ipv4-ippool   192.168.7.0/24   true   Always     Never       false      all()

    


Let's see what our k8s cluster looks like!

k8s5gc@k85gcms01:~$ kubectl get all --all-namespaces
NAMESPACE      NAME                                           READY   STATUS    RESTARTS        AGE
default        pod/pingtest-64f9cb6b84-m8dx2                  1/1     Running   0               8h
default        pod/pingtest-64f9cb6b84-nqrqx                  1/1     Running   0               8h
default        pod/pingtest-64f9cb6b84-wp6k8                  1/1     Running   0               8h
istio-system   pod/istio-ingressgateway-67c99c69bd-bvb2q      1/1     Running   0               7h8m
istio-system   pod/istiod-6bf8dd57f8-9vfbm                    1/1     Running   0               7h9m
kube-system    pod/calico-kube-controllers-58497c65d5-zgw6x   1/1     Running   0               9h
kube-system    pod/calico-node-64vgz                          1/1     Running   0               9h
kube-system    pod/calico-node-f76bf                          1/1     Running   0               9h
kube-system    pod/calico-node-v7jrj                          1/1     Running   0               9h
kube-system    pod/calico-node-z265k                          1/1     Running   0               9h
kube-system    pod/calicoctl                                  1/1     Running   0               8h
kube-system    pod/coredns-78fcd69978-pt5kn                   1/1     Running   0               11h
kube-system    pod/coredns-78fcd69978-q2lqx                   1/1     Running   0               11h
kube-system    pod/etcd-k85gcms01                             1/1     Running   2               11h
kube-system    pod/kube-apiserver-k85gcms01                   1/1     Running   2               11h
kube-system    pod/kube-controller-manager-k85gcms01          1/1     Running   6 (4h32m ago)   11h
kube-system    pod/kube-proxy-8jsw6                           1/1     Running   0               11h
kube-system    pod/kube-proxy-nbp7k                           1/1     Running   0               11h
kube-system    pod/kube-proxy-tlr8v                           1/1     Running   0               11h
kube-system    pod/kube-proxy-x9nkl                           1/1     Running   0               11h
kube-system    pod/kube-scheduler-k85gcms01                   1/1     Running   5 (4h32m ago)   11h

NAMESPACE      NAME                           TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)                                      AGE
default        service/kubernetes             ClusterIP      192.168.6.1     <none>        443/TCP                                      11h
istio-system   service/istio-ingressgateway   LoadBalancer   192.168.6.15    <pending>     15021:30451/TCP,80:31344/TCP,443:31000/TCP   7h8m
istio-system   service/istiod                 ClusterIP      192.168.6.110   <none>        15010/TCP,15012/TCP,443/TCP,15014/TCP        7h9m
kube-system    service/kube-dns               ClusterIP      192.168.6.10    <none>        53/UDP,53/TCP,9153/TCP                       11h

NAMESPACE     NAME                         DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
kube-system   daemonset.apps/calico-node   4         4         4       4            4           kubernetes.io/os=linux   9h
kube-system   daemonset.apps/kube-proxy    4         4         4       4            4           kubernetes.io/os=linux   11h

NAMESPACE      NAME                                      READY   UP-TO-DATE   AVAILABLE   AGE
default        deployment.apps/pingtest                  3/3     3            3           8h
istio-system   deployment.apps/istio-ingressgateway      1/1     1            1           7h8m
istio-system   deployment.apps/istiod                    1/1     1            1           7h9m
kube-system    deployment.apps/calico-kube-controllers   1/1     1            1           9h
kube-system    deployment.apps/coredns                   2/2     2            2           11h

NAMESPACE      NAME                                                 DESIRED   CURRENT   READY   AGE
default        replicaset.apps/pingtest-64f9cb6b84                  3         3         3       8h
istio-system   replicaset.apps/istio-ingressgateway-67c99c69bd      1         1         1       7h8m
istio-system   replicaset.apps/istiod-6bf8dd57f8                    1         1         1       7h9m
kube-system    replicaset.apps/calico-kube-controllers-58497c65d5   1         1         1       9h
kube-system    replicaset.apps/coredns-78fcd69978                   2         2         2       11h

NAMESPACE      NAME                                                       REFERENCE                         TARGETS         MINPODS   MAXPODS   REPLICAS   AGE
istio-system   horizontalpodautoscaler.autoscaling/istio-ingressgateway   Deployment/istio-ingressgateway   <unknown>/80%   1         5         1          7h8m
istio-system   horizontalpodautoscaler.autoscaling/istiod                 Deployment/istiod                 <unknown>/80%   1         5         1          7h9m



In our next article we will demonstrate the installation of a 5G core on Kubernetes.

Kubernetes Dashboard

# Deploy Kubernetes Dashboard
    $ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.4.0/aio/deploy/recommended.yaml

# After a successful deployment, a new namespace will be created --> kubernetes-dashboard
    $ kubectl get all -n kubernetes-dashboard

# OUTPUT

NAME                                            READY   STATUS    RESTARTS   AGE
pod/dashboard-metrics-scraper-c45b7869d-bwvxd   1/1     Running   0          17h
pod/kubernetes-dashboard-576cb95f94-nlpj8       1/1     Running   0          17h

NAME                                TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
service/dashboard-metrics-scraper   ClusterIP   192.168.6.83    <none>        8000/TCP   17h
service/kubernetes-dashboard        ClusterIP   192.168.6.123   <none>        443/TCP    17h

NAME                                        READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/dashboard-metrics-scraper   1/1     1            1           17h
deployment.apps/kubernetes-dashboard        1/1     1            1           17h

NAME                                                  DESIRED   CURRENT   READY   AGE
replicaset.apps/dashboard-metrics-scraper-c45b7869d   1         1         1       17h
replicaset.apps/kubernetes-dashboard-576cb95f94       1         1         1       17h

Accessing Dashboard - Proxy

# Command Line Proxy
    $ kubectl proxy
    
Output :
    Starting to serve on 127.0.0.1:8001
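
# kubectl proxy binds to 127.0.0.1:8001 by default; choose another port if
# 8001 is already taken:
    $ kubectl proxy --port=8002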

Using “PuTTY” port forwarding

Enable port forwarding:

Click “Tunnels”

Source Port : 8001 (default port used by kubectl proxy)

Destination : 127.0.0.1:8001 (your local machine)

Click “Open” and log in to your host.

After a successful login, execute:

$ kubectl proxy

Open your browser and go to:

http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/

You will be greeted by a login page asking for a “token”.

Kubernetes Dashboard - Token

# Display Secrets
    $ kubectl -n kubernetes-dashboard get secret

# OUTPUT

NAME                               TYPE                                  DATA   AGE
admin-user-token-jg7xl             kubernetes.io/service-account-token   3      17h
default-token-4gxns                kubernetes.io/service-account-token   3      17h
kubernetes-dashboard-certs         Opaque                                0      17h
kubernetes-dashboard-csrf          Opaque                                1      17h
kubernetes-dashboard-key-holder    Opaque                                2      17h
kubernetes-dashboard-token-nqpjs   kubernetes.io/service-account-token   3      17h

# From the output, we are interested in
#   kubernetes-dashboard-token-nqpjs to get the token

        $ kubectl -n kubernetes-dashboard describe secret kubernetes-dashboard-token-nqpjs
        
# OUTPUT 

Name:         kubernetes-dashboard-token-nqpjs
Namespace:    kubernetes-dashboard
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: kubernetes-dashboard
              kubernetes.io/service-account.uid: 0f68c69f-aef0-4a99-939d-039041d9277d

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1099 bytes
namespace:  20 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6Ild5ZGN1QjNrQm5ZdG9YeVd6RU96T0E5Z3h0T2g2QTBnQmdHSERGZm13Y0UifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1ucXBqcyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjBmNjhjNjlmLWFlZjAtNGE5OS05MzlkLTAzOTA0MWQ5Mjc3ZCIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.iJbGbIX22N610zV13JECblxuqcxzvcRHBPyocpzLk8cQyxtzmCeYXwBlKaC49mKEi4RoH_D7oPNCX5jpViDaW-VSFRqNLkFaV03zDIV3T39_iiBsTsynlLb0jAEs8RQwacUStTsFHHIqWp7Crmwobw6etffkSW9Zjgi3RbhTQPXPu_15PlIIDv2fOIwwfMOFx3du
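
# For convenience, the raw token can also be extracted with jsonpath
# (substitute your own secret name from the listing above):
    $ kubectl -n kubernetes-dashboard get secret kubernetes-dashboard-token-nqpjs -o jsonpath='{.data.token}' | base64 --decode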

# Copy the token and paste it into the browser's token request form

After a successful login using the token, notice that you have limited visibility. In order to access full information about your cluster, you will need to deploy a user with elevated authorization (e.g. admin-user).

Kubernetes Dashboard - Deploy user

# With this deployment we will create: admin-user

# admin-user.yaml
# service-account
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
# rbac
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard


# deploy 
    $ kubectl apply -f admin-user.yaml
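    # Verify that both objects were created:
    $ kubectl -n kubernetes-dashboard get serviceaccount admin-user
    $ kubectl get clusterrolebinding admin-user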
    
# After a successful deployment, let's retrieve the token.
# Here we are interested in: admin-user-token-jg7xl
    $ kubectl -n kubernetes-dashboard get secret

# OUTPUT
NAME                               TYPE                                  DATA   AGE
admin-user-token-jg7xl             kubernetes.io/service-account-token   3      17h
default-token-4gxns                kubernetes.io/service-account-token   3      18h
kubernetes-dashboard-certs         Opaque                                0      18h
kubernetes-dashboard-csrf          Opaque                                1      18h
kubernetes-dashboard-key-holder    Opaque                                2      18h
kubernetes-dashboard-token-nqpjs   kubernetes.io/service-account-token   3      18h

# token
    $ kubectl -n kubernetes-dashboard describe secret admin-user-token-jg7xl
    
# OUTPUT

Name:         admin-user-token-jg7xl
Namespace:    kubernetes-dashboard
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: admin-user
              kubernetes.io/service-account.uid: 768f7f49-580c-4f77-8298-1f9b654e79b4

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1099 bytes
namespace:  20 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6Ild5ZGN1QjNrQm5ZdG9YeVd6RU96T0E5Z3h0T2g2QTBnQmdHSERGZm13Y0UifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWpnN3hsIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI3NjhmN2Y0OS01ODBjLTRmNzctODI5OC0xZjliNjU0ZTc5YjQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.UQDmyfpezvy_TtOey0G7Aw8Vb4PUa2ikJ324xsRKLGpYiAtQhbSUl84HYHfEHKDxkipf0fuTZNI8r_bb3GMW0-1IbbSYd1elS0mifWq7kg4Hwz2doxfDsckkDyAQHKhkh4Hu9oEGrjzZ-N0T1ujzhmBEXGzod0EXXwJ6e6Pt1wu3o-SKKQ6RqbxoP2w5LfBxI9R7q-mh5FSA-8xzG

# Copy the token and paste it into your browser's token request form

Accessing Dashboard - NodePort

# Before the change
$ kubectl get all -n kubernetes-dashboard

NAME                                            READY   STATUS    RESTARTS   AGE
pod/dashboard-metrics-scraper-c45b7869d-bwvxd   1/1     Running   0          22h
pod/kubernetes-dashboard-576cb95f94-nlpj8       1/1     Running   0          22h

NAME                                TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
service/dashboard-metrics-scraper   ClusterIP   192.168.6.83    <none>        8000/TCP   22h
service/kubernetes-dashboard        ClusterIP   192.168.6.123   <none>        443/TCP    22h

NAME                                        READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/dashboard-metrics-scraper   1/1     1            1           22h
deployment.apps/kubernetes-dashboard        1/1     1            1           22h


# Download k8s deployment file and edit
    $ wget https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended.yaml -O k8s-dashboard-deployment.yaml

# Modify service to use NodePort and assign external IP

   kind: Service
   apiVersion: v1
   metadata:
     labels:
       k8s-app: kubernetes-dashboard
     name: kubernetes-dashboard
     namespace: kubernetes-dashboard
   spec:
     type: NodePort
     ports:
     - port: 443
       targetPort: 8443
     externalIPs:
       - 192.168.1.108
     selector:
       k8s-app: kubernetes-dashboard
       
# Apply the changes
    $ kubectl apply -f k8s-dashboard-deployment.yaml
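
# Alternatively, rather than re-applying the whole manifest, the service type
# can be patched in place (a sketch; the externalIPs entry above would still
# need to be added separately):
    $ kubectl -n kubernetes-dashboard patch service kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'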
    
# Now check which port the Dashboard is exposed on

$ kubectl get all -n kubernetes-dashboard

NAME                                            READY   STATUS    RESTARTS   AGE
pod/dashboard-metrics-scraper-c45b7869d-bwvxd   1/1     Running   0          23h
pod/kubernetes-dashboard-7d644ddc5d-fvprc       1/1     Running   0          34s

NAME                                TYPE        CLUSTER-IP      EXTERNAL-IP     PORT(S)         AGE
service/dashboard-metrics-scraper   ClusterIP   192.168.6.83    <none>          8000/TCP        23h
service/kubernetes-dashboard        NodePort    192.168.6.123   192.168.1.108   443:32513/TCP   23h


# Access the Dashboard using the following URL, noting the exposed NodePort (32513 in this example):
    https://192.168.1.108:32513
