Kubernetes and Cilium on Rocky Linux - 1 master, 2 worker node cluster setup

Nodes: master, worker1, worker2


[root@master boobalan]# hostname

master.boobi.com

[root@master boobalan]# cat /etc/resolv.conf

# Generated by NetworkManager

search boobi.com

nameserver 8.8.8.8

nameserver 8.8.4.4


//install docker on all three nodes
//install kubernetes binaries on all nodes
//enable and start the kubelet service on all nodes
//run kubeadm init on the master node to initialize the cluster
//install the network solution - cilium - on the master node
//join the worker nodes to the cluster using the kubeadm join command

Once the setup is done, the k8s cluster is up and ready to accept workloads.


//some other basic steps
    [root@master boobalan]# systemctl get-default
     multi-user.target
    //disable GUI - #systemctl set-default multi-user.target
     hostname config - #hostnamectl set-hostname master
    disable firewall - #systemctl disable firewalld.service
    disable SELinux - #sestatus  #setenforce 0
    PermitRootLogin yes - /etc/ssh/sshd_config
    disable swap - #swapoff -a
        and disable it in /etc/fstab by commenting out the swap line:
        #/dev/mapper/rl-swap     none                    swap    defaults        0 0
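
//a consolidated sketch of the prep above (run as root on every node; the sed patterns assume stock Rocky 9 files):
#hostnamectl set-hostname master              //worker1 / worker2 on the other nodes
#systemctl set-default multi-user.target
#systemctl disable --now firewalld.service
#setenforce 0
#sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config
#swapoff -a
#sed -i '/ swap /s/^/#/' /etc/fstab           //comment out the swap line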
    
//install docker on all 3 servers

[root@master boobalan]# dnf config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo

//Adding repo from: https://download.docker.com/linux/centos/docker-ce.repo

[root@master boobalan]# dnf repolist
repo id                                                                                                             repo name
appstream                                                                                                           Rocky Linux 9 - AppSt
baseos                                                                                                              Rocky Linux 9 - BaseO
docker-ce-stable                                                                                                    Docker CE Stable - x8
extras                                                                                                              Rocky Linux 9 - Extra
[root@master boobalan]# dnf install docker-ce docker-ce-cli containerd.io -y
Docker CE Stable - x86_64
Rocky Linux 9 - BaseOS
Rocky Linux 9 - AppStream
Rocky Linux 9 - Extras
Dependencies resolved.
=========================================================================================================================================
 Package                                                             Architecture                                     Version
=========================================================================================================================================
Installing:
 containerd.io                                                       x86_64                                           1.6.28-3.2.el9
 docker-ce                                                           x86_64                                           3:26.0.0-1.el9
 docker-ce-cli                                                       x86_64                                           1:26.0.0-1.el9
Installing weak dependencies:
 docker-buildx-plugin                                                x86_64                                           0.13.1-1.el9
 docker-ce-rootless-extras                                           x86_64                                           26.0.0-1.el9
 docker-compose-plugin                                               x86_64                                           2.25.0-1.el9

Transaction Summary
=========================================================================================================================================
Install  6 Packages

Total download size: 98 M
Installed size: 384 M


//add the local user to the docker group

[root@master boobalan]# usermod -aG docker $USER && newgrp docker

[root@master boobalan]# systemctl start docker && systemctl enable docker


[root@master boobalan]# systemctl status docker
● docker.service - Docker Application Container Engine
     Loaded: loaded (/usr/lib/systemd/system/docker.service; enabled; preset: disabled)
     Active: active (running) since Fri 2024-04-05 00:43:49 CEST; 2min 55s ago
TriggeredBy: ● docker.socket
       Docs: https://docs.docker.com
   Main PID: 29875 (dockerd)
      Tasks: 7
     Memory: 36.2M
        CPU: 1.611s
     CGroup: /system.slice/docker.service
             └─29875 /usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock

#systemctl start docker
#systemctl enable docker
#systemctl enable containerd
#systemctl start containerd

#docker version
#docker images

//now preload the docker images

[root@master boobalan]# docker images
REPOSITORY   TAG       IMAGE ID   CREATED   SIZE

[root@master boobalan]# docker pull nginx
Using default tag: latest
latest: Pulling from library/nginx
8a1e25ce7c4f: Pull complete
e78b137be355: Pull complete
39fc875bd2b2: Pull complete
035788421403: Pull complete
87c3fb37cbf2: Pull complete
c5cdd1ce752d: Pull complete
33952c599532: Pull complete
Digest: sha256:6db391d1c0cfb30588ba0bf72ea999404f2764febf0f1f196acd5867ac7efa7e
Status: Downloaded newer image for nginx:latest
docker.io/library/nginx:latest


[root@master boobalan]# docker images
REPOSITORY   TAG       IMAGE ID       CREATED       SIZE
nginx        latest    92b11f67642b   7 weeks ago   187MB


//location where docker images and other files are stored
[root@master boobalan]# docker info | grep -i "docker root dir"
 Docker Root Dir: /var/lib/docker

[root@master boobalan]# ls /var/lib/docker/
buildkit  containers  engine-id  image  network  overlay2  plugins  runtimes  swarm  tmp  volumes


//before
[root@master boobalan]# du -sh /var/lib/docker/
160K    /var/lib/docker/

//after
[root@master boobalan]# du -sh /var/lib/docker/
190M    /var/lib/docker/

------------------------------------------------------------------------
///add the kubernetes repository

[root@master boobalan]# cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/repodata/repomd.xml.key
EOF

///since we added a new repository, refresh the package metadata once
#dnf update -y

///now install kubernetes
 
[root@master boobalan]# dnf install kubelet kubeadm kubectl -y

Installed:
  conntrack-tools-1.4.7-2.el9.x86_64
  cri-tools-1.29.0-150500.1.1.x86_64
  kubeadm-1.29.3-150500.1.1.x86_64
  kubectl-1.29.3-150500.1.1.x86_64
  kubelet-1.29.3-150500.1.1.x86_64
  kubernetes-cni-1.3.0-150500.1.1.x86_64
  libnetfilter_cthelper-1.0.0-22.el9.x86_64
  libnetfilter_cttimeout-1.0.0-19.el9.x86_64
  libnetfilter_queue-1.0.5-1.el9.x86_64
  socat-1.7.4.1-5.el9.x86_64

Complete!

[root@master boobalan]# sudo systemctl start kubelet
[root@master boobalan]# sudo systemctl status kubelet
● kubelet.service - kubelet: The Kubernetes Node Agent
     Loaded: loaded (/usr/lib/systemd/system/kubelet.service; disabled; preset>
    Drop-In: /usr/lib/systemd/system/kubelet.service.d
             └─10-kubeadm.conf
     Active: activating (auto-restart) (Result: exit-code) since Sun 2024-04-0>
       Docs: https://kubernetes.io/docs/
    Process: 6628 ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET>
   Main PID: 6628 (code=exited, status=1/FAILURE)
        CPU: 381ms

Apr 07 19:27:40 master.boobi.com systemd[1]: kubelet.service: Main process exi>
Apr 07 19:27:40 master.boobi.com systemd[1]: kubelet.service: Failed with resu>


////////again an issue: kubelet isn't starting properly. This is expected before kubeadm init (kubelet crash-loops until /var/lib/kubelet/config.yaml exists), and the container-runtime prerequisites below also need to be set up.

---------------------------------------------------------------------------

//////some additional steps
[root@master boobalan]# sudo modprobe overlay
[root@master boobalan]# sudo modprobe br_netfilter
[root@master boobalan]# cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

[root@master boobalan]# cat /etc/modules-load.d/k8s.conf
overlay
br_netfilter

[root@master boobalan]# cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF

[root@master boobalan]# sudo sysctl --system
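
//spot-check that the settings are live:
#sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward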


[root@master boobalan]# mv /etc/containerd/config.toml /etc/containerd/config.toml.bkp
[root@master boobalan]# containerd config default > /etc/containerd/config.toml

[root@master boobalan]# vim /etc/containerd/config.toml

//changed SystemdCgroup = false to true (under the runc options section) so containerd uses the systemd cgroup driver
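
//the same edit as a one-liner (a sketch; assumes the freshly generated default config above):
#sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml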

[root@master boobalan]# systemctl restart containerd
[root@master boobalan]# docker info |grep -i cgroup
 Cgroup Driver: systemd
 Cgroup Version: 2
  cgroupns

[root@master boobalan]# vi /etc/docker/daemon.json
[root@master boobalan]# cat /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
[root@master boobalan]# systemctl restart docker


[root@master boobalan]# dnf install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
Last metadata expiration check: 0:21:42 ago on Sun 07 Apr 2024 08:23:10 PM CEST.
Package kubelet-1.29.3-150500.1.1.x86_64 is already installed.
Package kubeadm-1.29.3-150500.1.1.x86_64 is already installed.
Package kubectl-1.29.3-150500.1.1.x86_64 is already installed.
Dependencies resolved.
Nothing to do.
Complete!
[root@master boobalan]# systemctl enable --now kubelet

////////install the cilium CLI

[root@master boobalan]# CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/master/stable.txt)
[root@master boobalan]# CLI_ARCH=amd64
[root@master boobalan]# if [ "$(uname -m)" = "aarch64" ]; then CLI_ARCH=arm64; fi
[root@master boobalan]# curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100 39.8M  100 39.8M    0     0  10.1M      0  0:00:03  0:00:03 --:--:-- 14.6M
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100    92  100    92    0     0    136      0 --:--:-- --:--:-- --:--:--     0
[root@master boobalan]# sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
cilium-linux-amd64.tar.gz: OK
[root@master boobalan]# sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
cilium
[root@master boobalan]# rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
rm: remove regular file 'cilium-linux-amd64.tar.gz'? yes
rm: remove regular file 'cilium-linux-amd64.tar.gz.sha256sum'? yes

[root@master boobalan]# kubeadm config images pull
[config/images] Pulled registry.k8s.io/kube-apiserver:v1.29.3
[config/images] Pulled registry.k8s.io/kube-controller-manager:v1.29.3
[config/images] Pulled registry.k8s.io/kube-scheduler:v1.29.3
[config/images] Pulled registry.k8s.io/kube-proxy:v1.29.3
[config/images] Pulled registry.k8s.io/coredns/coredns:v1.11.1
[config/images] Pulled registry.k8s.io/pause:3.9
[config/images] Pulled registry.k8s.io/etcd:3.5.12-0

[root@master boobalan]# kubeadm init
[init] Using Kubernetes version: v1.29.3
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
W0407 20:55:02.852929   10016 checks.go:835] detected that the sandbox image "registry.k8s.io/pause:3.6" of the container runtime is inconsistent with that used by kubeadm. It is recommended that using "registry.k8s.io/pause:3.9" as the CRI sandbox image.
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local master.boobi.com] and IPs [10.96.0.1 192.168.198.140]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost master.boobi.com] and IPs [192.168.198.140 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost master.boobi.com] and IPs [192.168.198.140 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 16.011975 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master.boobi.com as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node master.boobi.com as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: dpdoaz.fvmmxwrsjvailyw2
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.198.140:6443 --token dpdoaz.fvmmxwrsjvailyw2 \
        --discovery-token-ca-cert-hash sha256:5dcf6e9dbd8e4f4ccd07ecefb15b1f1ec3585d3e81e6b63a09fdc953404f6f3f
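
//optional: the pause:3.6 warning during init comes from containerd's default sandbox image; if the generated config still contains the pause:3.6 line, it can be aligned with kubeadm (a sketch):
#sed -i 's|sandbox_image = "registry.k8s.io/pause:3.6"|sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml
#systemctl restart containerd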


////////kubeadm initialized successfully

/////kubeadm init is executed only on the master node
///worker nodes only need kubernetes and the container runtime installed

[root@master boobalan]# mkdir -p $HOME/.kube
[root@master boobalan]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master boobalan]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@master boobalan]# export KUBECONFIG=/etc/kubernetes/admin.conf

[root@master boobalan]# kubectl get nodes
NAME               STATUS     ROLES           AGE   VERSION
master.boobi.com   NotReady   control-plane   44m   v1.29.3

//the node shows NotReady because no CNI (cilium) is running yet

///now copy the join command (with the token) and run it on each worker node to join it to the cluster

[root@worker1 boobalan]# kubeadm join 192.168.198.140:6443 --token dpdoaz.fvmmxwrsjvailyw2         --discovery-token-ca-cert-hash sha256:5dcf6e9dbd8e4f4ccd07ecefb15b1f1ec3585d3e81e6b63a09fdc953404f6f3f
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@worker1 boobalan]#


////now check the master

[root@master boobalan]# kubectl get nodes
NAME                STATUS     ROLES           AGE   VERSION
master.boobi.com    NotReady   control-plane   57m   v1.29.3
worker1.boobi.com   NotReady   <none>          62s   v1.29.3


[root@master boobalan]# kubectl cluster-info
Kubernetes control plane is running at https://192.168.198.140:6443
CoreDNS is running at https://192.168.198.140:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
[root@master boobalan]#



/////2nd node added
[root@master boobalan]# kubectl get nodes
NAME                STATUS     ROLES           AGE   VERSION
master.boobi.com    NotReady   control-plane   70m   v1.29.3
worker1.boobi.com   NotReady   <none>          13m   v1.29.3
worker2.boobi.com   NotReady   <none>          15s   v1.29.3
[root@master boobalan]#



/////cilium successfully installed
[root@master boobalan]# cilium install
ℹ️  Using Cilium version 1.15.3
🔮 Auto-detected cluster name: kubernetes
🔮 Auto-detected kube-proxy has been installed
[root@master boobalan]#
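
//a quick verification sketch (cilium-cli commands; --wait blocks until all components report ready):
#cilium status --wait
#kubectl get nodes     //nodes should move from NotReady to Ready once the CNI is up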





Ref : https://sunnykkc13.medium.com/kubernetes-setup-489ecb64a896
Ref : https://www.youtube.com/watch?v=APQFU8g-Lrw&ab_channel=AbhishekModi



---------------------------------------------------------------------

#dnf install kubeadm kubelet kubectl kubernetes-cni -y

#systemctl enable kubelet
#systemctl start kubelet

///I can't find any configuration files after the k8s install
[root@master boobalan]# cat /etc/kubernetes/kubelet.conf
cat: /etc/kubernetes/kubelet.conf: No such file or directory
[root@master boobalan]# /etc/kubernetes/kubelet
bash: /etc/kubernetes/kubelet: No such file or directory


///regenerating kubelet configuration
[root@master boobalan]# kubeadm init phase kubelet-start
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[root@master boobalan]#



//install kubernetes binaries on all nodes

//download latest kubectl
[root@master boobalan]# curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"

  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100   138  100   138    0     0    579      0 --:--:-- --:--:-- --:--:--   582
100 47.4M  100 47.4M    0     0  20.5M      0  0:00:02  0:00:02 --:--:-- 27.6M
[root@master boobalan]# ls
kubectl

///install kubectl
[root@master boobalan]# install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl

///check the version
[root@master boobalan]# kubectl version --client
Client Version: v1.29.3
Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3


////check detailed version
[root@master boobalan]# kubectl version --client --output=yaml
clientVersion:
  buildDate: "2024-03-15T00:08:19Z"
  compiler: gc
  gitCommit: 6813625b7cd706db5bc7388921be03071e1a492d
  gitTreeState: clean
  gitVersion: v1.29.3
  goVersion: go1.21.8
  major: "1"
  minor: "29"
  platform: linux/amd64
kustomizeVersion: v5.0.4-0.20230601165947-6ce0bf390ce3


//now check which software packages we have installed
[root@master boobalan]# rpm -qa | grep kube

[root@master boobalan]# dnf list installed | grep -i kube

//both commands return nothing, which means the software was not installed via dnf/yum/rpm; it was installed some other way. Let's find out how.

[root@master boobalan]# which kubectl
/usr/local/bin/kubectl

//so that's how it was installed: a plain binary in /usr/local/bin

//to uninstall, just remove the file

[root@master boobalan]# rm /usr/local/bin/kubectl

//I have uninstalled kubectl

//now I'm going to reinstall kubectl, kubelet, kubeadm, and the kubernetes CNI (cilium)


/////install kubectl
[root@master boobalan]# curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100 47.4M  100 47.4M    0     0  15.7M      0  0:00:03  0:00:03 --:--:-- 15.7M
[root@master boobalan]# chmod +x kubectl
[root@master boobalan]# mv kubectl /usr/local/bin/


[root@master boobalan]# kubectl version --client
Client Version: v1.29.3
Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3


///install kubelet
[root@master boobalan]# curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubelet
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100  106M  100  106M    0     0  16.0M      0  0:00:06  0:00:06 --:--:-- 18.1M
[root@master boobalan]# ls
kubelet
[root@master boobalan]# chmod +x kubelet
[root@master boobalan]# mv kubelet /usr/local/bin/
[root@master boobalan]# kubelet --version
Kubernetes v1.29.3


////install kubeadm
[root@master boobalan]# curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubeadm
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100 46.1M  100 46.1M    0     0  13.7M      0  0:00:03  0:00:03 --:--:-- 13.7M
[root@master boobalan]# chmod +x kubeadm
[root@master boobalan]# mv kubeadm /usr/local/bin/
[root@master boobalan]# kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"29", GitVersion:"v1.29.3", GitCommit:"6813625b7cd706db5bc7388921be03071e1a492d", GitTreeState:"clean", BuildDate:"2024-03-15T00:06:16Z", GoVersion:"go1.21.8", Compiler:"gc", Platform:"linux/amd64"}


/////now install the cilium CLI (for the kubernetes CNI - cilium) on the master node
[root@master boobalan]# CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt)
[root@master boobalan]# CLI_ARCH=amd64
[root@master boobalan]# if [ "$(uname -m)" = "aarch64" ]; then CLI_ARCH=arm64; fi

[root@master boobalan]#
[root@master boobalan]# curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100 39.8M  100 39.8M    0     0  13.3M      0  0:00:02  0:00:02 --:--:-- 29.8M
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100    92  100    92    0     0    114      0 --:--:-- --:--:-- --:--:--     0
cilium-linux-amd64.tar.gz: OK
[root@master boobalan]# ls
cilium-linux-amd64.tar.gz  cilium-linux-amd64.tar.gz.sha256sum
[root@master boobalan]# tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
cilium
[root@master boobalan]# ls /usr/local/bin/cilium
/usr/local/bin/cilium
[root@master boobalan]# rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
rm: remove regular file 'cilium-linux-amd64.tar.gz'? yes
rm: remove regular file 'cilium-linux-amd64.tar.gz.sha256sum'? yes
[root@master boobalan]# cilium install --version 1.14.2
ℹ️  Using Cilium version 1.14.2
⏭️ Skipping auto kube-proxy detection

Error: Unable to install Cilium: Kubernetes cluster unreachable: Get "http://localhost:8080/version": dial tcp [::1]:8080: connect: connection refused

[root@master boobalan]# cilium version
cilium-cli: v0.16.4 compiled with go1.22.1 on linux/amd64
cilium image (default): v1.15.3
cilium image (stable): v1.15.3
cilium image (running): unknown. Unable to obtain cilium version. Reason: Kubernetes cluster unreachable: Get "http://localhost:8080/version": dial tcp [::1]:8080: connect: connection refused


//the components themselves are installed; cilium install fails only because there is no reachable cluster yet (kubeadm init hasn't run, so the CLI falls back to localhost:8080)

//alternate way of installing these 3 components
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
enabled=1
gpgcheck=1
repo_gpgcheck=1
exclude=kubelet kubeadm kubectl
EOF
sudo dnf install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
sudo systemctl enable --now kubelet

////but the above doesn't work either (that repo file is missing baseurl and gpgkey), so let's go with the official repository

# This overwrites any existing configuration in /etc/yum.repos.d/kubernetes.repo
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/repodata/repomd.xml.key
EOF

[root@master boobalan]# dnf install -y kubelet kubeadm kubectl



[root@master boobalan]# rpm -qa | grep kube
kubernetes-cni-1.3.0-150500.1.1.x86_64
kubelet-1.29.3-150500.1.1.x86_64
kubectl-1.29.3-150500.1.1.x86_64
kubeadm-1.29.3-150500.1.1.x86_64

[root@master boobalan]# systemctl is-enabled kubelet.service
disabled
[root@master boobalan]# systemctl enable kubelet.service
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.
[root@master boobalan]# systemctl start kubelet.service
[root@master boobalan]# systemctl status kubelet.service
● kubelet.service - kubelet: The Kubernetes Node Agent
     Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; preset:>
    Drop-In: /usr/lib/systemd/system/kubelet.service.d
             └─10-kubeadm.conf

////now the configuration
////initialize the kubernetes cluster on the master node

[root@master boobalan]# kubeadm init
[init] Using Kubernetes version: v1.29.3
[preflight] Running pre-flight checks
        [WARNING Swap]: swap is supported for cgroup v2 only; the NodeSwap feature gate of the kubelet is beta but disabled by default
error execution phase preflight: [preflight] Some fatal errors occurred:
        [ERROR NumCPU]: the number of available CPUs 1 is less than the required 2
        [ERROR CRI]: container runtime is not running: output: time="2024-04-07T13:42:37+02:00" level=fatal msg="validate service connection: validate CRI v1 runtime API for endpoint \"unix:///var/run/containerd/containerd.sock\": rpc error: code = Unimplemented desc = unknown service runtime.v1.RuntimeService"
, error: exit status 1


/////getting the above error because the master needs at least 2 CPU cores, but this machine has only one

[root@master boobalan]# cat /proc/cpuinfo | grep processor | wc -l
1

//I have increased the CPU core count

[root@master boobalan]# cat /proc/cpuinfo | grep processor | wc -l
2


//however, still getting an error
[root@master boobalan]# kubeadm init
[init] Using Kubernetes version: v1.29.3
[preflight] Running pre-flight checks
        [WARNING Swap]: swap is supported for cgroup v2 only; the NodeSwap feature gate of the kubelet is beta but disabled by default
error execution phase preflight: [preflight] Some fatal errors occurred:
        [ERROR CRI]: container runtime is not running: output: time="2024-04-07T13:49:54+02:00" level=fatal msg="validate service connection: validate CRI v1 runtime API for endpoint \"unix:///var/run/containerd/containerd.sock\": rpc error: code = Unimplemented desc = unknown service runtime.v1.RuntimeService"
, error: exit status 1
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher

///////checking the containerd service
[root@master boobalan]# systemctl is-enabled containerd
disabled
[root@master boobalan]# systemctl enable containerd
Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /usr/lib/systemd/system/containerd.service.
[root@master boobalan]# systemctl restart containerd
[root@master boobalan]#



///check the logs
[root@master boobalan]# journalctl -u kubelet -n 100 --no-pager

///the error says kubelet is unable to locate its configuration file /var/lib/kubelet/config.yaml

//to avoid this kind of issue I'm restoring the snapshot

///kubelet is the only systemd service; kubectl and kubeadm are just binaries, so you won't find them in systemctl
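
//quick check (a sketch):
#systemctl list-unit-files | grep kube      //only kubelet.service appears
#which kubeadm kubectl kubelet              //plain binaries on the PATH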

--------------------------------------------------------------------------------Next stage --------------

Understanding Manifest Configuration YAML Files:
Manifest configuration YAML files are used to define Kubernetes resources such as pods, services, deployments, etc. These files follow a specific structure and syntax. You'll use them to create and manage your Kubernetes resources.

Creating Pods and Deploying Applications:
Start by creating a simple pod definition YAML file. Define the pod's metadata, such as name and labels, as well as its container specifications, including image, ports, and any other configurations.

Once you have your pod definition, use the kubectl apply command to create the pod in your cluster.
You can also explore deploying applications using Deployment objects, which provide features like scaling, rolling updates, and more.

Scaling Applications:
Kubernetes allows you to scale applications horizontally by adding or removing replicas of your pods.
Experiment with scaling your deployed applications using the kubectl scale command or by updating the replicas field in your Deployment YAML file.
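
For example, scaling imperatively (a sketch with a hypothetical deployment name):
#kubectl scale deployment my-app --replicas=4
#kubectl get deployment my-app     //READY shows 4/4 once the new pods are up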

Exploring Cilium Plugins:
Cilium is a popular networking and security plugin for Kubernetes. It provides features like network policy enforcement, load balancing, and more.

Install Cilium on your Kubernetes cluster and configure it to work with your existing setup.
Explore Cilium's features and experiment with network policies to control traffic between pods.
Here are the steps you can follow to achieve these goals:

Understanding Manifest Configuration YAML Files:
Research the structure of YAML files used to define Kubernetes resources.
Familiarize yourself with the key components such as metadata, spec, and status.
Practice creating simple YAML files for different Kubernetes resources.

Creating Pods and Deploying Applications:
Create a basic pod definition YAML file with a single container.
Use the kubectl apply command to create the pod in your cluster.
Verify that the pod is running correctly and accessible.

Scaling Applications:
Deploy an application using a Deployment object instead of directly creating pods.
Experiment with scaling the application up and down using kubectl scale or by updating the replicas field in the Deployment YAML.

Exploring Cilium Plugins:
Install Cilium on your Kubernetes cluster following the official documentation.
Configure Cilium to work with your cluster and enable its features.
Create and apply network policies to control traffic between pods and enforce security rules.
Once you've completed these steps, you'll have a good understanding of basic Kubernetes operations: deploying applications, scaling them, and managing network policies with Cilium. A minimal network-policy sketch follows below.
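
For example, a minimal NetworkPolicy sketch (hypothetical labels; Cilium enforces standard Kubernetes NetworkPolicy objects) that only allows pods labeled role=frontend to reach the nginx pods on port 80:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-frontend-to-nginx
  namespace: akilan
spec:
  podSelector:
    matchLabels:
      app: nginx          # policy applies to the nginx pods
  ingress:
  - from:
    - podSelector:
        matchLabels:
          role: frontend  # only frontend pods may connect
    ports:
    - protocol: TCP
      port: 80

Apply it with #kubectl apply -f policy.yaml; all other ingress traffic to the selected pods is then denied.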


[root@master boobalan]# kubectl get nodes
NAME                STATUS     ROLES           AGE   VERSION
master.boobi.com    Ready      control-plane   40d   v1.29.3
worker1.boobi.com   Ready      <none>          40d   v1.29.3
worker2.boobi.com   NotReady   <none>          40d   v1.29.3


//now we need to make sure all the nodes are Ready
//to check for errors on a node, use the following command

[root@master boobalan]# kubectl describe nodes worker1.boobi.com
Name:               worker1.boobi.com
Roles:              <none>
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=worker1.boobi.com
                    kubernetes.io/os=linux
Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: unix:///var/run/containerd/containerd.sock
                    node.alpha.kubernetes.io/ttl: 0
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Sun, 07 Apr 2024 21:51:45 +0200
Taints:             node.kubernetes.io/not-ready:NoSchedule
Unschedulable:      false
Lease:
  HolderIdentity:  worker1.boobi.com
  AcquireTime:     <unset>
  RenewTime:       Sat, 04 May 2024 14:37:01 +0200
.
.
.
.

//cluster config file
[root@master boobalan]# echo $HOME
/root

[root@master boobalan]# cat $HOME/.kube/config
.
.
server: https://192.168.198.140:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
.
.

//copy this config file into OpenLens to manage the cluster through a GUI



Deployments -- a clever way to deploy pods
|
ReplicaSets -- the Deployment sends instructions to the ReplicaSet
|
Pods -- the ReplicaSet sends instructions to the pods

Service - exposes the app to the outside world

Storage - external storage backends live here (NetApp, AWS, etc.)


kubelet - the master communicates with the workers over kubelet, which runs on every worker

//if we create a bare pod there is no replication or HA, whereas a Deployment provides HA according to the replicas we configure

[root@master boobalan]# kubectl get deployment
NAME        READY   UP-TO-DATE   AVAILABLE   AGE
my-app      2/2     2            2           25m
nginx-dep   2/2     2            2           53m

//so here my-app is a deployment (microservice) running 2 pods; if any pod fails, another one is created

//a service exposes the application to the outside world

[root@master boobalan]# kubectl get service
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP          40d
my-app       NodePort    10.105.115.192   <none>        8080:32142/TCP   25m

//here kubernetes is the cluster's built-in API service, and my-app is a service we can access from the outside world

//delete everything and recreate it step by step
[root@master boobalan]# kubectl delete deployment my-app
deployment.apps "my-app" deleted
[root@master boobalan]# kubectl delete service my-app
service "my-app" deleted
[root@master boobalan]# kubectl delete deployment nginx-dep
deployment.apps "nginx-dep" deleted
[root@master boobalan]# kubectl delete pod nginx-pod -n akilan
pod "nginx-pod" deleted


///when creating a pod or deployment we can either use a single imperative command or write a .yaml file and run kubectl apply -f on it

//in the background the cluster stores a fuller manifest with defaults and status filled in, viewable with -o yaml
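
//to see that fuller server-side manifest of any object (example; works once the pod below is created):
#kubectl get pod nginx-pod-standalone -n akilan -o yaml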


///create a pod: write a .yaml file with the pod details
[root@master boobalan]# cat nginx-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod-standalone
  namespace: akilan
spec:
  containers:
  - name: nginx
    image: nginx:latest
    ports:
    - containerPort: 80

//apply the yaml file to get this running on the cluster
[root@master boobalan]# kubectl apply -f nginx-pod.yaml
pod/nginx-pod-standalone created

[root@master boobalan]# kubectl -n akilan get pods
NAME                   READY   STATUS    RESTARTS   AGE
nginx-pod-standalone   1/1     Running   0          6m21s

//the pod is running successfully; now expose it as a service

[root@master boobalan]# cat nginx-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx-service-standalone
  namespace: akilan
spec:
  selector:
    app: nginx
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
  type: NodePort

//apply this service to expose the pod to the outside world
[root@master boobalan]# kubectl apply -f nginx-service.yaml
service/nginx-service-standalone created

[root@master boobalan]# kubectl get service -n akilan
NAME                       TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
nginx-service-standalone   NodePort   10.105.9.123   <none>        80:31323/TCP   27s

//this CLUSTER-IP is not in our local range; need to check (see the note below)
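
//ClusterIPs come from the cluster's service CIDR (10.96.0.0/12 by default), which is virtual and only routable inside the cluster; to confirm the configured range (a sketch):
#kubectl cluster-info dump | grep -m1 service-cluster-ip-range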

////appending the following to almost any imperative command prints the YAML that would be applied (a dry run), without creating anything:
 --dry-run=client -o yaml
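
//for example, generate a deployment manifest instead of creating it directly (hypothetical name):
#kubectl create deployment demo --image=nginx --dry-run=client -o yaml > demo.yaml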

//meanwhile, let's set up a deployment; this time a single yaml file containing both the deployment and the service


[root@master boobalan]# cat my_microservice.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-microservice
spec:
  selector:
    matchLabels:
      run: my-microservice
  replicas: 2
  template:
    metadata:
      labels:
        run: my-microservice
    spec:
      containers:
      - name: my-microservice
        image: nginx:latest
        ports:
        - containerPort: 8080

---

apiVersion: v1
kind: Service
metadata:
  name: my-microservice
  labels:
    run: my-microservice
spec:
  type: NodePort
  ports:
  - port: 8080
    protocol: TCP
  selector:
    run: my-microservice


///////in the above config file I have defined the deployment and the service in the same file; let's apply the setup

[root@master boobalan]# kubectl apply -f my_microservice.yaml -n akilan
deployment.apps/my-microservice created
service/my-microservice created

//both the deployment and the service were created

[root@master boobalan]# kubectl get pods -n akilan
NAME                               READY   STATUS    RESTARTS      AGE
my-microservice-6448c89947-7z6sh   1/1     Running   0             113s
my-microservice-6448c89947-bp7rs   1/1     Running   0             113s
nginx-pod-standalone               1/1     Running   1 (11m ago)   82m


//if the standalone pod fails, the app goes down; whereas with my-microservice, if any pod fails a new pod is created automatically


//(screenshot of the cluster in OpenLens omitted)


[root@master boobalan]# kubectl describe pod my-microservice-6448c89947-7z6sh -n akilan
Name:             my-microservice-6448c89947-7z6sh
Namespace:        akilan
Priority:         0
Service Account:  default
Node:             worker1.boobi.com/192.168.198.141
Start Time:       Sat, 18 May 2024 16:50:47 +0200
Labels:           pod-template-hash=6448c89947
                  run=my-microservice
Annotations:      <none>
Status:           Running
IP:               10.0.2.35
.
.

[root@master boobalan]# kubectl get pod my-microservice-6448c89947-7z6sh -n akilan
NAME                               READY   STATUS    RESTARTS   AGE
my-microservice-6448c89947-7z6sh   1/1     Running   0          13m

[root@master boobalan]# kubectl get pod my-microservice-6448c89947-7z6sh -n akilan -o wide
NAME                               READY   STATUS    RESTARTS   AGE   IP          NODE                NOMINATED NODE   READINESS GATES
my-microservice-6448c89947-7z6sh   1/1     Running   0          13m   10.0.2.35   worker1.boobi.com   <none>           <none>

[root@master boobalan]# kubectl get deployment my-microservice -n akilan
NAME              READY   UP-TO-DATE   AVAILABLE   AGE
my-microservice   2/2     2            2           15m

[root@master boobalan]# kubectl get service my-microservice -n akilan
NAME              TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
my-microservice   NodePort   10.108.108.192   <none>        8080:31065/TCP   16m


[root@master boobalan]# kubectl -n akilan get service nginx-service-standalone -o wide
NAME                       TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE    SELECTOR
nginx-service-standalone   NodePort   10.105.9.123   <none>        80:31323/TCP   158m   app=nginx


[root@master boobalan]# kubectl get nodes -o wide
NAME                STATUS     ROLES           AGE   VERSION   INTERNAL-IP       EXTERNAL-IP   OS-IMAGE                      KERNEL-VERSION                     CONTAINER-RUNTIME
master.boobi.com    Ready      control-plane   40d   v1.29.3   192.168.198.140   <none>        Rocky Linux 9.3 (Blue Onyx)   5.14.0-362.24.1.el9_3.0.1.x86_64   containerd://1.6.28
worker1.boobi.com   Ready      <none>          40d   v1.29.3   192.168.198.141   <none>        Rocky Linux 9.3 (Blue Onyx)   5.14.0-362.24.1.el9_3.0.1.x86_64   containerd://1.6.28
worker2.boobi.com   NotReady   <none>          40d   v1.29.3   192.168.198.142   <none>        Rocky Linux 9.3 (Blue Onyx)   5.14.0-362.24.1.el9_3.0.1.x86_64   containerd://1.6.28


///to check pod live logs
[root@master boobalan]# kubectl logs -f my-microservice-6448c89947-7z6sh -n akilan
/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf
10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf


[root@master boobalan]# kubectl logs my-microservice-6448c89947-7z6sh -n akilan
/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh


//to log in to a pod
[root@master boobalan]# kubectl exec -it my-microservice-6448c89947-7z6sh -n akilan -- /bin/bash
root@my-microservice-6448c89947-7z6sh:/#

///if a pod has multiple containers, target a specific container with -c:
kubectl exec -it <pod_name> -c <container_name> -- /bin/bash

///checking the endpoints
[root@master boobalan]# kubectl get endpoints nginx-service-standalone -n akilan
NAME                       ENDPOINTS   AGE
nginx-service-standalone   <none>      165m

[root@master boobalan]# kubectl get endpoints -n akilan
NAME                       ENDPOINTS                        AGE
my-microservice            10.0.2.13:8080,10.0.2.216:8080   93m
nginx-service-standalone   <none>                           165m
[root@master boobalan]#


////here the standalone pod's service has no endpoints configured; let's find out why

//check pod labels
[root@master boobalan]# kubectl get pods -n akilan --show-labels
NAME                               READY   STATUS    RESTARTS      AGE    LABELS
my-microservice-6448c89947-7z6sh   1/1     Running   1 (19m ago)   103m   pod-template-hash=6448c89947,run=my-microservice
my-microservice-6448c89947-bp7rs   1/1     Running   1 (19m ago)   103m   pod-template-hash=6448c89947,run=my-microservice
nginx-pod-standalone               1/1     Running   2 (19m ago)   3h4m   <none>

//check service selector
[root@master boobalan]# kubectl describe service nginx-service-standalone -n akilan
Name:                     nginx-service-standalone
Namespace:                akilan
Labels:                   <none>
Annotations:              <none>
Selector:                 app=nginx
Type:                     NodePort
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       10.105.9.123
IPs:                      10.105.9.123
Port:                     <unset>  80/TCP
TargetPort:               80/TCP
NodePort:                 <unset>  31323/TCP
Endpoints:                <none>
Session Affinity:         None
External Traffic Policy:  Cluster
Events:                   <none>


//updating the pod label
[root@master boobalan]# kubectl label pods nginx-pod-standalone -n akilan app=nginx
pod/nginx-pod-standalone labeled

[root@master boobalan]# kubectl edit service nginx-service-standalone -n akilan

/////this edit will show the full manifest file


[root@master boobalan]# kubectl get endpoints nginx-service-standalone -n akilan
NAME                       ENDPOINTS       AGE
nginx-service-standalone   10.0.2.209:80   3h10m


////now I can successfully access nginx-service-standalone using any master/worker IP plus the NodePort shown in the service output

http://192.168.198.140:31323/

Initially, the service nginx-service-standalone had no endpoints, indicating that its selector did not match any pod labels.
Once the pod labels matched the service selector, the service correctly mapped to the pod endpoint (10.0.2.209:80).
Using the NodePort on the node's internal IP (192.168.198.140:31323), the NGINX service is now accessible.


[root@master boobalan]# kubectl get services -n akilan -o wide
NAME                       TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE     SELECTOR
my-microservice            NodePort   10.108.108.192   <none>        8080:31065/TCP   133m    run=my-microservice
nginx-service-standalone   NodePort   10.105.9.123     <none>        80:31323/TCP     3h25m   app=nginx

///however, I still can't access my-microservice, even though the standalone service works fine

//to troubleshoot
[root@master boobalan]# kubectl get endpoints -n akilan
NAME                       ENDPOINTS                        AGE
my-microservice            10.0.2.13:8080,10.0.2.216:8080   136m
nginx-service-standalone   10.0.2.209:80                    3h28m


[root@master boobalan]# kubectl get pods -n akilan --show-labels
NAME                               READY   STATUS    RESTARTS      AGE     LABELS
my-microservice-6448c89947-7z6sh   1/1     Running   1 (52m ago)   137m    pod-template-hash=6448c89947,run=my-microservice
my-microservice-6448c89947-bp7rs   1/1     Running   1 (52m ago)   137m    pod-template-hash=6448c89947,run=my-microservice
nginx-pod-standalone               1/1     Running   2 (52m ago)   3h38m   app=nginx


[root@master boobalan]# kubectl describe service my-microservice -n akilan
Name:                     my-microservice
Namespace:                akilan
Labels:                   run=my-microservice
Annotations:              <none>
Selector:                 run=my-microservice
Type:                     NodePort
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       10.108.108.192
IPs:                      10.108.108.192
Port:                     <unset>  8080/TCP
TargetPort:               8080/TCP
NodePort:                 <unset>  31065/TCP
Endpoints:                10.0.2.13:8080,10.0.2.216:8080
Session Affinity:         None
External Traffic Policy:  Cluster
Events:                   <none>

//////If ENDPOINTS is <none>, there is an issue with the pod labels or service selector.



[root@master boobalan]# kubectl edit service my-microservice -n akilan
.
.

    targetPort: 8080
  selector:
    run: my-microservice
  sessionAffinity: None
  type: NodePort
status:
  loadBalancer: {}


////the selector here is run: my-microservice, which matches the pod labels, so the selector is fine

///I suspect the port: the service shows 8080, but the nginx container listens on 80, so I'm going to change the port to 80

[root@master boobalan]# cat my-microservice.yaml
.
.

---

apiVersion: v1
kind: Service
metadata:
  name: my-microservice
  labels:
    run: my-microservice
spec:
  type: NodePort
  ports:
  - port: 80
    protocol: TCP
  selector:
    run: my-microservice

////updated the port to 80
[root@master boobalan]# kubectl apply -f my-microservice.yaml -n akilan
deployment.apps/my-microservice unchanged
service/my-microservice configured
[root@master boobalan]#

[root@master boobalan]# kubectl get services -n akilan
NAME                       TYPE       CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
my-microservice            NodePort   10.108.108.192   <none>        80:31065/TCP   3h1m
nginx-service-standalone   NodePort   10.105.9.123     <none>        80:31323/TCP   4h13m

[root@master boobalan]# kubectl get endpoints my-microservice -n akilan
NAME              ENDPOINTS                    AGE
my-microservice   10.0.2.13:80,10.0.2.216:80   3h6m


///now it's changed; yes, this one also works fine now:
http://192.168.198.140:31065/

------------------------------------------------------

Kind
  - Helm (third party; a chart contains everything: deployment, HPA, and the application's major configs) - yaml
  - Deployment - (built-in kubernetes config)


HPA - grandparent (autoscaling; increases pods based on CPU or other metrics we define; e.g. scale min 2, max 10)
  |
Deployment - parent (if we set 4 pods, it maintains 4 pods at all times)
  |
ReplicaSet - child (helps the Deployment manage pod replicas)
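
//an HPA can be created imperatively (a sketch; requires the metrics-server add-on for CPU metrics):
#kubectl autoscale deployment my-microservice -n akilan --min=2 --max=10 --cpu-percent=80
#kubectl get hpa -n akilan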



///rollout restart: it restarts the service without downtime
//we actually perform the restart on the deployment

PS C:\Users\booba> kubectl.exe -n akilan rollout restart deployment my-microservice
deployment.apps/my-microservice restarted

//pods are replaced one at a time: a new pod is created, then an old one is terminated (a rolling update)
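
//to watch the rolling restart progress (same deployment as above):
PS C:\Users\booba> kubectl.exe -n akilan rollout status deployment my-microservice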


