Kubernetes master/worker cluster setup on CentOS 7 (kubeadm + Docker). Ingress reference: kubernetes/ingress-nginx/master/deploy
[root@localhost ~]# yum remove kubeadm kubelet
Loaded plugins: fastestmirror, langpacks
Resolving Dependencies
--> Running transaction check
---> Package kubeadm.x86_64 0:1.16.3-0 will be erased
---> Package kubelet.x86_64 0:1.16.3-0 will be erased
--> Processing Dependency: kubelet for package: kubernetes-cni-0.7.5-0.x86_64
--> Running transaction check
---> Package kubernetes-cni.x86_64 0:0.7.5-0 will be erased
--> Finished Dependency Resolution
Dependencies Resolved
================================================================================
Package Arch Version Repository Size
================================================================================
Removing:
kubeadm x86_64 1.16.3-0 @Kubernetes 42 M
kubelet x86_64 1.16.3-0 @Kubernetes 117 M
Removing for dependencies:
kubernetes-cni x86_64 0.7.5-0 @Kubernetes 35 M
Transaction Summary
================================================================================
Remove 2 Packages (+1 Dependent package)
Installed size: 195 M
Is this ok [y/N]: y
Downloading packages:
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Erasing : kubeadm-1.16.3-0.x86_64 1/3
Erasing : kubelet-1.16.3-0.x86_64 2/3
Erasing : kubernetes-cni-0.7.5-0.x86_64 3/3
Verifying : kubeadm-1.16.3-0.x86_64 1/3
Verifying : kubernetes-cni-0.7.5-0.x86_64 2/3
Verifying : kubelet-1.16.3-0.x86_64 3/3
Removed:
kubeadm.x86_64 0:1.16.3-0 kubelet.x86_64 0:1.16.3-0
Dependency Removed:
kubernetes-cni.x86_64 0:0.7.5-0
Complete!
[root@localhost ~]# yum remove kubeadm kubelet kubectl
Loaded plugins: fastestmirror, langpacks
No Match for argument: kubeadm
No Match for argument: kubelet
Resolving Dependencies
--> Running transaction check
---> Package kubectl.x86_64 0:1.16.3-0 will be erased
--> Finished Dependency Resolution
Dependencies Resolved
================================================================================
Package Arch Version Repository Size
================================================================================
Removing:
kubectl x86_64 1.16.3-0 @Kubernetes 45 M
Transaction Summary
================================================================================
Remove 1 Package
Installed size: 45 M
Is this ok [y/N]: y
Downloading packages:
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Erasing : kubectl-1.16.3-0.x86_64 1/1
Verifying : kubectl-1.16.3-0.x86_64 1/1
Removed:
kubectl.x86_64 0:1.16.3-0
Complete!
[root@localhost ~]# rpm -qa | grep kube
[root@localhost ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
> [kubernetes]
> name=Kubernetes
> baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=1
> repo_gpgcheck=1
> gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
> EOF
[root@localhost ~]# yum install kubeadm docker -y
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
* base: mirrors.praction.in
* extras: mirrors.praction.in
* updates: mirrors.praction.in
Package 2:docker-1.13.1-103.git7f2769b.el7.centos.x86_64 already installed and latest version
Resolving Dependencies
--> Running transaction check
---> Package kubeadm.x86_64 0:1.16.3-0 will be installed
--> Processing Dependency: kubernetes-cni >= 0.7.5 for package: kubeadm-1.16.3-0.x86_64
--> Processing Dependency: kubelet >= 1.13.0 for package: kubeadm-1.16.3-0.x86_64
--> Processing Dependency: kubectl >= 1.13.0 for package: kubeadm-1.16.3-0.x86_64
--> Running transaction check
---> Package kubectl.x86_64 0:1.16.3-0 will be installed
---> Package kubelet.x86_64 0:1.16.3-0 will be installed
---> Package kubernetes-cni.x86_64 0:0.7.5-0 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
================================================================================
Package Arch Version Repository Size
================================================================================
Installing:
kubeadm x86_64 1.16.3-0 Kubernetes 9.5 M
Installing for dependencies:
kubectl x86_64 1.16.3-0 Kubernetes 10 M
kubelet x86_64 1.16.3-0 Kubernetes 22 M
kubernetes-cni x86_64 0.7.5-0 Kubernetes 10 M
Transaction Summary
================================================================================
Install 1 Package (+3 Dependent packages)
Total download size: 52 M
Installed size: 239 M
Downloading packages:
(1/4): b45a63e77d36fc7e1ef84f1cd2f7b84bccf650c8248191a37d2 | 9.5 MB 00:04
(2/4): fd6465355a85b8ddbc0b2e7cb073e3a40160c7c359576b86e9b | 10 MB 00:04
(3/4): 548a0dcd865c16a50980420ddfa5fbccb8b59621179798e6dc9 | 10 MB 00:04
(4/4): 8a0e2b605c7a616d7cb72c25c9058b2327e41d869046c7c6cb3 | 22 MB 00:07
--------------------------------------------------------------------------------
Total 4.3 MB/s | 52 MB 00:11
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : kubelet-1.16.3-0.x86_64 1/4
Installing : kubernetes-cni-0.7.5-0.x86_64 2/4
Installing : kubectl-1.16.3-0.x86_64 3/4
Installing : kubeadm-1.16.3-0.x86_64 4/4
Verifying : kubeadm-1.16.3-0.x86_64 1/4
Verifying : kubernetes-cni-0.7.5-0.x86_64 2/4
Verifying : kubectl-1.16.3-0.x86_64 3/4
Verifying : kubelet-1.16.3-0.x86_64 4/4
Installed:
kubeadm.x86_64 0:1.16.3-0
Dependency Installed:
kubectl.x86_64 0:1.16.3-0 kubelet.x86_64 0:1.16.3-0
kubernetes-cni.x86_64 0:0.7.5-0
Complete!
[root@localhost ~]# systemctl restart docker && systemctl enable docker
[root@localhost ~]# systemctl restart kubelet && systemctl enable kubelet
[root@localhost ~]# kubeadm init
[init] Using Kubernetes version: v1.16.3
[preflight] Running pre-flight checks
[WARNING Firewalld]: firewalld is active, please ensure ports [6443 10250] are open or your cluster may not function correctly
[WARNING Hostname]: hostname "localhost.localdomain" could not be reached
[WARNING Hostname]: hostname "localhost.localdomain": lookup localhost.localdomain on 192.168.0.1:53: no such host
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR Swap]: running with swap on is not supported. Please disable swap
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
[root@localhost ~]# ifconfig
docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 172.17.0.1 netmask 255.255.0.0 broadcast 0.0.0.0
ether 02:42:0f:c6:dd:11 txqueuelen 0 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.0.101 netmask 255.255.255.0 broadcast 192.168.0.255
inet6 fe80::8961:8cea:862:8e15 prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:ae:88:4d txqueuelen 1000 (Ethernet)
RX packets 40142 bytes 58702424 (55.9 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 35378 bytes 2665500 (2.5 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@localhost ~]# vi /etc/hosts
[root@localhost ~]# cat /etc/hosts
192.168.0.101 kuber
192.168.0.105 worker
[root@localhost ~]# kubeadm init
[init] Using Kubernetes version: v1.16.3
[preflight] Running pre-flight checks
[WARNING Firewalld]: firewalld is active, please ensure ports [6443 10250] are open or your cluster may not function correctly
[WARNING Hostname]: hostname "localhost.localdomain" could not be reached
[WARNING Hostname]: hostname "localhost.localdomain": lookup localhost.localdomain on 192.168.0.1:53: no such host
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR Swap]: running with swap on is not supported. Please disable swap
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
[root@localhost ~]# systemctl daemon-reload
[root@localhost ~]# systemctl restart kubelet
[root@localhost ~]# kubeadm init
[init] Using Kubernetes version: v1.16.3
[preflight] Running pre-flight checks
[WARNING Firewalld]: firewalld is active, please ensure ports [6443 10250] are open or your cluster may not function correctly
[WARNING Hostname]: hostname "localhost.localdomain" could not be reached
[WARNING Hostname]: hostname "localhost.localdomain": lookup localhost.localdomain on 192.168.0.1:53: no such host
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR Swap]: running with swap on is not supported. Please disable swap
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
[root@localhost ~]# swapoff -a
[root@localhost ~]# kubeadm init
[init] Using Kubernetes version: v1.16.3
[preflight] Running pre-flight checks
[WARNING Firewalld]: firewalld is active, please ensure ports [6443 10250] are open or your cluster may not function correctly
[WARNING Hostname]: hostname "localhost.localdomain" could not be reached
[WARNING Hostname]: hostname "localhost.localdomain": lookup localhost.localdomain on 192.168.0.1:53: no such host
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
Message from syslogd@localhost at Nov 23 23:08:31 ...
kernel:NMI watchdog: BUG: soft lockup - CPU#1 stuck for 51s! [kworker/u256:0:6]
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [localhost.localdomain kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.0.101]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost.localdomain localhost] and IPs [192.168.0.101 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost.localdomain localhost] and IPs [192.168.0.101 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 61.581272 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.16" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node localhost.localdomain as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node localhost.localdomain as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: xnqdau.hzgcjhzcqcd90wp8
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.0.101:6443 --token xnqdau.hzgcjhzcqcd90wp8 \
--discovery-token-ca-cert-hash sha256:04aa3c2cf20db92bea4a66208cbf8148855bc4aa756ce6978f2e839ad8b0a89e
You have mail in /var/spool/mail/root
[root@localhost ~]# vi /etc/hosts
[root@localhost ~]# cat /etc/hosts
192.168.0.101 kuber
192.168.0.105 worker
Issue:
[root@localhost ~]# kubectl get nodes
The connection to the server localhost:8080 was refused - did you specify the right host or port?
Solution:
[root@localhost ~]# mkdir -p $HOME/.kube
[root@localhost ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@localhost ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@localhost ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kuber NotReady <none> 30s v1.16.3
localhost.localdomain NotReady master 2m59s v1.16.3
[root@localhost ~]# systemctl daemon-reload
[root@localhost ~]# systemctl restart kubelet
[root@localhost ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kuber NotReady <none> 83s v1.16.3
localhost.localdomain NotReady master 3m52s v1.16.3
[root@localhost ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kuber NotReady <none> 85s v1.16.3
localhost.localdomain NotReady master 3m54s v1.16.3
[root@localhost ~]# export kubever=$(kubectl version | base64 | tr -d '\n')
[root@localhost ~]# kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$kubever"
serviceaccount/weave-net created
clusterrole.rbac.authorization.k8s.io/weave-net created
clusterrolebinding.rbac.authorization.k8s.io/weave-net created
role.rbac.authorization.k8s.io/weave-net created
rolebinding.rbac.authorization.k8s.io/weave-net created
daemonset.apps/weave-net created
[root@localhost ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kuber NotReady <none> 2m6s v1.16.3
localhost.localdomain NotReady master 4m35s v1.16.3
[root@localhost ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-5644d7b6d9-f8zmw 0/1 Pending 0 4m51s
kube-system coredns-5644d7b6d9-wnbgc 0/1 Pending 0 4m50s
kube-system etcd-localhost.localdomain 1/1 Running 0 4m28s
kube-system kube-apiserver-localhost.localdomain 1/1 Running 0 4m9s
kube-system kube-controller-manager-localhost.localdomain 1/1 Running 0 4m18s
kube-system kube-proxy-6kzpk 1/1 Running 0 2m36s
kube-system kube-proxy-thpt8 1/1 Running 0 4m51s
kube-system kube-scheduler-localhost.localdomain 1/1 Running 0 4m1s
kube-system weave-net-dtf8j 2/2 Running 0 33s
kube-system weave-net-wjrqq 1/2 Running 0 33s
On the worker node:
[root@kuber ~]# kubeadm join 192.168.0.101:6443 --token xnqdau.hzgcjhzcqcd90wp8 \
> --discovery-token-ca-cert-hash sha256:04aa3c2cf20db92bea4a66208cbf8148855bc4aa756ce6978f2e839ad8b0a89e
[preflight] Running pre-flight checks
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR Swap]: running with swap on is not supported. Please disable swap
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
[root@kuber ~]# swapoff -a
[root@kuber ~]# kubeadm join 192.168.0.101:6443 --token xnqdau.hzgcjhzcqcd90wp8 --discovery-token-ca-cert-hash sha256:04aa3c2cf20db92bea4a66208cbf8148855bc4aa756ce6978f2e839ad8b0a89e
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.16" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[root@localhost ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kuber Ready <none> 2m52s v1.16.3
localhost.localdomain Ready master 5m21s v1.16.3
[root@localhost ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-5644d7b6d9-f8zmw 1/1 Running 0 5m40s
kube-system coredns-5644d7b6d9-wnbgc 1/1 Running 0 5m39s
kube-system etcd-localhost.localdomain 1/1 Running 0 5m17s
kube-system kube-apiserver-localhost.localdomain 1/1 Running 0 4m58s
kube-system kube-controller-manager-localhost.localdomain 1/1 Running 0 5m7s
kube-system kube-proxy-6kzpk 1/1 Running 0 3m25s
kube-system kube-proxy-thpt8 1/1 Running 0 5m40s
kube-system kube-scheduler-localhost.localdomain 1/1 Running 0 4m50s
kube-system weave-net-dtf8j 2/2 Running 0 82s
kube-system weave-net-wjrqq 2/2 Running 0 82s
[root@localhost ~]#
[root@kuber ~]# kubectl create service nodeport nginx --tcp=80:80
service/nginx created
[root@kuber ~]#
[root@kuber ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 30m
nginx NodePort 10.97.170.90 <none> 80:32157/TCP 44s
[root@kuber ~]# kubectl delete deployment nginx
deployment.apps "nginx" deleted
[root@kuber ~]#
Loaded plugins: fastestmirror, langpacks
Resolving Dependencies
--> Running transaction check
---> Package kubeadm.x86_64 0:1.16.3-0 will be erased
---> Package kubelet.x86_64 0:1.16.3-0 will be erased
--> Processing Dependency: kubelet for package: kubernetes-cni-0.7.5-0.x86_64
--> Running transaction check
---> Package kubernetes-cni.x86_64 0:0.7.5-0 will be erased
--> Finished Dependency Resolution
Dependencies Resolved
================================================================================
Package Arch Version Repository Size
================================================================================
Removing:
kubeadm x86_64 1.16.3-0 @Kubernetes 42 M
kubelet x86_64 1.16.3-0 @Kubernetes 117 M
Removing for dependencies:
kubernetes-cni x86_64 0.7.5-0 @Kubernetes 35 M
Transaction Summary
================================================================================
Remove 2 Packages (+1 Dependent package)
Installed size: 195 M
Is this ok [y/N]: y
Downloading packages:
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Erasing : kubeadm-1.16.3-0.x86_64 1/3
Erasing : kubelet-1.16.3-0.x86_64 2/3
Erasing : kubernetes-cni-0.7.5-0.x86_64 3/3
Verifying : kubeadm-1.16.3-0.x86_64 1/3
Verifying : kubernetes-cni-0.7.5-0.x86_64 2/3
Verifying : kubelet-1.16.3-0.x86_64 3/3
Removed:
kubeadm.x86_64 0:1.16.3-0 kubelet.x86_64 0:1.16.3-0
Dependency Removed:
kubernetes-cni.x86_64 0:0.7.5-0
Complete!
[root@localhost ~]# yum remove kubeadm kubelet kubectl
Loaded plugins: fastestmirror, langpacks
No Match for argument: kubeadm
No Match for argument: kubelet
Resolving Dependencies
--> Running transaction check
---> Package kubectl.x86_64 0:1.16.3-0 will be erased
--> Finished Dependency Resolution
Dependencies Resolved
================================================================================
Package Arch Version Repository Size
================================================================================
Removing:
kubectl x86_64 1.16.3-0 @Kubernetes 45 M
Transaction Summary
================================================================================
Remove 1 Package
Installed size: 45 M
Is this ok [y/N]: y
Downloading packages:
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Erasing : kubectl-1.16.3-0.x86_64 1/1
Verifying : kubectl-1.16.3-0.x86_64 1/1
Removed:
kubectl.x86_64 0:1.16.3-0
Complete!
[root@localhost ~]# rpm -qa | grep kube
[root@localhost ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
> [kubernetes]
> name=Kubernetes
> baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=1
> repo_gpgcheck=1
> gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
> EOF
[root@localhost ~]# yum install kubeadm docker -y
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
* base: mirrors.praction.in
* extras: mirrors.praction.in
* updates: mirrors.praction.in
Package 2:docker-1.13.1-103.git7f2769b.el7.centos.x86_64 already installed and latest version
Resolving Dependencies
--> Running transaction check
---> Package kubeadm.x86_64 0:1.16.3-0 will be installed
--> Processing Dependency: kubernetes-cni >= 0.7.5 for package: kubeadm-1.16.3-0.x86_64
--> Processing Dependency: kubelet >= 1.13.0 for package: kubeadm-1.16.3-0.x86_64
--> Processing Dependency: kubectl >= 1.13.0 for package: kubeadm-1.16.3-0.x86_64
--> Running transaction check
---> Package kubectl.x86_64 0:1.16.3-0 will be installed
---> Package kubelet.x86_64 0:1.16.3-0 will be installed
---> Package kubernetes-cni.x86_64 0:0.7.5-0 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
================================================================================
Package Arch Version Repository Size
================================================================================
Installing:
kubeadm x86_64 1.16.3-0 Kubernetes 9.5 M
Installing for dependencies:
kubectl x86_64 1.16.3-0 Kubernetes 10 M
kubelet x86_64 1.16.3-0 Kubernetes 22 M
kubernetes-cni x86_64 0.7.5-0 Kubernetes 10 M
Transaction Summary
================================================================================
Install 1 Package (+3 Dependent packages)
Total download size: 52 M
Installed size: 239 M
Downloading packages:
(1/4): b45a63e77d36fc7e1ef84f1cd2f7b84bccf650c8248191a37d2 | 9.5 MB 00:04
(2/4): fd6465355a85b8ddbc0b2e7cb073e3a40160c7c359576b86e9b | 10 MB 00:04
(3/4): 548a0dcd865c16a50980420ddfa5fbccb8b59621179798e6dc9 | 10 MB 00:04
(4/4): 8a0e2b605c7a616d7cb72c25c9058b2327e41d869046c7c6cb3 | 22 MB 00:07
--------------------------------------------------------------------------------
Total 4.3 MB/s | 52 MB 00:11
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : kubelet-1.16.3-0.x86_64 1/4
Installing : kubernetes-cni-0.7.5-0.x86_64 2/4
Installing : kubectl-1.16.3-0.x86_64 3/4
Installing : kubeadm-1.16.3-0.x86_64 4/4
Verifying : kubeadm-1.16.3-0.x86_64 1/4
Verifying : kubernetes-cni-0.7.5-0.x86_64 2/4
Verifying : kubectl-1.16.3-0.x86_64 3/4
Verifying : kubelet-1.16.3-0.x86_64 4/4
Installed:
kubeadm.x86_64 0:1.16.3-0
Dependency Installed:
kubectl.x86_64 0:1.16.3-0 kubelet.x86_64 0:1.16.3-0
kubernetes-cni.x86_64 0:0.7.5-0
Complete!
[root@localhost ~]# systemctl restart docker && systemctl enable docker
[root@localhost ~]# systemctl restart kubelet && systemctl enable kubelet
[root@localhost ~]# kubeadm init
[init] Using Kubernetes version: v1.16.3
[preflight] Running pre-flight checks
[WARNING Firewalld]: firewalld is active, please ensure ports [6443 10250] are open or your cluster may not function correctly
[WARNING Hostname]: hostname "localhost.localdomain" could not be reached
[WARNING Hostname]: hostname "localhost.localdomain": lookup localhost.localdomain on 192.168.0.1:53: no such host
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR Swap]: running with swap on is not supported. Please disable swap
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
[root@localhost ~]# ifconfig
docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 172.17.0.1 netmask 255.255.0.0 broadcast 0.0.0.0
ether 02:42:0f:c6:dd:11 txqueuelen 0 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.0.101 netmask 255.255.255.0 broadcast 192.168.0.255
inet6 fe80::8961:8cea:862:8e15 prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:ae:88:4d txqueuelen 1000 (Ethernet)
RX packets 40142 bytes 58702424 (55.9 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 35378 bytes 2665500 (2.5 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@localhost ~]# vi /etc/hosts
[root@localhost ~]# cat /etc/hosts
192.168.0.101 kuber
192.168.0.105 worker
[root@localhost ~]# kubeadm init
[init] Using Kubernetes version: v1.16.3
[preflight] Running pre-flight checks
[WARNING Firewalld]: firewalld is active, please ensure ports [6443 10250] are open or your cluster may not function correctly
[WARNING Hostname]: hostname "localhost.localdomain" could not be reached
[WARNING Hostname]: hostname "localhost.localdomain": lookup localhost.localdomain on 192.168.0.1:53: no such host
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR Swap]: running with swap on is not supported. Please disable swap
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
[root@localhost ~]# systemctl daemon-reload
[root@localhost ~]# systemctl restart kubelet
[root@localhost ~]# kubeadm init
[init] Using Kubernetes version: v1.16.3
[preflight] Running pre-flight checks
[WARNING Firewalld]: firewalld is active, please ensure ports [6443 10250] are open or your cluster may not function correctly
[WARNING Hostname]: hostname "localhost.localdomain" could not be reached
[WARNING Hostname]: hostname "localhost.localdomain": lookup localhost.localdomain on 192.168.0.1:53: no such host
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR Swap]: running with swap on is not supported. Please disable swap
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
[root@localhost ~]# swapoff -a
[root@localhost ~]# kubeadm init
[init] Using Kubernetes version: v1.16.3
[preflight] Running pre-flight checks
[WARNING Firewalld]: firewalld is active, please ensure ports [6443 10250] are open or your cluster may not function correctly
[WARNING Hostname]: hostname "localhost.localdomain" could not be reached
[WARNING Hostname]: hostname "localhost.localdomain": lookup localhost.localdomain on 192.168.0.1:53: no such host
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
Message from syslogd@localhost at Nov 23 23:08:31 ...
kernel:NMI watchdog: BUG: soft lockup - CPU#1 stuck for 51s! [kworker/u256:0:6]
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [localhost.localdomain kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.0.101]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost.localdomain localhost] and IPs [192.168.0.101 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost.localdomain localhost] and IPs [192.168.0.101 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 61.581272 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.16" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node localhost.localdomain as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node localhost.localdomain as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: xnqdau.hzgcjhzcqcd90wp8
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.0.101:6443 --token xnqdau.hzgcjhzcqcd90wp8 \
--discovery-token-ca-cert-hash sha256:04aa3c2cf20db92bea4a66208cbf8148855bc4aa756ce6978f2e839ad8b0a89e
You have mail in /var/spool/mail/root
[root@localhost ~]# vi /etc/hosts
[root@localhost ~]# cat /etc/hosts
192.168.0.101 kuber
192.168.0.105 worker
Issue: kubectl cannot reach the API server yet
[root@localhost ~]# kubectl get nodes
The connection to the server localhost:8080 was refused - did you specify the right host or port?
Solution: copy the admin kubeconfig into the current user's home, as instructed by kubeadm init
[root@localhost ~]# mkdir -p $HOME/.kube
[root@localhost ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@localhost ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@localhost ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kuber NotReady <none> 30s v1.16.3
localhost.localdomain NotReady master 2m59s v1.16.3
[root@localhost ~]# systemctl daemon-reload
[root@localhost ~]# systemctl restart kubelet
[root@localhost ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kuber NotReady <none> 83s v1.16.3
localhost.localdomain NotReady master 3m52s v1.16.3
[root@localhost ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kuber NotReady <none> 85s v1.16.3
localhost.localdomain NotReady master 3m54s v1.16.3
[root@localhost ~]# export kubever=$(kubectl version | base64 | tr -d '\n')
[root@localhost ~]# kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$kubever"
serviceaccount/weave-net created
clusterrole.rbac.authorization.k8s.io/weave-net created
clusterrolebinding.rbac.authorization.k8s.io/weave-net created
role.rbac.authorization.k8s.io/weave-net created
rolebinding.rbac.authorization.k8s.io/weave-net created
daemonset.apps/weave-net created
[root@localhost ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kuber NotReady <none> 2m6s v1.16.3
localhost.localdomain NotReady master 4m35s v1.16.3
[root@localhost ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-5644d7b6d9-f8zmw 0/1 Pending 0 4m51s
kube-system coredns-5644d7b6d9-wnbgc 0/1 Pending 0 4m50s
kube-system etcd-localhost.localdomain 1/1 Running 0 4m28s
kube-system kube-apiserver-localhost.localdomain 1/1 Running 0 4m9s
kube-system kube-controller-manager-localhost.localdomain 1/1 Running 0 4m18s
kube-system kube-proxy-6kzpk 1/1 Running 0 2m36s
kube-system kube-proxy-thpt8 1/1 Running 0 4m51s
kube-system kube-scheduler-localhost.localdomain 1/1 Running 0 4m1s
kube-system weave-net-dtf8j 2/2 Running 0 33s
kube-system weave-net-wjrqq 1/2 Running 0 33s
On the worker node:
[root@kuber ~]# kubeadm join 192.168.0.101:6443 --token xnqdau.hzgcjhzcqcd90wp8 \
> --discovery-token-ca-cert-hash sha256:04aa3c2cf20db92bea4a66208cbf8148855bc4aa756ce6978f2e839ad8b0a89e
[preflight] Running pre-flight checks
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR Swap]: running with swap on is not supported. Please disable swap
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
[root@kuber ~]# swapoff -a
[root@kuber ~]# kubeadm join 192.168.0.101:6443 --token xnqdau.hzgcjhzcqcd90wp8 --discovery-token-ca-cert-hash sha256:04aa3c2cf20db92bea4a66208cbf8148855bc4aa756ce6978f2e839ad8b0a89e
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.16" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[root@localhost ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kuber Ready <none> 2m52s v1.16.3
localhost.localdomain Ready master 5m21s v1.16.3
[root@localhost ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-5644d7b6d9-f8zmw 1/1 Running 0 5m40s
kube-system coredns-5644d7b6d9-wnbgc 1/1 Running 0 5m39s
kube-system etcd-localhost.localdomain 1/1 Running 0 5m17s
kube-system kube-apiserver-localhost.localdomain 1/1 Running 0 4m58s
kube-system kube-controller-manager-localhost.localdomain 1/1 Running 0 5m7s
kube-system kube-proxy-6kzpk 1/1 Running 0 3m25s
kube-system kube-proxy-thpt8 1/1 Running 0 5m40s
kube-system kube-scheduler-localhost.localdomain 1/1 Running 0 4m50s
kube-system weave-net-dtf8j 2/2 Running 0 82s
kube-system weave-net-wjrqq 2/2 Running 0 82s
[root@localhost ~]#
kubectl create deployment nginx --image=nginx
kubectl get deployments
[root@kuber ~]# kubectl get deployments
NAME    READY   UP-TO-DATE   AVAILABLE   AGE
nginx   1/1     1            1           30m
[root@kuber ~]#
[root@kuber ~]# kubectl describe deployment nginx
Name: nginx
Namespace: default
CreationTimestamp: Sat, 23 Nov 2019 23:31:23 +0530
Labels: app=nginx
Annotations: deployment.kubernetes.io/revision: 1
Selector: app=nginx
Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable
StrategyType: RollingUpdate
MinReadySeconds: 0
RollingUpdateStrategy: 25% max unavailable, 25% max surge
Pod Template:
Labels: app=nginx
Containers:
nginx:
Image: nginx
Port: <none>
Host Port: <none>
Environment: <none>
Mounts: <none>
Volumes: <none>
Conditions:
Type Status Reason
---- ------ ------
Available True MinimumReplicasAvailable
Progressing True NewReplicaSetAvailable
OldReplicaSets: <none>
NewReplicaSet: nginx-86c57db685 (1/1 replicas created)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal ScalingReplicaSet 30m deployment-controller Scaled up replica set nginx-86c57db685 to 1
[root@kuber ~]#
[root@kuber ~]# kubectl get namespaces
NAME STATUS AGE
default Active 51m
kube-node-lease Active 51m
kube-public Active 51m
kube-system Active 51m
service/nginx created
[root@kuber ~]#
[root@kuber ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 30m
nginx NodePort 10.97.170.90 <none> 80:32157/TCP 44s
[root@kuber ~]# kubectl delete deployment nginx
deployment.apps "nginx" deleted
[root@kuber ~]#
[root@kuber ~]# kubectl get deployments
No resources found in default namespace.
[root@kuber ~]#
[root@kuber ~]# kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/mandatory.yaml
namespace/ingress-nginx created
configmap/nginx-configuration created
configmap/tcp-services created
configmap/udp-services created
serviceaccount/nginx-ingress-serviceaccount created
clusterrole.rbac.authorization.k8s.io/nginx-ingress-clusterrole created
role.rbac.authorization.k8s.io/nginx-ingress-role created
rolebinding.rbac.authorization.k8s.io/nginx-ingress-role-nisa-binding created
clusterrolebinding.rbac.authorization.k8s.io/nginx-ingress-clusterrole-nisa-binding created
deployment.apps/nginx-ingress-controller created
[root@kuber ~]#
[root@kuber ~]# kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/baremetal/service-nodeport.yaml
service/ingress-nginx created
[root@kuber ~]# kubectl get pods --all-namespaces -l app.kubernetes.io/name=ingress-nginx --watch
NAMESPACE       NAME                                        READY   STATUS    RESTARTS   AGE
ingress-nginx nginx-ingress-controller-568867bf56-6czqt 0/1 Pending 0 12s
ingress-nginx nginx-ingress-controller-568867bf56-6czqt 0/1 Pending 0 3m27s
ingress-nginx nginx-ingress-controller-568867bf56-6czqt 0/1 ContainerCreating 0 3m28s
ingress-nginx nginx-ingress-controller-568867bf56-6czqt 0/1 Running 0 5m17s
ingress-nginx nginx-ingress-controller-568867bf56-6czqt 1/1 Running 0 6m30s
[root@kuber ~]# kubectl -n ingress-nginx get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
ingress-nginx NodePort 10.108.2.158 <none> 80:30892/TCP,443:30881/TCP 12m
[root@kuber ~]#
[root@kuber ~]# kubectl -n ingress-nginx get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-ingress-controller-568867bf56-6czqt 1/1 Running 0 15m 10.32.0.5 kuber <none> <none>
[root@kuber ~]#
[root@kuber ~]# kubectl apply -f https://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml
clusterrolebinding.rbac.authorization.k8s.io/calico-cni-plugin unchanged
clusterrole.rbac.authorization.k8s.io/calico-cni-plugin unchanged
serviceaccount/calico-cni-plugin created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
[root@kuber ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
default nginx-86c57db685-scdzt 1/1 Running 0 56m
ingress-nginx nginx-ingress-controller-568867bf56-6czqt 1/1 Running 0 18m
kube-system coredns-5644d7b6d9-f8zmw 1/1 Running 0 76m
kube-system coredns-5644d7b6d9-wnbgc 1/1 Running 0 76m
kube-system etcd-localhost.localdomain 1/1 Running 0 76m
kube-system kube-apiserver-localhost.localdomain 1/1 Running 0 75m
kube-system kube-controller-manager-localhost.localdomain 1/1 Running 0 76m
kube-system kube-proxy-6kzpk 1/1 Running 0 74m
kube-system kube-proxy-thpt8 1/1 Running 0 76m
kube-system kube-scheduler-localhost.localdomain 1/1 Running 0 75m
kube-system weave-net-dtf8j 2/2 Running 0 72m
kube-system weave-net-wjrqq 2/2 Running 0 72m
[root@kuber ~]#
Comments
Post a Comment