Kubernetes on Ubuntu with MicroK8s — terminal session transcript
root@kiran:~# snap install microk8s --classic --channel=1.15/stable
snap "microk8s" is already installed, see 'snap help refresh'
root@kiran:~# microk8s.enable dashboard dns
Applying manifest
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
service/monitoring-grafana created
service/monitoring-influxdb created
service/heapster created
deployment.apps/monitoring-influxdb-grafana-v4 created
serviceaccount/heapster created
clusterrolebinding.rbac.authorization.k8s.io/heapster created
configmap/heapster-config created
configmap/eventer-config created
deployment.apps/heapster-v1.5.2 created
If RBAC is not enabled, access the dashboard using the default token retrieved with:
token=$(microk8s.kubectl -n kube-system get secret | grep default-token | cut -d " " -f1)
microk8s.kubectl -n kube-system describe secret $token
In an RBAC enabled setup (microk8s.enable RBAC) you need to create a user with restricted
permissions as shown in https://github.com/kubernetes/dashboard/wiki/Creating-sample-user
Enabling DNS
Applying manifest
serviceaccount/coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
clusterrole.rbac.authorization.k8s.io/coredns created
clusterrolebinding.rbac.authorization.k8s.io/coredns created
Restarting kubelet
DNS is enabled
root@kiran:~#
snap "microk8s" is already installed, see 'snap help refresh'
root@kiran:~# microk8s.enable dashboard dns
Applying manifest
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
service/monitoring-grafana created
service/monitoring-influxdb created
service/heapster created
deployment.apps/monitoring-influxdb-grafana-v4 created
serviceaccount/heapster created
clusterrolebinding.rbac.authorization.k8s.io/heapster created
configmap/heapster-config created
configmap/eventer-config created
deployment.apps/heapster-v1.5.2 created
If RBAC is not enabled, access the dashboard using the default token retrieved with:
token=$(microk8s.kubectl -n kube-system get secret | grep default-token | cut -d " " -f1)
microk8s.kubectl -n kube-system describe secret $token
In an RBAC enabled setup (microk8s.enable RBAC) you need to create a user with restricted
permissions as shown in https://github.com/kubernetes/dashboard/wiki/Creating-sample-user
Enabling DNS
Applying manifest
serviceaccount/coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
clusterrole.rbac.authorization.k8s.io/coredns created
clusterrolebinding.rbac.authorization.k8s.io/coredns created
Restarting kubelet
DNS is enabled
root@kiran:~#
root@kiran:~# sudo ufw allow in on cbr0 && sudo ufw allow out on cbr0
Rules updated
Rules updated (v6)
Rules updated
Rules updated (v6)
root@kiran:~# sudo ufw default allow routed
Default routed policy changed to 'allow'
(be sure to update your rules accordingly)
root@kiran:~# microk8s.kubectl get all --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system pod/coredns-9b8997588-877gg 1/1 Running 0 4m40s
kube-system pod/dashboard-metrics-scraper-566cddb686-mjp2w 1/1 Running 0 4m44s
kube-system pod/heapster-v1.5.2-5c58f64f8b-f75zj 4/4 Running 0 4m44s
kube-system pod/kubernetes-dashboard-678b7d865c-dbz8v 1/1 Running 0 4m44s
kube-system pod/monitoring-influxdb-grafana-v4-6d599df6bf-tccqb 2/2 Running 0 4m44s
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default service/kubernetes ClusterIP 10.152.183.1 <none> 443/TCP 77m
kube-system service/dashboard-metrics-scraper ClusterIP 10.152.183.233 <none> 8000/TCP 4m48s
kube-system service/heapster ClusterIP 10.152.183.95 <none> 80/TCP 4m47s
kube-system service/kube-dns ClusterIP 10.152.183.10 <none> 53/UDP,53/TCP,9153/TCP 4m43s
kube-system service/kubernetes-dashboard ClusterIP 10.152.183.189 <none> 443/TCP 4m48s
kube-system service/monitoring-grafana ClusterIP 10.152.183.35 <none> 80/TCP 4m48s
kube-system service/monitoring-influxdb ClusterIP 10.152.183.132 <none> 8083/TCP,8086/TCP 4m48s
NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE
kube-system deployment.apps/coredns 1/1 1 1 4m43s
kube-system deployment.apps/dashboard-metrics-scraper 1/1 1 1 4m48s
kube-system deployment.apps/heapster-v1.5.2 1/1 1 1 4m47s
kube-system deployment.apps/kubernetes-dashboard 1/1 1 1 4m48s
kube-system deployment.apps/monitoring-influxdb-grafana-v4 1/1 1 1 4m47s
NAMESPACE NAME DESIRED CURRENT READY AGE
kube-system replicaset.apps/coredns-9b8997588 1 1 1 4m42s
kube-system replicaset.apps/dashboard-metrics-scraper-566cddb686 1 1 1 4m44s
kube-system replicaset.apps/heapster-v1.5.2-5c58f64f8b 1 1 1 4m44s
kube-system replicaset.apps/kubernetes-dashboard-678b7d865c 1 1 1 4m45s
kube-system replicaset.apps/monitoring-influxdb-grafana-v4-6d599df6bf 1 1 1 4m45s
root@kiran:~#
root@kiran:~# snap install kubectl --classic
kubectl 1.16.0 from Canonical✓ installed
root@kiran:~# kubectl config view
apiVersion: v1
clusters: []
contexts: []
current-context: ""
kind: Config
preferences: {}
users: []
root@kiran:~# microk8s.kubectl config view
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: DATA+OMITTED
server: https://127.0.0.1:16443
name: microk8s-cluster
contexts:
- context:
cluster: microk8s-cluster
user: admin
name: microk8s
current-context: microk8s
kind: Config
preferences: {}
users:
- name: admin
user:
password: Wk95emtMODRvMW5BdllGWEtTN3FOSU9KU3FaN3F2UXhsN2N5Wi8ybzVyaz0K
username: admin
root@kiran:~# microk8s.kubectl cluster-info
Kubernetes master is running at https://127.0.0.1:16443
Heapster is running at https://127.0.0.1:16443/api/v1/namespaces/kube-system/services/heapster/proxy
CoreDNS is running at https://127.0.0.1:16443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
Grafana is running at https://127.0.0.1:16443/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy
InfluxDB is running at https://127.0.0.1:16443/api/v1/namespaces/kube-system/services/monitoring-influxdb:http/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
root@kiran:~#
kubectl 1.16.0 from Canonical✓ installed
root@kiran:~# kubectl config view
apiVersion: v1
clusters: []
contexts: []
current-context: ""
kind: Config
preferences: {}
users: []
root@kiran:~# microk8s.kubectl config view
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: DATA+OMITTED
server: https://127.0.0.1:16443
name: microk8s-cluster
contexts:
- context:
cluster: microk8s-cluster
user: admin
name: microk8s
current-context: microk8s
kind: Config
preferences: {}
users:
- name: admin
user:
password: Wk95emtMODRvMW5BdllGWEtTN3FOSU9KU3FaN3F2UXhsN2N5Wi8ybzVyaz0K
username: admin
root@kiran:~# microk8s.kubectl cluster-info
Kubernetes master is running at https://127.0.0.1:16443
Heapster is running at https://127.0.0.1:16443/api/v1/namespaces/kube-system/services/heapster/proxy
CoreDNS is running at https://127.0.0.1:16443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
Grafana is running at https://127.0.0.1:16443/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy
InfluxDB is running at https://127.0.0.1:16443/api/v1/namespaces/kube-system/services/monitoring-influxdb:http/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
root@kiran:~#
root@kiran:~# snap install kubectl --classic
kubectl 1.16.0 from Canonical✓ installed
root@kiran:~# kubectl config view
apiVersion: v1
clusters: []
contexts: []
current-context: ""
kind: Config
preferences: {}
users: []
root@kiran:~# microk8s.kubectl config view
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: DATA+OMITTED
server: https://127.0.0.1:16443
name: microk8s-cluster
contexts:
- context:
cluster: microk8s-cluster
user: admin
name: microk8s
current-context: microk8s
kind: Config
preferences: {}
users:
- name: admin
user:
password: Wk95emtMODRvMW5BdllGWEtTN3FOSU9KU3FaN3F2UXhsN2N5Wi8ybzVyaz0K
username: admin
root@kiran:~# microk8s.kubectl cluster-info
Kubernetes master is running at https://127.0.0.1:16443
Heapster is running at https://127.0.0.1:16443/api/v1/namespaces/kube-system/services/heapster/proxy
CoreDNS is running at https://127.0.0.1:16443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
Grafana is running at https://127.0.0.1:16443/api/v1/namespaces/kube-system/services/monitoring-grafana/proxy
InfluxDB is running at https://127.0.0.1:16443/api/v1/namespaces/kube-system/services/monitoring-influxdb:http/proxy
(end of transcript)