Set up Kubernetes on a single host
Replace microk8s with Kubernetes on mini1
Remove microk8s with the snap command
cychong@mini1:~$ sudo snap remove microk8s
Save data of snap "microk8s" in automatic snapshot set
microk8s removed
cychong@mini1:~$
Set up Kubernetes with kubeadm
Reference : https://phoenixnap.com/kb/install-kubernetes-on-ubuntu
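The referenced guide walks through the prerequisites; roughly, it comes down to installing Docker and the kubeadm/kubelet/kubectl packages. The commands below follow the packaging instructions that were current at the time (the apt.kubernetes.io repository has since been deprecated, so adjust as needed):

sudo apt-get update && sudo apt-get install -y docker.io apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update && sudo apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl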
cychong@mini1:~$ sudo kubeadm init --pod-network-cidr=10.244.0.0/16
[init] Using Kubernetes version: v1.15.3
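kubeadm init also prints the kubeconfig setup commands at the end of its output; they have to be run once before kubectl works as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config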
cychong@mini1:~$ kubectl get pods --all-namespaces -o wide
NAMESPACE     NAME                            READY   STATUS    RESTARTS   AGE    IP              NODE     NOMINATED NODE   READINESS GATES
kube-system   coredns-5c98db65d4-r468f        0/1     Pending   0          2m3s   <none>          <none>   <none>           <none>
kube-system   coredns-5c98db65d4-wcm2n        0/1     Pending   0          2m3s   <none>          <none>   <none>           <none>
kube-system   etcd-mini1                      1/1     Running   0          79s    192.168.1.100   mini1    <none>           <none>
kube-system   kube-apiserver-mini1            1/1     Running   0          76s    192.168.1.100   mini1    <none>           <none>
kube-system   kube-controller-manager-mini1   1/1     Running   0          72s    192.168.1.100   mini1    <none>           <none>
kube-system   kube-proxy-rzpkc                1/1     Running   0          2m4s   192.168.1.100   mini1    <none>           <none>
kube-system   kube-scheduler-mini1            1/1     Running   0          82s    192.168.1.100   mini1    <none>           <none>
Install Calico
cychong@mini1:~$ wget https://docs.projectcalico.org/v3.8/manifests/calico.yaml
--2019-09-08 21:53:13-- https://docs.projectcalico.org/v3.8/manifests/calico.yaml
Resolving docs.projectcalico.org (docs.projectcalico.org)... 178.128.115.5, 2400:6180:0:d1::575:a001
Connecting to docs.projectcalico.org (docs.projectcalico.org)|178.128.115.5|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 20628 (20K) [application/x-yaml]
Saving to: ‘calico.yaml’
calico.yaml 100%[====================================================================================>] 20.14K --.-KB/s in 0.08s
2019-09-08 21:53:14 (240 KB/s) - ‘calico.yaml’ saved [20628/20628]
Change CALICO_IPV4POOL_CIDR
- name: CALICO_IPV4POOL_CIDR
  value: "10.201.0.0/24"
cychong@mini1:~$ kubectl apply -f calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
cychong@mini1:~$ kubectl get pods --all-namespaces
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-65b8787765-7nmjw   1/1     Running   0          3m26s
kube-system   calico-node-8spzh                          1/1     Running   0          3m26s
kube-system   coredns-5c98db65d4-r468f                   1/1     Running   0          7m33s
kube-system   coredns-5c98db65d4-wcm2n                   1/1     Running   0          7m33s
kube-system   etcd-mini1                                 1/1     Running   0          6m49s
kube-system   kube-apiserver-mini1                       1/1     Running   0          6m46s
kube-system   kube-controller-manager-mini1              1/1     Running   0          6m42s
kube-system   kube-proxy-rzpkc                           1/1     Running   0          7m34s
kube-system   kube-scheduler-mini1                       1/1     Running   0          6m52s
cychong@mini1:~$ kubectl get nodes
NAME    STATUS   ROLES    AGE   VERSION
mini1   Ready    master   11m   v1.15.3
cychong@mini1:~$ kubectl describe node mini1
Name:               mini1
Roles:              master
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=mini1
                    kubernetes.io/os=linux
                    node-role.kubernetes.io/master=
Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
                    node.alpha.kubernetes.io/ttl: 0
                    projectcalico.org/IPv4Address: 192.168.1.100/24
                    projectcalico.org/IPv4IPIPTunnelAddr: 10.244.51.64
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Sun, 08 Sep 2019 21:49:54 +0900
Taints:             node-role.kubernetes.io/master:NoSchedule
Unschedulable:      false
To use the master node as a worker node at the same time, remove the master taint:
cychong@mini1:~$ kubectl taint nodes --all node-role.kubernetes.io/master-
node/mini1 untainted
cychong@mini1:~$ kubectl describe node mini1
Name:               mini1
Roles:              master
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=mini1
                    kubernetes.io/os=linux
                    node-role.kubernetes.io/master=
Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
                    node.alpha.kubernetes.io/ttl: 0
                    projectcalico.org/IPv4Address: 192.168.1.100/24
                    projectcalico.org/IPv4IPIPTunnelAddr: 10.244.51.64
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Sun, 08 Sep 2019 21:49:54 +0900
Taints:             <none>
Unschedulable:      false
Install Helm with snap
cychong@mini1:~$ sudo snap install helm --classic
[sudo] password for cychong:
helm 2.14.3 from Snapcrafters installed
If kubelet does not start after a reboot
Disable swap
cychong@mini1:~$ sudo swapoff -a
cychong@mini1:~$ sudo systemctl status kubelet
● kubelet.service - kubelet: The Kubernetes Node Agent
Loaded: loaded (/lib/systemd/system/kubelet.service; enabled; vendor preset: enabled)
Drop-In: /etc/systemd/system/kubelet.service.d
└─10-kubeadm.conf
Active: active (running) since Sun 2019-09-08 22:34:24 KST; 1s ago
Docs: https://kubernetes.io/docs/home/
Main PID: 16565 (kubelet)
Tasks: 11 (limit: 4306)
CGroup: /system.slice/kubelet.service
└─16565 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --config=/var/lib/kubelet/config.yaml --cgroup-dr
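Note that swapoff -a only lasts until the next reboot. To keep swap disabled permanently, the swap entry in /etc/fstab also has to be commented out; one way to do that (this assumes swap is configured in /etc/fstab, and keeps a backup of the file):

sudo sed -i.bak '/\sswap\s/ s/^/#/' /etc/fstab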
helm init
cychong@mini1:~$ sudo helm init --history-max 200
$HELM_HOME has been configured at /home/cychong/.helm.
Error: error installing: Post https://192.168.1.100:6443/apis/extensions/v1beta1/namespaces/kube-system/deployments: dial tcp 192.168.1.100:6443: connect: connection refused
After fixing the kubelet issue (caused by swap being enabled), run helm init again:
cychong@mini1:~$ sudo helm init --history-max 200
$HELM_HOME has been configured at /home/cychong/.helm.
Tiller (the Helm server-side component) has been installed into your Kubernetes Cluster.
Please note: by default, Tiller is deployed with an insecure 'allow unauthenticated users' policy.
To prevent this, run `helm init` with the --tiller-tls-verify flag.
For more information on securing your installation see: https://docs.helm.sh/using_helm/#securing-your-helm-installation
Install Ghost with Helm - failed
cychong@mini1:~$ sudo helm search ghost
NAME           CHART VERSION   APP VERSION   DESCRIPTION
stable/ghost   7.2.1           2.30.2        A simple, powerful publishing platform that allows you to...
The stable/ghost chart uses MariaDB by default, so a values.yaml override has to be passed to helm install.
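The chart defaults can be dumped with helm inspect values stable/ghost and edited from there. As a purely hypothetical example (the key names below depend on the stable/ghost chart version, so check them against the dumped defaults), a values.yaml that disables the bundled MariaDB and points at an external database might look like:

ghostHost: blog.example.com
mariadb:
  enabled: false
externalDatabase:
  host: 192.168.1.100
  user: ghost
  password: changeme
  database: ghost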
The problem:
Helm install failed - “no available release name”
cychong@mini1:~/work/ghost-with-helm$ sudo helm install -f values.yaml stable/ghost
Error: no available release name found
Solution: https://scriptcrunch.com/helm-error-no-available-release/ (Tiller lacks a service account with the RBAC permissions it needs, so one has to be created and Tiller pointed at it)
cychong@mini1:~/work/ghost-with-helm$ kubectl get deployment --all-namespaces
NAMESPACE     NAME                      READY   UP-TO-DATE   AVAILABLE   AGE
kube-system   calico-kube-controllers   1/1     1            1           24h
kube-system   coredns                   2/2     2            2           25h
kube-system   tiller-deploy             1/1     1            1           24h
cychong@mini1:~/work/ghost-with-helm$ kubectl delete deployment tiller-deploy -n kube-system
deployment.extensions "tiller-deploy" deleted
cychong@mini1:~/work/ghost-with-helm$ kubectl get deployment tiller-deploy --all-namespaces
error: a resource cannot be retrieved by name across all namespaces
cychong@mini1:~/work/ghost-with-helm$ sudo helm init --service-account=tiller
$HELM_HOME has been configured at /home/cychong/.helm.
Warning: Tiller is already installed in the cluster.
(Use --client-only to suppress this message, or --upgrade to upgrade Tiller to the current version.)
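The rbac-config.yaml used here is presumably the standard example from the Helm v2 documentation: a tiller service account in kube-system bound to cluster-admin.

apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: tiller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: tiller
    namespace: kube-system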
cychong@mini1:~/work/ghost-with-helm$ kubectl create -f rbac-config.yaml
serviceaccount/tiller created
clusterrolebinding.rbac.authorization.k8s.io/tiller created
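Since Tiller was deployed before the tiller service account existed, it also has to be redeployed so it actually uses the new account; with Helm v2 that should be roughly:

sudo helm init --service-account tiller --upgrade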