Kubernetes Installation (HA Configuration)


https://kubernetes.io/ja/docs/setup/independent/high-availability/
https://qiita.com/rltnm7/items/3e3948893983737f736b


mmm121: CentOS7 Kubernetes master node
mmm122: CentOS7 Kubernetes master node
mmm123: CentOS7 Kubernetes master node
mmm124: CentOS7 Kubernetes worker node
mmm125: CentOS7 Kubernetes worker node
mmm126: CentOS7 Kubernetes worker node


CPUs=2
Memory=4G

Admin user=kube
Cluster VIP=192.168.137.120
kubernetesVersion=v1.16.1

※Make sure the address range used by the pod network does not overlap with the control-plane network
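
※As a quick sanity check, list each host's IPv4 addresses and confirm none fall inside the pod subnet chosen in step (3) (10.244.0.0/16):

ip -4 addr show | grep inet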

--(1) Prepare the cluster nodes
--Run on all nodes

yum update -y

useradd kube
echo kube | passwd --stdin kube

visudo

Add the following line:

kube ALL=(ALL) NOPASSWD: ALL
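
※To confirm passwordless sudo works for the new user (assuming the visudo entry above was saved):

su - kube -c 'sudo -n true && echo "sudo OK"'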


swapoff -a

vim /etc/fstab
Comment out the swap line
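
※A non-interactive alternative (a sketch): comment out any swap entry with sed and verify nothing is swapped in:

sed -i.bak '/^[^#].*\sswap\s/ s/^/#/' /etc/fstab
swapon --summary    # no output means swap is fully off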


cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness = 0
net.ipv4.ip_forward = 1
EOF


cat /etc/sysctl.d/k8s.conf

sysctl --system
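
※If sysctl reports an unknown key for the net.bridge.* settings, the br_netfilter module is not loaded yet; load it now and at boot, then re-apply:

modprobe br_netfilter
echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf
sysctl --system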


yum remove -y docker \
    docker-client \
    docker-client-latest \
    docker-common \
    docker-latest \
    docker-latest-logrotate \
    docker-logrotate \
    docker-engine


yum install -y yum-utils device-mapper-persistent-data lvm2

yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

yum makecache fast
yum update -y
yum install -y docker-ce-18.09.6-3.el7.x86_64 docker-ce-cli-18.09.6-3.el7.x86_64 containerd.io
mkdir -p /etc/docker


vim /etc/docker/daemon.json

{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}

mkdir -p /etc/systemd/system/docker.service.d


systemctl daemon-reload
systemctl enable docker
systemctl restart docker
systemctl status docker


vim /etc/yum.repos.d/kubernetes.repo

[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*

yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
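
※The command above installs the latest packages, which can run ahead of the v1.16.1 target; pinning the versions keeps the tools aligned (a sketch; check the exact versions the repo offers first):

yum list --showduplicates kubeadm --disableexcludes=kubernetes
yum install -y kubelet-1.16.1 kubeadm-1.16.1 kubectl-1.16.1 --disableexcludes=kubernetes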



docker info | grep Cgroup    # expect "Cgroup Driver: systemd", matching the kubelet setting in (3)
mkdir -p /var/lib/kubelet    # /var/lib/kubelet/config.yaml is written here in step (3)



mkdir -p /etc/systemd/system/kubelet.service.d

cat <<EOF > /etc/systemd/system/kubelet.service.d/20-extra-args.conf
[Service]
Environment="KUBELET_EXTRA_ARGS=--fail-swap-on=false"
EOF

cat /etc/systemd/system/kubelet.service.d/20-extra-args.conf



systemctl daemon-reload
systemctl enable kubelet
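
※To confirm systemd picked up the drop-in, inspect the merged unit:

systemctl cat kubelet | grep -B1 fail-swap-on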


--(2) Install keepalived

※Putting a load balancer in front of the API servers was also tried with Nginx and LVS, but could not be confirmed to work

--Work on mmm121

yum install -y keepalived
mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.org

cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
}

vrrp_instance KUBE_APISERVER {
    state BACKUP
    interface ens160
    virtual_router_id 51
    priority 100
    advert_int 1
    # preemption is keepalived's default; add nopreempt here to disable it
    virtual_ipaddress {
        192.168.137.120/24
    }
}
EOF

systemctl restart keepalived
systemctl enable keepalived
systemctl status keepalived

tcpdump -i ens160 vrrp -nn    # only the current VIP holder should be sending advertisements

--Work on mmm122

yum install -y keepalived
mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.org

cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
}

vrrp_instance KUBE_APISERVER {
    state BACKUP
    interface ens160
    virtual_router_id 51
    priority 90
    advert_int 1
    virtual_ipaddress {
        192.168.137.120/24
    }
}
EOF

systemctl restart keepalived
systemctl enable keepalived
systemctl status keepalived

tcpdump -i ens160 vrrp -nn

--Work on mmm123

yum install -y keepalived
mv /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.org

cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
}

vrrp_instance KUBE_APISERVER {
    state BACKUP
    interface ens160
    virtual_router_id 51
    priority 80
    advert_int 1
    virtual_ipaddress {
        192.168.137.120/24
    }
}
EOF

systemctl restart keepalived
systemctl enable keepalived
systemctl status keepalived

tcpdump -i ens160 vrrp -nn
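
※Exactly one node should hold the VIP at a time (mmm121 with priority 100, as long as it is up); a quick check on each master:

ip -4 addr show ens160 | grep 192.168.137.120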


--(3) Build the first master node
--Work on mmm121

cd
vim kubeadm-config.yaml

apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: "v1.16.1"
apiServer:
  certSANs:
  - 192.168.137.120
controlPlaneEndpoint: "192.168.137.120:6443"
networking:
  podSubnet: "10.244.0.0/16"


kubeadm reset --force

※Running reset --force deletes /var/lib/kubelet/config.yaml, so recreate it as follows

vim /var/lib/kubelet/config.yaml

kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupDriver: "systemd"

cat /var/lib/kubelet/config.yaml


kubeadm init --config=kubeadm-config.yaml --v=7


---------------
※A message like the following is displayed; record it, since the join commands are reused in steps (7) through (9)


You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

kubeadm join 192.168.137.120:6443 --token 2584pj.6s8sl3rfjbyy923t \
--discovery-token-ca-cert-hash sha256:829b5eceffd043880d6d29e430dd62fa09fd48afd6f78a9b3639752f1fdc1ba1 \
--control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.137.120:6443 --token 2584pj.6s8sl3rfjbyy923t \
--discovery-token-ca-cert-hash sha256:829b5eceffd043880d6d29e430dd62fa09fd48afd6f78a9b3639752f1fdc1ba1

---------------

su - kube

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl cluster-info
kubectl get nodes
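
※The node normally shows NotReady here because no CNI plugin is installed yet; step (4) fixes this. To see the reason directly:

kubectl describe node mmm121 | grep -A5 'Conditions:'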


--(4) Deploy flannel
--Work on mmm121


kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/2140ac876ef134e0ed5af15c65e414cf26827915/Documentation/kube-flannel.yml


kubectl get pods -n kube-system

kubectl get nodes
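
※The master turns Ready once the flannel pods are Running; to watch them (a sketch, assuming the manifest's app=flannel label):

kubectl -n kube-system get pods -l app=flannel -o wide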


--(5) Distribute the SSH public key
--Work on mmm121

ssh-keygen -t rsa

ssh-copy-id mmm122
ssh-copy-id mmm123
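
※Verify key-based login works before moving on:

ssh mmm122 hostname
ssh mmm123 hostname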

--(6) Distribute the cluster certificates
--Work on mmm121

eval $(ssh-agent)
ssh-add ~/.ssh/id_rsa
ssh-add -l

USER=kube
CONTROL_PLANE_IPS="mmm122 mmm123"

for host in ${CONTROL_PLANE_IPS}; do
    sudo -E -s scp /etc/kubernetes/pki/ca.crt "${USER}"@$host:
    sudo -E -s scp /etc/kubernetes/pki/ca.key "${USER}"@$host:
    sudo -E -s scp /etc/kubernetes/pki/sa.key "${USER}"@$host:
    sudo -E -s scp /etc/kubernetes/pki/sa.pub "${USER}"@$host:
    sudo -E -s scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host:
    sudo -E -s scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host:
    sudo -E -s scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:etcd-ca.crt
    sudo -E -s scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:etcd-ca.key
    sudo -E -s scp /etc/kubernetes/admin.conf "${USER}"@$host:
done
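
※A spot check that the files actually arrived (run from mmm121):

ssh mmm122 'ls -l ~/ca.crt ~/etcd-ca.crt ~/admin.conf'
ssh mmm123 'ls -l ~/ca.crt ~/etcd-ca.crt ~/admin.conf'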


--(7) Build the second master node
--Work on mmm122


USER=kube
mkdir -p /etc/kubernetes/pki/etcd
mv /home/${USER}/ca.crt /etc/kubernetes/pki/
mv /home/${USER}/ca.key /etc/kubernetes/pki/
mv /home/${USER}/sa.pub /etc/kubernetes/pki/
mv /home/${USER}/sa.key /etc/kubernetes/pki/
mv /home/${USER}/front-proxy-ca.crt /etc/kubernetes/pki/
mv /home/${USER}/front-proxy-ca.key /etc/kubernetes/pki/
mv /home/${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt
mv /home/${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key
mv /home/${USER}/admin.conf /etc/kubernetes/admin.conf

chown -R root:root /etc/kubernetes/pki
chown root:root /etc/kubernetes/admin.conf

kubeadm join 192.168.137.120:6443 --token 2584pj.6s8sl3rfjbyy923t \
--discovery-token-ca-cert-hash sha256:829b5eceffd043880d6d29e430dd62fa09fd48afd6f78a9b3639752f1fdc1ba1 \
--control-plane

su - kube

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubectl get nodes -o wide
kubectl -n kube-system get pods


--(8) Build the third master node
--Work on mmm123


USER=kube
mkdir -p /etc/kubernetes/pki/etcd
mv /home/${USER}/ca.crt /etc/kubernetes/pki/
mv /home/${USER}/ca.key /etc/kubernetes/pki/
mv /home/${USER}/sa.pub /etc/kubernetes/pki/
mv /home/${USER}/sa.key /etc/kubernetes/pki/
mv /home/${USER}/front-proxy-ca.crt /etc/kubernetes/pki/
mv /home/${USER}/front-proxy-ca.key /etc/kubernetes/pki/
mv /home/${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt
mv /home/${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key
mv /home/${USER}/admin.conf /etc/kubernetes/admin.conf

chown -R root:root /etc/kubernetes/pki
chown root:root /etc/kubernetes/admin.conf

kubeadm join 192.168.137.120:6443 --token 2584pj.6s8sl3rfjbyy923t \
--discovery-token-ca-cert-hash sha256:829b5eceffd043880d6d29e430dd62fa09fd48afd6f78a9b3639752f1fdc1ba1 \
--control-plane

su - kube

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubectl get nodes -o wide
kubectl -n kube-system get pods


--(9) Add the worker nodes
--Work on mmm124, mmm125, and mmm126


kubeadm join 192.168.137.120:6443 --token 2584pj.6s8sl3rfjbyy923t \
--discovery-token-ca-cert-hash sha256:829b5eceffd043880d6d29e430dd62fa09fd48afd6f78a9b3639752f1fdc1ba1


--Verify the added nodes on mmm121

kubectl get nodes

※It takes a while for the new nodes to become Ready
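
※To follow the workers coming up without rerunning the command, use watch mode (expect all six nodes to reach Ready):

kubectl get nodes -w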


--(10) Deploy a sample container
--Work on mmm121

su - kube

kubectl create deployment hello-node \
--image=gcr.io/hello-minikube-zero-install/hello-node

kubectl get deployments
kubectl get pods


HELLO_POD=$(kubectl get pods -l "app=hello-node" -o jsonpath='{.items[0].metadata.name}')

kubectl get po/${HELLO_POD} -o jsonpath='{.status.hostIP}'

kubectl expose deployment hello-node --type=LoadBalancer --port=8080
kubectl get svc/hello-node

※With no cloud provider, EXTERNAL-IP stays <pending>; access the service through its NodePort instead

NODE_IP=$(kubectl get po/${HELLO_POD} -o jsonpath='{.status.hostIP}')
NODE_PORT=$(kubectl get svc/hello-node -o jsonpath='{.spec.ports[0].nodePort}')

echo ${NODE_IP}
echo ${NODE_PORT}

curl http://${NODE_IP}:${NODE_PORT}/

kubectl delete service hello-node
kubectl delete deployment hello-node


--(11) Deploy metrics-server
--Work on mmm121

exit    # back to the root shell to install packages

yum -y install git
yum -y install epel-release
yum -y install jq

su - kube

git clone https://github.com/kubernetes-incubator/metrics-server.git -b v0.3.5
cd ./metrics-server


vim ./deploy/1.8+/metrics-server-deployment.yaml

Add the command field so metrics-server can reach the kubelets by IP despite their self-signed certificates:

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      containers:
      - name: metrics-server
        image: k8s.gcr.io/metrics-server-amd64:v0.3.2
        imagePullPolicy: Always
        command:
        - /metrics-server
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalIP
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp


kubectl create -f deploy/1.8+

kubectl -n kube-system get pods -l k8s-app=metrics-server

kubectl get --raw "/apis/metrics.k8s.io/" | jq

kubectl top node
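
※If kubectl top reports that metrics are not yet available, the aggregated API may still be registering; check its status with:

kubectl get apiservice v1beta1.metrics.k8s.io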

METRICS_POD=$(kubectl -n kube-system get pods -l k8s-app=metrics-server -o jsonpath='{.items[0].metadata.name}')

kubectl -n kube-system logs ${METRICS_POD}

kubectl -n kube-system describe pod ${METRICS_POD}