1, Basic description
This blog post demonstrates a binary installation of a highly available Kubernetes (k8s) 1.17.x cluster on CentOS 7. Compared with other versions, the binary installation procedure is not much different.
2, Basic environment configuration
2.1 host information
OS | Hostname | Memory | CPU | IP |
---|---|---|---|---|
CentOS 7.6 | k8s-master01 | 8G | 4 cores | 192.168.99.101 |
CentOS 7.6 | k8s-master02 | 8G | 4 cores | 192.168.99.102 |
CentOS 7.6 | k8s-master03 | 8G | 4 cores | 192.168.99.103 |
CentOS 7.6 | k8s-node01 | 8G | 4 cores | 192.168.99.104 |
CentOS 7.6 | k8s-node02 | 8G | 4 cores | 192.168.99.105 |
- | k8s-master-lb (VIP) | - | - | 192.168.99.108 |
2.2 system information
$ cat /etc/redhat-release
CentOS Linux release 7.6.1810 (Core)
$ uname -ra
Linux k8s-master01 5.13.7-1.el7.elrepo.x86_64 #1 SMP Fri Jul 30 10:08:55 EDT 2021 x86_64 x86_64 x86_64 GNU/Linux
2.3 configure the hosts file on all nodes
$ cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.99.101 k8s-master01
192.168.99.102 k8s-master02
192.168.99.103 k8s-master03
192.168.99.108 k8s-master-lb
192.168.99.104 k8s-node01
192.168.99.105 k8s-node02
2.4 close firewalld and selinux on all nodes
$ systemctl disable --now firewalld
$ setenforce 0
$ sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
2.5 close swap partition for all nodes
$ swapoff -a && sysctl -w vm.swappiness=0
vm.swappiness = 0
$ sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
2.6 synchronize time on all nodes
$ ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
$ echo 'Asia/Shanghai' > /etc/timezone
$ ntpdate time2.aliyun.com
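ntpdate only performs a one-shot synchronization; to keep the clocks aligned over time, a cron entry is a common companion step (shown here as a suggestion):

$ crontab -e
# add a line such as:
*/5 * * * * /usr/sbin/ntpdate time2.aliyun.com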
2.7 generate an ssh key on Master01
$ ssh-keygen -t rsa
2.8 configure passwordless login from Master01 to the other nodes
$ for i in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02;do ssh-copy-id -i .ssh/id_rsa.pub $i;done
2.9 install basic tools on all nodes
$ yum install wget jq psmisc vim net-tools yum-utils device-mapper-persistent-data lvm2 git -y
2.10 download installation files on Master01
$ git clone https://github.com/dotbalo/k8s-ha-install.git
2.11 switch to 1.17.x branch
$ cd k8s-ha-install && git checkout manual-installation-v1.17.x
3, Basic component installation
3.1 install ipvs on all nodes
$ yum install ipvsadm ipset sysstat conntrack libseccomp -y
$ cat <<EOF > /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
EOF
$ systemctl enable --now systemd-modules-load.service
$ lsmod | grep ip_vs
ip_vs_ftp              16384  0
nf_nat                 45056  3 iptable_nat,xt_MASQUERADE,ip_vs_ftp
ip_vs_sed              16384  0
ip_vs_nq               16384  0
ip_vs_fo               16384  0
ip_vs_sh               16384  0
ip_vs_dh               16384  0
ip_vs_lblcr            16384  0
ip_vs_lblc             16384  0
ip_vs_wrr              16384  0
ip_vs_rr               16384  4
ip_vs_wlc              16384  0
ip_vs_lc               16384  0
ip_vs                 159744  28 ip_vs_wlc,ip_vs_rr,ip_vs_dh,ip_vs_lblcr,ip_vs_sh,ip_vs_fo,ip_vs_nq,ip_vs_lblc,ip_vs_wrr,ip_vs_lc,ip_vs_sed,ip_vs_ftp
nf_conntrack          155648  5 xt_conntrack,nf_nat,nf_conntrack_netlink,xt_MASQUERADE,ip_vs
nf_defrag_ipv6         24576  2 nf_conntrack,ip_vs
libcrc32c              16384  4 nf_conntrack,nf_nat,xfs,ip_vs
3.2 configuring kernel parameters for all nodes
$ cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
EOF
$ sysctl --system
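One caveat worth noting: the net.bridge.bridge-nf-call-* keys only exist once the br_netfilter module is loaded, so if sysctl --system reports them as missing, load the module first and make it persistent (a sketch, assuming the module name on your kernel):

$ modprobe br_netfilter
$ cat <<EOF > /etc/modules-load.d/br_netfilter.conf
br_netfilter
EOF
$ sysctl --system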
3.3 configure the Docker yum repository and install Docker
$ curl -o /etc/yum.repos.d/docker-ce.repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
$ yum clean all && yum makecache
$ yum list docker-ce --showduplicates | sort -r   # view all available versions
$ yum install docker-ce-cli-19.03.9-3.el7 docker-ce-19.03.9-3.el7 -y
$ mkdir -p /etc/docker
$ tee /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": ["https://8xpk5wnt.mirror.aliyuncs.com"]
}
EOF
$ systemctl daemon-reload && systemctl enable --now docker
$ docker version
Client: Docker Engine - Community
 Version:           19.03.9
 API version:       1.40
 Go version:        go1.13.10
 Git commit:        9d988398e7
 Built:             Fri May 15 00:25:27 2020
 OS/Arch:           linux/amd64
 Experimental:      false

Server: Docker Engine - Community
 Engine:
  Version:          19.03.9
  API version:      1.40 (minimum version 1.12)
  Go version:       go1.13.10
  Git commit:       9d988398e7
  Built:            Fri May 15 00:24:05 2020
  OS/Arch:          linux/amd64
  Experimental:     false
 containerd:
  Version:          1.4.12
  GitCommit:        7b11cfaabd73bb80907dd23182b9347b4245eb5d
 runc:
  Version:          1.0.2
  GitCommit:        v1.0.2-0-g52b36a2
 docker-init:
  Version:          0.18.0
  GitCommit:        fec3683
4, k8s component installation
4.1 download kubernetes 1.17.x installation package
$ wget https://storage.googleapis.com/kubernetes-release/release/v1.17.0/kubernetes-server-linux-amd64.tar.gz
4.2 download etcd 3.3.18 installation package
$ wget https://github.com/etcd-io/etcd/releases/download/v3.3.18/etcd-v3.3.18-linux-amd64.tar.gz
4.3 extract kubernetes installation files
$ tar -xf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
4.4 extract etcd installation files
$ tar -zxvf etcd-v3.3.18-linux-amd64.tar.gz --strip-components=1 -C /usr/local/bin etcd-v3.3.18-linux-amd64/etcd{,ctl}
4.5 check versions
$ kubectl version
Client Version: version.Info{Major:"1", Minor:"17", GitVersion:"v1.17.0", GitCommit:"70132b0f130acc0bed193d9ba59dd186f0e634cf", GitTreeState:"clean", BuildDate:"2019-12-07T21:20:10Z", GoVersion:"go1.13.4", Compiler:"gc", Platform:"linux/amd64"}
The connection to the server localhost:8080 was refused - did you specify the right host or port?
$ etcdctl -v
etcdctl version: 3.3.18
API version: 2

The "connection refused" message is expected at this stage: no apiserver is running yet, so kubectl can only report the client version.
4.6 sending components to other nodes
$ MasterNodes='k8s-master02 k8s-master03'
$ WorkNodes='k8s-node01 k8s-node02'
$ for NODE in $MasterNodes; do echo $NODE; scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} $NODE:/usr/local/bin/; scp /usr/local/bin/etcd* $NODE:/usr/local/bin/; done
$ for NODE in $WorkNodes; do scp /usr/local/bin/kube{let,-proxy} $NODE:/usr/local/bin/; done
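A quick way to confirm the binaries landed intact is to query their versions over ssh (an optional sanity check):

$ for NODE in $MasterNodes $WorkNodes; do ssh $NODE "kubelet --version"; done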
4.7 download CNI components
$ wget https://github.com/containernetworking/plugins/releases/download/v0.7.5/cni-plugins-amd64-v0.7.5.tgz
4.8 create the /opt/cni/bin directory on all nodes
$ mkdir -p /opt/cni/bin
4.9 extract CNI plugins and send them to other nodes
$ tar -zxf cni-plugins-amd64-v0.7.5.tgz -C /opt/cni/bin
$ for NODE in $MasterNodes; do ssh $NODE 'mkdir -p /opt/cni/bin'; scp /opt/cni/bin/* $NODE:/opt/cni/bin/; done
$ for NODE in $WorkNodes; do ssh $NODE 'mkdir -p /opt/cni/bin'; scp /opt/cni/bin/* $NODE:/opt/cni/bin/; done
5, Generate certificates
5.1 download certificate generation tool
$ wget "https://pkg.cfssl.org/R1.2/cfssl_linux-amd64" -O /usr/local/bin/cfssl $ wget "https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64" -O /usr/local/bin/cfssljson $ chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson
5.2 create etcd certificate directory for all Master nodes
$ mkdir /etc/etcd/ssl -p
5.3 generate etcd certificates on Master01
$ cd /root/k8s-ha-install/pki
$ cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca
$ cfssl gencert \
  -ca=/etc/etcd/ssl/etcd-ca.pem \
  -ca-key=/etc/etcd/ssl/etcd-ca-key.pem \
  -config=ca-config.json \
  -hostname=127.0.0.1,k8s-master01,k8s-master02,k8s-master03,192.168.99.101,192.168.99.102,192.168.99.103 \
  -profile=kubernetes \
  etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd
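To confirm the certificate actually carries the expected hosts, you can inspect its Subject Alternative Name field (an optional check, assuming openssl is available):

$ openssl x509 -in /etc/etcd/ssl/etcd.pem -noout -text | grep -A1 'Subject Alternative Name'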
5.4 copy certificates to other nodes
$ MasterNodes='k8s-master02 k8s-master03'
$ WorkNodes='k8s-node01 k8s-node02'
$ for NODE in $MasterNodes; do
    ssh $NODE "mkdir -p /etc/etcd/ssl"
    for FILE in etcd-ca-key.pem etcd-ca.pem etcd-key.pem etcd.pem; do
      scp /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}
    done
  done
5.5 create kubernetes certificate directory for all nodes
$ mkdir -p /etc/kubernetes/pki
5.6 generate kubernetes certificates
$ cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca
$ cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -hostname=10.96.0.1,192.168.99.108,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,192.168.99.101,192.168.99.102,192.168.99.103 -profile=kubernetes apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver
$ cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca
$ cfssl gencert -ca=/etc/kubernetes/pki/front-proxy-ca.pem -ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem -config=ca-config.json -profile=kubernetes front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client
$ cfssl gencert \
  -ca=/etc/kubernetes/pki/ca.pem \
  -ca-key=/etc/kubernetes/pki/ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager
$ kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true \
  --server=https://192.168.99.108:8443 \
  --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
$ kubectl config set-credentials system:kube-controller-manager \
  --client-certificate=/etc/kubernetes/pki/controller-manager.pem \
  --client-key=/etc/kubernetes/pki/controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
User "system:kube-controller-manager" set.
$ kubectl config set-context system:kube-controller-manager@kubernetes \
  --cluster=kubernetes \
  --user=system:kube-controller-manager \
  --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
Context "system:kube-controller-manager@kubernetes" created.
$ kubectl config use-context system:kube-controller-manager@kubernetes \
  --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
Switched to context "system:kube-controller-manager@kubernetes".
$ cfssl gencert \
  -ca=/etc/kubernetes/pki/ca.pem \
  -ca-key=/etc/kubernetes/pki/ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler
$ kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true \
  --server=https://192.168.99.108:8443 \
  --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Cluster "kubernetes" set.
$ kubectl config set-credentials system:kube-scheduler \
  --client-certificate=/etc/kubernetes/pki/scheduler.pem \
  --client-key=/etc/kubernetes/pki/scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
User "system:kube-scheduler" set.
$ kubectl config set-context system:kube-scheduler@kubernetes \
  --cluster=kubernetes \
  --user=system:kube-scheduler \
  --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Context "system:kube-scheduler@kubernetes" created.
$ kubectl config use-context system:kube-scheduler@kubernetes \
  --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Switched to context "system:kube-scheduler@kubernetes".
$ cfssl gencert \
  -ca=/etc/kubernetes/pki/ca.pem \
  -ca-key=/etc/kubernetes/pki/ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin
$ kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.99.108:8443 --kubeconfig=/etc/kubernetes/admin.kubeconfig
Cluster "kubernetes" set.
$ kubectl config set-credentials kubernetes-admin --client-certificate=/etc/kubernetes/pki/admin.pem --client-key=/etc/kubernetes/pki/admin-key.pem --embed-certs=true --kubeconfig=/etc/kubernetes/admin.kubeconfig
User "kubernetes-admin" set.
$ kubectl config set-context kubernetes-admin@kubernetes --cluster=kubernetes --user=kubernetes-admin --kubeconfig=/etc/kubernetes/admin.kubeconfig
Context "kubernetes-admin@kubernetes" created.
$ kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=/etc/kubernetes/admin.kubeconfig
Switched to context "kubernetes-admin@kubernetes".
$ for NODE in k8s-master01 k8s-master02 k8s-master03; do
    \cp kubelet-csr.json kubelet-$NODE-csr.json
    sed -i "s/\$NODE/$NODE/g" kubelet-$NODE-csr.json
    cfssl gencert \
      -ca=/etc/kubernetes/pki/ca.pem \
      -ca-key=/etc/kubernetes/pki/ca-key.pem \
      -config=ca-config.json \
      -hostname=$NODE \
      -profile=kubernetes \
      kubelet-$NODE-csr.json | cfssljson -bare /etc/kubernetes/pki/kubelet-$NODE
    rm -f kubelet-$NODE-csr.json
  done
$ for NODE in k8s-master01 k8s-master02 k8s-master03; do
    ssh $NODE "mkdir -p /etc/kubernetes/pki"
    scp /etc/kubernetes/pki/ca.pem $NODE:/etc/kubernetes/pki/ca.pem
    scp /etc/kubernetes/pki/kubelet-$NODE-key.pem $NODE:/etc/kubernetes/pki/kubelet-key.pem
    scp /etc/kubernetes/pki/kubelet-$NODE.pem $NODE:/etc/kubernetes/pki/kubelet.pem
    rm -f /etc/kubernetes/pki/kubelet-$NODE-key.pem /etc/kubernetes/pki/kubelet-$NODE.pem
  done
$ for NODE in k8s-master01 k8s-master02 k8s-master03; do
    ssh $NODE "cd /etc/kubernetes/pki && \
      kubectl config set-cluster kubernetes \
        --certificate-authority=/etc/kubernetes/pki/ca.pem \
        --embed-certs=true \
        --server=https://192.168.99.108:8443 \
        --kubeconfig=/etc/kubernetes/kubelet.kubeconfig && \
      kubectl config set-credentials system:node:${NODE} \
        --client-certificate=/etc/kubernetes/pki/kubelet.pem \
        --client-key=/etc/kubernetes/pki/kubelet-key.pem \
        --embed-certs=true \
        --kubeconfig=/etc/kubernetes/kubelet.kubeconfig && \
      kubectl config set-context system:node:${NODE}@kubernetes \
        --cluster=kubernetes \
        --user=system:node:${NODE} \
        --kubeconfig=/etc/kubernetes/kubelet.kubeconfig && \
      kubectl config use-context system:node:${NODE}@kubernetes \
        --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
  done
5.7 create ServiceAccount Key
$ openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
Generating RSA private key, 2048 bit long modulus (2 primes)
...................................................................................+++++
...............+++++
e is 65537 (0x010001)
$ openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
writing RSA key
$ for NODE in k8s-master02 k8s-master03; do
    for FILE in $(ls /etc/kubernetes/pki | grep -v etcd); do
      scp /etc/kubernetes/pki/${FILE} $NODE:/etc/kubernetes/pki/${FILE}
    done
    for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do
      scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE}
    done
  done
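If you want to be sure sa.pub really is the public half of sa.key, comparing the derived public key is a quick check (no output means they match):

$ openssl rsa -in /etc/kubernetes/pki/sa.key -pubout 2>/dev/null | diff - /etc/kubernetes/pki/sa.pub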
6, Kubernetes system component configuration
6.1 etcd configuration (roughly the same on each Master node; pay attention to the name and IP addresses in each node's config)
$ vim /etc/etcd/etcd.config.yml
name: 'k8s-master01'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.99.101:2380'
listen-client-urls: 'https://192.168.99.101:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.99.101:2380'
advertise-client-urls: 'https://192.168.99.101:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'k8s-master01=https://192.168.99.101:2380,k8s-master02=https://192.168.99.102:2380,k8s-master03=https://192.168.99.103:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-output: default
force-new-cluster: false
6.2 create and start the etcd service on all Master nodes
$ vim /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Alias=etcd3.service

$ mkdir /etc/kubernetes/pki/etcd
$ ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
$ systemctl daemon-reload && systemctl enable --now etcd
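Once etcd is running on all three Master nodes, a health check along these lines should report every endpoint healthy (v3 API flags, as supported by etcdctl 3.3):

$ ETCDCTL_API=3 etcdctl \
  --endpoints="https://192.168.99.101:2379,https://192.168.99.102:2379,https://192.168.99.103:2379" \
  --cacert=/etc/etcd/ssl/etcd-ca.pem \
  --cert=/etc/etcd/ssl/etcd.pem \
  --key=/etc/etcd/ssl/etcd-key.pem \
  endpoint health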
6.3 install KeepAlived and HAProxy on all Master nodes
$ yum install keepalived haproxy -y
6.4 HAProxy configuration
$ vim /etc/haproxy/haproxy.cfg
global
  maxconn 2000
  ulimit-n 16384
  log 127.0.0.1 local0 err
  stats timeout 30s

defaults
  log global
  mode http
  option httplog
  timeout connect 5000
  timeout client 50000
  timeout server 50000
  timeout http-request 15s
  timeout http-keep-alive 15s

frontend monitor-in
  bind *:33305
  mode http
  option httplog
  monitor-uri /monitor

listen stats
  bind *:8006
  mode http
  stats enable
  stats hide-version
  stats uri /stats
  stats refresh 30s
  stats realm Haproxy\ Statistics
  stats auth admin:admin

frontend k8s-master
  bind 0.0.0.0:8443
  bind 127.0.0.1:8443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master

backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server k8s-master01 192.168.99.101:6443 check
  server k8s-master02 192.168.99.102:6443 check
  server k8s-master03 192.168.99.103:6443 check
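Before starting HAProxy it is worth validating the configuration syntax; HAProxy's check mode does this without binding any ports:

$ haproxy -c -f /etc/haproxy/haproxy.cfg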
6.5 KeepAlived configuration (pay attention to the IP and network card of each node)
$ vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 2
    weight -5
    fall 3
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    mcast_src_ip 192.168.99.101
    virtual_router_id 51
    priority 100
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass K8SHA_KA_AUTH
    }
    virtual_ipaddress {
        192.168.99.108
    }
    track_script {
        chk_apiserver
    }
}
6.6 health check configuration
$ vim /etc/keepalived/check_apiserver.sh
#!/bin/bash

err=0
for k in $(seq 1 5)
do
    check_code=$(pgrep kube-apiserver)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 5
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
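KeepAlived will only run the track script if it is executable, so remember to set the permission on every Master node:

$ chmod +x /etc/keepalived/check_apiserver.sh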
6.7 start HAProxy and KeepAlived
$ systemctl enable --now haproxy
$ systemctl enable --now keepalived
6.8 VIP test
$ ping 192.168.99.108
PING 192.168.99.108 (192.168.99.108) 56(84) bytes of data.
64 bytes from 192.168.99.108: icmp_seq=1 ttl=64 time=1.39 ms
64 bytes from 192.168.99.108: icmp_seq=2 ttl=64 time=2.46 ms
64 bytes from 192.168.99.108: icmp_seq=3 ttl=64 time=1.68 ms
64 bytes from 192.168.99.108: icmp_seq=4 ttl=64 time=1.08 ms
6.9 create relevant directories for all nodes
$ mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
6.10 configure kube-apiserver on all Master nodes
$ vim /etc/kubernetes/token.csv
d7d356746b508a1a478e49968fba7947,kubelet-bootstrap,10001,"system:kubelet-bootstrap"

$ vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \
      --v=2 \
      --logtostderr=true \
      --allow-privileged=true \
      --bind-address=0.0.0.0 \
      --secure-port=6443 \
      --insecure-port=0 \
      --advertise-address=192.168.99.108 \
      --service-cluster-ip-range=10.96.0.0/12 \
      --service-node-port-range=30000-32767 \
      --etcd-servers=https://192.168.99.101:2379,https://192.168.99.102:2379,https://192.168.99.103:2379 \
      --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \
      --etcd-certfile=/etc/etcd/ssl/etcd.pem \
      --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \
      --client-ca-file=/etc/kubernetes/pki/ca.pem \
      --tls-cert-file=/etc/kubernetes/pki/apiserver.pem \
      --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \
      --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \
      --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \
      --service-account-key-file=/etc/kubernetes/pki/sa.pub \
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
      --authorization-mode=Node,RBAC \
      --enable-bootstrap-token-auth=true \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
      --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \
      --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \
      --requestheader-allowed-names=aggregator \
      --requestheader-group-headers=X-Remote-Group \
      --requestheader-extra-headers-prefix=X-Remote-Extra- \
      --requestheader-username-headers=X-Remote-User \
      --token-auth-file=/etc/kubernetes/token.csv
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target
6.11 start kube-apiserver on all Master nodes
$ systemctl daemon-reload && systemctl enable --now kube-apiserver
6.12 configure kube-controller-manager on all Master nodes
$ vim /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
      --v=2 \
      --logtostderr=true \
      --address=127.0.0.1 \
      --root-ca-file=/etc/kubernetes/pki/ca.pem \
      --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
      --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
      --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
      --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \
      --leader-elect=true \
      --use-service-account-credentials=true \
      --node-monitor-grace-period=40s \
      --node-monitor-period=5s \
      --pod-eviction-timeout=2m0s \
      --controllers=*,bootstrapsigner,tokencleaner \
      --allocate-node-cidrs=true \
      --cluster-cidr=10.244.0.0/16 \
      --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
      --node-cidr-mask-size=24
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
6.13 start kube-controller-manager on all Master nodes
$ systemctl daemon-reload && systemctl enable --now kube-controller-manager
6.14 configure kube-scheduler on all Master nodes
$ vim /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-scheduler \
      --v=2 \
      --logtostderr=true \
      --address=127.0.0.1 \
      --leader-elect=true \
      --kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

$ systemctl daemon-reload && systemctl enable --now kube-scheduler
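At this point the whole control plane should be up. Since /root/.kube/config is only populated in the bootstrap step below, point kubectl at the admin kubeconfig explicitly; kubectl get cs is deprecated but still available in 1.17, and every entry should report Healthy:

$ kubectl get cs --kubeconfig=/etc/kubernetes/admin.kubeconfig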
7, TLS Bootstrapping configuration
7.1 create the bootstrap kubeconfig on Master01
$ kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.99.108:8443 --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
$ kubectl config set-credentials tls-bootstrap-token-user --token=c8ad9c.2e4d610cf3e7426e --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
$ kubectl config set-context tls-bootstrap-token-user@kubernetes --cluster=kubernetes --user=tls-bootstrap-token-user --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
$ kubectl config use-context tls-bootstrap-token-user@kubernetes --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
$ mkdir -p /root/.kube && cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
$ cd ~/k8s-ha-install/bootstrap/
$ kubectl create -f bootstrap.secret.yaml
secret/bootstrap-token-c8ad9c created
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
clusterrolebinding.rbac.authorization.k8s.io/node-autoapprove-bootstrap created
clusterrolebinding.rbac.authorization.k8s.io/node-autoapprove-certificate-rotation created
clusterrole.rbac.authorization.k8s.io/system:kube-apiserver-to-kubelet created
clusterrolebinding.rbac.authorization.k8s.io/system:kube-apiserver created
8, Node configuration
8.1 copy certificates to the worker nodes
$ for NODE in k8s-node01 k8s-node02; do
    ssh $NODE mkdir -p /etc/kubernetes/pki /etc/etcd/ssl
    for FILE in etcd-ca.pem etcd.pem etcd-key.pem; do
      scp /etc/etcd/ssl/$FILE $NODE:/etc/etcd/ssl/
    done
    for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig; do
      scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}
    done
  done
8.2 create relevant directories for all nodes
$ mkdir -p /var/lib/kubelet /var/log/kubernetes /etc/systemd/system/kubelet.service.d /etc/kubernetes/manifests/
8.3 configure kubelet on all nodes
$ vim /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
ExecStart=/usr/local/bin/kubelet
Restart=always
StartLimitInterval=0
RestartSec=10

[Install]
WantedBy=multi-user.target

$ vim /etc/systemd/system/kubelet.service.d/10-kubelet.conf
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig --kubeconfig=/etc/kubernetes/kubelet.kubeconfig"
Environment="KUBELET_SYSTEM_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_CONFIG_ARGS=--config=/etc/kubernetes/kubelet-conf.yml"
Environment="KUBELET_EXTRA_ARGS=--node-labels=node.kubernetes.io/node='' --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_SYSTEM_ARGS $KUBELET_EXTRA_ARGS

$ vim /etc/kubernetes/kubelet-conf.yml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: cgroupfs
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
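Note that kubelet-conf.yml sets cgroupDriver: cgroupfs, which must match the cgroup driver Docker is using (Docker's default when daemon.json does not override it). A quick check before starting the kubelet, which should print cgroupfs:

$ docker info --format '{{.CgroupDriver}}'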
8.4 start kubelet on all nodes
$ systemctl daemon-reload && systemctl enable --now kubelet
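As each kubelet comes up it requests a client certificate through TLS bootstrapping; the CSRs should appear and be approved automatically by the bindings created from bootstrap.secret.yaml:

$ kubectl get csr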
8.5 viewing cluster status
$ kubectl get node
NAME           STATUS     ROLES    AGE   VERSION
k8s-master01   NotReady   <none>   54s   v1.17.0
k8s-master02   NotReady   <none>   54s   v1.17.0
k8s-master03   NotReady   <none>   54s   v1.17.0
k8s-node01     NotReady   <none>   54s   v1.17.0
k8s-node02     NotReady   <none>   54s   v1.17.0
8.6 kube-proxy configuration
$ cd /root/k8s-ha-install
$ kubectl -n kube-system create serviceaccount kube-proxy
$ kubectl create clusterrolebinding system:kube-proxy --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy
$ SECRET=$(kubectl -n kube-system get sa/kube-proxy --output=jsonpath='{.secrets[0].name}')
$ JWT_TOKEN=$(kubectl -n kube-system get secret/$SECRET --output=jsonpath='{.data.token}' | base64 -d)
$ PKI_DIR=/etc/kubernetes/pki
$ K8S_DIR=/etc/kubernetes
$ kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.pem --embed-certs=true --server=https://192.168.99.108:8443 --kubeconfig=${K8S_DIR}/kube-proxy.kubeconfig
$ kubectl config set-credentials kubernetes --token=${JWT_TOKEN} --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
$ kubectl config set-context kubernetes --cluster=kubernetes --user=kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
$ kubectl config use-context kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
8.7 distribute the kube-proxy configuration and service files
$ for NODE in k8s-master01 k8s-master02 k8s-master03; do
    scp ${K8S_DIR}/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig
    scp kube-proxy/kube-proxy.conf $NODE:/etc/kubernetes/kube-proxy.conf
    scp kube-proxy/kube-proxy.service $NODE:/usr/lib/systemd/system/kube-proxy.service
  done
$ for NODE in k8s-node01 k8s-node02; do
    scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig
    scp kube-proxy/kube-proxy.conf $NODE:/etc/kubernetes/kube-proxy.conf
    scp kube-proxy/kube-proxy.service $NODE:/usr/lib/systemd/system/kube-proxy.service
  done
8.8 start kube-proxy on all nodes
$ systemctl daemon-reload && systemctl enable --now kube-proxy
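Assuming kube-proxy.conf enables ipvs mode (which all the ipvs preparation above is for), the IPVS virtual server table should now contain entries for the service network:

$ ipvsadm -Ln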
9, Installing calico
9.1 installing Calico 3.11.1
$ cd /root/k8s-ha-install/Calico/
$ kubectl create -f calico.yaml
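One thing to double-check: the IP pool in calico.yaml should match the --cluster-cidr passed to kube-controller-manager (10.244.0.0/16 above). A quick way to inspect it:

$ grep -A1 CALICO_IPV4POOL_CIDR calico.yaml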
9.2 viewing Calico status
$ kubectl get pod -n kube-system -o wide
NAME                                       READY   STATUS    RESTARTS   AGE   IP               NODE           NOMINATED NODE   READINESS GATES
calico-kube-controllers-648f4868b8-59s87   1/1     Running   2          61s   10.244.85.194    k8s-node01     <none>           <none>
calico-node-8rvl9                          1/1     Running   1          61s   192.168.99.103   k8s-master03   <none>           <none>
calico-node-bjpng                          1/1     Running   1          61s   192.168.99.102   k8s-master02   <none>           <none>
calico-node-f7spq                          1/1     Running   1          61s   192.168.99.101   k8s-master01   <none>           <none>
calico-node-k8797                          1/1     Running   1          61s   192.168.99.104   k8s-node01     <none>           <none>
calico-node-q8djw                          1/1     Running   1          61s   192.168.99.105   k8s-node02     <none>           <none>
$ kubectl get node
NAME           STATUS   ROLES    AGE   VERSION
k8s-master01   Ready    <none>   84s   v1.17.0
k8s-master02   Ready    <none>   84s   v1.17.0
k8s-master03   Ready    <none>   84s   v1.17.0
k8s-node01     Ready    <none>   84s   v1.17.0
k8s-node02     Ready    <none>   84s   v1.17.0
$ kubectl cluster-info
Kubernetes master is running at https://192.168.99.108:8443
10, Installing CoreDNS
$ cd /root/k8s-ha-install/CoreDNS/
$ kubectl create -f coredns.yaml
$ kubectl get po -n kube-system
$ kubectl get pod -n kube-system -o wide
NAME                                       READY   STATUS    RESTARTS   AGE     IP               NODE           NOMINATED NODE   READINESS GATES
calico-kube-controllers-648f4868b8-59s87   1/1     Running   2          2m25s   10.244.85.194    k8s-node01     <none>           <none>
calico-node-8rvl9                          1/1     Running   1          2m25s   192.168.99.103   k8s-master03   <none>           <none>
calico-node-bjpng                          1/1     Running   1          2m25s   192.168.99.102   k8s-master02   <none>           <none>
calico-node-f7spq                          1/1     Running   1          2m25s   192.168.99.101   k8s-master01   <none>           <none>
calico-node-k8797                          1/1     Running   1          2m25s   192.168.99.104   k8s-node01     <none>           <none>
calico-node-q8djw                          1/1     Running   1          2m25s   192.168.99.105   k8s-node02     <none>           <none>
coredns-76b74f549-xlfc2                    1/1     Running   1          95s     10.244.122.130   k8s-master02   <none>           <none>
$ kubectl logs -f coredns-76b74f549-xlfc2 -n kube-system
[INFO] plugin/ready: Still waiting on: "kubernetes"
.:53
[INFO] plugin/reload: Running configuration MD5 = 8b19e11d5b2a72fb8e63383b064116a1
CoreDNS-1.6.6
linux/amd64, go1.13.5, 6a7a75e
11, Cluster validation
11.1 installing busybox
$ cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: busybox:1.28
    command:
    - sleep
    - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF
11.2 verify DNS resolution
$ kubectl exec busybox -n default -- nslookup kubernetes
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
$ kubectl exec busybox -n default -- nslookup kube-dns.kube-system
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kube-dns.kube-system
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
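Beyond DNS, a simple cross-node connectivity check is to ping another Pod's IP from busybox; <pod-ip> below is a placeholder for any Pod IP taken from kubectl get pod -o wide on a node other than the one running busybox:

$ kubectl get pod -n kube-system -o wide
$ kubectl exec busybox -n default -- ping -c 2 <pod-ip>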