k8s master load balancing



1. Server Planning
Note: Only master load balancing is implemented

Server name    IP             Role
k8s-master1    192.168.1.107  k8s-master1, etcd
k8s-master2    192.168.1.108  k8s-master2
k8s-node1      192.168.1.109  k8s-node1
nginx          192.168.1.55   nginx load balancer


2. k8s-master1 deployment
1. Install Docker

#Close the firewall
ufw disable && ufw status
#Execute script to install docker
curl -s  https://raw.githubusercontent.com/jy1779/docker/master/install/aliyun_docker_install.sh | bash
#Modify docker.service parameters
LINE=$(grep -n ExecStart /lib/systemd/system/docker.service|awk -F : '{print $1}')
EXECSTARTPOST='ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT'
sed "$LINE a$EXECSTARTPOST" -i /lib/systemd/system/docker.service
#Reload docker.service and restart the docker service
systemctl daemon-reload && service docker restart
service docker status
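To confirm the sed edit landed, the unit file should now show the ExecStartPost line directly under ExecStart. The exact ExecStart flags depend on the installed Docker version; the output below is a sketch:
grep -A1 '^ExecStart' /lib/systemd/system/docker.service
ExecStart=/usr/bin/dockerd -H fd://
ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT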


2. Generate configuration files and the root certificate
#Add kernel parameters (target file: /etc/sysctl.d/k8s.conf)
#Parameter description:
# Controls IP packet forwarding
net.ipv4.ip_forward = 1
# Enable netfilter on bridges.
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1

#Execute commands, add kernel parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
#Make Kernel Parameters Effective
sysctl -p /etc/sysctl.d/k8s.conf


#If the following errors are prompted, execute: modprobe br_netfilter
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
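If the module was missing, a sketch of the fix - load it now, persist it across reboots (the modules-load.d path is the usual systemd convention; adjust for your distro), and re-apply the parameters:
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl -p /etc/sysctl.d/k8s.conf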
#Get k8s binaries and configuration files

#kubernetes.git is not an official release; it is a repository with the files needed for this custom k8s cluster installation
git clone https://code.aliyun.com/jy1779/kubernetes.git
tar xf ./kubernetes/kubernetes-bins.tar.gz -C /usr/local/sbin/ && rm -f ./kubernetes/kubernetes-bins.tar.gz
echo 'export PATH=$PATH:/usr/local/sbin/kubernetes-bins' >> /etc/profile && source /etc/profile


#Detect environment variables

which kubectl 
/usr/local/sbin/kubernetes-bins/kubectl


#Generate configuration files

cd /root/kubernetes/kubernetes-starter/
#Modify the configuration file
vim config.properties
#kubernetes binary directory, eg: /home/michael/bin
BIN_PATH=/usr/local/sbin/kubernetes-bins
#Current node ip, eg: 192.168.1.102
NODE_IP=192.168.1.107
#List of etcd service clusters, eg: http://192.168.1.102:2379
#If you already have an etcd cluster, fill in its endpoints. If not, fill in: http://${MASTER_IP}:2379 (replace MASTER_IP with your own master node ip)
ETCD_ENDPOINTS=https://192.168.1.107:2379
#ip address of kubernetes primary node, eg: 192.168.1.102
MASTER_IP=192.168.1.107
#Execute script to generate configuration file
./gen-config.sh with-ca
====List of substitution variables====
BIN_PATH=/usr/local/sbin/kubernetes-bins
NODE_IP=192.168.1.107
ETCD_ENDPOINTS=https://192.168.1.107:2379
MASTER_IP=192.168.1.107
====================
====Replace Profile====
all-node/kube-calico.service
ca/admin/admin-csr.json
ca/ca-config.json
ca/ca-csr.json
ca/calico/calico-csr.json
ca/etcd/etcd-csr.json
ca/kube-proxy/kube-proxy-csr.json
ca/kubernetes/kubernetes-csr.json
master-node/etcd.service
master-node/kube-apiserver.service
master-node/kube-controller-manager.service
master-node/kube-scheduler.service
services/kube-dashboard.yaml
services/kube-dns.yaml
worker-node/10-calico.conf
worker-node/kubelet.service
worker-node/kube-proxy.service
=================
//Configuration generated successfully at: /root/kubernetes/kubernetes-starter/target


#Install cfssl

wget -q --show-progress --https-only --timestamping \
  https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 \
  https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
#Make the binaries executable
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64
#Move to bin directory
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
#Validation
cfssl version

#Generate Root Certificate
#Create directory to store ca certificates

mkdir -p /etc/kubernetes/ca
#Tip: ca-config.json and ca-csr.json can also be modified or written by hand
#Copy a ca file
cp ~/kubernetes/kubernetes-starter/target/ca/ca-config.json /etc/kubernetes/ca
cp ~/kubernetes/kubernetes-starter/target/ca/ca-csr.json /etc/kubernetes/ca
#Generate certificates and keys
cd /etc/kubernetes/ca
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
#View certificates and keys
 ls
ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem
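To double-check the CA before issuing further certificates, cfssl can print its fields (certinfo is part of the cfssl toolkit installed above):
cfssl certinfo -cert ca.pem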


3. Deploy Etcd
When an etcd node provides access to other services, those clients verify its identity, so it needs a server certificate that covers the addresses it listens on. When there are multiple etcd nodes, it also needs a client certificate to interact with the other nodes in the etcd cluster. Clients and servers can of course share the same certificate, because the two roles are essentially identical here.
#Create a directory to store etcd certificates

mkdir -p /etc/kubernetes/ca/etcd
#Copy etcd certificate configuration
cp ~/kubernetes/kubernetes-starter/target/ca/etcd/etcd-csr.json /etc/kubernetes/ca/etcd/
cd /etc/kubernetes/ca/etcd/
#Modify the etcd-csr.json configuration file (the inline #-comments below only mark the additions and must be removed from the actual JSON)
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.1.107",
    "192.168.1.108",  #Add IP for k8s-master2
    "192.168.1.55"     #Add nginx Load Balancing IP
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "XS",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
#Use root certificate (ca.pem) to issue etcd certificates
cfssl gencert \
        -ca=/etc/kubernetes/ca/ca.pem \
        -ca-key=/etc/kubernetes/ca/ca-key.pem \
        -config=/etc/kubernetes/ca/ca-config.json \
        -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
#As before, three files are generated; etcd.csr is an intermediate certificate request file, and what we ultimately want is etcd-key.pem and etcd.pem
ls
etcd.csr  etcd-csr.json  etcd-key.pem  etcd.pem
#Create a working directory (where data is stored)
mkdir -p /var/lib/etcd
#copy etcd service configuration file to system services directory
cp ~/kubernetes/kubernetes-starter/target/master-node/etcd.service /lib/systemd/system/
#Create etcd service
systemctl enable etcd.service
#Start etcd service
service etcd start
#Check the service log to see if there is any error information to ensure the service is working properly
journalctl -f -u etcd.service
#Testing if etcd service is working
ETCDCTL_API=3 etcdctl \
  --endpoints=https://192.168.1.107:2379  \
  --cacert=/etc/kubernetes/ca/ca.pem \
  --cert=/etc/kubernetes/ca/etcd/etcd.pem \
  --key=/etc/kubernetes/ca/etcd/etcd-key.pem \
  endpoint health
#The following shows successful deployment.
https://192.168.1.107:2379 is healthy: successfully committed proposal: took = 10.408412ms
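Beyond the health check, etcdctl can also list the cluster members (a single member here, since only k8s-master1 runs etcd):
ETCDCTL_API=3 etcdctl \
  --endpoints=https://192.168.1.107:2379 \
  --cacert=/etc/kubernetes/ca/ca.pem \
  --cert=/etc/kubernetes/ca/etcd/etcd.pem \
  --key=/etc/kubernetes/ca/etcd/etcd-key.pem \
  member list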



4. Deploy APIServer

#Create directory to store api certificates
mkdir -p /etc/kubernetes/ca/kubernetes
#Copy apiserver certificate configuration
cp ~/kubernetes/kubernetes-starter/target/ca/kubernetes/kubernetes-csr.json /etc/kubernetes/ca/kubernetes/
#Issue a kubernetes certificate using a root certificate (ca.pem)
cd /etc/kubernetes/ca/kubernetes/
#Modify the kubernetes-csr.json configuration file (again, the inline #-comments only mark the additions and must be removed from the actual JSON)
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.1.107",
    "192.168.1.108",  #Add IP for k8s-master2
    "192.168.1.55",    #Add nginx Load Balanced iP s
    "10.68.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "XS",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
cfssl gencert \
        -ca=/etc/kubernetes/ca/ca.pem \
        -ca-key=/etc/kubernetes/ca/ca-key.pem \
        -config=/etc/kubernetes/ca/ca-config.json \
        -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
#As before, three files are generated; kubernetes.csr is an intermediate certificate request file, and ultimately we want kubernetes-key.pem and kubernetes.pem
ls
kubernetes.csr  kubernetes-csr.json  kubernetes-key.pem  kubernetes.pem
#Generate token authentication file
#Generate random token
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
97e8c07dce2b2bab69cfd3162d5383c9
#Write to the token.csv file (format: token,user,uid,"group")
echo '97e8c07dce2b2bab69cfd3162d5383c9,kubelet-bootstrap,10001,"system:kubelet-bootstrap"' > /etc/kubernetes/ca/kubernetes/token.csv
#copy the apiserver service configuration file to the system services directory
cp ~/kubernetes/kubernetes-starter/target/master-node/kube-apiserver.service /lib/systemd/system/
#Create a kube-apiserver service
systemctl enable kube-apiserver.service
#Start the kube-apiserver service
service kube-apiserver start
#View the kube-apiserver log
journalctl -f -u kube-apiserver
#By default the apiserver only allows NodePort services within the range below; ports outside it (such as 80) cannot be mapped unless the range is modified
cat ~/kubernetes/kubernetes-starter/target/master-node/kube-apiserver.service |grep port-range
  --service-node-port-range=20000-40000 \
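If a port outside that range is ever needed (say 80), a sketch of widening it, assuming the flag sits in the unit file exactly as shown above:
sed -i 's/--service-node-port-range=20000-40000/--service-node-port-range=80-40000/' /lib/systemd/system/kube-apiserver.service
systemctl daemon-reload && service kube-apiserver restart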


5. Deploy Controller-manager

The controller-manager usually runs on the same machine as the api-server, so it can communicate with the api-server over an insecure local port, without generating certificates and private keys.
#copy the kube-controller-manager.service configuration file to the system services directory
cp ~/kubernetes/kubernetes-starter/target/master-node/kube-controller-manager.service /lib/systemd/system/
#Create a kube-controller-manager.service service
systemctl enable kube-controller-manager.service
#Start the kube-controller-manager.service service
service kube-controller-manager start
#View the kube-controller-manager.service log
journalctl -f -u kube-controller-manager


6. Deploy Scheduler

The scheduler usually runs on the same machine as the api-server, so it can communicate with the api-server over an insecure local port, without generating certificates and private keys.
#copy the scheduler service configuration file to the system services directory
cp ~/kubernetes/kubernetes-starter/target/master-node/kube-scheduler.service /lib/systemd/system/
#Create a kube-scheduler.service service
systemctl enable kube-scheduler.service
#Start the kube-scheduler.service service
service kube-scheduler start
#View the kube-scheduler.service log
journalctl -f -u kube-scheduler


7. Configure Kubectl management

#Create a directory to store kubectl certificates
mkdir -p /etc/kubernetes/ca/admin
#Prepare the admin certificate configuration - kubectl only requires a client certificate, so the hosts field can be empty in the certificate request
#Copy the kubectl certificate configuration
cp ~/kubernetes/kubernetes-starter/target/ca/admin/admin-csr.json /etc/kubernetes/ca/admin/
#Use root certificate (ca.pem) to issue admin certificate
cd /etc/kubernetes/ca/admin/
cfssl gencert \
        -ca=/etc/kubernetes/ca/ca.pem \
        -ca-key=/etc/kubernetes/ca/ca-key.pem \
        -config=/etc/kubernetes/ca/ca-config.json \
        -profile=kubernetes admin-csr.json | cfssljson -bare admin
#What we want ultimately is admin-key.pem and admin.pem
ls
admin.csr  admin-csr.json  admin-key.pem  admin.pem
#Configure the kubectl file
#Specify the address and certificate location of apiserver
kubectl config set-cluster kubernetes \
        --certificate-authority=/etc/kubernetes/ca/ca.pem \
        --embed-certs=true \
        --server=https://192.168.1.107:6443
#Set client authentication parameters, specifying the admin certificate and private key
kubectl config set-credentials admin \
        --client-certificate=/etc/kubernetes/ca/admin/admin.pem \
        --embed-certs=true \
        --client-key=/etc/kubernetes/ca/admin/admin-key.pem
#Associate users and clusters
kubectl config set-context kubernetes \
        --cluster=kubernetes --user=admin
#Set Current Context
kubectl config use-context kubernetes
#The result is a configuration file whose content you can inspect
cat ~/.kube/config
#Verify master components
kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
etcd-0               Healthy   {"health": "true"}   
controller-manager   Healthy   ok                   
scheduler            Healthy   ok    
#Create a kubelet-bootstrap binding
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap


8. Deploy Calico network
Calico implements the CNI interface and is one option for the Kubernetes networking layer. It is a pure layer-3 data center networking scheme (no overlay required) and integrates well with IaaS and container platforms such as OpenStack, Kubernetes, AWS, and GCE. Calico uses the Linux kernel to implement an efficient vRouter for data forwarding on each compute node, and each vRouter spreads the routing information of its workloads across the entire Calico network via the BGP protocol - small deployments can be directly interconnected, while large deployments can go through a designated BGP route reflector. This ensures that all data traffic between workloads is interconnected through IP routing.

#The calico certificate is used in four places:
# calico/node: This docker container accesses etcd at runtime using a certificate
# cni plug-in in cni configuration file: need to access etcd to use certificate
# calicoctl: Use certificate to access etcd when operating cluster networks
# calico/kube-controllers: Access etcd using certificates when synchronizing cluster network policies

#Create a directory to store calico certificates
mkdir -p /etc/kubernetes/ca/calico
#Prepare calico certificate configuration - calico only requires client certificates, so the hosts field can be empty in the certificate request
cp ~/kubernetes/kubernetes-starter/target/ca/calico/calico-csr.json /etc/kubernetes/ca/calico/
cd /etc/kubernetes/ca/calico/
cfssl gencert \
        -ca=/etc/kubernetes/ca/ca.pem \
        -ca-key=/etc/kubernetes/ca/ca-key.pem \
        -config=/etc/kubernetes/ca/ca-config.json \
        -profile=kubernetes calico-csr.json | cfssljson -bare calico
#What we want ultimately is calico-key.pem and calico.pem
ls 
calico.csr  calico-csr.json  calico-key.pem  calico.pem
#Start the kube-calico.service service
cp ~/kubernetes/kubernetes-starter/target/all-node/kube-calico.service /lib/systemd/system/
systemctl enable kube-calico.service
#Starting the kube-calico service requires pulling its image first
service kube-calico start
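As on the other nodes later in this walkthrough, the service can be checked via its journal and calicoctl (assuming calicoctl ships in the kubernetes-bins bundle used here):
journalctl -f -u kube-calico
calicoctl node status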


3. k8s-master2 deployment
1. Install Docker

#Close the firewall
ufw disable && ufw status
#Execute script to install docker
curl -s  https://raw.githubusercontent.com/jy1779/docker/master/install/aliyun_docker_install.sh | bash
#Modify docker.service parameters
LINE=$(grep -n ExecStart /lib/systemd/system/docker.service|awk -F : '{print $1}')
EXECSTARTPOST='ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT'
sed "$LINE a$EXECSTARTPOST" -i /lib/systemd/system/docker.service
#Reload docker.service and restart the docker service
systemctl daemon-reload && service docker restart
service docker status


2. Generate configuration files and the root certificate
#Add kernel parameters (target file: /etc/sysctl.d/k8s.conf)
#Parameter description:
# Controls IP packet forwarding
net.ipv4.ip_forward = 1
# Enable netfilter on bridges.
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1

#Execute commands, add kernel parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
#Make Kernel Parameters Effective
sysctl -p /etc/sysctl.d/k8s.conf


#If the following errors are prompted, execute: modprobe br_netfilter
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory

#Get k8s binaries and configuration files
#kubernetes.git is not an official release; it is a repository with the files needed for this custom k8s cluster installation
git clone https://code.aliyun.com/jy1779/kubernetes.git
#Unzip the k8s binaries and add them to the system environment variables
tar xf ./kubernetes/kubernetes-bins.tar.gz -C /usr/local/sbin/ && rm -f ./kubernetes/kubernetes-bins.tar.gz
echo 'export PATH=$PATH:/usr/local/sbin/kubernetes-bins' >> /etc/profile && source /etc/profile
#Detect environment variables
which kubectl
/usr/local/sbin/kubernetes-bins/kubectl
#Generate configuration files
cd /root/kubernetes/kubernetes-starter/
#Modify the configuration file
vim config.properties
#kubernetes binary directory, eg: /home/michael/bin
BIN_PATH=/usr/local/sbin/kubernetes-bins
#Current node ip, eg: 192.168.1.102
NODE_IP=192.168.1.108
#List of etcd service clusters, eg: http://192.168.1.102:2379
#If you already have an etcd cluster, fill in its endpoints. If not, fill in: http://${MASTER_IP}:2379 (replace MASTER_IP with your own master node ip)
ETCD_ENDPOINTS=https://192.168.1.107:2379
#ip address of kubernetes primary node, eg: 192.168.1.102
MASTER_IP=192.168.1.108
./gen-config.sh with-ca
====List of substitution variables====
BIN_PATH=/usr/local/sbin/kubernetes-bins
NODE_IP=192.168.1.108
ETCD_ENDPOINTS=https://192.168.1.107:2379
MASTER_IP=192.168.1.108
====================
====Replace Profile====
all-node/kube-calico.service
ca/admin/admin-csr.json
ca/ca-config.json
ca/ca-csr.json
ca/calico/calico-csr.json
ca/etcd/etcd-csr.json
ca/kube-proxy/kube-proxy-csr.json
ca/kubernetes/kubernetes-csr.json
master-node/etcd.service
master-node/kube-apiserver.service
master-node/kube-controller-manager.service
master-node/kube-scheduler.service
services/kube-dashboard.yaml
services/kube-dns.yaml
worker-node/10-calico.conf
worker-node/kubelet.service
worker-node/kube-proxy.service
=================
//Configuration generated successfully at: /root/kubernetes/kubernetes-starter/target


#Install cfssl

wget -q --show-progress --https-only --timestamping \
  https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 \
  https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
#Make the binaries executable
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64
#Move to bin directory
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
#Validation
cfssl version


#Generate Root Certificate
#Create directory to store ca certificates

mkdir -p /etc/kubernetes/ca
#Get a certificate from k8s-master1
rsync -av 192.168.1.107:/etc/kubernetes/ca/ca.pem /etc/kubernetes/ca/
rsync -av 192.168.1.107:/etc/kubernetes/ca/ca-key.pem /etc/kubernetes/ca/
rsync -av 192.168.1.107:/etc/kubernetes/ca/ca-config.json /etc/kubernetes/ca/


#Create a directory to store etcd certificates
#Get a certificate from k8s-master1

mkdir -p /etc/kubernetes/ca/etcd
rsync -av 192.168.1.107:/etc/kubernetes/ca/etcd/etcd-key.pem /etc/kubernetes/ca/etcd/
rsync -av 192.168.1.107:/etc/kubernetes/ca/etcd/etcd.pem /etc/kubernetes/ca/etcd/




#Test Connection to etcd Service

ETCDCTL_API=3 etcdctl \
  --endpoints=https://192.168.1.107:2379  \
  --cacert=/etc/kubernetes/ca/ca.pem \
  --cert=/etc/kubernetes/ca/etcd/etcd.pem \
  --key=/etc/kubernetes/ca/etcd/etcd-key.pem \
  endpoint health
#The following output indicates everything is normal
https://192.168.1.107:2379 is healthy: successfully committed proposal: took = 341.160166ms


3. Deploy APIServer
#Create directory to store api certificates
#Get a certificate from k8s-master1

mkdir -p /etc/kubernetes/ca/kubernetes
cd /etc/kubernetes/ca/kubernetes/
rsync -av 192.168.1.107:/etc/kubernetes/ca/kubernetes/kubernetes-key.pem /etc/kubernetes/ca/kubernetes/
rsync -av 192.168.1.107:/etc/kubernetes/ca/kubernetes/kubernetes.pem /etc/kubernetes/ca/kubernetes/
rsync -av 192.168.1.107:/etc/kubernetes/ca/kubernetes/token.csv /etc/kubernetes/ca/kubernetes/
#copy the apiserver service configuration file to the system services directory
cp ~/kubernetes/kubernetes-starter/target/master-node/kube-apiserver.service /lib/systemd/system/
#Create a kube-apiserver service
systemctl enable kube-apiserver.service
#Start the kube-apiserver service
service kube-apiserver start
#View the kube-apiserver log
journalctl -f -u kube-apiserver


4. Deploy Controller-manager

The controller-manager usually runs on the same machine as the api-server, so it can communicate with the api-server over an insecure local port, without generating certificates and private keys.
#copy the kube-controller-manager.service configuration file to the system services directory
cp ~/kubernetes/kubernetes-starter/target/master-node/kube-controller-manager.service /lib/systemd/system/
#Create a kube-controller-manager.service service
systemctl enable kube-controller-manager.service
#Start the kube-controller-manager.service service
service kube-controller-manager start
#View the kube-controller-manager.service log
journalctl -f -u kube-controller-manager


5. Deploy Scheduler
Scheduler is usually on the same machine as api-server, so it can communicate with api-server using an unsecured port without generating certificates and private keys.

#copy the scheduler service configuration file to the system services directory
cp ~/kubernetes/kubernetes-starter/target/master-node/kube-scheduler.service /lib/systemd/system/
#Create a kube-scheduler.service service
systemctl enable kube-scheduler.service
#Start the kube-scheduler.service service
service kube-scheduler start
#View the kube-scheduler.service log
journalctl -f -u kube-scheduler


6. Configure Kubectl management
#Create a directory to store kubectl certificates

mkdir -p /etc/kubernetes/ca/admin
#Prepare the admin certificate configuration - kubectl only requires a client certificate, so the hosts field can be empty in the certificate request
#Copy the kubectl certificate configuration
cp ~/kubernetes/kubernetes-starter/target/ca/admin/admin-csr.json /etc/kubernetes/ca/admin/
#Use root certificate (ca.pem) to issue admin certificate
cd /etc/kubernetes/ca/admin/
cfssl gencert \
        -ca=/etc/kubernetes/ca/ca.pem \
        -ca-key=/etc/kubernetes/ca/ca-key.pem \
        -config=/etc/kubernetes/ca/ca-config.json \
        -profile=kubernetes admin-csr.json | cfssljson -bare admin
#Configure the kubectl file
#Specify the address and certificate location of apiserver
kubectl config set-cluster kubernetes \
        --certificate-authority=/etc/kubernetes/ca/ca.pem \
        --embed-certs=true \
        --server=https://192.168.1.108:6443
#Set client authentication parameters, specifying the admin certificate and private key
kubectl config set-credentials admin \
        --client-certificate=/etc/kubernetes/ca/admin/admin.pem \
        --embed-certs=true \
        --client-key=/etc/kubernetes/ca/admin/admin-key.pem
#Associate users and clusters
kubectl config set-context kubernetes \
        --cluster=kubernetes --user=admin
#Set Current Context
kubectl config use-context kubernetes
#View the generated configuration
cat ~/.kube/config
#View master components
kubectl get componentstatus
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok                   
controller-manager   Healthy   ok                   
etcd-0               Healthy   {"health": "true"}


7. Deploy Calico network

#Create a directory to store calico certificates
mkdir -p /etc/kubernetes/ca/calico
rsync -av 192.168.1.107:/etc/kubernetes/ca/calico/calico.pem /etc/kubernetes/ca/calico
rsync -av 192.168.1.107:/etc/kubernetes/ca/calico/calico-key.pem /etc/kubernetes/ca/calico
#Start the kube-calico.service service
cp ~/kubernetes/kubernetes-starter/target/all-node/kube-calico.service /lib/systemd/system/
systemctl enable kube-calico.service
#Starting the kube-calico service requires pulling its image first
service kube-calico start
#View the log
journalctl -f -u kube-calico
#Check the calico node status
calicoctl node status


4. Deploy nginx
Server: 192.168.1.55
Deploy nginx with docker-compose

#View the docker-compose structure of nginx
tree -L 2 nginx/
nginx/
├── conf
│   ├── conf.d
│   ├── fastcgi_params
│   ├── koi-utf
│   ├── koi-win
│   ├── mime.types
│   ├── modules -> /usr/lib/nginx/modules
│   ├── nginx.conf
│   ├── scgi_params
│   ├── uwsgi_params
│   └── win-utf
├── docker-compose.yml
└── html
    ├── 50x.html
    └── index.html
#docker-compose.yml configuration file
cd nginx
cat docker-compose.yml 
version: '2.0'
services:
  nginxs:
    image: nginx
    container_name: nginxs
    network_mode: host
    volumes:
      - "./conf:/etc/nginx"
      - "./html:/usr/share/nginx/html"
#View nginx profile
cat conf/nginx.conf 
user  nginx;
worker_processes  1;
error_log  /var/log/nginx/error.log warn;
pid        /var/run/nginx.pid;
events {
    worker_connections  1024;
}
#4-Layer Forwarding
stream {
      log_format ws "$remote_addr $upstream_addr $time_local $status";
      access_log /var/log/nginx/k8s.log ws;
      server {
          listen 6443;
          proxy_pass app_server;
      }
      upstream app_server{
          server 192.168.1.107:6443;  #k8s-master1
          server 192.168.1.108:6443;  #k8s-master2
      }
}
http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
    access_log  /var/log/nginx/access.log  main;
    sendfile        on;
    #tcp_nopush     on;
    keepalive_timeout  65;
    #gzip  on;
    include /etc/nginx/conf.d/*.conf;
}
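The upstream block above relies on nginx's default round-robin balancing. The stream module also supports least_conn and per-server health parameters (max_fails/fail_timeout) if you want a failed master taken out of rotation faster - a sketch of an alternative upstream block:
upstream app_server {
    least_conn;                                              #pick the backend with the fewest active connections
    server 192.168.1.107:6443 max_fails=2 fail_timeout=10s;  #k8s-master1
    server 192.168.1.108:6443 max_fails=2 fail_timeout=10s;  #k8s-master2
}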
#Start the container and check its status
docker-compose up -d
docker-compose ps
 Name          Command          State   Ports 
---------------------------------------------
nginxs   nginx -g daemon off;   Up   
#View Port
netstat -nutlp|grep 6443
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      5706/nginx: master
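A quick end-to-end check of the layer-4 forwarding: hit the LB address with curl. Without client credentials the apiserver typically answers with a JSON Unauthorized body, which is enough to prove the proxying works:
curl -k https://192.168.1.55:6443/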


5. Deploy Node
1. Install Docker

#Close the firewall
ufw disable && ufw status
#Execute docker installation script
curl -s  https://raw.githubusercontent.com/jy1779/docker/master/install/aliyun_docker_install.sh | bash
#Get binary files
git clone https://code.aliyun.com/jy1779/kubernetes.git    
#Unzip kubernetes-bins and add to environment variables
tar xf ./kubernetes/kubernetes-bins.tar.gz -C /usr/local/sbin/
echo 'export PATH=$PATH:/usr/local/sbin/kubernetes-bins' >> /etc/profile && source /etc/profile
#Modify docker.server
LINE=$(grep -n ExecStart /lib/systemd/system/docker.service|awk -F : '{print $1}')
EXECSTARTPOST='ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT'
sed "$LINE a$EXECSTARTPOST" -i /lib/systemd/system/docker.service
#Restart docker
systemctl daemon-reload && service docker restart
service docker status

2. Generate a configuration file

#Adding Kernel Parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
#Make Kernel Parameters Effective
sysctl -p /etc/sysctl.d/k8s.conf
#Modify configuration file config.properties
cd /root/kubernetes/kubernetes-starter/
#View the configuration file
cat config.properties 
#kubernetes binary directory, eg: /home/michael/bin
BIN_PATH=/usr/local/sbin/kubernetes-bins
#Current node ip, eg: 192.168.1.102
NODE_IP=192.168.1.109
#List of etcd service clusters, eg: http://192.168.1.102:2379
#If you already have an etcd cluster, fill in its endpoints. If not, fill in: http://${MASTER_IP}:2379 (replace MASTER_IP with your own master node ip)
ETCD_ENDPOINTS=https://192.168.1.107:2379
#ip address of kubernetes primary node, eg: 192.168.1.102
MASTER_IP=192.168.1.55
#Generate profile
cd ~/kubernetes/kubernetes-starter && ./gen-config.sh with-ca
====List of substitution variables====
BIN_PATH=/usr/local/sbin/kubernetes-bins
NODE_IP=192.168.1.109
ETCD_ENDPOINTS=https://192.168.1.107:2379
MASTER_IP=192.168.1.55
====================
====Replace Profile====
all-node/kube-calico.service
ca/admin/admin-csr.json
ca/ca-config.json
ca/ca-csr.json
ca/calico/calico-csr.json
ca/etcd/etcd-csr.json
ca/kube-proxy/kube-proxy-csr.json
ca/kubernetes/kubernetes-csr.json
master-node/etcd.service
master-node/kube-apiserver.service
master-node/kube-controller-manager.service
master-node/kube-scheduler.service
services/kube-dashboard.yaml
services/kube-dns.yaml
worker-node/10-calico.conf
worker-node/kubelet.service
worker-node/kube-proxy.service
=================
//Configuration generated successfully at: /root/kubernetes/kubernetes-starter/target


#Install cfssl

wget -q --show-progress --https-only --timestamping \
  https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 \
  https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
cfssl version


#Create directory to store certificates
#Get them from k8s-master1

mkdir -p /etc/kubernetes/ca/
mkdir -p /etc/kubernetes/ca/calico/
rsync -av 192.168.1.107:/etc/kubernetes/ca/ca.pem /etc/kubernetes/ca/
rsync -av 192.168.1.107:/etc/kubernetes/ca/ca-key.pem /etc/kubernetes/ca/
rsync -av 192.168.1.107:/etc/kubernetes/ca/ca-config.json /etc/kubernetes/ca/
rsync -av 192.168.1.107:/etc/kubernetes/ca/calico/calico.pem /etc/kubernetes/ca/calico/
rsync -av 192.168.1.107:/etc/kubernetes/ca/calico/calico-key.pem /etc/kubernetes/ca/calico/

3. Deploy Calico network

#Copy calico startup file to system services directory
cp ~/kubernetes/kubernetes-starter/target/all-node/kube-calico.service /lib/systemd/system/
#Create kube-calico.service
systemctl enable kube-calico.service
#Start the kube-calico.service service
service kube-calico start
#Check the calico node status; the master nodes should appear as BGP peers
calicoctl node status
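If the mesh is up, the output should resemble the sketch below (peer addresses per the planning table; timestamps illustrative):
Calico process is running.

IPv4 BGP status
+---------------+-------------------+-------+----------+-------------+
| PEER ADDRESS  |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+---------------+-------------------+-------+----------+-------------+
| 192.168.1.107 | node-to-node mesh | up    | 09:00:00 | Established |
| 192.168.1.108 | node-to-node mesh | up    | 09:00:00 | Established |
+---------------+-------------------+-------+----------+-------------+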


4. Deploy Kubelet

cd /etc/kubernetes/
#Create bootstrap.kubeconfig
kubectl config set-cluster kubernetes \
        --certificate-authority=/etc/kubernetes/ca/ca.pem \
        --embed-certs=true \
        --server=https://192.168.1.55:6443 \
        --kubeconfig=bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap \
        --token=97e8c07dce2b2bab69cfd3162d5383c9 \
        --kubeconfig=bootstrap.kubeconfig
kubectl config set-context default \
        --cluster=kubernetes \
        --user=kubelet-bootstrap \
        --kubeconfig=bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
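Optionally inspect the generated file before moving on:
kubectl config view --kubeconfig=bootstrap.kubeconfig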
#Prepare cni
mkdir -p /etc/cni/net.d/
cp ~/kubernetes/kubernetes-starter/target/worker-node/10-calico.conf /etc/cni/net.d/
#Create a working directory to store kubelet
mkdir /var/lib/kubelet
#Copy kubelet.service to system directory
cp ~/kubernetes/kubernetes-starter/target/worker-node/kubelet.service /lib/systemd/system/
#Create a kubelet service
systemctl enable kubelet
#Start the kubelet service
service kubelet start


5. Master issues certificates

#Execute on master server
 kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-Vuj62TUED4foaVjOmsbvMLJpfDsy1RBHbKMAhgtuoyE   16s       kubelet-bootstrap   Pending
#Approve the pending certificate requests
kubectl get csr|grep 'Pending' | awk '{print $1}'| xargs kubectl certificate approve
certificatesigningrequest "node-csr-Vuj62TUED4foaVjOmsbvMLJpfDsy1RBHbKMAhgtuoyE" approved
#View again
kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-Vuj62TUED4foaVjOmsbvMLJpfDsy1RBHbKMAhgtuoyE   1m        kubelet-bootstrap   Approved,Issued
#Validation node
kubectl get node
NAME            STATUS    ROLES     AGE       VERSION
192.168.1.109   Ready     <none>    52s       v1.9.0


6. Deploy kube-proxy

#Create a kube-proxy working directory and store a certificate directory
mkdir -p /var/lib/kube-proxy
mkdir -p /etc/kubernetes/ca/kube-proxy
#Copy the kube-proxy certificate configuration
cp ~/kubernetes/kubernetes-starter/target/ca/kube-proxy/kube-proxy-csr.json /etc/kubernetes/ca/kube-proxy/
cd /etc/kubernetes/ca/kube-proxy/
#Use the root certificate (ca.pem) to issue kube-proxy certificates
cfssl gencert \
        -ca=/etc/kubernetes/ca/ca.pem \
        -ca-key=/etc/kubernetes/ca/ca-key.pem \
        -config=/etc/kubernetes/ca/ca-config.json \
        -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
cd /etc/kubernetes/
kubectl config set-cluster kubernetes \
        --certificate-authority=/etc/kubernetes/ca/ca.pem \
        --embed-certs=true \
        --server=https://192.168.1.55:6443 \
        --kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
        --client-certificate=/etc/kubernetes/ca/kube-proxy/kube-proxy.pem \
        --client-key=/etc/kubernetes/ca/kube-proxy/kube-proxy-key.pem \
        --embed-certs=true \
        --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
        --cluster=kubernetes \
        --user=kube-proxy \
        --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
#Copy the kube-proxy startup file to the system directory
cp ~/kubernetes/kubernetes-starter/target/worker-node/kube-proxy.service /lib/systemd/system/
#Create a kube-proxy service
systemctl enable kube-proxy
#Start the kube-proxy service
service kube-proxy start
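As with the other services, check the journal to confirm kube-proxy came up cleanly:
journalctl -f -u kube-proxy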


7. Create deployment in k8s-master1

#nginx-deployment.yaml configuration file
cat nginx-deployment.yaml
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: nginx
  annotations:
    nginx.ingress.kubernetes.io/secure-backends: "true"
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: registry.cn-hangzhou.aliyuncs.com/jonny/nginx:1.9.14
        ports:
          - containerPort: 80
#nginx-service.yaml configuration file
cat nginx-service.yaml 
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  selector:
    app: nginx
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 20001
  type: NodePort
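#Create the deployment and service from the manifests above
kubectl create -f nginx-deployment.yaml
kubectl create -f nginx-service.yaml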
#View nginx pod
kubectl get pods
NAME                     READY     STATUS    RESTARTS   AGE
nginx-65dbdf6899-z8cp5   1/1       Running   0          2m
kubectl get service
NAME            TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes      ClusterIP   10.68.0.1       <none>        443/TCP        2h
nginx-service   NodePort    10.68.169.183   <none>        80:20001/TCP   2m
#Verify nginx
curl -I http://192.168.1.109:20001/
HTTP/1.1 200 OK
Server: nginx/1.9.14
Date: Thu, 18 Apr 2019 09:04:03 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Wed, 21 Sep 2016 08:11:20 GMT
Connection: keep-alive
ETag: "57e240a8-264"
Accept-Ranges: bytes


6. Add a kubectl remote client

Add a kubectl client on the nginx server
#Get the files from k8s-master1
rsync -av 192.168.1.107:/usr/local/sbin/kubernetes-bins/kubectl /usr/bin/kubectl
rsync -av 192.168.1.107:/etc/kubernetes/ca/admin/ /root/kubectl/ca/
rsync -av 192.168.1.107:/etc/kubernetes/ca/ca.pem /root/kubectl/ca/
cd /root/kubectl/ca/
kubectl config set-cluster kubernetes --server=https://192.168.1.55:6443 --certificate-authority=ca.pem
#Set the cluster-admin user's certificate authentication fields in the users section
kubectl config set-credentials cluster-admin --client-key=admin-key.pem --client-certificate=admin.pem
#Create a context named default that ties the cluster and user together
kubectl config set-context default --cluster=kubernetes --user=cluster-admin
#Switch to the default context
kubectl config use-context default
cat /root/.kube/config 
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /root/kubectl/ca/ca.pem
    server: https://192.168.1.55:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: cluster-admin
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: cluster-admin
  user:
    as-user-extra: {}
    client-certificate: /root/kubectl/ca/admin.pem
    client-key: /root/kubectl/ca/admin-key.pem
#Validation
kubectl get node
NAME            STATUS    ROLES     AGE       VERSION
192.168.1.109   Ready     <none>    1h        v1.9.0


7. Simulate shutting down k8s-master1

#Operation at k8s-master1
service kube-apiserver stop
service kube-controller-manager stop
service kube-scheduler stop
#kubectl no longer works locally on k8s-master1
kubectl get node
The connection to the server 192.168.1.107:6443 was refused - did you specify the right host or port?
#Executed on the remote client on the nginx server - unaffected
kubectl get node
NAME            STATUS    ROLES     AGE       VERSION
192.168.1.109   Ready     <none>    48m       v1.9.0
kubectl get pods
NAME                     READY     STATUS    RESTARTS   AGE
nginx-65dbdf6899-z8cp5   1/1       Running   0          42m
#In nginx's log you can see that, with master 192.168.1.107 down, requests are forwarded to the other master, 192.168.1.108
192.168.1.55 192.168.1.108:6443 18/Apr/2019:10:19:19 +0000 200

