k8s-day03
2. 安装部署Kube-Proxy
[root@gcc-200 ~}$ vi /opt/certs/kube-proxy-csr.json
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "shanghai",
"L": "shanghai",
"O": "od",
"OU": "ops"
}
]
}
[root@gcc-200 /opt/certs}$ cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json |cfssl-json -bare kube-proxy-client
2020/06/08 16:14:40 [INFO] generate received request
2020/06/08 16:14:40 [INFO] received CSR
2020/06/08 16:14:40 [INFO] generating key: rsa-2048
...............................
[root@gcc-21 /opt/kubernetes/server/bin/certs}$ scp -rp gcc-200:/opt/certs/kube-proxy-client-key.pem ./
[root@gcc-21 /opt/kubernetes/server/bin/certs}$ scp -rp gcc-200:/opt/certs/kube-proxy-client.pem ./
[root@gcc-21 /opt/kubernetes/server/bin/certs}$ cd ../conf/
# Build kube-proxy.kubeconfig (run from /opt/kubernetes/server/bin/conf).
# Step 1: cluster entry — embed the CA cert and point at the apiserver VIP (10.0.0.10:7443).
kubectl config set-cluster myk8s \
--certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem \
--embed-certs=true \
--server=https://10.0.0.10:7443 \
--kubeconfig=kube-proxy.kubeconfig
# Step 2: user entry — embed the kube-proxy client cert/key issued by the same CA.
kubectl config set-credentials kube-proxy \
--client-certificate=/opt/kubernetes/server/bin/certs/kube-proxy-client.pem \
--client-key=/opt/kubernetes/server/bin/certs/kube-proxy-client-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
# Step 3: context that binds cluster "myk8s" to user "kube-proxy".
kubectl config set-context myk8s-context \
--cluster=myk8s \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
# Step 4: make that context the active one inside this kubeconfig file.
kubectl config use-context myk8s-context --kubeconfig=kube-proxy.kubeconfig
[root@gcc-22 ~}$ scp -rp gcc-21:/opt/kubernetes/server/bin/conf/kube-proxy.kubeconfig /opt/kubernetes/server/bin/conf/
[root@gcc-21 ~}$ yum install ipvsadm -y
[root@gcc-21 ~}$ vi ipvs.sh
#!/bin/bash
# Load every IPVS scheduler/helper module shipped with the running kernel so
# kube-proxy can run with --proxy-mode=ipvs.
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
# Iterate the module files via a glob instead of parsing `ls` output.
for mod_path in "$ipvs_mods_dir"/*; do
  [ -e "$mod_path" ] || continue          # directory empty / missing: nothing to load
  mod=${mod_path##*/}                     # basename, e.g. ip_vs_rr.ko.xz
  mod=${mod%%.*}                          # strip extensions -> ip_vs_rr
  # Only modprobe modules that modinfo can actually resolve.
  if /sbin/modinfo -F filename "$mod" &>/dev/null; then
    /sbin/modprobe "$mod"
  fi
done
[root@gcc-21 ~}$ chmod +x ipvs.sh
[root@gcc-21 ~}$ sh ipvs.sh
[root@gcc-21 ~}$ lsmod | grep ip_vs
ip_vs_wrr 12697 0
ip_vs_wlc 12519 0
ip_vs_sh 12688 0
ip_vs_sed 12519 0
ip_vs_rr 12600 0
ip_vs_pe_sip 12740 0
nf_conntrack_sip 33860 1 ip_vs_pe_sip
ip_vs_nq 12516 0
ip_vs_lc 12516 0
ip_vs_lblcr 12922 0
ip_vs_lblc 12819 0
ip_vs_ftp 13079 0
ip_vs_dh 12688 0
[root@gcc-21 ~}$ vi /opt/kubernetes/server/bin/kube-proxy.sh
#!/bin/sh
# Start kube-proxy in IPVS mode; supervisord runs this with cwd set to
# /opt/kubernetes/server/bin, so the relative ./kube-proxy and ./conf paths resolve.
# --cluster-cidr:      pod network CIDR used to distinguish pod vs. external traffic.
#                      NOTE(review): 172.0.0.0/16 — confirm against the kubelet/flannel
#                      pod CIDR; this tutorial series usually uses 172.7.0.0/16.
# --hostname-override: must match the node name the kubelet registered (gcc-21.host.com).
# --ipvs-scheduler=nq: "never queue" IPVS scheduling algorithm (modules loaded by ipvs.sh).
./kube-proxy \
--cluster-cidr 172.0.0.0/16 \
--hostname-override gcc-21.host.com \
--proxy-mode=ipvs \
--ipvs-scheduler=nq \
--kubeconfig ./conf/kube-proxy.kubeconfig
[root@gcc-21 ~}$ chmod +x /opt/kubernetes/server/bin/kube-proxy.sh
[root@gcc-21 ~}$ mkdir -p /data/logs/kubernetes/kube-proxy
[root@gcc-21 ~}$ cat /etc/supervisord.d/kube-proxy.ini
[program:kube-proxy-7-21]
command=/opt/kubernetes/server/bin/kube-proxy.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; retstart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max
stdout_logfile_backups=4 ;
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
[root@gcc-21 /opt/kubernetes/server/bin}$ supervisorctl update
kube-proxy-7-21: added process group
[root@gcc-21 /opt/kubernetes/server/bin}$ supervisorctl status
etcd-server-7-21 RUNNING pid 3883, uptime 1 day, 6:41:38
kube-apiserver-7-21 RUNNING pid 3884, uptime 1 day, 6:41:38
kube-controller-manager-7-21 RUNNING pid 3880, uptime 1 day, 6:41:38
kube-kubelet-7-21 RUNNING pid 16162, uptime 16:47:33
kube-proxy-7-21 RUNNING pid 14063, uptime 0:00:30
kube-scheduler-7-21 RUNNING pid 3882, uptime 1 day, 6:41:38
[root@gcc-22 /opt/kubernetes/server/bin}$ supervisorctl status
etcd-server-7-22 RUNNING pid 3899, uptime 1 day, 6:48:32
kube-apiserver-7-22 RUNNING pid 3897, uptime 1 day, 6:48:32
kube-controller-manager-7-22 RUNNING pid 3895, uptime 1 day, 6:48:32
kube-kubelet-7-22 RUNNING pid 16150, uptime 16:54:14
kube-proxy-7-22 RUNNING pid 14832, uptime 0:00:36
kube-scheduler-7-22 RUNNING pid 3896, uptime 1 day, 6:48:32
[root@gcc-22 /opt/kubernetes/server/bin}$ ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.0.1:443 nq
-> 10.4.7.21:6443 Masq 1 0 0
-> 10.4.7.22:6443 Masq 1 0 0
[root@gcc-22 /opt/kubernetes/server/bin}$ kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 192.168.0.1 <none> 443/TCP 3d4h
[root@gcc-22 /opt/kubernetes/server/bin}$ kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 192.168.0.1 <none> 443/TCP 3d4h
3. 验证K8S集群
[root@gcc-21 ~}$ vi /root/nginx-ds.yaml
# DaemonSet that runs one nginx pod per node — used to verify the cluster works
# end to end (scheduling, kube-proxy networking, image pull from local harbor).
# Indentation restored: YAML structure is whitespace-significant and was lost
# in the original transcript.
# NOTE(review): extensions/v1beta1 is removed in k8s >= 1.16; on newer clusters
# use apps/v1 and add a spec.selector matching the pod template labels.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: harbor.od.com/public/nginx:v1.18.0-curl
        ports:
        - containerPort: 80
[root@gcc-22 ~}$ kubectl create -f nginx-ds.yaml
daemonset.extensions/nginx-ds created
[root@gcc-22 ~}$ kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-ds-9ss9d 1/1 Running 0 52s
nginx-ds-knztg 1/1 Running 0 52s