k8s-day03

2. 安装部署Kube-Proxy

#所有的运算主机上面进行安装,首先要在运维机器上面进行证书的签发


#在200机器签发证书
[root@gcc-200 ~}$  vi /opt/certs/kube-proxy-csr.json

{
    "CN": "system:kube-proxy",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "shanghai",
            "L": "shanghai",
            "O": "od",
            "OU": "ops"
        }
    ]
}

#生成证书 
[root@gcc-200 /opt/certs}$  cfssl  gencert  -ca=ca.pem  -ca-key=ca-key.pem -config=ca-config.json -profile=client  kube-proxy-csr.json |cfssl-json  -bare kube-proxy-client
2020/06/08 16:14:40 [INFO] generate received request
2020/06/08 16:14:40 [INFO] received CSR
2020/06/08 16:14:40 [INFO] generating key: rsa-2048
...............................


#在21机器操作(所有运算节点)
[root@gcc-21 /opt/kubernetes/server/bin/certs}$  scp -rp gcc-200:/opt/certs/kube-proxy-client-key.pem  ./
[root@gcc-21 /opt/kubernetes/server/bin/certs}$  scp -rp gcc-200:/opt/certs/kube-proxy-client.pem  ./


#配置证书 
#创建配置,分发证书
[root@gcc-21 /opt/kubernetes/server/bin/certs}$  cd ../conf/


#set-cluster — register cluster "myk8s" in kube-proxy.kubeconfig; --embed-certs
# inlines the CA into the kubeconfig so the file is self-contained, and
# --server points at the apiserver VIP/LB (10.0.0.10:7443), not a single master
kubectl config set-cluster myk8s \
  --certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem \
  --embed-certs=true \
  --server=https://10.0.0.10:7443 \
  --kubeconfig=kube-proxy.kubeconfig


#set-credentials — register user "kube-proxy" with the client cert signed above;
# its CN "system:kube-proxy" is what RBAC uses to grant the node-proxier role
kubectl config set-credentials kube-proxy \
  --client-certificate=/opt/kubernetes/server/bin/certs/kube-proxy-client.pem \
  --client-key=/opt/kubernetes/server/bin/certs/kube-proxy-client-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig


#set-context — bind the cluster and user together under one context name
kubectl config set-context myk8s-context \
  --cluster=myk8s \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

#use-context — make that context the default for this kubeconfig file
kubectl config use-context myk8s-context --kubeconfig=kube-proxy.kubeconfig


#22主机
scp -rp gcc-21:/opt/kubernetes/server/bin/conf/kube-proxy.kubeconfig /opt/kubernetes/server/bin/conf/

#ipvs   调度算法  
[root@gcc-21 ~}$  yum install ipvsadm -y

[root@gcc-21 ~}$  vi ipvs.sh 
#!/bin/bash
# Load every IPVS kernel module shipped for the running kernel so kube-proxy
# can run in ipvs mode. Safe to re-run: modprobe on a loaded module is a no-op.
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for mod_file in "$ipvs_mods_dir"/*; do
  [ -e "$mod_file" ] || continue   # glob matched nothing: no ipvs dir for this kernel
  mod=${mod_file##*/}              # basename of the module file
  mod=${mod%%.*}                   # strip .ko / .ko.xz suffix to get the module name
  # only load modules that modinfo can resolve for this kernel
  if /sbin/modinfo -F filename "$mod" &>/dev/null; then
    /sbin/modprobe "$mod"
  fi
done

[root@gcc-21 ~}$  chmod +x ipvs.sh
[root@gcc-21 ~}$  sh ipvs.sh 
[root@gcc-21 ~}$  lsmod | grep  ip_vs
ip_vs_wrr              12697  0 
ip_vs_wlc              12519  0 
ip_vs_sh               12688  0 
ip_vs_sed              12519  0 
ip_vs_rr               12600  0 
ip_vs_pe_sip           12740  0 
nf_conntrack_sip       33860  1 ip_vs_pe_sip
ip_vs_nq               12516  0 
ip_vs_lc               12516  0 
ip_vs_lblcr            12922  0 
ip_vs_lblc             12819  0 
ip_vs_ftp              13079  0 
ip_vs_dh               12688  0 


#编辑启动脚本  

#创建kube-proxy启动脚本
[root@gcc-21 ~}$  vi /opt/kubernetes/server/bin/kube-proxy.sh
#!/bin/sh
# Launch kube-proxy in IPVS mode for node gcc-21.
# NOTE: uses relative paths, so it must be started with cwd set to
# /opt/kubernetes/server/bin (supervisord's `directory=` guarantees this).
./kube-proxy \
  --kubeconfig ./conf/kube-proxy.kubeconfig \
  --hostname-override gcc-21.host.com \
  --cluster-cidr 172.0.0.0/16 \
  --proxy-mode=ipvs \
  --ipvs-scheduler=nq


#检查配置,权限,创建日志目录
[root@gcc-21 ~}$  chmod +x /opt/kubernetes/server/bin/kube-proxy.sh
[root@gcc-21 ~}$  mkdir -p /data/logs/kubernetes/kube-proxy

#创建supervisor配置
[root@gcc-21 ~}$  cat /etc/supervisord.d/kube-proxy.ini
[program:kube-proxy-7-21]                                            ; name matches the group shown by `supervisorctl status` (kube-proxy-7-21)
command=/opt/kubernetes/server/bin/kube-proxy.sh                     ; the program (relative uses PATH, can take args)
numprocs=1                                                           ; number of processes copies to start (def 1)
directory=/opt/kubernetes/server/bin                                 ; directory to cwd to before exec (def no cwd)
autostart=true                                                       ; start at supervisord start (default: true)
autorestart=true                                                     ; restart at unexpected quit (default: true)
startsecs=30                                                         ; number of secs prog must stay running (def. 1)
startretries=3                                                       ; max # of serial start failures (default 3)
exitcodes=0,2                                                        ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT                                                      ; signal used to kill process (default TERM)
stopwaitsecs=10                                                      ; max num secs to wait b4 SIGKILL (default 10)
user=root                                                            ; setuid to this UNIX account to run the program
redirect_stderr=true                                                 ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log     ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB                                         ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4                                             ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB                                          ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false                                          ; emit events on stdout writes (default false)


#启动并检查
[root@gcc-21 /opt/kubernetes/server/bin}$  supervisorctl update
kube-proxy-7-21: added process group

[root@gcc-21 /opt/kubernetes/server/bin}$  supervisorctl status
etcd-server-7-21                 RUNNING   pid 3883, uptime 1 day, 6:41:38
kube-apiserver-7-21              RUNNING   pid 3884, uptime 1 day, 6:41:38
kube-controller-manager-7-21     RUNNING   pid 3880, uptime 1 day, 6:41:38
kube-kubelet-7-21                RUNNING   pid 16162, uptime 16:47:33
kube-proxy-7-21                  RUNNING   pid 14063, uptime 0:00:30
kube-scheduler-7-21              RUNNING   pid 3882, uptime 1 day, 6:41:38
[root@gcc-22 /opt/kubernetes/server/bin}$  supervisorctl  status
etcd-server-7-22                 RUNNING   pid 3899, uptime 1 day, 6:48:32
kube-apiserver-7-22              RUNNING   pid 3897, uptime 1 day, 6:48:32
kube-controller-manager-7-22     RUNNING   pid 3895, uptime 1 day, 6:48:32
kube-kubelet-7-22                RUNNING   pid 16150, uptime 16:54:14
kube-proxy-7-22                  RUNNING   pid 14832, uptime 0:00:36
kube-scheduler-7-22              RUNNING   pid 3896, uptime 1 day, 6:48:32
[root@gcc-22 /opt/kubernetes/server/bin}$  ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.0.1:443 nq
  -> 10.4.7.21:6443               Masq    1      0          0         
  -> 10.4.7.22:6443               Masq    1      0          0  


[root@gcc-22 /opt/kubernetes/server/bin}$  kubectl get  service
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   192.168.0.1   <none>        443/TCP   3d4h
[root@gcc-22 /opt/kubernetes/server/bin}$  kubectl get  svc    
NAME         TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   192.168.0.1   <none>        443/TCP   3d4h

3. 验证K8S集群

#在任意一个节点,创建一个资源配置清单  
[root@gcc-21 ~}$  vi /root/nginx-ds.yaml
# Smoke-test manifest: a DaemonSet runs one nginx pod per schedulable node,
# which verifies kubelet + kube-proxy on every node at once.
apiVersion: extensions/v1beta1   # deprecated API group, removed in k8s 1.16 — valid for this 2020-era cluster (see `daemonset.extensions/... created` in the transcript)
kind: DaemonSet
metadata:
  name: nginx-ds
spec:
  # NOTE(review): no spec.selector — presumably relying on extensions/v1beta1
  # defaulting it from the template labels; apps/v1 would require it explicitly.
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: harbor.od.com/public/nginx:v1.18.0-curl   # pulled from the private harbor registry
        ports:
        - containerPort: 80

#应用资源配置清单

[root@gcc-22 ~}$  kubectl create -f nginx-ds.yaml
daemonset.extensions/nginx-ds created

[root@gcc-22 ~}$  kubectl get pods
NAME             READY   STATUS    RESTARTS   AGE
nginx-ds-9ss9d   1/1     Running   0          52s
nginx-ds-knztg   1/1     Running   0          52s
Copyright © 高程程 all right reserved,powered by Gitbook修订于: 2021-05-18 21:14:48

results matching ""

    No results matching ""