k8s-day05
2. K8S的服务暴露
K8S的服务发现: DNS 实现的是集群内部的服务发现
使用的NodePort 型的服务
注意: 这种场景下 kube-proxy 无法使用 ipvs 模型,只能使用 iptables 模型
使用Ingress资源
Ingress只能暴露7层应用 http和https协议
基于域名的URL地址
将集群外部请求的流量,转发到集群内部 ,实现服务暴露
nginx(调度流量)+一段go脚本(动态识别yaml)
常用的Ingress
Ingress-nginx
Haproxy
Traefik
https://github.com/containous/traefik
[root@gcc-200 ~]
[root@gcc-200 ~]
traefik v1.7.2-alpine add5fac61ae5 20 months ago 72.4MB
[root@gcc-200 ~]
[root@gcc-200 ~]
https://github.com/containous/traefik/tree/v1.7/examples/k8s
[root@gcc-200 ~]
[root@gcc-200 /data/k8s-yaml/traefik]
# rbac.yaml — ServiceAccount plus cluster-wide read-only permissions for the
# Traefik ingress controller: it must watch services/endpoints/secrets and
# ingresses to build its routing table.
# (Indentation restored — it was lost when the manifest was pasted into the notes.)
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: traefik-ingress-controller
rules:
- apiGroups:
  - ""
  resources:
  - services
  - endpoints
  - secrets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
---
# Bind the ClusterRole above to the controller's ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system
[root@gcc-200 /data/k8s-yaml/traefik]
# ds.yaml — run one Traefik v1.7 pod per node as a DaemonSet.
# (Indentation restored — it was lost when the manifest was pasted into the notes.)
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: traefik-ingress
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress
spec:
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress
        name: traefik-ingress
    spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: harbor.od.com/public/traefik:v1.7.2
        name: traefik-ingress
        ports:
        - name: controller
          containerPort: 80
          hostPort: 81          # node port 81 receives traffic from the front nginx proxy
        - name: admin-web
          containerPort: 8080   # Traefik web UI / API
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE  # allow binding low ports without full root caps
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
        - --insecureskipverify=true
        - --kubernetes.endpoint=https://10.0.0.10:7443  # apiserver VIP
        - --accesslog
        - --accesslog.filepath=/var/log/traefik_access.log
        - --traefiklog
        - --traefiklog.filepath=/var/log/traefik.log
        - --metrics.prometheus
[root@gcc-200 /data/k8s-yaml/traefik]
# svc.yaml — in-cluster Service fronting the Traefik pods
# (port 80 = data plane, 8080 = admin web UI).
# (Indentation restored — it was lost when the manifest was pasted into the notes.)
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress
  ports:
  - protocol: TCP
    port: 80
    name: controller
  - protocol: TCP
    port: 8080
    name: admin-web
[root@gcc-200 /data/k8s-yaml/traefik]
# ingress.yaml — expose the Traefik admin UI at traefik.od.com via Traefik itself.
# (Indentation restored — it was lost when the manifest was pasted into the notes.)
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: traefik-web-ui
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: traefik.od.com
    http:
      paths:
      - path: /
        backend:
          serviceName: traefik-ingress-service
          servicePort: 8080
[root@gcc-200 /data/k8s-yaml/traefik]
total 16
-rw-r--r-- 1 root root 1096 Jun 10 15:14 ds.yaml
-rw-r--r-- 1 root root 327 Jun 10 15:16 ingress.yaml
-rw-r--r-- 1 root root 800 Jun 10 15:10 rbac.yaml
-rw-r--r-- 1 root root 269 Jun 10 15:15 svc.yaml
[root@gcc-22 ~]
serviceaccount/traefik-ingress-controller created
clusterrole.rbac.authorization.k8s.io/traefik-ingress-controller created
clusterrolebinding.rbac.authorization.k8s.io/traefik-ingress-controller created
[root@gcc-22 ~]
daemonset.extensions/traefik-ingress created
[root@gcc-22 ~]
service/traefik-ingress-service created
[root@gcc-22 ~]
ingress.extensions/traefik-web-ui created
[root@gcc-22 ~]
[root@gcc-22 ~]
NAME READY STATUS RESTARTS AGE
coredns-6b6c4f9648-7t5tm 1/1 Running 0 21h
traefik-ingress-t7d5x 1/1 Running 0 3m14s
traefik-ingress-wc7c6 1/1 Running 0 3m14s
[root@gcc-21 ~]
[root@gcc-11 ~]
$ORIGIN od.com.
$TTL 600 ; 10 minutes
@ IN SOA dns.od.com. dnsadmin.od.com. (
2019060404 ; serial
10800 ; refresh (3 hours)
900 ; retry (15 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
NS dns.od.com.
$TTL 60 ; 1 minute
dns A 10.0.0.11
harbor A 10.0.0.200
k8s-yaml A 10.0.0.200
traefik A 10.0.0.10
[root@gcc-11 ~]
[root@gcc-11 ~]
10.0.0.10
[root@gcc-11 ~]
# Front nginx on gcc-11: forward all *.od.com HTTP traffic to the Traefik
# DaemonSet listening on hostPort 81 of each node.
upstream default_backend_traefik {
    server 10.0.0.21:81 max_fails=3 fail_timeout=10s;
    server 10.0.0.22:81 max_fails=3 fail_timeout=10s;
}
server {
    server_name *.od.com;
    location / {
        proxy_pass http://default_backend_traefik;
        proxy_set_header Host $http_host;
        proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
    }
}
[root@gcc-11 ~]
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
[root@gcc-11 ~]
3. Dashboard仪表盘
基于RBAC认证的一个GUI资源管理插件
常用版本: v1.8.3 v1.10.1
K8S如何基于RBAC授权 鉴权
https://github.com/kubernetes/dashboard
[root@gcc-200 /data/k8s-yaml/traefik]
[root@gcc-200 /data/k8s-yaml/traefik]
k8scn/kubernetes-dashboard-amd64 v1.8.3 fcac9aa03fd6 2 years ago 102MB
[root@gcc-200 /data/k8s-yaml/traefik]
[root@gcc-200 /data/k8s-yaml/traefik]
https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dashboard
[root@gcc-200 /data/k8s-yaml]
[root@gcc-200 /data/k8s-yaml/dashboard]
# rbac.yaml — ServiceAccount for the Dashboard, bound to the built-in
# cluster-admin ClusterRole (full cluster access; demoted later in the notes
# by the minimal Role variant).
# (Indentation restored — it was lost when the manifest was pasted into the notes.)
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard-admin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-admin
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard-admin
  namespace: kube-system
[root@gcc-200 /data/k8s-yaml/dashboard]
# dp.yaml — Dashboard v1.8.3 Deployment; serves HTTPS on 8443 with
# auto-generated certificates.
# (Indentation restored — it was lost when the manifest was pasted into the notes.)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      priorityClassName: system-cluster-critical
      containers:
      - name: kubernetes-dashboard
        image: harbor.od.com/public/dashboard:v1.8.3
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 50m
            memory: 100Mi
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
        - --auto-generate-certificates
        volumeMounts:
        - name: tmp-volume
          mountPath: /tmp
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard-admin
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
[root@gcc-200 /data/k8s-yaml/dashboard]
# svc.yaml — Service for the Dashboard; cluster port 443 maps to the pod's 8443.
# (Indentation restored — it was lost when the manifest was pasted into the notes.)
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 443
    targetPort: 8443
[root@gcc-200 /data/k8s-yaml/dashboard]
# ingress.yaml — route dashboard.od.com through Traefik to the Dashboard
# Service (port 443; Traefik tolerates the self-signed backend cert because
# the DaemonSet runs with --insecureskipverify=true).
# (Indentation restored — it was lost when the manifest was pasted into the notes.)
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: dashboard.od.com
    http:
      paths:
      - backend:
          serviceName: kubernetes-dashboard
          servicePort: 443
[root@gcc-200 /data/k8s-yaml/dashboard]
total 16
-rw-r--r-- 1 root root 1381 Jun 10 16:41 dp.yaml
-rw-r--r-- 1 root root 318 Jun 10 16:42 ingress.yaml
-rw-r--r-- 1 root root 610 Jun 10 16:39 rbac.yaml
-rw-r--r-- 1 root root 322 Jun 10 16:41 svc.yaml
[root@gcc-200 /data/k8s-yaml/dashboard]
[root@gcc-21 ~]
serviceaccount/kubernetes-dashboard-admin created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-admin created
[root@gcc-21 ~]
service/kubernetes-dashboard created
[root@gcc-21 ~]
deployment.apps/kubernetes-dashboard created
[root@gcc-21 ~]
ingress.extensions/kubernetes-dashboard created
[root@gcc-21 ~]
NAME READY STATUS RESTARTS AGE
coredns-6b6c4f9648-7t5tm 1/1 Running 0 22h
kubernetes-dashboard-76dcdb4677-tfgc7 1/1 Running 0 44s
traefik-ingress-t7d5x 1/1 Running 0 60m
traefik-ingress-wc7c6 1/1 Running 0 60m
[root@gcc-11 ~]
$ORIGIN od.com.
$TTL 600 ; 10 minutes
@ IN SOA dns.od.com. dnsadmin.od.com. (
2019060405 ; serial
10800 ; refresh (3 hours)
900 ; retry (15 minutes)
604800 ; expire (1 week)
86400 ; minimum (1 day)
)
NS dns.od.com.
$TTL 60 ; 1 minute
dns A 10.0.0.11
harbor A 10.0.0.200
k8s-yaml A 10.0.0.200
traefik A 10.0.0.10
dashboard A 10.0.0.10
[root@gcc-11 ~]
[root@gcc-11 ~]
10.0.0.10
dashboard.od.com
4. K8S的RBAC鉴权
ABAC 基于属性的访问控制
RBAC是基于角色的访问控制
自1.6版本以后都是默认使用基于角色认证
支持权限动态调整 对集群所有的资源都实现了权限覆盖
读 get
写 write
更新 update
列出 list
监视 watch
[root@gcc-200 /opt/certs]
Generating RSA private key, 2048 bit long modulus
.................+++
....+++
e is 65537 (0x10001)
[root@gcc-200 /opt/certs]
[root@gcc-200 /opt/certs]
-rw-r--r-- 1 root root 1009 Jun 10 17:09 dashboard.od.com.csr
-rw------- 1 root root 1675 Jun 10 17:07 dashboard.od.com.key
[root@gcc-200 /opt/certs]
Signature ok
subject=/CN=dashboard.od.com/C=CN/ST=SH/L=Shanghai/O=OldboyEdu/OU=ops
Getting CA Private Key
[root@gcc-200 /opt/certs]
-rw-r--r-- 1 root root 1200 Jun 10 17:11 dashboard.od.com.crt
-rw-r--r-- 1 root root 1009 Jun 10 17:09 dashboard.od.com.csr
-rw------- 1 root root 1675 Jun 10 17:07 dashboard.od.com.key
[root@gcc-11 ~]
[root@gcc-11 /etc/nginx/certs]
[root@gcc-11 /etc/nginx/certs]
[root@gcc-11 /etc/nginx/certs]
total 8
-rw-r--r-- 1 root root 1200 Jun 10 17:11 dashboard.od.com.crt
-rw------- 1 root root 1675 Jun 10 17:07 dashboard.od.com.key
[root@gcc-11 /etc/nginx]
# Front nginx on gcc-11: force dashboard.od.com onto HTTPS, terminate TLS
# with the self-signed cert, then proxy plain HTTP to the Traefik backends.
server {
    listen 80;
    server_name dashboard.od.com;
    rewrite ^(.*)$ https://${server_name}$1 permanent;
}
server {
    listen 443 ssl;
    server_name dashboard.od.com;

    ssl_certificate "certs/dashboard.od.com.crt";
    ssl_certificate_key "certs/dashboard.od.com.key";
    ssl_session_cache shared:SSL:1m;
    ssl_session_timeout 10m;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_prefer_server_ciphers on;

    location / {
        proxy_pass http://default_backend_traefik;
        proxy_set_header Host $http_host;
        proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
    }
}
[root@gcc-11 /etc/nginx]
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
[root@gcc-11 /etc/nginx]
如果重启失败,建议修改一下od.com.zone
[root@gcc-21 ~]
NAME TYPE DATA AGE
coredns-token-bld4n kubernetes.io/service-account-token 3 22h
default-token-4s42p kubernetes.io/service-account-token 3 5d1h
kubernetes-dashboard-admin-token-jnvnh kubernetes.io/service-account-token 3 36m
kubernetes-dashboard-key-holder Opaque 2 35m
traefik-ingress-controller-token-nvpht kubernetes.io/service-account-token 3 122m
[root@gcc-21 ~]
Name: kubernetes-dashboard-admin-token-jnvnh
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: kubernetes-dashboard-admin
kubernetes.io/service-account.uid: 0b60103b-5cd9-499c-afb7-81ede7649b69
Type: kubernetes.io/service-account-token
Data
====
ca.crt: 1350 bytes
namespace: 11 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbi10b2tlbi1qbnZuaCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjBiNjAxMDNiLTVjZDktNDk5Yy1hZmI3LTgxZWRlNzY0OWI2OSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTprdWJlcm5ldGVzLWRhc2hib2FyZC1hZG1pbiJ9.qhGSwpVwP-UzmSNSRyNroVA0TDqmHPSZBKh9M-G8u7_hAySFxbx1MDCX-2Vc0C56BVwOspSDZwRyEsUiwUK1oyMa4JzJfAWsaAoz3lUk_Ha559HUN8d91tl3OqO6vjelLJZ99nndDsQkEOp3bamtYeNQpCraFEFidVnqD5wOeedWOjFSH8cqBz4hxKNeE9ZvpYGnxBc3BN9QSAsFM8xleY6A59Ffao8XGRg1eiqJcnmXAe-lOa8N6CafZwcVhZZZ7wA67fLpa_t2E8ZrpEKvK4i_WEr1kq9Kcx_opFIz3s2L-pOc0CSISHHeJs6JUb5o4kv5CWVLRnWJYAc1wkmuZg
[root@gcc-21 ~]
https://dashboard.od.com
[root@gcc-200 /data/k8s-yaml/dashboard]
# rbac-minimal.yaml — least-privilege alternative to the cluster-admin binding:
# a namespaced Role granting the Dashboard only the secrets/configmaps/heapster
# access it needs.
# (Indentation restored — it was lost when the manifest was pasted into the notes.)
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard
  namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Manage its own TLS material and key-holder secret.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Persist UI settings.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Read metrics through the heapster service proxy.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
[root@gcc-21 ~]
serviceaccount/kubernetes-dashboard created
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
[root@gcc-21 ~]
Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
deployment.apps/kubernetes-dashboard configured
5. 部署Heapster
Dashboard的监控 收集者 CPU 内存
[root@gcc-200 /data/k8s-yaml/dashboard]
[root@gcc-200 /data/k8s-yaml/dashboard]
quay.io/bitnami/heapster 1.5.4 c359b95ad38b 15 months ago 136MB
[root@gcc-200 /data/k8s-yaml/dashboard]
[root@gcc-200 /data/k8s-yaml/dashboard]
[root@gcc-200 /data/k8s-yaml/dashboard]
[root@gcc-200 /data/k8s-yaml/dashboard]
[root@gcc-200 /data/k8s-yaml/dashboard/heapster]
# rbac.yaml — ServiceAccount for Heapster bound to the built-in
# system:heapster ClusterRole.
# (Indentation restored — it was lost when the manifest was pasted into the notes.)
apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: heapster
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:heapster
subjects:
- kind: ServiceAccount
  name: heapster
  namespace: kube-system
[root@gcc-200 /data/k8s-yaml/dashboard/heapster]
# dp.yaml — single-replica Heapster Deployment scraping metrics from the
# apiserver (kubernetes.default) for the Dashboard's CPU/memory graphs.
# (Indentation restored — it was lost when the manifest was pasted into the notes.)
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: heapster
    spec:
      serviceAccountName: heapster
      containers:
      - name: heapster
        image: harbor.od.com/public/heapster:v1.5.4
        imagePullPolicy: IfNotPresent
        command:
        - /opt/bitnami/heapster/bin/heapster
        - --source=kubernetes:https://kubernetes.default
[root@gcc-200 /data/k8s-yaml/dashboard/heapster]
# svc.yaml — Service 'heapster' on port 80 -> pod port 8082; the Dashboard's
# minimal Role proxies to this service by name.
# (Indentation restored — it was lost when the manifest was pasted into the notes.)
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: Heapster
  name: heapster
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082
  selector:
    k8s-app: heapster
[root@gcc-200 /data/k8s-yaml/heapster]
total 12
-rw-r--r-- 1 root root 506 Jun 10 18:20 dp.yaml
-rw-r--r-- 1 root root 356 Jun 10 18:19 rbac.yaml
-rw-r--r-- 1 root root 453 Jun 10 18:20 svc.yaml
[root@gcc-21 ~]
serviceaccount/heapster created
clusterrolebinding.rbac.authorization.k8s.io/heapster created
[root@gcc-21 ~]
deployment.extensions/heapster created
[root@gcc-21 ~]
service/heapster created
[root@gcc-21 ~]
NAME READY STATUS RESTARTS AGE
coredns-6b6c4f9648-7t5tm 1/1 Running 0 23h
heapster-b5b9f794-dhf52 0/1 ContainerCreating 0 25s
kubernetes-dashboard-6d58ccc9fc-krjfw 1/1 Running 0 10m
traefik-ingress-t7d5x 1/1 Running 0 157m
traefik-ingress-wc7c6 1/1 Running 0 157m
[root@gcc-21 ~]
NAME READY STATUS RESTARTS AGE
coredns-6b6c4f9648-7t5tm 1/1 Running 0 23h
heapster-b5b9f794-dhf52 1/1 Running 0 29s
kubernetes-dashboard-6d58ccc9fc-krjfw 1/1 Running 0 10m
traefik-ingress-t7d5x 1/1 Running 0 158m
traefik-ingress-wc7c6 1/1 Running 0 158m