CKA认证模块②-K8S企业运维和落地实战-2
K8S常见的存储方案及具体应用场景分析
k8s存储-empty
emptyDir类型的Volume是在Pod分配到Node上时被创建，Kubernetes会在Node上自动分配一个目录，因此无需指定宿主机Node上对应的目录文件。 这个目录的初始内容…
CKA认证模块②-K8S企业运维和落地实战-2
K8S常见的存储方案及具体应用场景分析
k8s存储-empty
emptyDir类型的Volume是在Pod分配到Node上时被创建Kubernetes会在Node上自动分配一个目录因此无需指定宿主机Node上对应的目录文件。 这个目录的初始内容为空当Pod从Node上移除时emptyDir中的数据会被永久删除。emptyDir Volume主要用于某些应用程序无需永久保存的临时目录多个容器的共享目录等。
[rootk8s-master01 ~]# mkdir storage
[rootk8s-master01 ~]# cd storage/
# 创建工作目录[rootk8s-master01 storage]# kubectl explain pod.spec.volumes.
...
# 查看支持哪些存储卷[rootk8s-master01 storage]# cat emptydir.yaml
apiVersion: v1
kind: Pod
metadata:name: pod-empty
spec:containers:- name: container-emptyimage: nginximagePullPolicy: IfNotPresentvolumeMounts:- name: cache-volumemountPath: /cachevolumes:- name: cache-volumeemptyDir: {}
[rootk8s-master01 storage]# kubectl apply -f emptydir.yaml
pod/pod-empty created
[rootk8s-master01 storage]# kubectl get pods -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-empty 1/1 Running 0 4s 10.244.58.253 k8s-node02 none none
# 创建pod
[rootk8s-master01 storage]# kubectl exec -it pod-empty -c container-empty -- /bin/bash
rootpod-empty:/# cd /cache/
rootpod-empty:/cache# touch 123
rootpod-empty:/cache# touch aa
rootpod-empty:/cache# ls
123 aa
# 创建empty挂载文件夹下文件
rootpod-empty:/cache# exit
exit
[rootk8s-master01 storage]# kubectl get pods -oyaml |grep uiduid: 8ce8fecc-dc86-4c92-8876-69ac034b6972
[rootk8s-master01 storage]# kubectl get pods -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-empty 1/1 Running 0 2m43s 10.244.58.253 k8s-node02 none none
# 查看pod的uid,并查看调度节点[rootk8s-node02 ~]# yum -y install tree
[rootk8s-node02 ~]# tree /var/lib/kubelet/pods/8ce8fecc-dc86-4c92-8876-69ac034b6972
...
[rootk8s-node02 ~]# cd /var/lib/kubelet/pods/8ce8fecc-dc86-4c92-8876-69ac034b6972/
[rootk8s-node02 8ce8fecc-dc86-4c92-8876-69ac034b6972]# cd volumes/kubernetes.io~empty-dir/cache-volume/
[rootk8s-node02 cache-volume]# ls
123 aa
[rootk8s-node02 cache-volume]# pwd
/var/lib/kubelet/pods/8ce8fecc-dc86-4c92-8876-69ac034b6972/volumes/kubernetes.io~empty-dir/cache-volume
# 已经保存到临时文件夹下[rootk8s-master01 storage]# kubectl delete pod pod-empty
pod pod-empty deleted
# 删除pod[rootk8s-node02 cache-volume]# ls
[rootk8s-node02 cache-volume]# cd ..
cd: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
# 因为整个容器目录都被删除,emptyDir目录自然也被删除,所以不建议使用k8s存储-hostPath
hostpath存储卷缺点
单节点
pod删除之后重新创建必须调度到同一个node节点数据才不会丢失
[rootk8s-node01 images]# ctr -n k8s.io images import tomcat.tar.gz.0
[rootk8s-node02 images]# ctr -n k8s.io images import tomcat.tar.gz.0
# 因为之前上传过同名镜像包,名字不能一样后面加了.0[rootk8s-master01 storage]# kubectl explain pod.spec.volumes.hostPath.type
# 查看支持哪些类型 https://kubernetes.io/docs/concepts/storage/volumes#hostpath[rootk8s-master01 storage]# kubectl explain pod.spec |grep -i nodename
# 查看帮助[rootk8s-master01 storage]# cat hostpath.yaml
apiVersion: v1
kind: Pod
metadata:name: pod-hostpathnamespace: default
spec:nodeName: k8s-node01containers:- name: test-nginximage: nginximagePullPolicy: IfNotPresentvolumeMounts: - name: test-volumemountPath: /test-nginx- name: test-tomcatimage: tomcatimagePullPolicy: IfNotPresentvolumeMounts: - name: test-volumemountPath: /test-tomcatvolumes:- name: test-volumehostPath: path: /data1type: DirectoryOrCreate
[rootk8s-master01 storage]# kubectl apply -f hostpath.yaml
pod/pod-hostpath created
[rootk8s-master01 storage]# kubectl exec -it pod-hostpath -c test-nginx -- /bin/bash
rootpod-hostpath:/# cd test-nginx/
rootpod-hostpath:/test-nginx# touch nginx
rootpod-hostpath:/test-nginx# ls
nginx
rootpod-hostpath:/test-nginx# exit
exit
[rootk8s-master01 storage]# kubectl exec -it pod-hostpath -c test-tomcat -- /bin/bash
rootpod-hostpath:/usr/local/tomcat# cd /test-tomcat/
rootpod-hostpath:/test-tomcat# touch tomcat
rootpod-hostpath:/test-tomcat# ls
nginx tomcat
rootpod-hostpath:/test-tomcat# exit
exit
[rootk8s-node01 ~]# ls /data1/
nginx tomcat
# 测试是否为同一卷[rootk8s-master01 storage]# kubectl delete -f hostpath.yaml
pod pod-hostpath deleted
# 删除pod[rootk8s-node01 ~]# ll /data1/
total 0
-rw-r--r-- 1 root root 0 Jul 3 16:30 nginx
-rw-r--r-- 1 root root 0 Jul 3 16:31 tomcat
# 数据仍然存在k8s存储-NFS
注意: NFS服务器配置白名单是node节点网段而不是pod网段
node节点网段,例如我的环境应配置: 192.168.1.0/24
yum -y install nfs-utils
systemctl enable --now nfs
# 所有节点安装nfs[rootk8s-master01 storage]# mkdir -pv /data/volume
mkdir: created directory ‘/data’
mkdir: created directory ‘/data/volume’
# v参数是展示创建了哪些文件夹
[rootk8s-master01 storage]# cat /etc/exports
/data/volume *(rw,no_root_squash)
[rootk8s-master01 storage]# systemctl restart nfs
[rootk8s-master01 storage]# showmount -e localhost
Export list for localhost:
/data/volume *
# 配置nfs服务
[rootk8s-node01 ~]# mount -t nfs k8s-master01:/data/volume /mnt
[rootk8s-node01 ~]# df -Th |tail -n1
k8s-master01:/data/volume nfs4 38G 11G 27G 29% /mnt
[rootk8s-node01 ~]# umount /mnt
# 测试nfs服务[rootk8s-master01 storage]# cat nfs.yaml
apiVersion: apps/v1
kind: Deployment
metadata:name: test-nfs-volumenamespace: default
spec:replicas: 3selector: matchLabels:storage: nfstemplate:metadata:labels:storage: nfsspec:containers:- name: test-nfsimage: xianchao/nginx:v1imagePullPolicy: IfNotPresentports:- containerPort: 80protocol: TCPvolumeMounts:- name: nfs-volumesmountPath: /usr/share/nginx/htmlvolumes: - name: nfs-volumesnfs:server: 192.168.1.181# 注意nfs服务器的ip一定不能写错path: /data/volume
[rootk8s-master01 storage]# kubectl apply -f nfs.yaml
deployment.apps/test-nfs-volume created
# 创建deployment
[root@k8s-master01 storage]# echo nfs-test > /data/volume/index.html
[rootk8s-master01 storage]# kubectl get pods -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
test-nfs-volume-6656574b86-66m2h 1/1 Running 0 5m37s 10.244.85.233 k8s-node01 none none
test-nfs-volume-6656574b86-6nxgw 1/1 Running 0 5m37s 10.244.85.232 k8s-node01 none none
test-nfs-volume-6656574b86-cvqmr 1/1 Running 0 5m37s 10.244.58.255 k8s-node02 none none
[rootk8s-master01 storage]# curl 10.244.85.232
nfs-test
# 测试nfs
[rootk8s-master01 storage]# kubectl exec -it test-nfs-volume-6656574b86-cvqmr
-- /bin/bash
roottest-nfs-volume-6656574b86-cvqmr:/# ls /usr/share/nginx/html/
index.html
roottest-nfs-volume-6656574b86-cvqmr:/# cat /usr/share/nginx/html/index.html
nfs-test
roottest-nfs-volume-6656574b86-cvqmr:/# exit
exit
# 进入pod测试[rootk8s-master01 storage]# kubectl delete -f nfs.yaml
deployment.apps test-nfs-volume deleted
# 删除deployment[rootk8s-master01 storage]# ls /data/volume/
index.html
[rootk8s-master01 storage]# cat /data/volume/index.html
nfs-test
# 数据仍然存在k8s存储-PVC
[rootk8s-master01 storage]# mkdir /data/volume-test/v{1..10} -p
[rootk8s-master01 storage]# cat /etc/exports
/data/volume *(rw,no_root_squash)
/data/volume-test/v1 *(rw,no_root_squash)
/data/volume-test/v2 *(rw,no_root_squash)
/data/volume-test/v3 *(rw,no_root_squash)
/data/volume-test/v4 *(rw,no_root_squash)
/data/volume-test/v5 *(rw,no_root_squash)
/data/volume-test/v6 *(rw,no_root_squash)
/data/volume-test/v7 *(rw,no_root_squash)
/data/volume-test/v8 *(rw,no_root_squash)
/data/volume-test/v9 *(rw,no_root_squash)
/data/volume-test/v10 *(rw,no_root_squash)
[rootk8s-master01 storage]# exportfs -arv
# 生效配置[rootk8s-master01 storage]# cat pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:name: v1labels:app: v1
spec:nfs:server: 192.168.1.181path: /data/volume-test/v1accessModes: [ReadWriteOnce]capacity: storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:name: v2labels:app: v2
spec:nfs:server: 192.168.1.181path: /data/volume-test/v2accessModes: [ReadOnlyMany]capacity: storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:name: v3labels:app: v3
spec:nfs:server: 192.168.1.181path: /data/volume-test/v3accessModes: [ReadWriteMany]capacity: storage: 3Gihttps://kubernetes.io/docs/concepts/storage/persistent-volumes#access-mod
访问模式有 ReadWriteOnce 卷可以被一个节点以读写方式挂载。 ReadWriteOnce 访问模式也允许运行在同一节点上的多个 Pod 访问卷。 ReadOnlyMany 卷可以被多个节点以只读方式挂载。 ReadWriteMany 卷可以被多个节点以读写方式挂载。 ReadWriteOncePod 特性状态 Kubernetes v1.27 [beta]卷可以被单个 Pod 以读写方式挂载。 如果你想确保整个集群中只有一个 Pod 可以读取或写入该 PVC 请使用 ReadWriteOncePod 访问模式。这只支持 CSI 卷以及需要 Kubernetes 1.22 以上版本。
[rootk8s-master01 storage]# kubectl apply -f pv.yaml
persistentvolume/v1 created
persistentvolume/v2 created
persistentvolume/v3 created
[rootk8s-master01 storage]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
v1 1Gi RWO Retain Available 22s
v2 2Gi ROX Retain Available 22s
v3 3Gi RWX Retain Available 22s
[rootk8s-master01 storage]# cat pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:name: pvc-v1
spec:accessModes: [ReadWriteOnce]selector:matchLabels: app: v1resources:requests: storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:name: pvc-v2
spec:accessModes: [ReadOnlyMany]selector:matchLabels: app: v2resources:requests: storage: 2Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:name: pvc-v3
spec:accessModes: [ReadWriteMany]selector:matchLabels: app: v3resources:requests: storage: 3Gi
[rootk8s-master01 storage]# kubectl apply -f pvc.yaml
persistentvolumeclaim/pvc-v1 created
persistentvolumeclaim/pvc-v2 created
persistentvolumeclaim/pvc-v3 created
[rootk8s-master01 storage]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc-v1 Bound v1 1Gi RWO 4s
pvc-v2 Bound v2 2Gi ROX 4s
pvc-v3 Bound v3 3Gi RWX 4s
# Bound状态就说明pvc已经和pv绑定RWO: ReadWriteOnce
ROX: ReadOnlyMany
RWX: ReadWriteMany
[rootk8s-master01 storage]# cat deploy_pvc.yaml
apiVersion: apps/v1
kind: Deployment
metadata:name: pvc-test
spec:replicas: 3selector:matchLabels:storage: pvctemplate:metadata:labels:storage: pvcspec:containers:- name: test-pvcimage: xianchao/nginx:v1imagePullPolicy: IfNotPresentports:- containerPort: 80protocol: TCPvolumeMounts: - name: nginx-htmlmountPath: /usr/share/nginx/htmlvolumes:- name: nginx-htmlpersistentVolumeClaim: claimName: pvc-v1
[rootk8s-master01 storage]# kubectl apply -f deploy_pvc.yaml
deployment.apps/pvc-test created
# 创建deployment使用pvc
[root@k8s-master01 storage]# echo pvc-test > /data/volume-test/v1/index.html
[rootk8s-master01 storage]# kubectl get pods -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pvc-test-66c48b4c9d-dljv7 1/1 Running 0 46s 10.244.85.236 k8s-node01 none none
pvc-test-66c48b4c9d-fcttc 1/1 Running 0 46s 10.244.58.197 k8s-node02 none none
pvc-test-66c48b4c9d-kcjvr 1/1 Running 0 46s 10.244.58.198 k8s-node02 none none
[rootk8s-master01 storage]# curl 10.244.58.198
pvc-test
# 测试访问成功**注**使用pvc和pv的注意事项
1、我们每次创建pvc的时候需要事先有划分好的pv这样可能不方便那么可以在创建pvc的时候直接动态创建一个pv这个存储类pv事先是不存在的
2、pvc和pv绑定如果使用默认的回收策略retain那么删除pvc之后pv会处于released状态我们想要继续使用这个pv需要手动删除pvkubectl delete pv pv_name删除pv不会删除pv里的数据当我们重新创建pvc时还会和这个最匹配的pv绑定数据还是原来数据不会丢失。
经过测试，如果回收策略是Delete，删除pv后，pv后端存储的数据也不会被删除
**回收策略**persistentVolumeReclaimPolicy字段
删除pvc的步骤
需要先删除使用pvc的pod
再删除pvc
[rootk8s-master01 storage]# kubectl delete -f deploy_pvc.yaml
deployment.apps pvc-test deleted
[rootk8s-master01 storage]# kubectl delete -f pvc.yaml
persistentvolumeclaim pvc-v1 deleted
persistentvolumeclaim pvc-v2 deleted
persistentvolumeclaim pvc-v3 deleted
[rootk8s-master01 storage]# kubectl delete -f pv.yaml
persistentvolume v1 deleted
persistentvolume v2 deleted
persistentvolume v3 deleted演示pv用Delete回收策略:
[rootk8s-master01 storage]# cat pv-1.yaml
apiVersion: v1
kind: PersistentVolume
metadata:name: v4labels:app: v4
spec:nfs:server: 192.168.1.181path: /data/volume-test/v4accessModes: [ReadWriteOnce]capacity: storage: 1GipersistentVolumeReclaimPolicy: Delete
[rootk8s-master01 storage]# kubectl apply -f pv-1.yaml
persistentvolume/v4 created
# 创建pv
[rootk8s-master01 storage]# cat pvc-1.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:name: pvc-v4
spec:accessModes: [ReadWriteOnce]selector:matchLabels: app: v4resources:requests: storage: 1Gi
[rootk8s-master01 storage]# kubectl apply -f pvc-1.yaml
persistentvolumeclaim/pvc-v4 created
# 创建pvc
[rootk8s-master01 storage]# cat deploy_pvc-1.yaml
apiVersion: apps/v1
kind: Deployment
metadata:name: pvc-test-1
spec:replicas: 3selector:matchLabels:storage: pvc-1template:metadata:labels:storage: pvc-1spec:containers:- name: test-pvcimage: xianchao/nginx:v1imagePullPolicy: IfNotPresentports:- containerPort: 80protocol: TCPvolumeMounts: - name: nginx-htmlmountPath: /usr/share/nginx/htmlvolumes:- name: nginx-htmlpersistentVolumeClaim: claimName: pvc-v4
[rootk8s-master01 storage]# kubectl apply -f deploy_pvc-1.yaml
deployment.apps/pvc-test-1 created
# 创建deployment使用pvc
[rootk8s-master01 storage]# kubectl get pods
NAME READY STATUS RESTARTS AGE
pvc-test-1-58fc869c7c-fgl4r 1/1 Running 0 6s
pvc-test-1-58fc869c7c-h5rxb 1/1 Running 0 6s
pvc-test-1-58fc869c7c-nr7cv 1/1 Running 0 6s
[rootk8s-master01 storage]# kubectl exec -it pvc-test-1-58fc869c7c-fgl4r -- /bin/bash
rootpvc-test-1-58fc869c7c-fgl4r:/# cd /usr/share/nginx/html/
root@pvc-test-1-58fc869c7c-fgl4r:/usr/share/nginx/html# echo ReclaimPolicy-Delete_test > index.html
rootpvc-test-1-58fc869c7c-fgl4r:/usr/share/nginx/html# touch 123
# 写入内容
rootpvc-test-1-58fc869c7c-fgl4r:/usr/share/nginx/html# ls
123 index.html
rootpvc-test-1-58fc869c7c-fgl4r:/usr/share/nginx/html# exit
exit
command terminated with exit code 127
# 退出容器
[rootk8s-master01 storage]# kubectl delete -f deploy_pvc-1.yaml
deployment.apps pvc-test-1 deleted
[rootk8s-master01 storage]# kubectl delete -f pvc-1.yaml
persistentvolumeclaim pvc-v4 deleted
[rootk8s-master01 storage]# kubectl delete -f pv-1.yaml
persistentvolume v4 deleted
# 删除deployment,pvc以及pv
[rootk8s-master01 storage]# kubectl get pv
No resources found
[rootk8s-master01 storage]# ls /data/volume-test/v4/
123 index.html
# 可以看到,内容并没有被删除,所以Delete不是像官网说的一样会删除,可能是暂时不支持NFSStorageclass存储类动态生成存储
存储类动态生成pv
[rootk8s-node01 images]# ctr -n k8s.io images import nfs-subdir-external-provisioner.tar.gz
[rootk8s-node02 images]# ctr -n k8s.io images import nfs-subdir-external-provisioner.tar.gz
# 工作节点导入镜像[rootk8s-master01 storageclass]# mkdir /data/nfs_pro -p
[rootk8s-master01 storageclass]# cat /etc/exports
/data/volume 192.168.1.0/24(rw,no_root_squash)
/data/volume-test/v1 *(rw,no_root_squash)
/data/volume-test/v2 *(rw,no_root_squash)
/data/volume-test/v3 *(rw,no_root_squash)
/data/volume-test/v4 *(rw,no_root_squash)
/data/volume-test/v5 *(rw,no_root_squash)
/data/volume-test/v6 *(rw,no_root_squash)
/data/volume-test/v7 *(rw,no_root_squash)
/data/volume-test/v8 *(rw,no_root_squash)
/data/volume-test/v9 *(rw,no_root_squash)
/data/volume-test/v10 *(rw,no_root_squash)
/data/nfs_pro *(rw,no_root_squash)
[rootk8s-master01 storageclass]# exportfs -arv
exporting 192.168.1.0/24:/data/volume
exporting *:/data/nfs_pro
exporting *:/data/volume-test/v10
exporting *:/data/volume-test/v9
exporting *:/data/volume-test/v8
exporting *:/data/volume-test/v7
exporting *:/data/volume-test/v6
exporting *:/data/volume-test/v5
exporting *:/data/volume-test/v4
exporting *:/data/volume-test/v3
exporting *:/data/volume-test/v2
exporting *:/data/volume-test/v1
# 配置nfs服务[rootk8s-master01 ~]# mkdir storageclass
[rootk8s-master01 ~]# cd storageclass/
# 创建工作目录[rootk8s-master01 storageclass]# cat serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:name: nfs-provisioner[rootk8s-master01 storageclass]# kubectl apply -f serviceaccount.yaml
serviceaccount/nfs-provisioner created
# 创建sa
[root@k8s-master01 storageclass]# kubectl create clusterrolebinding nfs-provisioner-clusterrolebinding --clusterrole=cluster-admin --serviceaccount=default:nfs-provisioner
clusterrolebinding.rbac.authorization.k8s.io/nfs-provisioner-clusterrolebinding created
# 对sa授权[rootk8s-master01 storageclass]# cat nfs-deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:name: nfs-provisioner
spec:selector:matchLabels:app: nfs-provisionerreplicas: 1strategy:type: Recreatetemplate:metadata:labels:app: nfs-provisionerspec:serviceAccount: nfs-provisionercontainers:- name: nfs-provisionerimage: registry.cn-beijing.aliyuncs.com/mydlq/nfs-subdir-external-provisioner:v4.0.0imagePullPolicy: IfNotPresentvolumeMounts:- name: nfs-client-rootmountPath: /persistentvolumesenv:- name: PROVISIONER_NAMEvalue: example.com/nfs- name: NFS_SERVERvalue: 192.168.1.181- name: NFS_PATHvalue: /data/nfs_pro/volumes:- name: nfs-client-rootnfs:server: 192.168.1.181path: /data/nfs_pro/[rootk8s-master01 storageclass]# kubectl apply -f nfs-deployment.yaml
deployment.apps/nfs-provisioner created
[rootk8s-master01 storageclass]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nfs-provisioner-747db885fd-phwm2 1/1 Running 0 60s
# 安装nfs-provisioner程序[rootk8s-master01 storageclass]# cat nfs-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:name: nfs
provisioner: example.com/nfs
# 注意provisioner的值一定要跟安装nfs-provisioner时候的PROVISIONER_NAME一致
[rootk8s-master01 storageclass]# kubectl apply -f nfs-storageclass.yaml
storageclass.storage.k8s.io/nfs created
[rootk8s-master01 storageclass]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
nfs example.com/nfs Delete Immediate false 16s
# 创建storageclass,动态供给pv[rootk8s-master01 storageclass]# cat claim.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:name: test-claim1
spec:accessModes: [ReadWriteMany]resources:requests:storage: 1GistorageClassName: nfs[rootk8s-master01 storageclass]# kubectl apply -f claim.yaml
persistentvolumeclaim/test-claim1 created
# 创建pvc
[rootk8s-master01 storageclass]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
test-claim1 Bound pvc-b334c653-bb8b-4e48-8aa1-469f8ea90fb3 1Gi RWX nfs 49s
# 可以看到pvc已经成功创建了步骤总结
1、供应商创建一个nfs provisioner
2、创建storageclassstorageclass指定刚才创建的供应商
3、创建pvc这个pvc指定storageclass
[rootk8s-master01 storageclass]# cat read-pod.yaml
kind: Pod
apiVersion: v1
metadata:name: read-pod
spec:containers:- name: read-podimage: nginximagePullPolicy: IfNotPresentvolumeMounts:- name: nfs-pvcmountPath: /usr/share/nginx/htmlrestartPolicy: Nevervolumes:- name: nfs-pvcpersistentVolumeClaim:claimName: test-claim1[rootk8s-master01 storageclass]# kubectl apply -f read-pod.yaml
pod/read-pod created
# 创建pod,挂载storageclass动态生成的pvc: test-claim1
[rootk8s-master01 storageclass]# kubectl get pods | grep read
read-pod 1/1 Running 0 88s
# 可以看到pod已经建立成功
[root@k8s-master01 storageclass]# echo nfs-provisioner > /data/nfs_pro/default-test-claim1-pvc-b334c653-bb8b-4e48-8aa1-469f8ea90fb3/index.html
# 写入文件测试
[rootk8s-master01 storageclass]# kubectl get pods -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nfs-provisioner-747db885fd-phwm2 1/1 Running 0 8m20s 10.244.58.201 k8s-node02 none none
read-pod 1/1 Running 0 3m 10.244.85.238 k8s-node01 none none
[rootk8s-master01 storageclass]# curl 10.244.85.238
nfs-provisioner
# 访问成功K8S控制器Statefulset入门到企业实战应用
StatefulSet资源-YAML编写技巧
StatefulSet是为了管理有状态服务的问题而设计的
扩展
有状态服务
**StatefulSet是有状态的集合管理有状态的服务**它所管理的Pod的名称不能随意变化。数据持久化的目录也是不一样每一个Pod都有自己独有的数据持久化存储目录。比如MySQL主从、redis集群等。
无状态服务
**RC、Deployment、DaemonSet都是管理无状态的服务**它们所管理的Pod的IP、名字启停顺序等都是随机的。个体对整体无影响所有pod都是共用一个数据卷的部署的tomcat就是无状态的服务tomcat被删除在启动一个新的tomcat加入到集群即可跟tomcat的名字无关。
[rootk8s-master01 ~]# kubectl explain statefulset.
# 查看帮助[rootk8s-master01 ~]# mkdir statefulset
[rootk8s-master01 ~]# cd statefulset/
# 创建工作目录[rootk8s-master01 statefulset]# cat statefulset.yaml
apiVersion: v1
kind: Service
metadata:name: nginxlabels:app: nginx
spec:ports:- port: 80name: webclusterIP: Noneselector:app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:name: web
spec:replicas: 2selector:matchLabels:app: nginx serviceName: nginxtemplate: metadata:labels:app: nginxspec:containers:- name: nginximage: nginximagePullPolicy: IfNotPresentports:- name: webcontainerPort: 80volumeMounts:- name: wwwmountPath: /usr/share/nginx/htmlvolumeClaimTemplates: - metadata:name: wwwspec:accessModes: [ReadWriteOnce]storageClassName: nfsresources:requests:storage: 1Gi
[rootk8s-master01 statefulset]# kubectl apply -f statefulset.yaml
service/nginx created
statefulset.apps/web created
# 创建statefulset以及对应的service[rootk8s-master01 statefulset]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nfs-provisioner-747db885fd-phwm2 1/1 Running 4 (105m ago) 18h
web-0 1/1 Running 0 39s
web-1 1/1 Running 0 38s
[rootk8s-master01 statefulset]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
test-claim1 Bound pvc-b334c653-bb8b-4e48-8aa1-469f8ea90fb3 1Gi RWX nfs 18h
www-web-0 Bound pvc-54ce83ca-698d-4c32-a0a2-1350f8717941 1Gi RWO nfs 88s
www-web-1 Bound pvc-35c5949e-0227-4d8e-bdbb-02c1ba9ce488 1Gi RWO nfs 85s
[rootk8s-master01 statefulset]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-35c5949e-0227-4d8e-bdbb-02c1ba9ce488 1Gi RWO Delete Bound default/www-web-1 nfs 88s
pvc-54ce83ca-698d-4c32-a0a2-1350f8717941 1Gi RWO Delete Bound default/www-web-0 nfs 91s
pvc-b334c653-bb8b-4e48-8aa1-469f8ea90fb3 1Gi RWX Delete Bound default/test-claim1 nfs 18h
# 可以看到已经自动帮你创建了pv以及pvc
[rootk8s-master01 statefulset]# kubectl get pods -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nfs-provisioner-747db885fd-phwm2 1/1 Running 4 (109m ago) 18h 10.244.58.203 k8s-node02 none none
web-0 1/1 Running 0 4m1s 10.244.58.204 k8s-node02 none none
web-1 1/1 Running 0 4m 10.244.85.242 k8s-node01 none none
[root@k8s-master01 statefulset]# echo web-test-0 > /data/nfs_pro/default-www-web-0-pvc-54ce83ca-698d-4c32-a0a2-1350f8717941/index.html
[root@k8s-master01 statefulset]# echo web-test-1 > /data/nfs_pro/default-www-web-1-pvc-35c5949e-0227-4d8e-bdbb-02c1ba9ce488/index.html
[rootk8s-master01 statefulset]# curl 10.244.58.204
web-test-0
[rootk8s-master01 statefulset]# curl 10.244.85.242
web-test-1
# 测试成功,pod分别使用不同卷
[root@k8s-master01 statefulset]# kubectl run busybox --image docker.io/library/busybox:1.28 --image-pull-policy=IfNotPresent --restart=Never --rm -it busybox -- sh
If you don't see a command prompt, try pressing enter.
/ # nslookup nginx
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.localName: nginx
Address 1: 10.244.58.204 web-0.nginx.default.svc.cluster.local
Address 2: 10.244.85.242 web-1.nginx.default.svc.cluster.local
/ # exit
pod busybox deleted
# 因为ClusterIP设置为了None,所以解析出来是两个pod的地址[rootk8s-master01 statefulset]# kubectl delete -f statefulset.yaml
service nginx deleted
statefulset.apps web deleted
# 删除service以及statefulset[rootk8s-master01 statefulset]# cat statefulset.yaml
apiVersion: v1
kind: Service
metadata:name: nginxlabels:app: nginx
spec:ports:- port: 80name: webselector:app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:name: web
spec:replicas: 2selector:matchLabels:app: nginx serviceName: nginxtemplate: metadata:labels:app: nginxspec:containers:- name: nginximage: nginximagePullPolicy: IfNotPresentports:- name: webcontainerPort: 80volumeMounts:- name: wwwmountPath: /usr/share/nginx/htmlvolumeClaimTemplates: - metadata:name: wwwspec:accessModes: [ReadWriteOnce]storageClassName: nfsresources:requests:storage: 1Gi
[rootk8s-master01 statefulset]# kubectl apply -f statefulset.yaml
service/nginx created
statefulset.apps/web created
# 删除clusterIP: None,也就是给一个ip给service看看会怎么样
[root@k8s-master01 statefulset]# kubectl run busybox --image docker.io/library/busybox:1.28 --image-pull-policy=IfNotPresent --restart=Never --rm -it busybox -- sh
If you don't see a command prompt, try pressing enter.
/ # nslookup nginx
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.localName: nginx
Address 1: 10.96.165.5 nginx.default.svc.cluster.local
/ # exit
pod busybox deleted
[root@k8s-master01 statefulset]# kubectl get svc -owide -l app=nginx
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
nginx ClusterIP 10.96.165.5 none 80/TCP 48s appnginx
# 可以看到解析出来是service的ipStatefulSet总结
1、Statefulset管理的podpod名字是有序的由statefulset的名字-0、1、2这种格式组成
2、创建statefulset资源的时候必须事先创建好一个service,如果创建的service没有ip那对这个service做dns解析会找到它所关联的pod ip如果创建的service有ip那对这个service做dns解析会解析到service本身ip。
3、statefulset管理的pod删除pod新创建的pod名字跟删除的pod名字是一样的
4、statefulset具有volumeclaimtemplate这个字段这个是卷申请模板会自动创建pvpvc也会自动生成跟pv进行绑定那如果创建的statefulset使用了volumeclaimtemplate这个字段那创建pod数据目录是独享的
5、ststefulset创建的pod是域名的域名组成pod-name.svc-name.svc-namespace.svc.cluster.local
StatefulSet管理pod-扩缩容和更新
[root@k8s-master01 statefulset]# cat statefulset.yaml |grep replicas:
  replicas: 3
[rootk8s-master01 statefulset]# kubectl apply -f statefulset.yaml
service/nginx unchanged
statefulset.apps/web configured
[root@k8s-master01 statefulset]# kubectl get pods -w -l app=nginx
NAME READY STATUS RESTARTS AGE
web-0 1/1 Running 0 43m
web-1 1/1 Running 0 43m
web-2 0/1 Pending 0 0s
web-2 0/1 Pending 0 0s
web-2 0/1 Pending 0 1s
web-2 0/1 ContainerCreating 0 1s
web-2 0/1 ContainerCreating 0 1s
web-2 1/1 Running 0 2s
# 直接修改yaml文件实现pod扩容
[root@k8s-master01 statefulset]# cat statefulset.yaml |grep replicas:
  replicas: 2
[rootk8s-master01 statefulset]# kubectl apply -f statefulset.yaml
service/nginx unchanged
statefulset.apps/web configured
[root@k8s-master01 statefulset]# kubectl get pods -w -l app=nginx
NAME READY STATUS RESTARTS AGE
web-0 1/1 Running 0 44m
web-1 1/1 Running 0 44m
web-2 1/1 Running 0 5s
web-2 1/1 Terminating 0 13s
web-2 1/1 Terminating 0 13s
web-2 0/1 Terminating 0 14s
web-2 0/1 Terminating 0 14s
web-2 0/1 Terminating 0 14s
^C[root@k8s-master01 statefulset]# kubectl get pods -l app=nginx
NAME READY STATUS RESTARTS AGE
web-0 1/1 Running 0 45m
web-1 1/1 Running 0 45m
# 实现pod缩容更新
[rootk8s-master01 statefulset]# kubectl explain statefulset.spec.updateStrategy.
# 查看帮助[rootk8s-master01 statefulset]# cat statefulset.yaml
apiVersion: v1
kind: Service
metadata:name: nginxlabels:app: nginx
spec:ports:- port: 80name: webselector:app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:name: web
spec:replicas: 3updateStrategy:rollingUpdate: partition: 1# 意为只更新序号大于等于1的podmaxUnavailable: 0# 最多不可用pod数为0selector:matchLabels:app: nginx serviceName: nginxtemplate: metadata:labels:app: nginxspec:containers:- name: nginximage: ikubernetes/myapp:v1imagePullPolicy: IfNotPresentports:- name: webcontainerPort: 80volumeMounts:- name: wwwmountPath: /usr/share/nginx/htmlvolumeClaimTemplates: - metadata:name: wwwspec:accessModes: [ReadWriteOnce]storageClassName: nfsresources:requests:storage: 1Gi
[rootk8s-master01 statefulset]# kubectl apply -f statefulset.yaml
service/nginx unchanged
statefulset.apps/web configured
[root@k8s-master01 statefulset]# kubectl get pods -l app=nginx -w
NAME READY STATUS RESTARTS AGE
web-0 1/1 Running 0 17s
web-1 1/1 Running 0 15s
web-2 1/1 Running 0 14s
web-2 1/1 Terminating 0 24s
web-2 1/1 Terminating 0 24s
web-2 0/1 Terminating 0 25s
web-2 0/1 Terminating 0 25s
web-2 0/1 Terminating 0 25s
web-2 0/1 Pending 0 0s
web-2 0/1 Pending 0 0s
web-2 0/1 ContainerCreating 0 0s
web-2 0/1 ContainerCreating 0 0s
web-2 1/1 Running 0 1s
web-1 1/1 Terminating 0 27s
web-1 0/1 Terminating 0 28s
web-1 0/1 Terminating 0 28s
web-1 0/1 Terminating 0 28s
web-1 0/1 Pending 0 0s
web-1 0/1 Pending 0 0s
web-1 0/1 ContainerCreating 0 0s
web-1 0/1 ContainerCreating 0 0s
web-1 1/1 Running 0 2s
# 先从大的开始删(只测过一次,不确定)需要注意rollingUpdate下的partition字段,整数类型, 如果是1,就代表只更新序号大于等于1的pod
测试OnDelete类型
[rootk8s-master01 statefulset]# cat statefulset.yaml
apiVersion: v1
kind: Service
metadata:name: nginxlabels:app: nginx
spec:ports:- port: 80name: webselector:app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:name: web
spec:replicas: 3updateStrategy:type: OnDeleteselector:matchLabels:app: nginx serviceName: nginxtemplate: metadata:labels:app: nginxspec:containers:- name: nginximage: ikubernetes/myapp:v2imagePullPolicy: IfNotPresentports:- name: webcontainerPort: 80volumeMounts:- name: wwwmountPath: /usr/share/nginx/htmlvolumeClaimTemplates: - metadata:name: wwwspec:accessModes: [ReadWriteOnce]storageClassName: nfsresources:requests:storage: 1Gi
[rootk8s-master01 statefulset]# kubectl apply -f statefulset.yaml
service/nginx unchanged
statefulset.apps/web configured
[root@k8s-master01 statefulset]# kubectl get pods -l app=nginx -w
NAME READY STATUS RESTARTS AGE
web-0 1/1 Running 0 2m30s
web-1 1/1 Running 0 2m
web-2 1/1 Running 0 2m2s
# 没有更新
[rootk8s-master01 statefulset]# kubectl delete pod web-0
pod web-0 deleted
[rootk8s-master01 statefulset]# kubectl delete pod web-1
pod web-1 deleted
[rootk8s-master01 statefulset]# kubectl delete pod web-2
pod web-2 deleted
[root@k8s-master01 statefulset]# kubectl get pods -l app=nginx
NAME READY STATUS RESTARTS AGE
web-0 1/1 Running 0 10s
web-1 1/1 Running 0 8s
web-2 1/1 Running 0 5s
# OnDelete类型必须要手动删除pod才会更新[rootk8s-master01 statefulset]# kubectl delete -f statefulset.yaml
service nginx deleted
statefulset.apps web deleted
# 清除环境K8S控制器DaemonSet入门到企业实战应用
Daemonset控制器基本介绍
DaemonSet概述
DaemonSet控制器能够确保k8s集群所有的节点都运行一个相同的pod副本当向k8s集群中增加node节点时这个node节点也会自动创建一个pod副本当node节点从集群移除这些pod也会自动删除删除Daemonset也会删除它们创建的pod
DaemonSet工作原理如何管理Pod
daemonset的控制器会监听kuberntes的daemonset对象、pod对象、node对象这些被监听的对象之变动就会触发syncLoop循环让kubernetes集群朝着daemonset对象描述的状态进行演进。
Daemonset具有实战应用场景分析
在集群的每个节点上运行存储比如glusterd 或 ceph。 在每个节点上运行日志收集组件比如flunentd 、 logstash、filebeat等。 在每个节点上运行监控组件比如Prometheus、 Node Exporter 、collectd等。
通过YAML文件创建Daemonset资源技巧
[rootk8s-master01 ~]# kubectl explain ds.
# 查看帮助Daemonset实战: 部署收集日志组件
ctr -n k8s.io images import fluentd_2_5_1.tar.gz
# 所有节点导入镜像[rootk8s-master01 ~]# kubectl describe node k8s-master01 |grep -i taint
Taints: node-role.kubernetes.io/control-plane:NoSchedule
# 查看master节点污点
[rootk8s-master01 ~]# mkdir daemonset
[rootk8s-master01 ~]# cd daemonset/
# 创建工作目录[rootk8s-master01 daemonset]# cat daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:name: fluentd-elasticsearchnamespace: kube-systemlabels:k8s-app: fluentd-logging
spec:selector:matchLabels:name: fluentd-elasticsearchtemplate:metadata:labels:name: fluentd-elasticsearchspec:tolerations:- key: node-role.kubernetes.io/control-planeeffect: NoSchedulecontainers:- name: fluentd-elasticsearchimage: xianchao/fluentd:v2.5.1imagePullPolicy: IfNotPresentresources:requests:cpu: 100mmemory: 200Milimits:cpu: 100mmemory: 200MivolumeMounts:- name: varlogmountPath: /var/logreadOnly: true- name: varlibcontainerdiocontainerdgrpcv1cricontainersmountPath: /var/lib/containerd/io.containerd.grpc.v1.cri/containersreadOnly: truevolumes:- name: varloghostPath:path: /var/log- name: varlibcontainerdiocontainerdgrpcv1cricontainershostPath:path: /var/lib/containerd/io.containerd.grpc.v1.cri/containers# 这个目录自己琢磨了一下,应该存放的是containerd的运行中容器状态
[rootk8s-master01 daemonset]# kubectl apply -f daemonset.yaml
daemonset.apps/fluentd-elasticsearch created
[root@k8s-master01 daemonset]# kubectl get ds -n kube-system -l k8s-app=fluentd-logging
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
fluentd-elasticsearch 3 3 3 3 3 none 94s
[root@k8s-master01 daemonset]# kubectl get pods -n kube-system -l name=fluentd-elasticsearch -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
fluentd-elasticsearch-482h6 1/1 Running 0 3m18s 10.244.85.255 k8s-node01 none none
fluentd-elasticsearch-5hscz 1/1 Running 0 3m17s 10.244.58.217 k8s-node02 none none
fluentd-elasticsearch-jjf8w 1/1 Running 0 3m17s 10.244.32.161 k8s-master01 none none
# 可以看到已经部署成功Daemonset管理Pod: 动态更新和回滚
[rootk8s-master01 statefulset]# kubectl explain ds.spec.updateStrategy.rollingUpdate.
# 查看帮助
[root@k8s-master01 daemonset]# kubectl set image daemonsets fluentd-elasticsearch fluentd-elasticsearch=ikubernetes/filebeat:5.6.6-alpine -n kube-system
daemonset.apps/fluentd-elasticsearch image updated
# 这个镜像启动pod会有问题主要是演示daemonset如何在命令行更新pod[rootk8s-master01 daemonset]# kubectl rollout history daemonset fluentd-elasticsearch -n kube-system
daemonset.apps/fluentd-elasticsearch
REVISION CHANGE-CAUSE
1 none
2 none
[root@k8s-master01 daemonset]# kubectl -n kube-system rollout undo daemonset fluentd-elasticsearch --to-revision=1
daemonset.apps/fluentd-elasticsearch rolled back
# 回滚
[root@k8s-master01 daemonset]# kubectl get pods -n kube-system -l name=fluentd-elasticsearch -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
fluentd-elasticsearch-cm55l 1/1 Running 0 16m 10.244.58.214 k8s-node02 none none
fluentd-elasticsearch-lxmd8 1/1 Running 0 13s 10.244.32.164 k8s-master01 none none
fluentd-elasticsearch-x5jrc 1/1 Running 0 16m 10.244.85.193 k8s-node01 none none
# 状态正常[rootk8s-master01 daemonset]# kubectl delete -f daemonset.yaml
daemonset.apps fluentd-elasticsearch deleted
# 清除环境K8S配置管理中心ConfigMap实现微服务配置管理
配置管理中心Configmap基本介绍
Configmap是k8s中的资源对象用于保存非机密性的配置的数据可以用key/value键值对的形式保存也可通过文件的形式保存。 configmap是k8s的资源,相当于配置文件,可以有一个或者多个configmap;configmap可以做成volume,k8s pod启动后,通过volume挂载到容器内部指定目录;容器内部应用按照原有方式读取特定目录上的配置文件;在容器看来,配置文件就像是打包在容器内部特定目录,整个过程对应用没有任何侵入.
Configmap具体实战应用场景分析
集群跑着服务,像nginx,tomcat,mysql,突然资源不够用了,需要加机器,加机器的话又要更新配置,一个一个修改很麻烦,这时候就有configmap,可以把配置信息之类的存在configmap,通过volume卷挂载进去
Configmap注入方式有两种: 一种是将configmap作为存储卷,一种是将configmap通过env中configMapKeyRef注入到容器中
使用微服务架构的话,存在多个服务共用配置的情况,如果每个服务中心单独一份配置的话,那么更新配置很麻烦,使用configmap可以友好的进行配置共享
configmap局限性
configmap设计上不是用来保存大量数据的,保存在configmap中的数据不能超过1MiB,如果你需要保存超过此尺寸限制的数据,可以考虑挂载存储卷或者使用独立数据库或文件服务
创建configmap的第一种方案: 指定参数
[rootk8s-master01 ~]# kubectl create cm --help
[rootk8s-master01 ~]# kubectl create cm --help |grep -- --from-literal -A1
--from-literal=[]: Specify a key and literal value to insert in configmap (i.e. mykey=somevalue)
# 查看帮助
[rootk8s-master01 ~]# kubectl create cm tomcat-config --from-literal=tomcat-port=8080 --from-literal=tomcat-server_name=myapp.tomcat.com
configmap/tomcat-config created
[rootk8s-master01 ~]# kubectl describe cm tomcat-config
Name: tomcat-config
Namespace: default
Labels: none
Annotations: noneDatatomcat-server_name:
----
myapp.tomcat.com
tomcat-port:
----
8080BinaryData
Events: none
# 创建一个名为tomcat-config的cm创建configmap的第二种方案: 指定文件
[rootk8s-master01 ~]# kubectl create cm www-nginx --from-file=www=./nginx.conf
configmap/www-nginx created
[rootk8s-master01 ~]# kubectl describe cm www-nginx
Name: www-nginx
Namespace: default
Labels: none
Annotations: noneDatawww:
# 这里是--from-file后面那个参数定义的
----
server {server_name www.nginx.com;listen 80;root /home/nginx/www/
}BinaryData
Events: none
[rootk8s-master01 ~]# kubectl describe cm www-nginx-1
Name: www-nginx-1
Namespace: default
Labels: none
Annotations: noneDatanginx.conf:
# 如果不写的话就是文件名
----
server {server_name www.nginx.com;listen 80;root /home/nginx/www/
}BinaryData
Events: none创建configmap的第三种方案: 指定文件夹
[rootk8s-master01 ~]# mkdir configmap
[rootk8s-master01 ~]# cd configmap/
[rootk8s-master01 configmap]# mv ../nginx.conf ./
[rootk8s-master01 configmap]# mkdir test-a
[rootk8s-master01 configmap]# cd test-a/
[rootk8s-master01 test-a]# echo server-id=1 > my-server.cnf
[rootk8s-master01 test-a]# echo server-id=2 > my-slave.cnf
[rootk8s-master01 test-a]# kubectl create cm mysql-config --from-file=/root/configmap/test-a/
configmap/mysql-config created
# 通过目录创建configmap
[rootk8s-master01 test-a]# kubectl describe cm mysql-config
Name: mysql-config
Namespace: default
Labels: none
Annotations:  <none>

Data
====
my-server.cnf:
----
server-id=1

my-slave.cnf:
----
server-id=2

BinaryData
====
Events: none通过YAML文件创建configmap技巧
[rootk8s-master01 test-a]# cd ..
[rootk8s-master01 configmap]# kubectl explain cm.
# 查看帮助[rootk8s-master01 configmap]# cat mysql-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  labels:
    app: mysql
data:
  master.cnf: |
    [mysqld]
    log-bin
    log_bin_trust_function_creators=1
    lower_case_table_names=1
  slave.cnf: |
    [mysqld]
    super-read-only
    log_bin_trust_function_creators=1
# 对于多行数据|必须要加,这代表多行字符串保留为单个字符串
[rootk8s-master01 configmap]# kubectl apply -f mysql-configmap.yaml
configmap/mysql created
[rootk8s-master01 configmap]# kubectl describe cm mysql
Name: mysql
Namespace: default
Labels: appmysql
Annotations:  <none>

Data
====
master.cnf:
----
[mysqld]
log-bin
log_bin_trust_function_creators=1
lower_case_table_names=1

slave.cnf:
----
[mysqld]
super-read-only
log_bin_trust_function_creators=1

BinaryData
====
Events: none
[rootk8s-master01 configmap]# kubectl delete -f mysql-configmap.yaml
configmap mysql deleted
# 删除cm注意:
多行数据必须要加 “|”
使用cm第一种方式: ConfigMapKeyRef
[rootk8s-master01 configmap]# cat mysql-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:name: mysqllabels:app: mysql
data:
  log: "1"
  lower: "1"
[rootk8s-master01 configmap]# kubectl apply -f mysql-configmap.yaml
configmap/mysql created
# 创建cm[rootk8s-master01 configmap]# cat mysql-pod.yaml
apiVersion: v1
kind: Pod
metadata:name: mysql-pod
spec:
  containers:
  - name: mysql
    image: busybox
    imagePullPolicy: IfNotPresent
    command: ["/bin/sh", "-c", "sleep 3600"]
    env:
    - name: log-bin        # 指定环境变量名字
      valueFrom:
        configMapKeyRef:
          name: mysql      # 指定cm的名字
          key: log         # 指定cm中的key
    - name: lower
      valueFrom:
        configMapKeyRef:
          name: mysql
          key: lower
[rootk8s-master01 configmap]# kubectl apply -f mysql-pod.yaml
pod/mysql-pod created[rootk8s-master01 configmap]# kubectl exec -it mysql-pod -c mysql -- /bin/sh
/ # printenv
KUBERNETES_PORTtcp://10.96.0.1:443
KUBERNETES_SERVICE_PORT443
HOSTNAMEmysql-pod
SHLVL1
HOME/root
NGINX_PORT_80_TCPtcp://10.100.169.159:80
TERMxterm
lower1
KUBERNETES_PORT_443_TCP_ADDR10.96.0.1
PATH/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
NGINX_SERVICE_HOST10.100.169.159
KUBERNETES_PORT_443_TCP_PORT443
KUBERNETES_PORT_443_TCP_PROTOtcp
NGINX_SERVICE_PORT80
NGINX_PORTtcp://10.100.169.159:80
log-bin1
KUBERNETES_PORT_443_TCPtcp://10.96.0.1:443
KUBERNETES_SERVICE_PORT_HTTPS443
NGINX_SERVICE_PORT_WEB80
KUBERNETES_SERVICE_HOST10.96.0.1
PWD/
NGINX_PORT_80_TCP_ADDR10.100.169.159
NGINX_PORT_80_TCP_PORT80
NGINX_PORT_80_TCP_PROTOtcp
/ # exit
# 查看环境变量[rootk8s-master01 configmap]# kubectl delete -f mysql-pod.yaml
pod mysql-pod deleted
# 清除环境使用configmap第二种方式: envFrom
[rootk8s-master01 configmap]# cat mysql-pod-envfrom.yaml
apiVersion: v1
kind: Pod
metadata:name: mysql-pod-envfrom
spec:
  containers:
  - name: mysql
    image: busybox
    imagePullPolicy: IfNotPresent
    command: ["/bin/sh", "-c", "sleep 3600"]
    envFrom:
    - configMapRef:
        name: mysql
[rootk8s-master01 configmap]# kubectl apply -f mysql-pod-envfrom.yaml
pod/mysql-pod-envfrom created
[rootk8s-master01 configmap]# kubectl exec -it mysql-pod-envfrom -c mysql -- /bin/sh
/ # printenv
KUBERNETES_SERVICE_PORT443
KUBERNETES_PORTtcp://10.96.0.1:443
HOSTNAMEmysql-pod-envfrom
SHLVL1
HOME/root
TERMxterm
lower1
KUBERNETES_PORT_443_TCP_ADDR10.96.0.1
PATH/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
log1
KUBERNETES_PORT_443_TCP_PORT443
KUBERNETES_PORT_443_TCP_PROTOtcp
KUBERNETES_SERVICE_PORT_HTTPS443
KUBERNETES_PORT_443_TCPtcp://10.96.0.1:443
KUBERNETES_SERVICE_HOST10.96.0.1
PWD/
/ # exit
# 查看环境变量[rootk8s-master01 configmap]# kubectl delete -f mysql-pod-envfrom.yaml
pod mysql-pod-envfrom deleted
# 清除环境使用configmap第三种方式: volume
[rootk8s-master01 configmap]# cat mysql-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:name: mysqllabels:app: mysql
data:
  log: "1"
  lower: "1"
  my.cnf: |
    [mysqld]
    Welcome=yuhang
[rootk8s-master01 configmap]# kubectl apply -f mysql-configmap.yaml
configmap/mysql configured
# 更新cm[rootk8s-master01 configmap]# cat mysql-pod-volume.yaml
apiVersion: v1
kind: Pod
metadata:name: mysql-pod-volume
spec:
  containers:
  - name: mysql
    image: busybox
    imagePullPolicy: IfNotPresent
    command: ["/bin/sh", "-c", "sleep 3600"]
    volumeMounts:
    - name: mysql-config
      mountPath: /tmp/config
  volumes:
  - name: mysql-config
    configMap:
      name: mysql
[rootk8s-master01 configmap]# kubectl apply -f mysql-pod-volume.yaml
pod/mysql-pod-volume created
# 创建pod[rootk8s-master01 configmap]# kubectl exec -it mysql-pod-volume -c mysql -- /bin/sh
/ # cd /tmp/config/
/tmp/config # ls
log lower my.cnf
/tmp/config # cat log
1
/tmp/config # cat lower
1
/tmp/config # cat my.cnf
[mysqld]
Welcome=yuhang
/tmp/config # exit
# 查看挂载目录下文件内容Configmap热加载: 自动更新配置
[rootk8s-master01 configmap]# kubectl edit cm mysql
data:log: 2# 修改log值为2
[rootk8s-master01 configmap]# kubectl exec -it mysql-pod-volume -c mysql -- /bin/sh
/ # cd /tmp/config/
/tmp/config # cat log
2/tmp/config # exit
# 有时候没改过来可能是还在改,过一会再看看[rootk8s-master01 configmap]# kubectl delete -f mysql-pod-volume.yaml
pod mysql-pod-volume deleted
# 清除环境
[rootk8s-master01 configmap]# kubectl delete -f mysql-pod-envfrom.yaml
pod "mysql-pod-envfrom" deleted
# 清除环境

使用configmap第三种方式: volume
[rootk8s-master01 configmap]# cat mysql-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:name: mysqllabels:app: mysql
data:
  log: "1"
  lower: "1"
  my.cnf: |
    [mysqld]
    Welcome=yuhang
[rootk8s-master01 configmap]# kubectl apply -f mysql-configmap.yaml
configmap/mysql configured
# 更新cm[rootk8s-master01 configmap]# cat mysql-pod-volume.yaml
apiVersion: v1
kind: Pod
metadata:name: mysql-pod-volume
spec:
  containers:
  - name: mysql
    image: busybox
    imagePullPolicy: IfNotPresent
    command: ["/bin/sh", "-c", "sleep 3600"]
    volumeMounts:
    - name: mysql-config
      mountPath: /tmp/config
  volumes:
  - name: mysql-config
    configMap:
      name: mysql
[rootk8s-master01 configmap]# kubectl apply -f mysql-pod-volume.yaml
pod/mysql-pod-volume created
# 创建pod[rootk8s-master01 configmap]# kubectl exec -it mysql-pod-volume -c mysql -- /bin/sh
/ # cd /tmp/config/
/tmp/config # ls
log lower my.cnf
/tmp/config # cat log
1
/tmp/config # cat lower
1
/tmp/config # cat my.cnf
[mysqld]
Welcome=yuhang
/tmp/config # exit
# 查看挂载目录下文件内容Configmap热加载: 自动更新配置
[rootk8s-master01 configmap]# kubectl edit cm mysql
data:log: 2# 修改log值为2
[rootk8s-master01 configmap]# kubectl exec -it mysql-pod-volume -c mysql -- /bin/sh
/ # cd /tmp/config/
/tmp/config # cat log
2/tmp/config # exit
# 有时候没改过来可能是还在改,过一会再看看[rootk8s-master01 configmap]# kubectl delete -f mysql-pod-volume.yaml
pod mysql-pod-volume deleted
# 清除环境