# 官方提供@yunTaoScripts 存储管理 🔥🔥


# emptyDir

  • 默认,类似于docker run -v /xx,pod销毁,存储销毁。
# Pod demonstrating emptyDir: both containers mount the same ephemeral
# volume (vo1), so files written by one are visible to the other; the
# data is destroyed together with the Pod.
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: myapp
  name: myapp
spec:
  volumes:
  - name: vo1
    # created empty when the Pod is scheduled, deleted with the Pod
    emptyDir: {}
  containers:
  - image: busybox
    imagePullPolicy: IfNotPresent
    name: mycon1
    # writes a file into the shared volume, then keeps the container alive
    command: ['sh','-c','touch /xx/xx.txt && sleep 10000']
    volumeMounts:
    - name: vo1
      mountPath: /xx
  - image: busybox
    imagePullPolicy: IfNotPresent
    name: mycon2
    # same volume vo1, mounted at a different path in this container
    command: ['sh','-c','touch /yy/yy.txt && sleep 10000']
    volumeMounts:
    - name: vo1
      mountPath: /yy
  restartPolicy: Always
status: {}

# hostPath

  • 类似于 docker run -v /data:/xx, pod销毁,目录依旧存在,如果指定主机目录不存在会自动创建。
# Pod demonstrating hostPath: the node directory /xyt is mounted into both
# containers; the directory outlives the Pod, and (with no "type" field)
# no existence check is performed on the node path.
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: myapp1
  name: myapp1
spec:
  # kill the Pod immediately on deletion instead of the default 30s grace
  terminationGracePeriodSeconds: 0
  volumes:
  - name: vo1
    hostPath:
      path: /xyt
  containers:
  - image: busybox
    imagePullPolicy: IfNotPresent
    name: mycon1
    # writes into the host-backed volume, then keeps the container alive
    command: ['sh','-c','touch /xx/xx.txt && sleep 10000']
    volumeMounts:
    - name: vo1
      mountPath: /xx
  - image: busybox
    imagePullPolicy: IfNotPresent
    name: mycon2
    # same host directory, mounted at a different path in this container
    command: ['sh','-c','touch /yy/yy.txt && sleep 10000']
    volumeMounts:
    - name: vo1
      mountPath: /yy
  restartPolicy: Always
status: {}

# nfs

# 安装NFS

# ---- NFS server side ----
yum install nfs-utils rpcbind
# Add an export line to /etc/exports:
#   rw              export is read-write
#   sync            data is committed to storage before the request returns
#                   (the opposite, async, only buffers it in memory first)
#   no_root_squash  a root client keeps root privileges on the export
#   no_all_squash   client users are not mapped to the anonymous user
/data/share/ *(rw,no_root_squash,no_all_squash,sync)
exportfs -r                  # re-export everything listed in /etc/exports
rpcinfo -p                   # verify the RPC services are registered
systemctl status rpcbind.service
systemctl restart rpcbind.service
systemctl restart nfs
showmount -e localhost       # list what this host is exporting

# ---- NFS client side: only rpcbind is needed, no nfs service ----
yum -y install rpcbind
showmount -e 192.168.2.202   # 192.168.2.202 = NFS server address
mkdir -p /mnt/share
# NOTE(review): the remote path must match an entry in the server's
# /etc/exports (the example above exports /data/share, not /data1) — verify
mount -t nfs 192.168.2.202:/data1 /mnt/share/ -o nolock,nfsvers=3,vers=3
umount /mnt/share

# 配置pod使用NFS作为存储

# Pod mounting an NFS export directly (no PV/PVC): both containers share
# the remote directory 10.211.55.112:/data1; data survives Pod deletion.
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: myapp2
  name: myapp2
spec:
  # kill the Pod immediately on deletion instead of the default 30s grace
  terminationGracePeriodSeconds: 0
  volumes:
  - name: vo1
    nfs:
      server: 10.211.55.112
      path: /data1
  containers:
  - image: busybox
    imagePullPolicy: IfNotPresent
    name: mycon1
    # writes into the NFS-backed volume, then keeps the container alive
    command: ['sh','-c','touch /xx/xx.txt && sleep 10000']
    volumeMounts:
    - name: vo1
      mountPath: /xx
  - image: busybox
    imagePullPolicy: IfNotPresent
    name: mycon2
    # same NFS volume, mounted at a different path in this container
    command: ['sh','-c','touch /yy/yy.txt && sleep 10000']
    volumeMounts:
    - name: vo1
      mountPath: /yy
  restartPolicy: Always
status: {}

# PersistentVolume

# 持久性存储

# 创建PV

# PersistentVolume backed by NFS: 5Gi capacity, single-node read-write.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv0001
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  # Recycle: after the bound PVC is deleted, a helper pod scrubs the data
  # and the PV goes back to Available. NOTE(review): Recycle is deprecated
  # upstream; Retain or dynamic provisioning is preferred today.
  persistentVolumeReclaimPolicy: Recycle
  # a PVC must request the same storageClassName to bind to this PV
  storageClassName: xyt
  nfs:
    path: /data2
    server: 10.211.55.112

# 创建PVC

# PersistentVolumeClaim intended to bind to pv0001. A claim only binds to
# a PV whose capacity is >= the request, so it must not ask for more than
# the PV's 5Gi (see the note below: PVC larger than PV cannot bind).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc0011
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      # was 8Gi, which exceeds pv0001's 5Gi capacity and could never bind
      storage: 5Gi
  # must match the PV's storageClassName for the claim to bind
  storageClassName: xyt

# 配置pod使用持久性存储


# Pod consuming the PVC: nginx serves /usr/share/nginx/html from the
# PV-backed volume claimed by pvc0011.
apiVersion: v1   # fixed typo: was "aapiVersion", which made the manifest invalid
kind: Pod
metadata:
  name: pvcpod
spec:
  # share the node's network namespace (nginx listens on the node's own IP)
  hostNetwork: true
  containers:
  - name: myfrontend
    image: nginx
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - mountPath: "/usr/share/nginx/html"
      name: mypd
  volumes:
  - name: mypd
    persistentVolumeClaim:
      claimName: pvc0011

👍🏻

  • PV的值如果大于PVC的话,是可以关联的。
  • 但是PVC的值大于PV的话,则不可以。
  • storageClassName 用于更精确的PV 和 PVC 的匹配。

# PV回收策略

# persistentVolumeReclaimPolicy

  • Recycle
    • 会删除数据,会生成一个pod回收数据
    • 删除pvc之后,pv可复用
    • pv状态由Released变为Available
  • Retain
    • 不回收数据
    • 但是删除pvc之后,pv依然不可用
    • pv状态长期保持为 Released

# PV访问模式

# accessModes

  • ReadWriteOnce
    • 卷可以被一个节点以读写方式挂载。 ReadWriteOnce 访问模式也允许运行在同一节点上的多个 Pod 访问卷。
  • ReadOnlyMany
    • 卷可以被多个节点以只读方式挂载。
  • ReadWriteMany
    • 卷可以被多个节点以读写方式挂载。