# Supply Chain Security (by @yunTaoScripts) 🔥🔥


# Admission Controllers

A request first goes through Authentication, which verifies the user's identity (is the token correct, is the certificate valid). It then goes through Authorization, where RBAC determines which operations the user may perform. Finally it passes through admission control: only Pods that satisfy the admission policies (for example PSP or Gatekeeper rules) are created.

containerd client tools

  • containerd has several client tools, such as nerdctl, crictl, and ctr. nerdctl is the most convenient.
  • Some commands do not work against kubelet-managed Pods, though: for example `nerdctl logs` on such a container fails, so use `crictl logs` instead (see the sketch below).
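A minimal sketch of reading a Pod container's logs with crictl; the container ID is a placeholder, not from the original:

crictl ps | grep apiserver       # list running containers and find the container ID
crictl logs <container-id>       # read that container's logs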
root@vms33:~# kubectl exec -it kube-apiserver-vms33.rhce.cc -- kube-apiserver --help| grep admission
      --disable-admission-plugins strings      admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, PodSecurity, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyServiceExternalIPs, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurity, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter.
      --enable-admission-plugins strings       admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, PodSecurity, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyServiceExternalIPs, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurity, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter.
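The help text above lists every available plugin. To see which plugins this particular cluster actually enables, one can grep the kube-apiserver static Pod manifest (the same file that is edited later in this section):

grep admission-plugins /etc/kubernetes/manifests/kube-apiserver.yaml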

# Disabling the ResourceQuota Admission Plugin

  • Create a quota. Because the admission plugin is still enabled, a fourth Service cannot be created.
root@vms33:~# cat service-quota.yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: svc-quota
spec:
  hard:
    services: "3"
root@vms33:~# kubectl apply -f service-quota.yaml 
resourcequota/svc-quota created
root@vms33:~# kubectl get resourcequotas 
NAME        AGE   REQUEST         LIMIT
svc-quota   7s    services: 3/3   
root@vms33:~# kubectl expose pod pod1 --name svc4 --port 80
Error from server (Forbidden): services "svc4" is forbidden: exceeded quota: svc-quota, requested: services=1, used: services=3, limited: services=3
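The three Services already counted against the quota are not shown in the original; presumably they were created the same way, for example:

kubectl expose pod pod1 --name svc1 --port 80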

  • Disable the ResourceQuota admission plugin; Service creation is then no longer limited.
root@vms33:~# cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep admiss
    - --enable-admission-plugins=NodeRestriction
    - --disable-admission-plugins=ResourceQuota   ## add this line
root@vms33:~# systemctl restart kubelet.service 
root@vms33:~# kubectl expose pod pod1 --name svc4 --port 80
service/svc4 exposed
root@vms33:~# kubectl expose pod pod1 --name svc5 --port 80
service/svc5 exposed
root@vms33:~# kubectl get resourcequotas 
NAME        AGE     REQUEST         LIMIT
svc-quota   6m55s   services: 3/3

# Disabling the LimitRanger Admission Plugin

  • Create a LimitRange; once it applies, a container exceeding the 500Mi memory limit gets killed.
root@vms33:~# cat pod2.yaml 
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pod2
  name: pod2
spec:
  nodeName: vms34.rhce.cc
  terminationGracePeriodSeconds: 0
  containers:
  - image: centos
    imagePullPolicy: IfNotPresent
    command: ["sh","-c","sleep 1d"]
    name: pod2
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
root@vms33:~# kubectl apply -f pod2.yaml 
pod/pod2 created
root@vms33:~# kubectl get pod
NAME   READY   STATUS    RESTARTS   AGE
pod1   1/1     Running   0          15m
pod2   1/1     Running   0          6s
root@vms33:~# kubectl get pod -owide
NAME   READY   STATUS    RESTARTS   AGE   IP               NODE            NOMINATED NODE   READINESS GATES
pod1   1/1     Running   0          15m   10.244.194.147   vms34.rhce.cc   <none>           <none>
pod2   1/1     Running   0          12s   10.244.194.148   vms34.rhce.cc   <none>           <none>

root@vms33:~# kubectl cp memload-7.0-1.r29766.x86_64.rpm pod2:/root
root@vms33:~# kubectl exec -it pod2 -- bash
[root@pod2 /]# cd /root/
[root@pod2 ~]# ls
anaconda-ks.cfg  anaconda-post.log  memload-7.0-1.r29766.x86_64.rpm  original-ks.cfg
[root@pod2 ~]# rpm -ivh memload-7.0-1.r29766.x86_64.rpm 
Verifying...                          ################################# [100%]
Preparing...                          ################################# [100%]
Updating / installing...
   1:memload-7.0-1.r29766             ################################# [100%]
[root@pod2 ~]# memload 1500  ## no limit at this point
Attempting to allocate 1500 Mebibytes of resident memory...


root@vms33:~# cat pod-limit.yaml 
apiVersion: v1
kind: LimitRange
metadata:
  name: mem-min-max-demo-lr
spec:
  limits:
  - max:
      memory: 500Mi
    min:
      memory: 100Mi
    type: Container
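
The original omits the steps between defining the LimitRange and re-entering the Pod; presumably the LimitRange was applied and pod2 was recreated so that the limit takes effect, roughly:

kubectl apply -f pod-limit.yaml
kubectl delete -f pod2.yaml && kubectl apply -f pod2.yaml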

root@vms33:~# kubectl cp memload-7.0-1.r29766.x86_64.rpm pod2:/root
root@vms33:~# kubectl exec -it pod2 -- bash
[root@pod2 /]# cd /root/
[root@pod2 ~]# rpm -ivh memload-7.0-1.r29766.x86_64.rpm 


[root@pod2 ~]# memload 400
Attempting to allocate 400 Mebibytes of resident memory...
^C
[root@pod2 ~]# memload 600
Attempting to allocate 600 Mebibytes of resident memory...
Killed

  • Disable the LimitRanger admission plugin. The existing Pod still carries the limit that was injected when it was admitted, so it must be recreated; after that, memory is no longer limited.
root@vms33:~# grep admiss /etc/kubernetes/manifests/kube-apiserver.yaml
    - --enable-admission-plugins=NodeRestriction
    - --disable-admission-plugins=ResourceQuota,LimitRanger  # add LimitRanger
root@vms33:~# systemctl restart kubelet.service 
root@vms33:~# kubectl exec -it pod2 -- bash
[root@pod2 /]# memload 600
Attempting to allocate 600 Mebibytes of resident memory...
Killed
[root@pod2 /]# exit
command terminated with exit code 137
root@vms33:~# kubectl delete -f pod2.yaml 
pod "pod2" deleted
root@vms33:~# kubectl apply -f pod2.yaml 
pod/pod2 created
root@vms33:~# kubectl cp memload-7.0-1.r29766.x86_64.rpm pod2:/root
root@vms33:~# systemctl restart kubelet.service 
root@vms33:~# kubectl exec -it pod2 -- bash
[root@pod2 /]# cd root/

[root@pod2 ~]# rpm -ivh memload-7.0-1.r29766.x86_64.rpm 
Verifying...                          ################################# [100%]
Preparing...                          ################################# [100%]
Updating / installing...
   1:memload-7.0-1.r29766             ################################# [100%]
[root@pod2 ~]# memload 600
Attempting to allocate 600 Mebibytes of resident memory...

# Enabling the ImagePolicyWebhook Admission Controller

The webhook server is simply a third-party server; Kubernetes consults its policy when admitting Pods.

# Setting Up the Webhook Server

  • Install the webhook service; its job is to reject any image that uses the latest tag.
docker run -dit --name=c1 --restart=always -e BOUNCER_CERTIFICATE=/certs/webhook.pem -e BOUNCER_KEY=/certs/webhook-key.pem -v `pwd`/webhook-key.pem:/certs/webhook-key.pem:ro -v `pwd`/webhook.pem:/certs/webhook.pem:ro -p 1323:1323 flavio/kube-image-bouncer

Note: for this exercise, do not pass the options

-e BOUNCER_CERTIFICATE=/certs/webhook.pem -e BOUNCER_KEY=/certs/webhook-key.pem

Because of a certificate problem they trigger the error: x509: certificate relies on legacy Common Name field, use SANs or temporarily enable Common Name matching with GODEBUG=x509ignoreCN=0
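Without those options, the run command from above reduces to the following (a sketch derived from the command shown earlier, not a separate transcript):

docker run -dit --name=c1 --restart=always -v `pwd`/webhook-key.pem:/certs/webhook-key.pem:ro -v `pwd`/webhook.pem:/certs/webhook.pem:ro -p 1323:1323 flavio/kube-image-bouncer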

# Configuring the API Server

  • Because this exercise does not use TLS authentication, the certificate entries in these files are not actually used.
root@vms33:/etc/kubernetes/webhook# ll   ## just extract these files and use them as-is
-rw-r--r-- 1 root root  186 Sep 11 17:50 admission_configuration.json
-rw------- 1 root root 1704 Sep 11 16:37 apiserver-client-key.pem
-rw-r--r-- 1 root root 1245 Sep 11 16:37 apiserver-client.pem
-rw-r--r-- 1 root root  494 Sep 11 17:51 kubeconfig.yaml
-rw------- 1 root root 1704 Sep 11 16:37 webhook-key.pem
-rw-r--r-- 1 root root 1245 Sep 11 16:37 webhook.pem
root@vms33:~# cat /etc/kubernetes/webhook/admission_configuration.json 
{
  "imagePolicy": {
     "kubeConfigFile": "/etc/kubernetes/webhook/kubeconfig.yaml",
     "allowTTL": 50,
     "denyTTL": 50,
     "retryBackoff": 500,
     "defaultAllow": false
  }
}

root@vms33:/etc/kubernetes/webhook# cat kubeconfig.yaml 
apiVersion: v1
kind: Config
clusters:
- cluster:
    certificate-authority: /etc/kubernetes/webhook/webhook.pem
    server: http://192.168.26.150:1323/image_policy
  name: bouncer_webhook
contexts:
- context:
    cluster: bouncer_webhook
    user: api-server
  name: bouncer_validator
current-context: bouncer_validator
preferences: {}
users:
- name: api-server
  user:
    client-certificate: /etc/kubernetes/webhook/apiserver-client.pem
    client-key:  /etc/kubernetes/webhook/apiserver-client-key.pem

  • Enable the ImagePolicyWebhook admission controller.
root@vms33:~# grep -i webhook -C2 /etc/kubernetes/manifests/kube-apiserver.yaml 

    - --enable-admission-plugins=NodeRestriction,ImagePolicyWebhook
    - --admission-control-config-file=/etc/kubernetes/webhook/admission_configuration.json

--
      name: usr-share-ca-certificates
      readOnly: true
    - mountPath: /etc/kubernetes/webhook
      name: webhook
      readOnly: true
  hostNetwork: true
--
  volumes:
  - hostPath:
      path: /etc/kubernetes/webhook
      type: DirectoryOrCreate
    name: webhook

  • Because the image tag is latest, the Pod creation now fails.
root@vms33:~# cat pod1.yaml 
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pod1
  name: pod1
spec:
  nodeName: vms34.rhce.cc
  terminationGracePeriodSeconds: 0
  containers:
  - image: centos:latest
    imagePullPolicy: IfNotPresent
    name: pod1
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}

root@vms33:~# kubectl apply -f pod1.yaml 
Error from server (Forbidden): error when creating "pod1.yaml": pods "pod1" is forbidden: image policy webhook backend denied one or more images: Images using latest tag are not allowed

  • Switching to an image with a non-latest tag works.
root@vms33:~# cat pod1.yaml 
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pod1
  name: pod1
spec:
  nodeName: vms34.rhce.cc
  terminationGracePeriodSeconds: 0
  containers:
  - image: nginx:v1
    imagePullPolicy: IfNotPresent
    name: pod1
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
root@vms33:~# kubectl apply -f pod1.yaml 
pod/pod1 created
root@vms33:~# kubectl get pod
NAME   READY   STATUS    RESTARTS   AGE
pod1   1/1     Running   0          10s

Troubleshooting kubectl errors

  • This error usually means that no kubeconfig file has been configured.
root@vms150:~# kubectl get node
The connection to the server localhost:8080 was refused - did you specify the right host or port?
  • This error usually means kube-apiserver is not running. Investigate with crictl/nerdctl, since kubectl itself is unusable at this point.
root@vms33:~# crictl logs e655d695c46a
I0911 10:48:45.150121       1 server.go:558] external host was not specified, using 192.168.26.33
I0911 10:48:45.150520       1 server.go:158] Version: v1.24.2
I0911 10:48:45.150533       1 server.go:160] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0911 10:48:45.445270       1 shared_informer.go:255] Waiting for caches to sync for node_authorizer
E0911 10:48:45.445409       1 run.go:74] "command failed" err="failed to initialize admission: couldn't init admission plugin \"ImagePolicyWebhook\": invalid configuration: unable to read client-key /etc/kubernetes/webhook/apiserver-client-key.pem for api-server due to open /etc/kubernetes/webhook/apiserver-client-key.pem: no such file or directory"
root@vms33:~# kubectl get node
The connection to the server 192.168.26.33:6443 was refused - did you specify the right host or port?
root@vms33:~# nerdctl ps -a| grep api
b0b0cda2f94e    registry.aliyuncs.com/google_containers/kube-apiserver:v1.24.2             "kube-apiserver --ad…"    2 minutes ago    Created             k8s://kube-system/kube-apiserver-vms33.rhce.cc/kube-apiserver                         
c82b9d1bb169    registry.aliyuncs.com/google_containers/pause:3.7                          "/pause"                  5 minutes ago    Up                  k8s://kube-system/kube-apiserver-vms33.rhce.cc                                        
root@vms33:~# crictl logs b0b0cda2f94e
I0911 08:24:51.169051       1 server.go:558] external host was not specified, using 192.168.26.33
E0911 08:24:51.169466       1 run.go:74] "command failed" err="disable-admission-plugins plugin \"xxx\" is unknown"

# Image Security Scanning

# Checking Image Security with Trivy

  • Trivy fetches the vulnerability database online and syncs it locally, then scans an image against that local database and reports security recommendations.

  • CVE stands for "Common Vulnerabilities and Exposures". CVE works like a dictionary: it assigns a common name to each widely recognized information-security vulnerability or exposure.

  • Using trivy
root@vms150:~# apt install ./trivy_0.14.0_Linux-64bit.deb
root@vms150:~# trivy calico/node:v3.23.1  # scan the image
  • Cache data (the local database grows as it syncs):
root@vms150:~/.cache# du -sh trivy/
9.1M	trivy/
root@vms150:~/.cache# du -sh trivy/
10M	trivy/
root@vms150:~/.cache# du -sh trivy/
11M	trivy/
root@vms150:~/.cache# pwd
/root/.cache

# Optimizing Dockerfiles

Building images with different tools

Images built with podman, docker, or containerd are interchangeable: they all follow the OCI standard. OCI defines the container runtime and image specifications so that different container implementations (LXC, Docker, Kata, rkt) run to the same standard, and developers can build, package, and deploy containers that run on any vendor's solution.

# Reducing the Number of Layers

Images are built in layers. During a build, a temporary container is started from the base image, the commands in the Dockerfile are executed inside it, and the result is committed as the new image. For example, a package installed with yum install xxx may only need 1 MB itself, but loading its environment costs around 100 MB; if each install is written as its own RUN line, every layer drags in that extra environment data, so combine them into a single line (see the sketch below).
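
The Dockerfiles behind the comparison below are not shown in the original; a minimal sketch of what they presumably look like (centos:test is the base image built earlier):

# centos:v1 - two RUN lines, two extra layers
FROM centos:test
RUN yum install -y net-tools
RUN yum install -y iproute
CMD ["/bin/bash"]

# centos:v2 - one RUN line, one extra layer
FROM centos:test
RUN yum install -y net-tools && yum install -y iproute
CMD ["/bin/bash"]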

root@vms150:~/dockerImage# docker history centos:v1
IMAGE          CREATED         CREATED BY                                      SIZE      COMMENT
c58b27d6c7a0   6 minutes ago   /bin/sh -c #(nop)  CMD ["/bin/bash"]            0B        
ebcae73370b7   6 minutes ago   /bin/sh -c yum install -y iproute               30.1MB    
1d98fb826614   7 minutes ago   /bin/sh -c yum install -y net-tools             27.9MB    
7a78ecf0acf1   7 minutes ago   bash                                            40.9MB    
5d0da3dc9764   12 months ago   /bin/sh -c #(nop)  CMD ["/bin/bash"]            0B        
<missing>      12 months ago   /bin/sh -c #(nop)  LABEL org.label-schema.sc…   0B        
<missing>      12 months ago   /bin/sh -c #(nop) ADD file:805cb5e15fb6e0bb0…   231MB 
root@vms150:~/dockerImage# docker history centos:v2
IMAGE          CREATED         CREATED BY                                      SIZE      COMMENT
f7fafe07be33   5 minutes ago   /bin/sh -c #(nop)  CMD ["/bin/bash"]            0B        
e86f4939da42   5 minutes ago   /bin/sh -c yum install -y net-tools && yum i…   30.8MB    # one fewer layer
7a78ecf0acf1   8 minutes ago   bash                                            40.9MB    
5d0da3dc9764   12 months ago   /bin/sh -c #(nop)  CMD ["/bin/bash"]            0B        
<missing>      12 months ago   /bin/sh -c #(nop)  LABEL org.label-schema.sc…   0B        
<missing>      12 months ago   /bin/sh -c #(nop) ADD file:805cb5e15fb6e0bb0…   231MB 
root@vms150:~/dockerImage# docker images
REPOSITORY                  TAG       IMAGE ID       CREATED          SIZE
centos                      v2        f7fafe07be33   3 minutes ago    303MB
centos                      v1        c58b27d6c7a0   5 minutes ago    330MB
centos                      test      7a78ecf0acf1   6 minutes ago    272MB

# Cleaning Up Unneeded Data
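
Clearing the package-manager cache in the same RUN line shrinks the layer further. The centos:v3 Dockerfile is not shown in the original, but from the history output below it is presumably:

FROM centos:test
RUN yum install -y net-tools && yum install -y iproute && yum clean all && rm -rf /var/cache/dnf
CMD ["/bin/bash"]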

root@vms150:~/dockerImage# docker history centos:v3 --no-trunc 
 CREATED BY                                                                                                   SIZE      
 /bin/sh -c #(nop)  CMD ["/bin/bash"]                                                                         0B        
 /bin/sh -c yum install -y net-tools && yum install -y iproute && yum clean all && rm -rf /var/cache/dnf      17.1MB    
 bash                                                                                                         40.9MB    
 /bin/sh -c #(nop)  CMD ["/bin/bash"]                                                                         0B        
 /bin/sh -c #(nop)  LABEL org.label-schema.schema-version=1.0 org.label-schema.name=CentOS Base Image org. 0B 
 /bin/sh -c #(nop) ADD file:805cb5e15fb6e0bb0326ca33fd2942e068863ce2a8491bb71522c652f31fb466 in /             231MB 
root@vms150:~/dockerImage# docker images
REPOSITORY                  TAG       IMAGE ID       CREATED          SIZE
centos                      v3        03f39c6dc8d3   6 minutes ago    289MB
centos                      v2        f7fafe07be33   9 minutes ago    303MB
centos                      v1        c58b27d6c7a0   11 minutes ago   330MB
centos                      test      7a78ecf0acf1   12 minutes ago   272MB

# Multi-Stage Builds

  • The first stage builds in a temporary container and hands its build artifacts to the second stage.
root@vms150:~/dockerImage# cat Dockerfile
FROM centos:test
RUN yum install -y net-tools && yum install -y  iproute
FROM centos:test
COPY --from=0 /usr/sbin/ip /usr/sbin/ip
COPY --from=0 /usr/sbin/ifconfig /usr/sbin/ifconfig
CMD ["/bin/bash"]
root@vms150:~/dockerImage# docker images | grep cent
centos                      v4        eae1bddf9c38   20 seconds ago      273MB
centos                      v3        03f39c6dc8d3   About an hour ago   289MB
centos                      v2        f7fafe07be33   2 hours ago         303MB
centos                      v1        c58b27d6c7a0   2 hours ago         330MB
centos                      test      7a78ecf0acf1   2 hours ago         272MB
centos                      latest    5d0da3dc9764   12 months ago       231MB
  • Basing the second stage on centos:latest shrinks the image even further.
root@vms150:~/dockerImage# docker images | grep cent
centos                      v5        53e656ebda95   27 seconds ago   232MB
centos                      v4        eae1bddf9c38   7 minutes ago    273MB
centos                      v3        03f39c6dc8d3   2 hours ago      289MB
centos                      v2        f7fafe07be33   2 hours ago      303MB
centos                      v1        c58b27d6c7a0   2 hours ago      330MB
centos                      test      7a78ecf0acf1   2 hours ago      272MB
centos                      latest    5d0da3dc9764   12 months ago    231MB
root@vms150:~/dockerImage# cat Dockerfile
FROM centos:test
RUN yum install -y net-tools && yum install -y  iproute

FROM centos:latest
COPY --from=0 /usr/sbin/ip /usr/sbin/ip
COPY --from=0 /usr/sbin/ifconfig /usr/sbin/ifconfig
CMD ["/bin/bash"]

The linker/loader (ld) reports that a shared library cannot be found: copying only the binary is not enough (see the sketch after the output below).

root@vms150:~/dockerImage# docker run -it -u root --name=v5 --rm centos:v5 bash 
[root@fedcf22c3632 /]# ip      ## note the error
ip: error while loading shared libraries: libbpf.so.0: cannot open shared object file: No such file or directory
[root@fedcf22c3632 /]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 172.17.0.5  netmask 255.255.0.0  broadcast 172.17.255.255
        ether 02:42:ac:11:00:05  txqueuelen 0  (Ethernet)
        RX packets 7  bytes 586 (586.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        loop  txqueuelen 1000  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 
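
A minimal sketch (not from the original) of how the missing libraries could be located in the build stage and copied along with the binary; the exact library names and paths would need to be confirmed with ldd:

# inside the first stage (centos:test with iproute installed)
ldd /usr/sbin/ip          # lists the shared libraries the binary needs, e.g. libbpf.so.0

# in the Dockerfile, copy those libraries too (paths are illustrative)
COPY --from=0 /usr/lib64/libbpf.so.0 /usr/lib64/libbpf.so.0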

# Running the Container as a Specific User

root@vms150:~/dockerImage# docker images | grep cen
centos                      v6        87982ff64db7   17 seconds ago   233MB
centos                      v5        53e656ebda95   15 minutes ago   232MB
centos                      v4        eae1bddf9c38   23 minutes ago   273MB
centos                      v3        03f39c6dc8d3   2 hours ago      289MB
centos                      v2        f7fafe07be33   2 hours ago      303MB
centos                      v1        c58b27d6c7a0   2 hours ago      330MB
centos                      test      7a78ecf0acf1   2 hours ago      272MB
centos                      latest    5d0da3dc9764   12 months ago    231MB
root@vms150:~/dockerImage# cat Dockerfile

FROM centos:test
RUN yum install -y net-tools && yum install -y  iproute

FROM centos:latest
COPY --from=0 /usr/sbin/ip /usr/sbin/ip
COPY --from=0 /usr/sbin/ifconfig /usr/sbin/ifconfig
RUN groupadd -g 2000 tgrp && useradd -u 2000 -g 2000 tadmin
USER tadmin 
CMD ["/bin/bash"]

root@vms150:~/dockerImage# docker run -it --name=v6 --rm centos:v6 bash 
[tadmin@518a7f0938d3 /]$ id
uid=2000(tadmin) gid=2000(tgrp) groups=2000(tgrp)

# Checking YAML File Security

  • Install kubesec and scan the Pod manifest.
root@vms33:~# kubesec scan pod1.yaml 
[
  {
    "object": "Pod/pod1.default",
    "valid": true,
    "message": "Passed with a score of 0 points",
    "score": 0,
    "scoring": {
      "advise": [
        {
          "selector": ".metadata .annotations .\"container.apparmor.security.beta.kubernetes.io/nginx\"",
          "reason": "Well defined AppArmor policies may provide greater protection from unknown threats. WARNING: NOT PRODUCTION READY",
          "points": 3
        },
        {
          "selector": ".spec .serviceAccountName",
          "reason": "Service accounts restrict Kubernetes API access and should be configured with least privilege",
          "points": 3
        },
        {
          "selector": ".metadata .annotations .\"container.seccomp.security.alpha.kubernetes.io/pod\"",
          "reason": "Seccomp profiles set minimum privilege and secure against unknown threats",
          "points": 1
        },
        {
          "selector": "containers[] .resources .limits .cpu",
          "reason": "Enforcing CPU limits prevents DOS via resource exhaustion",
          "points": 1
        },
        {
          "selector": "containers[] .resources .limits .memory",
          "reason": "Enforcing memory limits prevents DOS via resource exhaustion",
          "points": 1
        },
        {
          "selector": "containers[] .resources .requests .cpu",
          "reason": "Enforcing CPU requests aids a fair balancing of resources across the cluster",
          "points": 1
        },
        {
          "selector": "containers[] .resources .requests .memory",
          "reason": "Enforcing memory requests aids a fair balancing of resources across the cluster",
          "points": 1
        },
        {
          "selector": "containers[] .securityContext .capabilities .drop",
          "reason": "Reducing kernel capabilities available to a container limits its attack surface",
          "points": 1
        },
        {
          "selector": "containers[] .securityContext .capabilities .drop | index(\"ALL\")",
          "reason": "Drop all capabilities and add only those required to reduce syscall attack surface",
          "points": 1
        },
        {
          "selector": "containers[] .securityContext .readOnlyRootFilesystem == true",
          "reason": "An immutable root filesystem can prevent malicious binaries being added to PATH and increase attack cost",
          "points": 1
        },
        {
          "selector": "containers[] .securityContext .runAsNonRoot == true",
          "reason": "Force the running image to run as a non-root user to ensure least privilege",
          "points": 1
        },
        {
          "selector": "containers[] .securityContext .runAsUser -gt 10000",
          "reason": "Run as a high-UID user to avoid conflicts with the host's user table",
          "points": 1
        }
      ]
    }
  }
]
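
A hedged sketch of how pod1.yaml could be adjusted to pick up several of the advised points above; the values and the ServiceAccount name are illustrative, not from the original:

apiVersion: v1
kind: Pod
metadata:
  name: pod1
spec:
  serviceAccountName: pod1-sa          # a dedicated least-privilege ServiceAccount (hypothetical)
  containers:
  - name: pod1
    image: nginx:v1
    resources:
      requests:
        cpu: 100m
        memory: 100Mi
      limits:                          # CPU/memory limits guard against resource exhaustion
        cpu: 200m
        memory: 200Mi
    securityContext:
      runAsNonRoot: true
      runAsUser: 10001                 # high UID, as advised
      readOnlyRootFilesystem: true
      capabilities:
        drop: ["ALL"]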

# Storing Registry Credentials in a Secret

If the registry project is not public, a node must log in before it can pull images. Since you cannot know in advance which node a Pod will be scheduled to, logging in on every node is tedious; instead, store the registry credentials in a Secret and reference that Secret when creating the Pod.

# Installing Harbor

root@vms150:~/harbor# egrep -v "^#|\s#" harbor.yml
hostname: 192.168.26.150

http:
  port: 80

harbor_admin_password: xxxx

database:
  password: xxx
  max_idle_conns: 100
  max_open_conns: 900

data_volume: /data

trivy:
  ignore_unfixed: false
  skip_update: false
  offline_scan: false
  insecure: false

jobservice:
  max_job_workers: 10

notification:
  webhook_job_max_retry: 10

chart:
  absolute_url: disabled

log:
  level: info
  local:
    rotate_count: 50
    rotate_size: 200M
    location: /var/log/harbor
upload_purging:
  enabled: true
  age: 168h
  interval: 24h
  dryrun: false

cache:
  enabled: false
  expire_hours: 24
root@vms150:~/harbor# ./install.sh

# Configuring containerd to Skip HTTPS

root@vms33:~# grep registry -C2 /etc/containerd/config.toml 

    [plugins."io.containerd.grpc.v1.cri".registry]
      config_path = ""

      [plugins."io.containerd.grpc.v1.cri".registry.auths]

     
      [plugins."io.containerd.grpc.v1.cri".registry.headers]

      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
          endpoint = ["https://frz7i079.mirror.aliyuncs.com"]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."192.168.26.150"]
          endpoint = ["http://192.168.26.150"]

# Logging In, Pushing and Pulling Images

root@vms33:~# nerdctl login 192.168.26.150 --insecure-registry
root@vms33:~# nerdctl push 192.168.26.150/baseos/nginx:v1 --insecure-registry
  • Create a Pod; without credentials, the image pull fails.
root@vms33:~# kubectl describe pod pod1 | grep Event  -A10
Events:
  Type     Reason   Age                From     Message
  ----     ------   ----               ----     -------
  Normal   Pulling  17s (x3 over 61s)  kubelet  Pulling image "192.168.26.150/baseos/nginx:v1"
  Warning  Failed   17s (x3 over 60s)  kubelet  Failed to pull image "192.168.26.150/baseos/nginx:v1": rpc error: code = Unknown desc = failed to pull and unpack image "192.168.26.150/baseos/nginx:v1": failed to resolve reference "192.168.26.150/baseos/nginx:v1": failed to do request: Head "https://192.168.26.150/v2/baseos/nginx/manifests/v1": dial tcp 192.168.26.150:443: connect: connection refused
  Warning  Failed   17s (x3 over 60s)  kubelet  Error: ErrImagePull
  Normal   BackOff  6s (x4 over 59s)   kubelet  Back-off pulling image "192.168.26.150/baseos/nginx:v1"
  Warning  Failed   6s (x4 over 59s)   kubelet  Error: ImagePullBackOff

# Creating the Secret and Referencing It in the Pod

root@vms33:~# kubectl create secret docker-registry my-secret --docker-server=192.168.26.150 --docker-username=admin --docker-password=xxx
secret/my-secret created
root@vms33:~# kubectl get secrets my-secret 
NAME        TYPE                             DATA   AGE
my-secret   kubernetes.io/dockerconfigjson   1      15s
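To check what the Secret actually contains, the dockerconfigjson payload can be decoded (a standard kubectl invocation, not from the original transcript):

kubectl get secret my-secret -o jsonpath='{.data.\.dockerconfigjson}' | base64 -d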
# On the target node, adjust containerd's registry config so HTTPS access is automatically redirected to HTTP
root@vms34:~# grep mirror -A3 /etc/containerd/config.toml 
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
          endpoint = ["https://frz7i079.mirror.aliyuncs.com"]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."192.168.26.150"]
          endpoint = ["http://192.168.26.150"]

root@vms33:~# cat pod1.yaml 
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: pod1
  name: pod1
spec:
  nodeName: vms34.rhce.cc
  terminationGracePeriodSeconds: 0
  imagePullSecrets:     ## reference the secret
  - name: my-secret
  containers:
  - image: 192.168.26.150/baseos/nginx:v1
    imagePullPolicy: IfNotPresent
    name: pod1
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}
root@vms33:~# kubectl apply -f pod1.yaml 
pod/pod1 created
root@vms33:~# kubectl get pod
NAME   READY   STATUS    RESTARTS   AGE
pod1   1/1     Running   0          6s