longhorn-强制删除命名空间
原创大约 6 分钟
问题现象
# kubectl -n longhorn-system get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
longhorn-csi-plugin-b2kkk 0/3 ContainerCreating 0 10m <none> 10.0.1.201 <none> <none>
longhorn-manager-rjjzr 0/1 ContainerCreating 0 49m <none> 10.0.1.201 <none> <none>

# kubectl -n longhorn-system describe pod longhorn-csi-plugin-b2kkk
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 11m default-scheduler Successfully assigned longhorn-system/longhorn-csi-plugin-b2kkk to 10.0.1.201
Normal Pulled 10m kubelet Container image "longhornio/csi-node-driver-registrar:v2.7.0" already present on machine
Normal Created 10m kubelet Created container node-driver-registrar
Normal Started 10m kubelet Started container node-driver-registrar
Normal Pulled 10m kubelet Container image "longhornio/livenessprobe:v2.9.0" already present on machine
Normal Created 10m kubelet Created container longhorn-liveness-probe
Normal Started 10m kubelet Started container longhorn-liveness-probe
Normal Pulling 10m kubelet Pulling image "longhornio/longhorn-manager:v1.5.3"

# kubectl -n longhorn-system describe pod longhorn-manager-rjjzr
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 51m default-scheduler Successfully assigned longhorn-system/longhorn-manager-rjjzr to 10.0.1.201
Normal Pulling 50m kubelet Pulling image "longhornio/longhorn-manager:v1.5.3"
Normal SandboxChanged 15m kubelet Pod sandbox changed, it will be killed and re-created.
Normal Pulling 12m kubelet Pulling image "longhornio/longhorn-manager:v1.5.3"

longhornio/longhorn-manager:v1.5.3
https://hub.docker.com/r/longhornio/longhorn-manager/tags?page=&page_size=&ordering=&name=1.5.3
在 Docker Hub 上确实有 longhornio/longhorn-manager:v1.5.3。

解决
手动导入镜像
# ctr namespace ls
NAME LABELS
k8s.io
# containerd需要指定命名空间导入镜像
# docker pull nginx:1.21.6 && docker save nginx:1.21.6 > nginx-1.21.6.tar
# ctr -n k8s.io images import nginx-1.21.6.tar
# 查看所有镜像
# ctr -n k8s.io images ls | grep nginx

# 查看使用了哪些镜像
# kubectl -n longhorn-system describe pod longhorn-csi-plugin-djnlm
Containers:
node-driver-registrar:
Container ID: containerd://04f05918e53146c7ab8a33e4867a3de5c2e8cae40e6c14596736be8c0a0dd336
Image: longhornio/csi-node-driver-registrar:v2.7.0
Image ID: docker.io/longhornio/csi-node-driver-registrar@sha256:4ffdfca7630c81006c9dad7ce2407df238516522fce6e37864cfc5c3f781bed9
longhorn-liveness-probe:
Container ID: containerd://f97da62f1a663f9cbc33e1edd1460f1856b3238f66512724a3a4313d93f51b8b
Image: longhornio/livenessprobe:v2.9.0
Image ID: docker.io/longhornio/livenessprobe@sha256:555e27888f3692ab4130616ce4de62f950bdac16da3af4c432e13a71604e0261
longhorn-csi-plugin:
Container ID: containerd://b7802bc31cd4e9f913341b4c2879cb570d7284a896966f03034840fecb44eab4
Image: longhornio/longhorn-manager:v1.5.3
Image ID: docker.io/longhornio/longhorn-manager@sha256:b5769e761b172b441567c11416c3181078e29dea450683eb1bc66192b0caa494

# 搜索现在有哪些镜像
# ctr -n k8s.io images ls | grep longhornio
docker.io/longhornio/csi-node-driver-registrar:v2.7.0 application/vnd.docker.distribution.manifest.list.v2+json sha256:4ffdfca7630c81006c9dad7ce2407df238516522fce6e37864cfc5c3f781bed9 9.7 MiB linux/amd64,linux/arm/v7,linux/arm64,linux/s390x,windows/amd64 io.cri-containerd.image=managed
docker.io/longhornio/csi-node-driver-registrar@sha256:4ffdfca7630c81006c9dad7ce2407df238516522fce6e37864cfc5c3f781bed9 application/vnd.docker.distribution.manifest.list.v2+json sha256:4ffdfca7630c81006c9dad7ce2407df238516522fce6e37864cfc5c3f781bed9 9.7 MiB linux/amd64,linux/arm/v7,linux/arm64,linux/s390x,windows/amd64 io.cri-containerd.image=managed
docker.io/longhornio/livenessprobe:v2.9.0 application/vnd.docker.distribution.manifest.list.v2+json sha256:555e27888f3692ab4130616ce4de62f950bdac16da3af4c432e13a71604e0261 8.8 MiB linux/amd64,linux/arm/v7,linux/arm64,linux/s390x,windows/amd64 io.cri-containerd.image=managed
docker.io/longhornio/livenessprobe@sha256:555e27888f3692ab4130616ce4de62f950bdac16da3af4c432e13a71604e0261 application/vnd.docker.distribution.manifest.list.v2+json sha256:555e27888f3692ab4130616ce4de62f950bdac16da3af4c432e13a71604e0261 8.8 MiB linux/amd64,linux/arm/v7,linux/arm64,linux/s390x,windows/amd64 io.cri-containerd.image=managed
docker.io/longhornio/longhorn-engine:v1.5.3 application/vnd.docker.distribution.manifest.list.v2+json sha256:3fb3b5d911242514e996941efb411eee3c926ec3b9766b514deddda9e6d18924 265.3 MiB linux/amd64,linux/arm64,linux/s390x io.cri-containerd.image=managed
docker.io/longhornio/longhorn-engine@sha256:3fb3b5d911242514e996941efb411eee3c926ec3b9766b514deddda9e6d18924 application/vnd.docker.distribution.manifest.list.v2+json sha256:3fb3b5d911242514e996941efb411eee3c926ec3b9766b514deddda9e6d18924 265.3 MiB linux/amd64,linux/arm64,linux/s390x io.cri-containerd.image=managed

没有找到 longhornio/longhorn-manager:v1.5.3
# containerd需要指定命名空间导入镜像
# 下载打包
# docker pull longhornio/longhorn-manager:v1.5.3 && docker save longhornio/longhorn-manager:v1.5.3 > longhorn-manager-1.5.3.tar
v1.5.3: Pulling from longhornio/longhorn-manager
c0f002a71c84: Pull complete
270fa7aa4245: Pull complete
067cb336aaee: Pull complete
Digest: sha256:b5769e761b172b441567c11416c3181078e29dea450683eb1bc66192b0caa494
Status: Downloaded newer image for longhornio/longhorn-manager:v1.5.3
docker.io/longhornio/longhorn-manager:v1.5.3
# 导入
# ctr -n k8s.io images import longhorn-manager-1.5.3.tar
unpacking docker.io/longhornio/longhorn-manager:v1.5.3 (sha256:aa52f71041f88964f2373f5fd56c7dd699bd00c0630ccdfe06168a62a95db472)...done
# 查看
ctr -n k8s.io images ls | grep longhornio
# ctr -n k8s.io images ls | grep longhorn-manager

需求
docker.io/longhornio/longhorn-manager@sha256:b5769e761b172b441567c11416c3181078e29dea450683eb1bc66192b0caa494
https://hub.docker.com/layers/longhornio/longhorn-manager/v1.5.3/images/sha256-b5769e761b172b441567c11416c3181078e29dea450683eb1bc66192b0caa494?context=explore
显示
Layer details are not available for this image.

删除命名空间
https://learn.microsoft.com/zh-cn/troubleshoot/azure/azure-kubernetes/storage/pods-namespaces-terminating-state
https://blog.csdn.net/Michaelwubo/article/details/110878183
https://www.cnblogs.com/zhangzhide/p/14953915.html
在用longhorn工具做k8s存储卷动态预配的时候,需要修改longhorn.yaml的一个默认参数,修改完成需要重新加载longhorn.yaml,结果重新加载出错了,修改的参数没有生效,于是执行kubectl delete -f longhorn.yaml想将部署的资源全部删除重新启动;但是发现创建的namespace无法删除,状态一直是Terminating;
[root@k8smaster longhorn]# kubectl get ns
NAME STATUS AGE
default Active 49d
dev Active 29d
kube-node-lease Active 49d
kube-public Active 49d
kube-system Active 49d
longhorn-system Terminating 11h
stage Active 29d
[root@k8smaster longhorn]# kubectl delete ns longhorn-system
Error from server (Conflict): Operation cannot be fulfilled on namespaces "longhorn-system": The system is ensuring all content is removed from this namespace. Upon completion, this namespace will automatically be purged by the system.
从报错来看是因为该命名空间中还有未删除的内容,但是查看了该命名空间,其中已经没有运行的 Pod 了。

kubectl delete ns 命名空间的名字 --force --grace-period=0
kubectl delete ns es --force --grace-period=0
kubectl delete pod --grace-period=0 --force quickstart-es-default-0 -n es

解决方式:通过 API 来删除
# 先查找到该namespace的api接口地址
[root@k8smaster longhorn]# kubectl get ns/longhorn-system -o yaml
apiVersion: v1
kind: Namespace
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"v1","kind":"Namespace","metadata":{"annotations":{},"name":"longhorn-system"}}
creationTimestamp: "2021-06-29T15:43:23Z"
deletionTimestamp: "2021-06-30T02:48:32Z"
name: longhorn-system
resourceVersion: "6197365"
selfLink: /api/v1/namespaces/longhorn-system
uid: bcc59118-d8f0-11eb-b1e9-000c29087c24
spec:
finalizers:
- kubernetes
status:
phase: Terminating

导出该 namespace JSON 格式的详细信息
[root@k8smaster longhorn]# kubectl get ns longhorn-system -o json > longhorn-system.json
[root@k8smaster longhorn]# cat longhorn-system.json
{
"apiVersion": "v1",
"kind": "Namespace",
"metadata": {
"annotations": {
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Namespace\",\"metadata\":{\"annotations\":{},\"name\":\"longhorn-system\"}}\n"
},
"creationTimestamp": "2021-06-29T15:43:23Z",
"deletionTimestamp": "2021-06-30T02:48:32Z",
"name": "longhorn-system",
"resourceVersion": "6197365",
"selfLink": "/api/v1/namespaces/longhorn-system",
"uid": "bcc59118-d8f0-11eb-b1e9-000c29087c24"
},
"spec": {
"finalizers": [
"kubernetes"
]
},
"status": {
"phase": "Terminating"
}
}
删除finalizers的认证方式;
删除后的json文件;
[root@k8smaster longhorn]# cat longhorn-system.json
{
"apiVersion": "v1",
"kind": "Namespace",
"metadata": {
"annotations": {
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Namespace\",\"metadata\":{\"annotations\":{},\"name\":\"longhorn-system\"}}\n"
},
"creationTimestamp": "2021-06-29T15:43:23Z",
"deletionTimestamp": "2021-06-30T02:48:32Z",
"name": "longhorn-system",
"resourceVersion": "6197365",
"selfLink": "/api/v1/namespaces/longhorn-system",
"uid": "bcc59118-d8f0-11eb-b1e9-000c29087c24"
},
"spec": {
"finalizers": [
]
},
"status": {
"phase": "Terminating"
}
}

调用接口删除该 namespace
1、因为k8s接口默认使用https访问的,所以需要临时开一个HTTP代理端口
[root@k8smaster ~]# kubectl proxy
Starting to serve on 127.0.0.1:8001
2、新开一个终端,执行调用接口命令,注意接口最后加上finalize
[root@k8smaster longhorn]# curl -k -H "Content-Type: application/json" -X PUT --data-binary @longhorn-system.json http://127.0.0.1:8001/api/v1/namespaces/longhorn-system/finalize
{
"kind": "Namespace",
"apiVersion": "v1",
"metadata": {
"name": "longhorn-system",
"selfLink": "/api/v1/namespaces/longhorn-system/finalize",
"uid": "bcc59118-d8f0-11eb-b1e9-000c29087c24",
"resourceVersion": "6202444",
"creationTimestamp": "2021-06-29T15:43:23Z",
"deletionTimestamp": "2021-06-30T02:48:32Z",
"annotations": {
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Namespace\",\"metadata\":{\"annotations\":{},\"name\":\"longhorn-system\"}}\n"
}
},
"spec": {
},
"status": {
"phase": "Terminating"
}
}
3、查看k8s的ns,发现longhorn-system被删除了
[root@k8smaster longhorn]#kubectl get ns
NAME STATUS AGE
default Active 49d
dev Active 29d
kube-node-lease Active 49d
kube-public Active 49d
kube-system Active 49d
stage Active 29d

reboot
删除PV
https://blog.csdn.net/margu_168/article/details/130579236
kubectl patch pv pvc-8e4b8b3e-b364-44e8-9194-de7a59327074 -p '{"metadata":{"finalizers":null}}'

kubectl get pv
发现没有了。

kubectl get pods | grep Terminating | awk '{print $1}' | xargs kubectl delete pod

kubectl -n es delete pod quickstart-kb-5c77f999bd-9f9fj --force --grace-period=0