K8s集群api server
原创大约 13 分钟
场景
只启动一个主节点
k8s原本有2个master2个work node,现在只启动一个主节点,执行命令显示
# kubectl get pod
E0719 09:43:11.337292 4693 memcache.go:265] couldn't get current server API group list: Get "https://10.0.1.201:6443/api?timeout=32s": dial tcp 10.0.1.201:6443: connect: connection refused - error from a previous attempt: read tcp 10.0.1.201:42614->10.0.1.201:6443: read: connection reset by peer
E0719 09:43:11.339107 4693 memcache.go:265] couldn't get current server API group list: Get "https://10.0.1.201:6443/api?timeout=32s": dial tcp 10.0.1.201:6443: connect: connection refused
E0719 09:43:11.339343 4693 memcache.go:265] couldn't get current server API group list: Get "https://10.0.1.201:6443/api?timeout=32s": dial tcp 10.0.1.201:6443: connect: connection refused
E0719 09:43:11.340823 4693 memcache.go:265] couldn't get current server API group list: Get "https://10.0.1.201:6443/api?timeout=32s": dial tcp 10.0.1.201:6443: connect: connection refused
E0719 09:43:11.343356 4693 memcache.go:265] couldn't get current server API group list: Get "https://10.0.1.201:6443/api?timeout=32s": dial tcp 10.0.1.201:6443: connect: connection refused
The connection to the server 10.0.1.201:6443 was refused - did you specify the right host or port?

服务状态
● kube-apiserver.service - Kubernetes API Server
Loaded: loaded (/etc/systemd/system/kube-apiserver.service; enabled; vendor preset: enabled)
Active: activating (start) since Fri 2024-07-19 10:28:59 CST; 1s ago
Docs: https://github.com/GoogleCloudPlatform/kubernetes
Main PID: 5553 (kube-apiserver)
Tasks: 9 (limit: 4514)
Memory: 25.8M
CPU: 397ms
CGroup: /system.slice/kube-apiserver.service
└─5553 /opt/kube/bin/kube-apiserver --allow-privileged=true --anonymous-auth=false --api-audiences=api,istio-ca --authorization-mode=Node,RBAC --bind-address=10.0.1.201 --client-ca-file=/etc/kubernetes/ssl/ca.pem --endpoint-reconciler-type=lease --etcd-cafile=/etc/kubernetes/ssl/ca.pem --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem --etcd-servers=https://10.0.1.201:2379,https://10.0.1.202:2379,https://10.0.1.203:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/ca.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kubernetes.pem --kubelet-client-key=/etc/kubernetes/ssl/kubernetes-key.pem --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem --service-account-key-file=/etc/kubernetes/ssl/ca.pem --service-cluster-ip-range=10.68.0.0/16 --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/ca.pem --requestheader-allowed-names= --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --proxy-client-cert-file=/etc/kubernetes/ssl/aggregator-proxy.pem --proxy-client-key-file=/etc/kubernetes/ssl/aggregator-proxy-key.pem --enable-aggregator-routing=true --v=2
Jul 19 10:28:59 node-1 kube-apiserver[5553]: I0719 10:28:59.914494 5553 dynamic_cafile_content.go:119] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/ssl/ca.pem"
Jul 19 10:28:59 node-1 kube-apiserver[5553]: I0719 10:28:59.914678 5553 dynamic_cafile_content.go:119] "Loaded a new CA Bundle and Verifier" name="request-header::/etc/kubernetes/ssl/ca.pem"
Jul 19 10:28:59 node-1 kube-apiserver[5553]: W0719 10:28:59.918750 5553 logging.go:59] [core] [Channel #1 SubChannel #2] grpc: addrConn.createTransport failed to connect to {
Jul 19 10:28:59 node-1 kube-apiserver[5553]: "Addr": "10.0.1.201:2379",
Jul 19 10:28:59 node-1 kube-apiserver[5553]: "ServerName": "10.0.1.201",
Jul 19 10:28:59 node-1 kube-apiserver[5553]: "Attributes": null,
Jul 19 10:28:59 node-1 kube-apiserver[5553]: "BalancerAttributes": null,
Jul 19 10:28:59 node-1 kube-apiserver[5553]: "Type": 0,
Jul 19 10:28:59 node-1 kube-apiserver[5553]: "Metadata": null
Jul 19 10:28:59 node-1 kube-apiserver[5553]: }. Err: connection error: desc = "transport: Error while dialing dial tcp 10.0.1.201:2379: connect: connection refused"

连接不上 10.0.1.201 的 etcd
root@node-1:~# systemctl status etcd
● etcd.service - Etcd Server
Loaded: loaded (/etc/systemd/system/etcd.service; enabled; vendor preset: enabled)
Active: activating (start) since Fri 2024-07-19 10:34:28 CST; 1min 23s ago
Docs: https://github.com/coreos
Main PID: 7856 (etcd)
Tasks: 7 (limit: 4514)
Memory: 19.2M
CPU: 1.798s
CGroup: /system.slice/etcd.service
└─7856 /opt/kube/bin/etcd --name=etcd-10.0.1.201 --cert-file=/etc/kubernetes/ssl/etcd.pem --key-file=/etc/kubernetes/ssl/etcd-key.pem --peer-cert-file=/etc/kubernetes/ssl/etcd.pem --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem --trusted-ca-file=/etc/kubernetes/ssl/ca.pem --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem --initial-advertise-peer-urls=https://10.0.1.201:2380 --listen-peer-urls=https://10.0.1.201:2380 --listen-client-urls=https://10.0.1.201:2379,http://127.0.0.1:2379 --advertise-client-urls=https://10.0.1.201:2379 --initial-cluster-token=etcd-cluster-0 --initial-cluster=etcd-10.0.1.201=https://10.0.1.201:2380,etcd-10.0.1.202=https://10.0.1.202:2380,etcd-10.0.1.203=https://10.0.1.203:2380 --initial-cluster-state=new --data-dir=/var/lib/etcd --wal-dir= --snapshot-count=50000 --auto-compaction-retention=1 --auto-compaction-mode=periodic --max-request-bytes=10485760 --quota-backend-bytes=8589934592
Jul 19 10:35:49 node-1 etcd[7856]: {"level":"info","ts":"2024-07-19T10:35:49.573408+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"8101c35cd09528f6 is starting a new election at term 163"}
Jul 19 10:35:49 node-1 etcd[7856]: {"level":"info","ts":"2024-07-19T10:35:49.573471+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"8101c35cd09528f6 became pre-candidate at term 163"}
Jul 19 10:35:49 node-1 etcd[7856]: {"level":"info","ts":"2024-07-19T10:35:49.573486+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"8101c35cd09528f6 received MsgPreVoteResp from 8101c35cd09528f6 at term 163"}
Jul 19 10:35:49 node-1 etcd[7856]: {"level":"info","ts":"2024-07-19T10:35:49.573502+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"8101c35cd09528f6 [logterm: 163, index: 900088] sent MsgPreVote request to 74b215bed61842ec at term 163"}
Jul 19 10:35:49 node-1 etcd[7856]: {"level":"info","ts":"2024-07-19T10:35:49.573511+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"8101c35cd09528f6 [logterm: 163, index: 900088] sent MsgPreVote request to e38111e340e17236 at term 163"}
Jul 19 10:35:50 node-1 etcd[7856]: {"level":"info","ts":"2024-07-19T10:35:50.873824+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"8101c35cd09528f6 is starting a new election at term 163"}
Jul 19 10:35:50 node-1 etcd[7856]: {"level":"info","ts":"2024-07-19T10:35:50.873903+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"8101c35cd09528f6 became pre-candidate at term 163"}
Jul 19 10:35:50 node-1 etcd[7856]: {"level":"info","ts":"2024-07-19T10:35:50.873928+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"8101c35cd09528f6 received MsgPreVoteResp from 8101c35cd09528f6 at term 163"}
Jul 19 10:35:50 node-1 etcd[7856]: {"level":"info","ts":"2024-07-19T10:35:50.873947+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"8101c35cd09528f6 [logterm: 163, index: 900088] sent MsgPreVote request to 74b215bed61842ec at term 163"}
Jul 19 10:35:50 node-1 etcd[7856]: {"level":"info","ts":"2024-07-19T10:35:50.873957+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"8101c35cd09528f6 [logterm: 163, index: 900088] sent MsgPreVote request to e38111e340e17236 at term 163"}

journalctl -u etcd -e

现在只启动一个主节点和一个work node,执行命令显示
# kubectl get pod
No resources found in default namespace.

启动一个主节点和一个work node,然后关闭work node,执行命令显示
# kubectl get pod
Error from server: etcdserver: request timed out

systemctl list-units --type=service
journalctl -u kube-apiserver

# ps -ef | grep kube-apiserver
root 5841 1 4 09:45 ? 00:00:00 /opt/kube/bin/kube-apiserver --allow-privileged=true --anonymous-auth=false --api-audiences=api,istio-ca --authorization-mode=Node,RBAC --bind-address=10.0.1.201 --client-ca-file=/etc/kubernetes/ssl/ca.pem --endpoint-reconciler-type=lease --etcd-cafile=/etc/kubernetes/ssl/ca.pem --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem --etcd-servers=https://10.0.1.201:2379,https://10.0.1.202:2379,https://10.0.1.203:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/ca.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kubernetes.pem --kubelet-client-key=/etc/kubernetes/ssl/kubernetes-key.pem --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem --service-account-key-file=/etc/kubernetes/ssl/ca.pem --service-cluster-ip-range=10.68.0.0/16 --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/ca.pem --requestheader-allowed-names= --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --proxy-client-cert-file=/etc/kubernetes/ssl/aggregator-proxy.pem --proxy-client-key-file=/etc/kubernetes/ssl/aggregator-proxy-key.pem --enable-aggregator-routing=true --v=2

/opt/kube/bin/kube-apiserver \
--allow-privileged=true \
--anonymous-auth=false \
--api-audiences=api,istio-ca \
--authorization-mode=Node,RBAC \
--bind-address=10.0.1.201 \
--client-ca-file=/etc/kubernetes/ssl/ca.pem \
--endpoint-reconciler-type=lease \
--etcd-cafile=/etc/kubernetes/ssl/ca.pem \
--etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem \
--etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem \
--etcd-servers=https://10.0.1.201:2379,https://10.0.1.202:2379,https://10.0.1.203:2379 \
--kubelet-certificate-authority=/etc/kubernetes/ssl/ca.pem \
--kubelet-client-certificate=/etc/kubernetes/ssl/kubernetes.pem \
--kubelet-client-key=/etc/kubernetes/ssl/kubernetes-key.pem \
--secure-port=6443 \
--service-account-issuer=https://kubernetes.default.svc \
--service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
--service-account-key-file=/etc/kubernetes/ssl/ca.pem \
--service-cluster-ip-range=10.68.0.0/16 \
--service-node-port-range=30000-32767 \
--tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
--tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
--requestheader-client-ca-file=/etc/kubernetes/ssl/ca.pem \
--requestheader-allowed-names= \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-group-headers=X-Remote-Group \
--requestheader-username-headers=X-Remote-User \
--proxy-client-cert-file=/etc/kubernetes/ssl/aggregator-proxy.pem \
--proxy-client-key-file=/etc/kubernetes/ssl/aggregator-proxy-key.pem \
--enable-aggregator-routing=true \
--v=2

# telnet 10.0.1.201 6443
Trying 10.0.1.201...
Connected to 10.0.1.201.
Escape character is '^]'.

root@node-1:~# systemctl status kube-apiserver
● kube-apiserver.service - Kubernetes API Server
Loaded: loaded (/etc/systemd/system/kube-apiserver.service; enabled; vendor preset: enabled)
Active: active (running) since Fri 2024-07-19 09:46:46 CST; 16min ago
Docs: https://github.com/GoogleCloudPlatform/kubernetes
Main PID: 6256 (kube-apiserver)
Tasks: 13 (limit: 4514)
Memory: 473.8M
CPU: 2min 43.019s
CGroup: /system.slice/kube-apiserver.service
└─6256 /opt/kube/bin/kube-apiserver --allow-privileged=true --anonymous-auth=false --api-audiences=api,istio-ca --authorization-mode=Node,RBAC --bind-address=10.0.1.201 --client-ca-file=/etc/kubernetes/ssl/ca.pem --endpoint-reconciler-type=lease --etcd-cafile=/etc/kubernetes/ssl/ca.pem --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem --etcd-servers=https://10.0.1.201:2379,https://10.0.1.202:2379,https://10.0.1.203:2379 --kubelet-certificate-authority=/etc/kubernetes/ssl/ca.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kubernetes.pem --kubelet-client-key=/etc/kubernetes/ssl/kubernetes-key.pem --secure-port=6443 --service-account-issuer=https://kubernetes.default.svc --service-account-signing-key-file=/etc/kubernetes/ssl/ca-key.pem --service-account-key-file=/etc/kubernetes/ssl/ca.pem --service-cluster-ip-range=10.68.0.0/16 --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem --requestheader-client-ca-file=/etc/kubernetes/ssl/ca.pem --requestheader-allowed-names= --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --proxy-client-cert-file=/etc/kubernetes/ssl/aggregator-proxy.pem --proxy-client-key-file=/etc/kubernetes/ssl/aggregator-proxy-key.pem --enable-aggregator-routing=true --v=2
Jul 19 10:03:01 node-1 kube-apiserver[6256]: "Metadata": null
Jul 19 10:03:01 node-1 kube-apiserver[6256]: }. Err: connection error: desc = "transport: Error while dialing dial tcp 10.0.1.202:2379: connect: no route to host"
Jul 19 10:03:04 node-1 kube-apiserver[6256]: W0719 10:03:04.938084 6256 logging.go:59] [core] [Channel #718 SubChannel #719] grpc: addrConn.createTransport failed to connect to {
Jul 19 10:03:04 node-1 kube-apiserver[6256]: "Addr": "10.0.1.202:2379",
Jul 19 10:03:04 node-1 kube-apiserver[6256]: "ServerName": "10.0.1.202",
Jul 19 10:03:04 node-1 kube-apiserver[6256]: "Attributes": null,
Jul 19 10:03:04 node-1 kube-apiserver[6256]: "BalancerAttributes": null,
Jul 19 10:03:04 node-1 kube-apiserver[6256]: "Type": 0,
Jul 19 10:03:04 node-1 kube-apiserver[6256]: "Metadata": null
Jul 19 10:03:04 node-1 kube-apiserver[6256]: }. Err: connection error: desc = "transport: Error while dialing dial tcp 10.0.1.202:2379: connect: no route to host"

etcd选举
单节点
Jul 19 12:27:19 node-1 etcd[45810]: {"level":"info","ts":"2024-07-19T12:27:19.606511+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"8101c35cd09528f6 is starting a new election at term 163"}
这是一条信息级别的日志,记录了时间戳为 2024-07-19 12:27:19.606511 的事件。
node-1 是节点的名称。
etcd[45810] 表示日志来自 etcd 进程,进程 ID 为 45810。
logger="raft" 表示日志由 Raft 模块生成。
caller="etcdserver/zap_raft.go:77" 表示日志调用的文件和行号。
msg="8101c35cd09528f6 is starting a new election at term 163" 表示节点 ID 为 8101c35cd09528f6 的节点正在启动一个新的选举,选举的任期是 163。

Jul 19 12:27:19 node-1 etcd[45810]: {"level":"info","ts":"2024-07-19T12:27:19.606584+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"8101c35cd09528f6 became pre-candidate at term 163"}
这条日志表示节点 8101c35cd09528f6 变成了预候选者。Jul 19 12:27:19 node-1 etcd[45810]: {"level":"info","ts":"2024-07-19T12:27:19.606603+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"8101c35cd09528f6 received MsgPreVoteResp from 8101c35cd09528f6 at term 163"}
这条日志表示节点 8101c35cd09528f6 收到了来自它自己的预投票响应。Jul 19 12:27:19 node-1 etcd[45810]: {"level":"info","ts":"2024-07-19T12:27:19.60663+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"8101c35cd09528f6 [logterm: 163, index: 900088] sent MsgPreVote request to 74b215bed61842ec at term 163"}
这条日志表示节点 8101c35cd09528f6 向另一节点 74b215bed61842ec 发送了预投票请求。Jul 19 12:27:19 node-1 etcd[45810]: {"level":"info","ts":"2024-07-19T12:27:19.606649+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"8101c35cd09528f6 [logterm: 163, index: 900088] sent MsgPreVote request to e38111e340e17236 at term 163"}
这条日志表示节点 8101c35cd09528f6 向另一节点 e38111e340e17236 发送了预投票请求。

Jul 19 12:27:19 node-1 etcd[45810]: {"level":"warn","ts":"2024-07-19T12:27:19.839049+0800","caller":"etcdserver/server.go:2083","msg":"failed to publish local member to cluster through raft","local-member-id":"8101c35cd09528f6","local-member-id":"8101c35cd09528f6","local-member-attributes":"{Name:etcd-10.0.1.201 ClientURLs:[https://10.0.1.201:2379]}","request-path":"/0/members/8101c35cd09528f6/attributes","publish-timeout":"7s","error":"etcdserver: request timed out"}
这是一条警告级别的日志,表示节点 8101c35cd09528f6 尝试通过 Raft 将本地成员发布到集群时失败了。
local-member-attributes 显示了节点的名称和客户端 URL。
request-path 显示了请求的路径。
publish-timeout 显示了发布超时的时间。
error 显示了错误信息,这里是请求超时。

启动另一节点后
`Jul 19 12:27:21 node-1 etcd[45810]: {"level":"info","ts":"2024-07-19T12:27:21.87621+0800","caller":"rafthttp/stream.go:249","msg":"set message encoder","from":"8101c35cd09528f6","to":"74b215bed61842ec","stream-type":"stream Message"}`
- 这条日志表示节点 8101c35cd09528f6 为与节点 74b215bed61842ec 之间的消息流设置了消息编码器。Jul 19 12:27:21 node-1 etcd[45810]: {"level":"info","ts":"2024-07-19T12:27:21.876246+0800","caller":"rafthttp/peer_status.go:53","msg":"peer became active","peer-id":"74b215bed61842ec"}
这条日志表示节点 74b215bed61842ec 成为了活跃的对等节点。Jul 19 12:27:21 node-1 etcd[45810]: {"level":"info","ts":"2024-07-19T12:27:21.876263+0800","caller":"rafthttp/stream.go:274","msg":"established TCP streaming connection with remote peer","stream-writer-type":"stream Message","local-member-id":"8101c35cd09528f6","remote-peer-id":"74b215bed61842ec"}
这条日志表示节点 8101c35cd09528f6 与远程节点 74b215bed61842ec 建立了 TCP 流连接。Jul 19 12:27:21 node-1 etcd[45810]: {"level":"info","ts":"2024-07-19T12:27:21.880945+0800","caller":"rafthttp/stream.go:249","msg":"set message encoder","from":"8101c35cd09528f6","to":"74b215bed61842ec","stream-type":"stream MsgApp v2"}
这条日志表示节点 8101c35cd09528f6 为与节点 74b215bed61842ec 之间的消息流设置了消息编码器,这次是用于处理消息应用(MsgApp v2)。Jul 19 12:27:21 node-1 etcd[45810]: {"level":"info","ts":"2024-07-19T12:27:21.888384+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"8101c35cd09528f6 received MsgVoteResp from 74b215bed61842ec at term 164"}
这条日志表示节点 8101c35cd09528f6 收到了来自节点 74b215bed61842ec 的投票响应。Jul 19 12:27:21 node-1 etcd[45810]: {"level":"info","ts":"2024-07-19T12:27:21.888439+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"8101c35cd09528f6 has received 2 MsgVoteResp votes and 0 vote rejections"}
这条日志表示节点 8101c35cd09528f6 已经收到了 2 个投票响应的赞成票,没有反对票。Jul 19 12:27:21 node-1 etcd[45810]: {"level":"info","ts":"2024-07-19T12:27:21.888457+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"8101c35cd09528f6 became leader at term 164"}
这条日志表示节点 8101c35cd09528f6 在任期 164 成为了领导者。Jul 19 12:27:21 node-1 etcd[45810]: {"level":"info","ts":"2024-07-19T12:27:21.88847+0800","logger":"raft","caller":"etcdserver/zap_raft.go:77","msg":"raft.node: 8101c35cd09528f6 elected leader 8101c35cd09528f6 at term 164"}
这条日志表示节点 8101c35cd09528f6 在任期 164 被选举为领导者。

选举成功后
`Jul 19 12:27:21 node-1 etcd[45810]: {"level":"info","ts":"2024-07-19T12:27:21.891904+0800","caller":"etcdserver/server.go:2062","msg":"published local member to cluster through raft","local-member-id":"8101c35cd09528f6","local-member-attributes":"{Name:etcd-10.0.1.201 ClientURLs:[https://10.0.1.201:2379]}","request-path":"/0/members/8101c35cd09528f6/attributes","cluster-id":"7d0ce2d3ba49fcc9","publish-timeout":"7s"}`
- 这条日志表示节点 8101c35cd09528f6 成功地通过 Raft 将本地成员信息发布到集群。Jul 19 12:27:21 node-1 etcd[45810]: {"level":"info","ts":"2024-07-19T12:27:21.891973+0800","caller":"embed/serve.go:103","msg":"ready to serve client requests"}
这条日志表示 etcd 服务器准备好接收客户端请求。Jul 19 12:27:21 node-1 etcd[45810]: {"level":"info","ts":"2024-07-19T12:27:21.892948+0800","caller":"embed/serve.go:187","msg":"serving client traffic insecurely; this is strongly discouraged!","traffic":"grpc+http","address":"127.0.0.1:2379"}
这条日志表示 etcd 服务器正在本地地址 127.0.0.1:2379 上以不安全的方式提供客户端流量服务(即没有加密),这是不推荐的。Jul 19 12:27:21 node-1 etcd[45810]: {"level":"info","ts":"2024-07-19T12:27:21.893273+0800","caller":"embed/serve.go:250","msg":"serving client traffic securely","traffic":"grpc+http","address":"10.0.1.201:2379"}
这条日志表示 etcd 服务器在地址 10.0.1.201:2379 上安全地提供客户端流量服务(即加密)。Jul 19 12:27:21 node-1 etcd[45810]: {"level":"info","ts":"2024-07-19T12:27:21.893503+0800","caller":"etcdmain/main.go:44","msg":"notifying init daemon"}
这条日志表示 etcd 服务器正在通知初始化守护进程。Jul 19 12:27:21 node-1 systemd[1]: Started Etcd Server.
这是 systemd 的日志,表示 Etcd 服务器已经启动。

测试
cd /etc/kubeasz/clusters/test-cn
cp hosts hosts.back

# 'etcd' cluster should have odd member(s) (1,3,5,...)
[etcd]
10.0.1.201
10.0.1.202
10.0.1.203
# master node(s), set unique 'k8s_nodename' for each node
# CAUTION: 'k8s_nodename' must consist of lower case alphanumeric characters, '-' or '.',
# and must start and end with an alphanumeric character
[kube_master]
10.0.1.201
10.0.1.202
# work node(s), set unique 'k8s_nodename' for each node
# CAUTION: 'k8s_nodename' must consist of lower case alphanumeric characters, '-' or '.',
# and must start and end with an alphanumeric character
[kube_node]
10.0.1.203
10.0.1.204
# [optional] harbor server, a private docker registry
# 'NEW_INSTALL': 'true' to install a harbor server; 'false' to integrate with existed one
[harbor]
#192.168.1.8 NEW_INSTALL=false
# [optional] loadbalance for accessing k8s from outside
[ex_lb]
# [optional] ntp server for the cluster
[chrony]
[all:vars]
# --------- Main Variables ---------------
# Secure port for apiservers
SECURE_PORT="6443"
# Cluster container-runtime supported: docker, containerd
# if k8s version >= 1.24, docker is not supported
CONTAINER_RUNTIME="containerd"
# Network plugins supported: calico, flannel, kube-router, cilium, kube-ovn
CLUSTER_NETWORK="calico"
# Service proxy mode of kube-proxy: 'iptables' or 'ipvs'
PROXY_MODE="ipvs"
# K8S Service CIDR, not overlap with node(host) networking
SERVICE_CIDR="10.68.0.0/16"
# Cluster CIDR (Pod CIDR), not overlap with node(host) networking
CLUSTER_CIDR="172.20.0.0/16"
# NodePort Range
NODE_PORT_RANGE="30000-32767"
# Cluster DNS Domain
CLUSTER_DNS_DOMAIN="cluster.local"
# -------- Additional Variables (don't change the default value right now) ---
# Binaries Directory
bin_dir="/opt/kube/bin"
# Deploy Directory (kubeasz workspace)
base_dir="/etc/kubeasz"
# Directory for a specific cluster
cluster_dir="{{ base_dir }}/clusters/test-cn"
# CA and other components cert/key Directory
ca_dir="/etc/kubernetes/ssl"
# Default 'k8s_nodename' is empty
k8s_nodename=''
# Default python interpreter
ansible_python_interpreter=/usr/bin/python3