sealos
sealos copied to clipboard
通过节点1的nodePort 访问节点2的pod 失败
初步思路是 ip_forward / iptables FORWARD ACCEPT 的问题
尝试解决该问题时参考的链接:https://blog.csdn.net/zixuan_zhao/article/details/125805234
按照以上方法处理完之后，依然无法访问
在本节点可以通过nodeport访问服务 例如 在master1(192.168.52.11) 可以访问 curl http://192.168.52.21:32119/ 但是在node2(192.168.52.22) 就访问不通 curl http://192.168.52.21:32119/
节点状态
[root@work3 ~]# iptables -S | grep "FORWARD "
-P FORWARD ACCEPT
[root@work3 ~]# iptables -S | grep "FORWARD ACCEPT"
-P FORWARD ACCEPT
[root@work3 ~]# iptables -S
-P INPUT ACCEPT
-P FORWARD ACCEPT
-P OUTPUT ACCEPT
-N KUBE-FIREWALL
-N KUBE-FORWARD
-N KUBE-KUBELET-CANARY
-N KUBE-NODE-PORT
-N cali-FORWARD
-N cali-INPUT
-N cali-OUTPUT
-N cali-cidr-block
-N cali-forward-check
-N cali-forward-endpoint-mark
-N cali-from-endpoint-mark
-N cali-from-hep-forward
-N cali-from-host-endpoint
-N cali-from-wl-dispatch
-N cali-set-endpoint-mark
-N cali-to-hep-forward
-N cali-to-host-endpoint
-N cali-to-wl-dispatch
-N cali-wl-to-host
-A INPUT -m comment --comment "cali:Cz_u1IQiXIMmKD4c" -j cali-INPUT
-A INPUT -m comment --comment "kubernetes health check rules" -j KUBE-NODE-PORT
-A INPUT -j KUBE-FIREWALL
-A FORWARD -m comment --comment "cali:wUHhoiAYhphO9Mso" -j cali-FORWARD
-A FORWARD -m comment --comment "kubernetes forwarding rules" -j KUBE-FORWARD
-A FORWARD -m comment --comment "cali:S93hcgKJrXEqnTfs" -m comment --comment "Policy explicitly accepted packet." -m mark --mark 0x10000/0x10000 -j ACCEPT
-A FORWARD -m comment --comment "cali:mp77cMpurHhyjLrM" -j MARK --set-xmark 0x10000/0x10000
-A OUTPUT -m comment --comment "cali:tVnHkvAo15HuiPy0" -j cali-OUTPUT
-A OUTPUT -j KUBE-FIREWALL
-A KUBE-FIREWALL -m comment --comment "kubernetes firewall for dropping marked packets" -m mark --mark 0x8000/0x8000 -j DROP
-A KUBE-FIREWALL ! -s 127.0.0.0/8 -d 127.0.0.0/8 -m comment --comment "block incoming localnet connections" -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-NODE-PORT -m comment --comment "Kubernetes health check node port" -m set --match-set KUBE-HEALTH-CHECK-NODE-PORT dst -j ACCEPT
-A cali-FORWARD -m comment --comment "cali:vjrMJCRpqwy5oRoX" -j MARK --set-xmark 0x0/0xe0000
-A cali-FORWARD -m comment --comment "cali:A_sPAO0mcxbT9mOV" -m mark --mark 0x0/0x10000 -j cali-from-hep-forward
-A cali-FORWARD -i cali+ -m comment --comment "cali:8ZoYfO5HKXWbB3pk" -j cali-from-wl-dispatch
-A cali-FORWARD -o cali+ -m comment --comment "cali:jdEuaPBe14V2hutn" -j cali-to-wl-dispatch
-A cali-FORWARD -m comment --comment "cali:12bc6HljsMKsmfr-" -j cali-to-hep-forward
-A cali-FORWARD -m comment --comment "cali:NOSxoaGx8OIstr1z" -j cali-cidr-block
-A cali-INPUT -p ipv4 -m comment --comment "cali:PajejrV4aFdkZojI" -m comment --comment "Allow IPIP packets from Calico hosts" -m set --match-set cali40all-hosts-net src -m addrtype --dst-type LOCAL -j ACCEPT
-A cali-INPUT -p ipv4 -m comment --comment "cali:_wjq-Yrma8Ly1Svo" -m comment --comment "Drop IPIP packets from non-Calico hosts" -j DROP
-A cali-INPUT -m comment --comment "cali:ss8lEMQsXi-s6qYT" -j MARK --set-xmark 0x0/0xfff00000
-A cali-INPUT -m comment --comment "cali:PgIW-V0nEjwPhF_8" -j cali-forward-check
-A cali-INPUT -m comment --comment "cali:QMJlDwlS0OjHyfMN" -m mark ! --mark 0x0/0xfff00000 -j RETURN
-A cali-INPUT -i cali+ -m comment --comment "cali:nDRe73txrna-aZjG" -g cali-wl-to-host
-A cali-INPUT -m comment --comment "cali:iX2AYvqGXaVqwkro" -m mark --mark 0x10000/0x10000 -j ACCEPT
-A cali-INPUT -m comment --comment "cali:bhpnxD5IRtBP8KW0" -j MARK --set-xmark 0x0/0xf0000
-A cali-INPUT -m comment --comment "cali:H5_bccAbHV0sooVy" -j cali-from-host-endpoint
-A cali-INPUT -m comment --comment "cali:inBL01YlfurT0dbI" -m comment --comment "Host endpoint policy accepted packet." -m mark --mark 0x10000/0x10000 -j ACCEPT
-A cali-OUTPUT -m comment --comment "cali:Mq1_rAdXXH3YkrzW" -m mark --mark 0x10000/0x10000 -j ACCEPT
-A cali-OUTPUT -m comment --comment "cali:5Z67OUUpTOM7Xa1a" -m mark ! --mark 0x0/0xfff00000 -g cali-forward-endpoint-mark
-A cali-OUTPUT -o cali+ -m comment --comment "cali:M2Wf0OehNdig8MHR" -j RETURN
-A cali-OUTPUT -p ipv4 -m comment --comment "cali:AJBkLho_0Qd8LNr3" -m comment --comment "Allow IPIP packets to other Calico hosts" -m set --match-set cali40all-hosts-net dst -m addrtype --src-type LOCAL -j ACCEPT
-A cali-OUTPUT -m comment --comment "cali:iz2RWXlXJDUfsLpe" -j MARK --set-xmark 0x0/0xf0000
-A cali-OUTPUT -m comment --comment "cali:xQqLi8S0sxbiyvjR" -m conntrack ! --ctstate DNAT -j cali-to-host-endpoint
-A cali-OUTPUT -m comment --comment "cali:aSnsxZdmhxm_ilRZ" -m comment --comment "Host endpoint policy accepted packet." -m mark --mark 0x10000/0x10000 -j ACCEPT
-A cali-forward-check -m comment --comment "cali:Pbldlb4FaULvpdD8" -m conntrack --ctstate RELATED,ESTABLISHED -j RETURN
-A cali-forward-check -p tcp -m comment --comment "cali:ZD-6UxuUtGW-xtzg" -m comment --comment "To kubernetes NodePort service" -m multiport --dports 30000:32767 -m set --match-set cali40this-host dst -g cali-set-endpoint-mark
-A cali-forward-check -p udp -m comment --comment "cali:CbPfUajQ2bFVnDq4" -m comment --comment "To kubernetes NodePort service" -m multiport --dports 30000:32767 -m set --match-set cali40this-host dst -g cali-set-endpoint-mark
-A cali-forward-check -m comment --comment "cali:jmhU0ODogX-Zfe5g" -m comment --comment "To kubernetes service" -m set ! --match-set cali40this-host dst -j cali-set-endpoint-mark
-A cali-forward-endpoint-mark -m comment --comment "cali:O0SmFDrnm7KggWqW" -m mark ! --mark 0x100000/0xfff00000 -j cali-from-endpoint-mark
-A cali-forward-endpoint-mark -o cali+ -m comment --comment "cali:aFl0WFKRxDqj8oA6" -j cali-to-wl-dispatch
-A cali-forward-endpoint-mark -m comment --comment "cali:AZKVrO3i_8cLai5f" -j cali-to-hep-forward
-A cali-forward-endpoint-mark -m comment --comment "cali:96HaP1sFtb-NYoYA" -j MARK --set-xmark 0x0/0xfff00000
-A cali-forward-endpoint-mark -m comment --comment "cali:VxO6hyNWz62YEtul" -m comment --comment "Policy explicitly accepted packet." -m mark --mark 0x10000/0x10000 -j ACCEPT
-A cali-from-endpoint-mark -m comment --comment "cali:9dpftzl-pNycbr37" -m comment --comment "Unknown interface" -j DROP
-A cali-from-wl-dispatch -m comment --comment "cali:zTj6P0TIgYvgz-md" -m comment --comment "Unknown interface" -j DROP
-A cali-set-endpoint-mark -i cali+ -m comment --comment "cali:MN61lcxFj1yWuYBo" -m comment --comment "Unknown endpoint" -j DROP
-A cali-set-endpoint-mark -m comment --comment "cali:nKOjq8N2yzfmS3jk" -m comment --comment "Non-Cali endpoint mark" -j MARK --set-xmark 0x100000/0xfff00000
-A cali-to-wl-dispatch -m comment --comment "cali:7KNphB1nNHw80nIO" -m comment --comment "Unknown interface" -j DROP
-A cali-wl-to-host -m comment --comment "cali:Ee9Sbo10IpVujdIY" -j cali-from-wl-dispatch
-A cali-wl-to-host -m comment --comment "cali:nSZbcOoG1xPONxb8" -m comment --comment "Configured DefaultEndpointToHostAction" -j ACCEPT
在节点1上 curl 非节点1上的 coredns 的 http 接口（9153 指标端口），判断一下是不是跨节点不通
curl coredns_podIP:9153
试了 可以访问通
[root@master1 ~]# kubectl get pod -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-6d4b75cb6d-knw6f 1/1 Running 2 (6h49m ago) 2d1h 100.69.198.69 wrok4 <none> <none>
coredns-6d4b75cb6d-kz5dk 1/1 Running 3 (6h50m ago) 2d1h 100.75.199.68 work2 <none> <none>
etcd-master1 1/1 Running 2 (6h50m ago) 2d1h 192.168.52.11 master1 <none> <none>
kube-apiserver-master1 1/1 Running 2 (6h50m ago) 2d1h 192.168.52.11 master1 <none> <none>
kube-controller-manager-master1 1/1 Running 7 (6h50m ago) 2d1h 192.168.52.11 master1 <none> <none>
kube-proxy-gwtl9 1/1 Running 2 (6h49m ago) 2d1h 192.168.52.26 work6 <none> <none>
kube-proxy-j9995 1/1 Running 2 (6h49m ago) 2d1h 192.168.52.21 work1 <none> <none>
kube-proxy-jmmsx 1/1 Running 2 (6h49m ago) 2d1h 192.168.52.24 wrok4 <none> <none>
kube-proxy-lxscb 1/1 Running 2 (6h50m ago) 8h 192.168.52.11 master1 <none> <none>
kube-proxy-t957g 1/1 Running 2 (6h49m ago) 2d1h 192.168.52.25 wrok5 <none> <none>
kube-proxy-v6tgj 1/1 Running 4 (6h38m ago) 2d1h 192.168.52.22 work2 <none> <none>
kube-proxy-xlvj5 1/1 Running 2 (6h49m ago) 2d1h 192.168.52.23 work3 <none> <none>
kube-scheduler-master1 1/1 Running 7 (6h50m ago) 2d1h 192.168.52.11 master1 <none> <none>
kube-sealos-lvscare-work1 1/1 Running 3 (6h38m ago) 2d1h 192.168.52.21 work1 <none> <none>
kube-sealos-lvscare-work2 1/1 Running 4 (6h38m ago) 2d1h 192.168.52.22 work2 <none> <none>
kube-sealos-lvscare-work3 1/1 Running 3 (6h38m ago) 2d1h 192.168.52.23 work3 <none> <none>
kube-sealos-lvscare-work6 1/1 Running 3 (6h37m ago) 2d1h 192.168.52.26 work6 <none> <none>
kube-sealos-lvscare-wrok4 1/1 Running 3 (6h38m ago) 2d1h 192.168.52.24 wrok4 <none> <none>
kube-sealos-lvscare-wrok5 1/1 Running 3 (6h38m ago) 2d1h 192.168.52.25 wrok5 <none> <none>
[root@master1 ~]# curl 100.69.198.69:9153
404 page not found
[root@master1 ~]#
你在容器内部ping 114.114.114.114能通吗
可以的 正常
~ # ping 114.114.114.114
PING 114.114.114.114 (114.114.114.114): 56 data bytes
64 bytes from 114.114.114.114: seq=0 ttl=92 time=12.194 ms
64 bytes from 114.114.114.114: seq=1 ttl=91 time=11.864 ms
64 bytes from 114.114.114.114: seq=2 ttl=90 time=11.685 ms
64 bytes from 114.114.114.114: seq=3 ttl=72 time=11.701 ms
64 bytes from 114.114.114.114: seq=4 ttl=79 time=11.314 ms
64 bytes from 114.114.114.114: seq=5 ttl=68 time=10.952 ms
64 bytes from 114.114.114.114: seq=6 ttl=73 time=11.068 ms
64 bytes from 114.114.114.114: seq=7 ttl=85 time=10.497 ms
64 bytes from 114.114.114.114: seq=8 ttl=89 time=10.130 ms
64 bytes from 114.114.114.114: seq=9 ttl=74 time=14.702 ms
64 bytes from 114.114.114.114: seq=10 ttl=77 time=13.413 ms
64 bytes from 114.114.114.114: seq=11 ttl=89 time=13.258 ms
64 bytes from 114.114.114.114: seq=12 ttl=76 time=12.765 ms
64 bytes from 114.114.114.114: seq=13 ttl=88 time=12.657 ms
64 bytes from 114.114.114.114: seq=14 ttl=84 time=12.453 ms
64 bytes from 114.114.114.114: seq=15 ttl=78 time=12.235 ms
64 bytes from 114.114.114.114: seq=16 ttl=91 time=11.986 ms
64 bytes from 114.114.114.114: seq=17 ttl=90 time=11.843 ms
^C
每次修改完 /etc/sysctl.d/k8s.conf 重启后又变成原本的内容了
[root@master1 ~]# cat /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.conf.all.rp_filter=0
这个好像是 kubelet 或 docker 的 ExecStartPre 阶段执行的那个脚本修改的
systemctl cat --no-pager kubelet
没看到相关的参数设置
[root@master1 ~]# systemctl cat --no-pager kubelet
# /etc/systemd/system/kubelet.service
[Unit]
Description=kubelet: The Kubernetes Node Agent
Documentation=http://kubernetes.io/docs/
[Service]
ExecStart=/usr/bin/kubelet
ExecStartPre=/usr/bin/kubelet-pre-start.sh
ExecStopPost=/usr/bin/kubelet-post-stop.sh
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
# /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
# Note: This dropin only works with kubeadm and kubelet v1.11+
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/var/lib/kubelet/kubelet-flags.env
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS
ExecStartPre=/usr/bin/kubelet-pre-start.sh
改过内核参数了 还是不行
[root@master1 ~]# cat /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.ipv4.tcp_tw_recycle = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.conf.all.rp_filter=0
所有信息
[root@master1 ~]# sysctl --system
* Applying /usr/lib/sysctl.d/00-system.conf ...
net.bridge.bridge-nf-call-ip6tables = 0
net.bridge.bridge-nf-call-iptables = 0
net.bridge.bridge-nf-call-arptables = 0
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /etc/sysctl.d/99-ipv4_forward.conf ...
net.ipv4.ip_forward = 1
* Applying /etc/sysctl.d/99-sysctl.conf ...
net.ipv4.tcp_keepalive_time = 1200
net.ipv4.ip_local_port_range = 1024 65000
net.ipv4.tcp_max_syn_backlog = 8192
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_fin_timeout = 30
vm.max_map_count = 262144
fs.file-max = 655350
net.core.somaxconn = 20480
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
* Applying /etc/sysctl.d/k8s.conf ...
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.conf.all.rp_filter = 0
* Applying /etc/sysctl.conf ...
net.ipv4.tcp_keepalive_time = 1200
net.ipv4.ip_local_port_range = 1024 65000
net.ipv4.tcp_max_syn_backlog = 8192
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_fin_timeout = 30
vm.max_map_count = 262144
fs.file-max = 655350
net.core.somaxconn = 20480
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1