In my own experimental Kubernetes cluster I want to use the advanced features of the Gateway API, instead of Ingress, to publish applications; I am currently running Envoy Gateway 1.0.2.
In the same Kubernetes cluster (with Cilium as the CNI), the same backend web application (app: moon) is published in three ways at once: through Envoy Gateway, through a LoadBalancer Service, and through ingress-nginx.
So far, only the application published through Envoy Gateway cannot be reached from outside the Kubernetes cluster.
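For reference, the Gateway and HTTPRoute look roughly like this (a minimal sketch; the GatewayClass name, hostname, route name, Service name and port below are assumptions, since the full manifests are not pasted here):

kubectl apply -f - <<EOF
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: envoy-gateway
  namespace: default
spec:
  gatewayClassName: eg              # assumed GatewayClass name
  listeners:
  - name: http
    protocol: HTTP
    port: 80
---
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: moon-route                  # hypothetical route name
  namespace: default
spec:
  parentRefs:
  - name: envoy-gateway
  hostnames:
  - "moon-ui.com"                   # assumed, same host the ingress-nginx test uses
  rules:
  - backendRefs:
    - name: moon-svc                # hypothetical ClusterIP Service for app: moon
      port: 8080
EOF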
export ENVOY_GATEWAY_SERVICE=$(kubectl get svc -n envoy-gateway-system --selector=gateway.envoyproxy.io/owning-gateway-name=envoy-gateway,gateway.envoyproxy.io/owning-gateway-namespace=default -o jsonpath='{.items[0].metadata.name}')
export ENVOY_GATEWAY_HOST=$(kubectl get svc/${ENVOY_GATEWAY_SERVICE} -n envoy-gateway-system -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
export MOON_LB_SVC=moon-lb-svc
export MOON_LB_HOST=$(kubectl get svc/${MOON_LB_SVC} -n default -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
export NGINX_INGRESS_SVC=ingress-nginx-controller
export INGRESS_HOST=$(kubectl get svc/${NGINX_INGRESS_SVC} -n ingress-nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
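Before testing from outside, it is also worth confirming that the Gateway and HTTPRoute have been accepted and programmed:

# kubectl get gateway,httproute -A
# kubectl describe gateway envoy-gateway -n default
# kubectl describe httproute -n default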
# curl --verbose -sIL -w "%{http_code}\n" http://$ENVOY_GATEWAY_HOST
* Trying 192.168.2.130:80...
* Connected to 192.168.2.130 (192.168.2.130) port 80 (#0)
> HEAD / HTTP/1.1
> Host: 192.168.2.130
> User-Agent: curl/7.88.1
> Accept: */*
>
< HTTP/1.1 404 Not Found
HTTP/1.1 404 Not Found
< date: Fri, 05 Jul 2024 02:23:21 GMT
date: Fri, 05 Jul 2024 02:23:21 GMT
< transfer-encoding: chunked
transfer-encoding: chunked
<
* Connection #0 to host 192.168.2.130 left intact
404
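Envoy answers 404 when no route matches a request, so one thing worth ruling out is hostname matching: if the HTTPRoute declares a hostname (the ingress-nginx test below sends Host: moon-ui.com), the same header has to be sent to the Envoy Gateway address as well, for example:

# curl --verbose -sIL -w "%{http_code}\n" --header "Host: moon-ui.com" http://$ENVOY_GATEWAY_HOST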
# curl --verbose -sIL -w "%{http_code}\n" http://$MOON_LB_HOST
* Trying 192.168.2.129:80...
* Connected to 192.168.2.129 (192.168.2.129) port 80 (#0)
> HEAD / HTTP/1.1
> Host: 192.168.2.129
> User-Agent: curl/7.88.1
> Accept: */*
>
< HTTP/1.1 200 OK
HTTP/1.1 200 OK
< Server: nginx/1.21.6
Server: nginx/1.21.6
< Date: Fri, 05 Jul 2024 02:23:17 GMT
Date: Fri, 05 Jul 2024 02:23:17 GMT
< Content-Type: text/html
Content-Type: text/html
< Connection: keep-alive
Connection: keep-alive
< Expires: Fri, 05 Jul 2024 02:23:16 GMT
Expires: Fri, 05 Jul 2024 02:23:16 GMT
< Cache-Control: no-cache
Cache-Control: no-cache
<
* Connection #0 to host 192.168.2.129 left intact
200
# curl --verbose -sIL -w "%{http_code}\n" --header "Host: moon-ui.com" http://$INGRESS_HOST
* Trying 192.168.2.131:80...
* Connected to 192.168.2.131 (192.168.2.131) port 80 (#0)
> HEAD / HTTP/1.1
> Host: moon-ui.com
> User-Agent: curl/7.88.1
> Accept: */*
>
< HTTP/1.1 200 OK
HTTP/1.1 200 OK
< Date: Fri, 05 Jul 2024 02:24:53 GMT
Date: Fri, 05 Jul 2024 02:24:53 GMT
< Content-Type: text/html
Content-Type: text/html
< Connection: keep-alive
Connection: keep-alive
< Expires: Fri, 05 Jul 2024 02:24:52 GMT
Expires: Fri, 05 Jul 2024 02:24:52 GMT
< Cache-Control: no-cache
Cache-Control: no-cache
<
* Connection #0 to host 192.168.2.131 left intact
200
In the cilium-agent bpf lb list output, the forwarding rules all look correct. From what I can tell, the curl traffic gets stuck at Envoy's port 10080 and is never forwarded on to the backend moon pod at 10.233.66.57:8080.
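To confirm where the traffic actually stops, the Envoy proxy's access log can be watched and packets captured inside its network namespace while repeating the curl (assuming the proxy pods carry the same owning-gateway labels as the Service, and that access logging is enabled):

# kubectl logs -n envoy-gateway-system -l gateway.envoyproxy.io/owning-gateway-name=envoy-gateway -f
# nsenter -t 435726 -n tcpdump -nn -i any 'port 10080 or port 8080'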
# LoadBalancer IP -> MOON Pod IP
# kubectl exec -it -n kube-system cilium-mwp8t -- cilium bpf lb list | grep "$MOON_LB_HOST"
Defaulted container "cilium-agent" out of: cilium-agent, mount-cgroup (init), apply-sysctl-overwrites (init), clean-cilium-state (init), install-cni-binaries (init)
192.168.2.129:80 (0) 0.0.0.0:0 (65) (0) [LoadBalancer]
192.168.2.129:80 (1) 10.233.66.57:8080 (65) (1)
# LoadBalancer IP -> ENVOY GATEWAY IP
# kubectl exec -it -n kube-system cilium-mwp8t -- cilium bpf lb list | grep "$ENVOY_GATEWAY_HOST"
Defaulted container "cilium-agent" out of: cilium-agent, mount-cgroup (init), apply-sysctl-overwrites (init), clean-cilium-state (init), install-cni-binaries (init)
192.168.2.130:80 (0) 0.0.0.0:0 (71) (0) [LoadBalancer, Local, two-scopes]
192.168.2.130:80/i (1) 10.233.64.44:10080 (72) (1)
192.168.2.130:80 (1) 10.233.64.44:10080 (71) (1)
192.168.2.130:80/i (0) 0.0.0.0:0 (72) (0) [LoadBalancer, Local, two-scopes]
# LoadBalancer IP -> INGRESS NGINX CONTROLLER IP
# kubectl exec -it -n kube-system cilium-mwp8t -- cilium bpf lb list | grep "$INGRESS_HOST"
Defaulted container "cilium-agent" out of: cilium-agent, mount-cgroup (init), apply-sysctl-overwrites (init), clean-cilium-state (init), install-cni-binaries (init)
192.168.2.131:80 (1) 10.233.66.184:80 (80) (1)
192.168.2.131:80 (0) 0.0.0.0:0 (80) (0) [LoadBalancer]
192.168.2.131:443 (0) 0.0.0.0:0 (81) (0) [LoadBalancer]
192.168.2.131:443 (1) 10.233.66.184:443 (81) (1)
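The same frontend-to-backend mappings can be cross-checked in a more readable form with cilium service list from the same cilium-agent pod:

# kubectl exec -it -n kube-system cilium-mwp8t -- cilium service list | grep -E "$ENVOY_GATEWAY_HOST|$MOON_LB_HOST|$INGRESS_HOST"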
Envoy's listener on port 10080 also looks normal:
## envoy gateway PID: 435726
# nsenter -t 435726 -n ss -ntlp
State Recv-Q Send-Q Local Address:Port Peer Address:Port Process
LISTEN 0 4096 0.0.0.0:10080 0.0.0.0:* users:(("envoy",pid=435726,fd=78))
LISTEN 0 4096 0.0.0.0:10080 0.0.0.0:* users:(("envoy",pid=435726,fd=77))
LISTEN 0 4096 0.0.0.0:10080 0.0.0.0:* users:(("envoy",pid=435726,fd=76))
LISTEN 0 4096 0.0.0.0:10080 0.0.0.0:* users:(("envoy",pid=435726,fd=75))
LISTEN 0 4096 0.0.0.0:10080 0.0.0.0:* users:(("envoy",pid=435726,fd=74))
LISTEN 0 4096 0.0.0.0:10080 0.0.0.0:* users:(("envoy",pid=435726,fd=73))
LISTEN 0 4096 0.0.0.0:10080 0.0.0.0:* users:(("envoy",pid=435726,fd=72))
LISTEN 0 4096 0.0.0.0:10080 0.0.0.0:* users:(("envoy",pid=435726,fd=71))
LISTEN 0 4096 127.0.0.1:19000 0.0.0.0:* users:(("envoy",pid=435726,fd=47))
LISTEN 0 4096 0.0.0.0:19002 0.0.0.0:* users:(("envoy-gateway",pid=435815,fd=3))
LISTEN 0 4096 0.0.0.0:19001 0.0.0.0:* users:(("envoy",pid=435726,fd=59))
LISTEN 0 4096 0.0.0.0:19001 0.0.0.0:* users:(("envoy",pid=435726,fd=58))
LISTEN 0 4096 0.0.0.0:19001 0.0.0.0:* users:(("envoy",pid=435726,fd=57))
LISTEN 0 4096 0.0.0.0:19001 0.0.0.0:* users:(("envoy",pid=435726,fd=56))
LISTEN 0 4096 0.0.0.0:19001 0.0.0.0:* users:(("envoy",pid=435726,fd=55))
LISTEN 0 4096 0.0.0.0:19001 0.0.0.0:* users:(("envoy",pid=435726,fd=54))
LISTEN 0 4096 0.0.0.0:19001 0.0.0.0:* users:(("envoy",pid=435726,fd=53))
LISTEN 0 4096 0.0.0.0:19001 0.0.0.0:* users:(("envoy",pid=435726,fd=52))
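Since ss shows Envoy's admin interface already listening on 127.0.0.1:19000, the effective listener and route configuration can also be dumped from inside the same network namespace; that should reveal whether a matching route was actually programmed for this listener:

# nsenter -t 435726 -n curl -s http://127.0.0.1:19000/listeners
# nsenter -t 435726 -n curl -s http://127.0.0.1:19000/config_dump > /tmp/envoy_config_dump.json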
1
hazbinGuy 173 days ago via Android
Selling lifetime training membership for LFCS, k8s, istio, Prometheus, CKA, CKS, ICA, PCA - Lao Duan Studio
2
naison 137 days ago
Could it be that Envoy's routing rules simply didn't match the request?
3
naison 137 days ago
I haven't used Envoy Gateway, so I'm not sure whether you can open an admin port (e.g. 9003) to take a look, with a configuration along these lines:
admin:
  access_log_path: /dev/null
  address:
    socket_address:
      address: "::"
      port_value: 9003
      ipv4_compat: true