Disabling port forwarding is broken in 2.x
Description
Using the following yaml to disable port forwarding, I see that ports are still forwarded. The same yaml works with lima 1.2.1.
The expected behavior is to see only the ssh port being forwarded.
Version: 2.0.1 (from brew).
% grep Forward drenv.log
2025-11-24 19:10:51,817 DEBUG [cluster] [hostagent] Forwarding TCP from 127.0.0.1:42585 to 127.0.0.1:42585
2025-11-24 19:11:29,525 DEBUG [cluster] [hostagent] Forwarding TCP from 127.0.0.1:45901 to 127.0.0.1:45901
2025-11-24 19:11:29,583 DEBUG [cluster] [hostagent] Forwarding TCP from 127.0.0.1:45267 to 127.0.0.1:45267
2025-11-24 19:11:37,999 DEBUG [cluster] [hostagent] Forwarding TCP from 127.0.0.1:10248 to 127.0.0.1:10248
2025-11-24 19:11:40,134 DEBUG [cluster] [hostagent] Forwarding TCP from 127.0.0.1:2379 to 127.0.0.1:2379
2025-11-24 19:11:40,168 DEBUG [cluster] [hostagent] Forwarding TCP from 127.0.0.1:2381 to 127.0.0.1:2381
2025-11-24 19:11:40,218 DEBUG [cluster] [hostagent] Forwarding TCP from 127.0.0.1:10257 to 127.0.0.1:10257
2025-11-24 19:11:40,592 DEBUG [cluster] [hostagent] Forwarding TCP from 127.0.0.1:10259 to 127.0.0.1:10259
2025-11-24 19:11:43,693 DEBUG [cluster] [hostagent] Forwarding TCP from 127.0.0.1:10248 to 127.0.0.1:10248
images:
- location: https://cloud-images.ubuntu.com/releases/24.04/release/ubuntu-24.04-server-cloudimg-arm64.img
  arch: aarch64
- location: https://cloud-images.ubuntu.com/releases/24.04/release/ubuntu-24.04-server-cloudimg-amd64.img
  arch: x86_64
mounts: []
containerd:
  system: true
  user: false
portForwards:
- ignore: true
  proto: any
  guestIP: 0.0.0.0
param:
  LOCAL_REGISTRY: host.lima.internal:5050
provision:
- mode: system
  script: |
    #!/bin/bash
    set -eux -o pipefail
    command -v kubeadm >/dev/null 2>&1 && exit 0
    # Install and configure prerequisites
    cat <<EOF | tee /etc/modules-load.d/containerd.conf
    overlay
    br_netfilter
    EOF
    modprobe overlay
    modprobe br_netfilter
    cat <<EOF | tee /etc/sysctl.d/99-kubernetes-cri.conf
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward = 1
    net.bridge.bridge-nf-call-ip6tables = 1
    EOF
    # Avoid "failed to creating a fsnotify watcher: too many open files"
    # errors with bigger setups.
    cat <<EOF | tee /etc/sysctl.d/99-fs-inotify.conf
    fs.inotify.max_user_instances = 8192
    fs.inotify.max_user_watches = 65536
    EOF
    sysctl --system
    # Installing kubeadm, kubelet and kubectl
    export DEBIAN_FRONTEND=noninteractive
    apt-get update
    apt-get install -y apt-transport-https ca-certificates curl
    VERSION=1.34
    echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v${VERSION}/deb/ /" | tee /etc/apt/sources.list.d/kubernetes.list
    curl -fsSL https://pkgs.k8s.io/core:/stable:/v${VERSION}/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
    apt-get update
    # cri-tools
    apt-get install -y cri-tools
    cat <<EOF | tee /etc/crictl.yaml
    runtime-endpoint: unix:///run/containerd/containerd.sock
    EOF
    # cni-plugins
    apt-get install -y kubernetes-cni
    rm -f /etc/cni/net.d/*.conf*
    apt-get install -y kubelet kubeadm kubectl && apt-mark hold kubelet kubeadm kubectl
    systemctl enable --now kubelet
- mode: system
  script: |
    #!/bin/bash
    set -eux -o pipefail
    test -e /etc/containerd/conf.d/k8s.toml && exit 0
    mkdir -p /etc/containerd/conf.d
    # Configuring the systemd cgroup driver
    # Overriding the sandbox (pause) image
    cat <<EOF >/etc/containerd/conf.d/k8s.toml
    version = 2
    [plugins]
      [plugins."io.containerd.grpc.v1.cri"]
        sandbox_image = "$(kubeadm config images list | grep pause | sort -r | head -n1)"
        # Ramen: Allow unprivileged pods to access block devices.
        device_ownership_from_security_context = true
        [plugins."io.containerd.grpc.v1.cri".containerd]
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
            [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
              runtime_type = "io.containerd.runc.v2"
              [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
                SystemdCgroup = true
      [plugins."io.containerd.grpc.v1.cri".registry]
        config_path = "/etc/containerd/certs.d"
      [plugins."io.containerd.cri.v1.runtime".cni]
        bin_dirs = ["/usr/local/libexec/cni","/opt/cni/bin"]
    EOF
    systemctl restart containerd
- mode: system
  script: |
    #!/bin/bash
    set -eux -o pipefail
    test -z "{{.Param.LOCAL_REGISTRY}}" && exit 0
    test -f "/etc/containerd/certs.d/{{.Param.LOCAL_REGISTRY}}/hosts.toml" && exit 0
    mkdir -p "/etc/containerd/certs.d/{{.Param.LOCAL_REGISTRY}}"
    cat <<EOF > "/etc/containerd/certs.d/{{.Param.LOCAL_REGISTRY}}/hosts.toml"
    server = "http://{{.Param.LOCAL_REGISTRY}}"
    [host."http://{{.Param.LOCAL_REGISTRY}}"]
      skip_verify = true
    EOF
    systemctl restart containerd
- mode: system
  script: |
    #!/bin/bash
    set -eux -o pipefail
    test -e /etc/kubernetes/admin.conf && exit 0
    export KUBECONFIG=/etc/kubernetes/admin.conf
    # Ramen: serve the additional shared network instead of the user network.
    export ADVERTISE_ADDRESS=$(ip -j -4 addr show dev lima0 | jq -r '.[0].addr_info[0].local')
    # Ramen: Use local registry for k8s images
    if [ -n "{{.Param.LOCAL_REGISTRY}}" ]; then
      IMAGE_REPOSITORY="{{.Param.LOCAL_REGISTRY}}/k8s"
    else
      IMAGE_REPOSITORY=""
    fi
    # Initializing your control-plane node
    cat <<EOF >kubeadm-config.yaml
    kind: InitConfiguration
    apiVersion: kubeadm.k8s.io/v1beta3
    nodeRegistration:
      criSocket: unix:///run/containerd/containerd.sock
      kubeletExtraArgs:
        # Ramen: use specific network
        node-ip: "$ADVERTISE_ADDRESS"
        # Ramen: speed up image pulls
        serialize-image-pulls: "false"
    # Ramen: serve specific network.
    localAPIEndpoint:
      advertiseAddress: "$ADVERTISE_ADDRESS"
    ---
    kind: ClusterConfiguration
    apiVersion: kubeadm.k8s.io/v1beta3
    imageRepository: "$IMAGE_REPOSITORY"
    apiServer:
      certSANs: # --apiserver-cert-extra-sans
      - "127.0.0.1"
    networking:
      podSubnet: "10.244.0.0/16" # --pod-network-cidr
    ---
    kind: KubeletConfiguration
    apiVersion: kubelet.config.k8s.io/v1beta1
    cgroupDriver: systemd
    featureGates:
      StatefulSetAutoDeletePVC: true
    EOF
    # We ignore the NumCPU preflight error for running a minimal cluster in
    # github actions and for testing drenv.
    # [ERROR NumCPU]: the number of available CPUs 1 is less than the required 2
    kubeadm init --config kubeadm-config.yaml --ignore-preflight-errors NumCPU
    # Scale down coredns like minikube
    kubectl scale deploy coredns -n kube-system --replicas=1
    # Installing a Pod network add-on
    kubectl apply -f https://github.com/flannel-io/flannel/releases/download/v0.24.0/kube-flannel.yml
    # Control plane node isolation
    kubectl taint nodes --all node-role.kubernetes.io/control-plane-
- mode: system
  script: |
    #!/bin/bash
    set -eux -o pipefail
    KUBECONFIG=/etc/kubernetes/admin.conf
    mkdir -p ${HOME:-/root}/.kube
    cp -f $KUBECONFIG ${HOME:-/root}/.kube/config
    mkdir -p {{.Home}}/.kube
    cp -f $KUBECONFIG {{.Home}}/.kube/config
    chown -R {{.User}} {{.Home}}/.kube
probes:
- description: kubeadm installed
  script: |
    #!/bin/bash
    set -eux -o pipefail
    if ! timeout 30s bash -c "until command -v kubeadm >/dev/null 2>&1; do sleep 1; done"; then
      echo >&2 "kubeadm is not installed yet"
      exit 1
    fi
  hint: |
    See "/var/log/cloud-init-output.log" in the guest.
- description: kubeadm completed
  script: |
    #!/bin/bash
    set -eux -o pipefail
    if ! timeout 300s bash -c "until test -f /etc/kubernetes/admin.conf; do sleep 1; done"; then
      echo >&2 "k8s is not running yet"
      exit 1
    fi
  hint: |
    The k8s kubeconfig file has not yet been created.
- description: kubernetes cluster is ready
  script: |
    #!/bin/bash
    set -eux -o pipefail
    if ! timeout 300s bash -c "until kubectl get --raw /readyz >/dev/null 2>&1; do sleep 1; done"; then
      echo >&2 "kubernetes cluster is not ready yet"
      exit 1
    fi
copyToHost:
- guest: /etc/kubernetes/admin.conf
  host: '{{.Dir}}/copied-from-guest/kubeconfig.yaml'
  deleteOnStop: true
vmType: vz
networks:
- socket: /var/run/socket_vmnet
cpus: 2
memory: 3g
disk: 20g
additionalDisks: []
Reproduce with a minimal yaml:
base:
- template://_images/ubuntu
vmType: vz
cpus: 1
memory: 1g
mounts: []
containerd:
  system: true
  user: false
portForwards:
- guestIP: 0.0.0.0
  proto: any
  ignore: true
% limactl create test.yaml
? Creating an instance "test" Proceed with the current configuration
INFO[0002] Attempting to download the image arch=aarch64 digest="sha256:696b9ec765c0e3964b7845feb7217ef7e159e026c6b70eb0c8c2fed34e067067" location="https://cloud-images.ubuntu.com/releases/questing/release-20251023/ubuntu-25.10-server-cloudimg-arm64.img"
Downloading the image (ubuntu-25.10-server-cloudimg-arm64.img)
837.38 MiB / 837.38 MiB [----------------------------------] 100.00% 24.45 MiB/s
INFO[0036] Downloaded the image from "https://cloud-images.ubuntu.com/releases/questing/release-20251023/ubuntu-25.10-server-cloudimg-arm64.img"
INFO[0040] Attempting to download the nerdctl archive arch=aarch64 digest="sha256:5398f037ae095d43cf3cb8c30765a24e511e38cafe02977b928a41b26e842ed1" location="https://github.com/containerd/nerdctl/releases/download/v2.2.0/nerdctl-full-2.2.0-linux-arm64.tar.gz"
INFO[0040] Using cache "/Users/nsoffer/Library/Caches/lima/download/by-url-sha256/e27422aaa9393572e7dc134eeb158f08328f22ca5e7556c80bdd221497a0a84a/data"
INFO[0040] Run `limactl start test` to start the instance.
% limactl start test
INFO[0000] Using the existing instance "test"
INFO[0000] Starting the instance "test" with internal VM driver "vz"
INFO[0001] [hostagent] TCP (except for SSH) and UDP port forwarding is disabled
INFO[0001] [hostagent] hostagent socket created at /Users/nsoffer/.lima/test/ha.sock
INFO[0001] [hostagent] Starting VZ (hint: to watch the boot progress, see "/Users/nsoffer/.lima/test/serial*.log")
INFO[0001] [hostagent] [VZ] - vm state change: running
INFO[0009] [hostagent] Started vsock forwarder: 127.0.0.1:52327 -> vsock:22 on VM
INFO[0009] [hostagent] Detected SSH server is listening on the vsock port; changed 127.0.0.1:52327 to proxy for the vsock port
INFO[0010] SSH Local Port: 52327
INFO[0010] [hostagent] Waiting for the essential requirement 1 of 3: "ssh"
INFO[0011] [hostagent] The essential requirement 1 of 3 is satisfied
INFO[0011] [hostagent] Waiting for the essential requirement 2 of 3: "user session is ready for ssh"
INFO[0012] [hostagent] The essential requirement 2 of 3 is satisfied
INFO[0012] [hostagent] Waiting for the essential requirement 3 of 3: "Explicitly start ssh ControlMaster"
INFO[0012] [hostagent] The essential requirement 3 of 3 is satisfied
INFO[0012] [hostagent] Waiting for the optional requirement 1 of 2: "systemd must be available"
INFO[0012] [hostagent] Guest agent is running
INFO[0012] [hostagent] Forwarding UDP from 127.0.0.1:323 to 127.0.0.1:323
INFO[0012] [hostagent] The optional requirement 1 of 2 is satisfied
INFO[0012] [hostagent] Forwarding UDP from [::1]:323 to 127.0.0.1:323
INFO[0012] [hostagent] Waiting for the optional requirement 2 of 2: "containerd binaries to be installed"
ERRO[0012] [hostagent] failed to listen udp: listen udp 0.0.0.0:323: bind: address already in use
ERRO[0012] [hostagent] failed to listen udp: listen udp 0.0.0.0:323: bind: address already in use
INFO[0021] [hostagent] The optional requirement 2 of 2 is satisfied
INFO[0021] [hostagent] Waiting for the guest agent to be running
INFO[0021] [hostagent] Waiting for the final requirement 1 of 1: "boot scripts must have finished"
INFO[0022] [hostagent] Forwarding TCP from 127.0.0.1:35693 to 127.0.0.1:35693
INFO[0024] [hostagent] The final requirement 1 of 1 is satisfied
INFO[0024] READY. Run `limactl shell test` to open the shell.
#4221 seems to be the root cause.
This is a backward incompatible change (as the PR claims): https://github.com/lima-vm/lima/pull/4221/commits/4d04ff373b7d47105688c2424053eb4ae04ca67d#diff-e9ced80afbf7de8678a2483dce1448551731173b533ec60bdca549cfc7baa194
The docs for ignoring port forwards were not updated to show the minimal example:
portForwards:
- ignore: true
  guestIP: 0.0.0.0
  guestIPMustBeZero: false
  proto: any
This change does not look good; we really need a simpler way to ignore all port forwarding. Using the zero value of all fields would be much better.
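For example, with zero-value matching, ignoring all port forwarding could shrink to a single-field rule. A hypothetical sketch of the proposed behavior, not syntax that works in 2.0.1 (where the extra fields above are required):
portForwards:
# Hypothetical: unset fields keep their zero values and match everything.
- ignore: true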
With this yaml it works:
base:
- template://_images/ubuntu
vmType: vz
cpus: 1
memory: 1g
mounts: []
containerd:
  system: true
  user: false
portForwards:
- guestIP: 0.0.0.0
  guestIPMustBeZero: false
  proto: any
  ignore: true
% limactl start test.yaml
? Creating an instance "test" Proceed with the current configuration
INFO[0002] Starting the instance "test" with internal VM driver "vz"
INFO[0002] Attempting to download the image arch=aarch64 digest="sha256:9ee451b7c20f47558e555dc3d595edd0ee3bc6d878c9e92cae389ee78c4ccd59" location="https://cloud-images.ubuntu.com/releases/questing/release-20251031/ubuntu-25.10-server-cloudimg-arm64.img"
INFO[0002] Using cache "/Users/nsoffer/Library/Caches/lima/download/by-url-sha256/b6b961865adb8968513153e0c986f0e0a2fe73046958419fe878f1bde635296e/data"
INFO[0005] Attempting to download the nerdctl archive arch=aarch64 digest="sha256:5398f037ae095d43cf3cb8c30765a24e511e38cafe02977b928a41b26e842ed1" location="https://github.com/containerd/nerdctl/releases/download/v2.2.0/nerdctl-full-2.2.0-linux-arm64.tar.gz"
INFO[0005] Using cache "/Users/nsoffer/Library/Caches/lima/download/by-url-sha256/e27422aaa9393572e7dc134eeb158f08328f22ca5e7556c80bdd221497a0a84a/data"
INFO[0005] [hostagent] TCP (except for SSH) and UDP port forwarding is disabled
INFO[0005] [hostagent] hostagent socket created at /Users/nsoffer/.lima/test/ha.sock
INFO[0005] [hostagent] Starting VZ (hint: to watch the boot progress, see "/Users/nsoffer/.lima/test/serial*.log")
INFO[0006] [hostagent] [VZ] - vm state change: running
INFO[0014] [hostagent] Started vsock forwarder: 127.0.0.1:55795 -> vsock:22 on VM
INFO[0014] [hostagent] Detected SSH server is listening on the vsock port; changed 127.0.0.1:55795 to proxy for the vsock port
INFO[0015] SSH Local Port: 55795
INFO[0015] [hostagent] Waiting for the essential requirement 1 of 3: "ssh"
INFO[0015] [hostagent] The essential requirement 1 of 3 is satisfied
INFO[0015] [hostagent] Waiting for the essential requirement 2 of 3: "user session is ready for ssh"
INFO[0026] [hostagent] Waiting for the essential requirement 2 of 3: "user session is ready for ssh"
INFO[0026] [hostagent] The essential requirement 2 of 3 is satisfied
INFO[0026] [hostagent] Waiting for the essential requirement 3 of 3: "Explicitly start ssh ControlMaster"
INFO[0027] [hostagent] The essential requirement 3 of 3 is satisfied
INFO[0027] [hostagent] Waiting for the optional requirement 1 of 2: "systemd must be available"
INFO[0027] [hostagent] Guest agent is running
INFO[0027] [hostagent] The optional requirement 1 of 2 is satisfied
INFO[0027] [hostagent] Waiting for the optional requirement 2 of 2: "containerd binaries to be installed"
INFO[0027] [hostagent] The optional requirement 2 of 2 is satisfied
INFO[0027] [hostagent] Waiting for the guest agent to be running
INFO[0027] [hostagent] Waiting for the final requirement 1 of 1: "boot scripts must have finished"
INFO[0027] [hostagent] The final requirement 1 of 1 is satisfied
INFO[0028] READY. Run `limactl shell test` to open the shell.
Since this was already released, it is probably too late to change the public API. We can consider simplifying it for a future release.
We need to fix the docs to show the minimal example for disabling port forwarding. Keeping this issue open for this fix.
The backward incompatible change was decided/confirmed in https://github.com/lima-vm/lima/issues/4193#issuecomment-3409221743.