flannel didn't work
I followed the link below to set up a k8s cluster with containerd as the container runtime and an external etcd, using the config below: https://kubernetes.io/docs/setup/production-environment/container-runtimes/
master01 --> 192.168.1.85
master02 --> 192.168.1.86
haproxy01, keepalived --> 192.168.1.87, VIP --> 192.168.1.88
worker01 --> 192.168.1.90
worker02 --> 192.168.1.91
Install containerd, kubelet, kubeadm, and kubectl on all cluster nodes.
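For reference, the install on each node followed the standard Ubuntu steps; this is a sketch from memory, with the packages pinned to the cluster version v1.22.4:

# add the Kubernetes apt repository (Ubuntu 20.04)
sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificates curl
sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list

# install containerd and the pinned Kubernetes components, then hold them
sudo apt-get update
sudo apt-get install -y containerd kubelet=1.22.4-00 kubeadm=1.22.4-00 kubectl=1.22.4-00
sudo apt-mark hold kubelet kubeadm kubectl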
HAProxy setup

root@haproxy01:~# cat /etc/haproxy/haproxy.cfg
global
    ...
defaults
    ...
frontend k8s_frontend
    bind 192.168.1.88:6443
    option tcplog
    mode tcp
    default_backend k8s_backend

backend k8s_backend
    mode tcp
    balance roundrobin
    option tcp-check
    server master01 192.168.1.85:6443 check fall 3 rise 2
    server master02 192.168.1.86:6443 check fall 3 rise 2
    server haproxy01 192.168.1.87:6443 check fall 3 rise 2
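To confirm the frontend really forwards to an apiserver once the control plane is up, the VIP can be probed from any node (quick checks, not part of the original setup; anonymous /healthz is allowed by default RBAC):

# TCP reachability of the VIP
nc -zv 192.168.1.88 6443
# apiserver health through the load balancer
curl -k https://192.168.1.88:6443/healthz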
Keepalived setup
root@haproxy01:~# cat /etc/keepalived/keepalived.conf
vrrp_script chk_haproxy {          # Requires keepalived-1.1.13
    script "killall -0 haproxy"    # cheaper than pidof
    interval 2                     # check every 2 seconds
    weight 2                       # add 2 points of prio if OK
}

vrrp_instance VI_1 {
    interface enp0s3
    state MASTER
    virtual_router_id 51
    priority 100                   # 101 on master, 100 on backup
    virtual_ipaddress {
        192.168.1.88 brd 192.168.1.255 dev enp0s3 label enp0s3:1
    }
    track_script {
        chk_haproxy
    }
}
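Whether the active node actually holds the VIP can be checked directly (plain iproute2, nothing specific to this setup):

root@haproxy01:~# ip -4 addr show enp0s3
# expect 192.168.1.88 listed with label enp0s3:1 while this node is MASTER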
Generating the TLS certificates
$ vim ca-config.json
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": ["signing", "key encipherment", "server auth", "client auth"],
        "expiry": "8760h"
      }
    }
  }
}
$ vim ca-csr.json
{
  "CN": "Kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "IE",
      "L": "Cork",
      "O": "Kubernetes",
      "OU": "CA",
      "ST": "Cork Co."
    }
  ]
}
$ cfssl gencert -initca ca-csr.json | cfssljson -bare ca

$ vim kubernetes-csr.json
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "IE",
      "L": "Cork",
      "O": "Kubernetes",
      "OU": "Kubernetes",
      "ST": "Cork Co."
    }
  ]
}
$ cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -hostname=192.168.1.85,192.168.1.86,192.168.1.87,192.168.1.88,192.168.1.89,127.0.0.1,kubernetes.default \
  -profile=kubernetes \
  kubernetes-csr.json | cfssljson -bare kubernetes
$ scp ca.pem kubernetes.pem kubernetes-key.pem root@192.168.1.85:/etc/etcd/
$ scp ca.pem kubernetes.pem kubernetes-key.pem root@192.168.1.86:/etc/etcd/
$ scp ca.pem kubernetes.pem kubernetes-key.pem root@192.168.1.87:/etc/etcd/
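Before distributing the cert, its SANs can be double-checked with plain openssl (nothing specific to this setup):

openssl x509 -in kubernetes.pem -noout -text | grep -A1 "Subject Alternative Name"
# should list 192.168.1.85-89, 127.0.0.1 and kubernetes.default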
Etcd setup on the two masters and haproxy01

root@master01:~# cat /etc/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos

[Service]
ExecStart=/usr/local/bin/etcd \
  --name 192.168.1.85 \
  --cert-file=/etc/etcd/kubernetes.pem \
  --key-file=/etc/etcd/kubernetes-key.pem \
  --peer-cert-file=/etc/etcd/kubernetes.pem \
  --peer-key-file=/etc/etcd/kubernetes-key.pem \
  --trusted-ca-file=/etc/etcd/ca.pem \
  --peer-trusted-ca-file=/etc/etcd/ca.pem \
  --peer-client-cert-auth \
  --client-cert-auth \
  --initial-advertise-peer-urls https://192.168.1.85:2380 \
  --listen-peer-urls https://192.168.1.85:2380 \
  --listen-client-urls https://192.168.1.85:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://192.168.1.85:2379 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster 192.168.1.85=https://192.168.1.85:2380,192.168.1.86=https://192.168.1.86:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
root@master02:~# cat /etc/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos

[Service]
ExecStart=/usr/local/bin/etcd \
  --name 192.168.1.86 \
  --cert-file=/etc/etcd/kubernetes.pem \
  --key-file=/etc/etcd/kubernetes-key.pem \
  --peer-cert-file=/etc/etcd/kubernetes.pem \
  --peer-key-file=/etc/etcd/kubernetes-key.pem \
  --trusted-ca-file=/etc/etcd/ca.pem \
  --peer-trusted-ca-file=/etc/etcd/ca.pem \
  --peer-client-cert-auth \
  --client-cert-auth \
  --initial-advertise-peer-urls https://192.168.1.86:2380 \
  --listen-peer-urls https://192.168.1.86:2380 \
  --listen-client-urls https://192.168.1.86:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://192.168.1.86:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster 192.168.1.85=https://192.168.1.85:2380,192.168.1.86=https://192.168.1.86:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
root@haproxy01:~# cat /etc/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos

[Service]
ExecStart=/usr/local/bin/etcd \
  --name 192.168.1.87 \
  --cert-file=/etc/etcd/kubernetes.pem \
  --key-file=/etc/etcd/kubernetes-key.pem \
  --peer-cert-file=/etc/etcd/kubernetes.pem \
  --peer-key-file=/etc/etcd/kubernetes-key.pem \
  --trusted-ca-file=/etc/etcd/ca.pem \
  --peer-trusted-ca-file=/etc/etcd/ca.pem \
  --peer-client-cert-auth \
  --client-cert-auth \
  --initial-advertise-peer-urls https://192.168.1.87:2380 \
  --listen-peer-urls https://192.168.1.87:2380 \
  --listen-client-urls https://192.168.1.87:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://192.168.1.87:2379 \
  --initial-cluster-token etcd-cluster-1 \
  --initial-cluster 192.168.1.85=https://192.168.1.85:2380,192.168.1.86=https://192.168.1.86:2380,192.168.1.87=https://192.168.1.87:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd \
  --heartbeat-interval '1000' \
  --election-timeout '10000'
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
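On each of the three nodes the unit is then loaded and started the usual systemd way:

systemctl daemon-reload
systemctl enable --now etcd
systemctl status etcd --no-pager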
root@master01:~# ETCDCTL_API=3 etcdctl member list
123c02beae00dc2d, started, 192.168.1.85, https://192.168.1.85:2380, https://192.168.1.85:2379, false
163611f60028fb12, started, 192.168.1.87, https://192.168.1.87:2380, https://192.168.1.87:2379, false
531a86cf76f6bb38, started, 192.168.1.86, https://192.168.1.86:2380, https://192.168.1.86:2379, false
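Cluster health can also be verified over TLS; the flags below just reuse the cert paths from the unit files above:

ETCDCTL_API=3 etcdctl \
  --endpoints=https://192.168.1.85:2379,https://192.168.1.86:2379,https://192.168.1.87:2379 \
  --cacert=/etc/etcd/ca.pem \
  --cert=/etc/etcd/kubernetes.pem \
  --key=/etc/etcd/kubernetes-key.pem \
  endpoint health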
Prepare cluster config file

root@master01:~/k8s# cat cluster.yaml
kind: InitConfiguration
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
localAPIEndpoint:
  advertiseAddress: 192.168.1.85
  bindPort: 6443
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  taints: null
---
kind: ClusterConfiguration
apiVersion: kubeadm.k8s.io/v1beta3
etcd:
  external:
    caFile: /etc/etcd/ca.pem
    certFile: /etc/etcd/kubernetes.pem
    keyFile: /etc/etcd/kubernetes-key.pem
    endpoints:
    - https://192.168.1.85:2379
    - https://192.168.1.86:2379
    - https://192.168.1.87:2379
networking:
  dnsDomain: cluster.local
  podSubnet: 10.30.0.0/24
  serviceSubnet: 10.96.0.0/12
kubernetesVersion: v1.22.4
controlPlaneEndpoint: 192.168.1.88:6443
apiServer:
  timeoutForControlPlane: 5m0s
  extraArgs:
    authorization-mode: "RBAC,Node"
    request-timeout: "500s"
  certSANs:
  - "127.0.0.1"
  - "192.168.1.85"
  - "192.168.1.86"
  - "192.168.1.87"
  - "192.168.1.88"
  - "192.168.1.89"
  - "*.fe.me"
controllerManager: {}
scheduler: {}
certificatesDir: /etc/kubernetes/pki
imageRepository: k8s.gcr.io
clusterName: k8s-cluster
dns: {}

The kubeadm:get-nodes RBAC objects:

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: kubeadm:get-nodes
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubeadm:get-nodes
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubeadm:get-nodes
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:kubeadm:default-node-token
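These RBAC objects are ordinary cluster manifests rather than kubeadm config, so I assume they live in their own file and get applied once the control plane is up (the filename here is mine):

kubectl apply -f kubeadm-get-nodes-rbac.yaml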
root@master01:~# kubeadm init --config=cluster.yaml
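After init, kubectl was pointed at the new cluster with the standard steps kubeadm prints:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config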
After Kubernetes was set up successfully, I applied the flannel network add-on and got an error:
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
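Worth noting: the stock kube-flannel.yml ships a net-conf.json with Network 10.244.0.0/16, while my podSubnet is 10.30.0.0/24, so the deployed ConfigMap is worth inspecting (ConfigMap name taken from the default manifest):

kubectl -n kube-system get configmap kube-flannel-cfg -o jsonpath='{.data.net-conf\.json}'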
root@haproxy01:~# kubectl logs -f pod/kube-flannel-ds-zfs5b
Error from server (NotFound): pods "kube-flannel-ds-zfs5b" not found

The pod lives in the kube-system namespace, so the namespace flag is needed:

root@haproxy01:~# kubectl logs -f pod/kube-flannel-ds-zfs5b -n kube-system
I1202 15:09:12.455692 1 main.go:217] CLI flags config: {etcdEndpoints:http://127.0.0.1:4001,http://127.0.0.1:2379 etcdPrefix:/coreos.com/network etcdKeyfile: etcdCertfile: etcdCAFile: etcdUsername: etcdPassword: help:false version:false autoDetectIPv4:false autoDetectIPv6:false kubeSubnetMgr:true kubeApiUrl: kubeAnnotationPrefix:flannel.alpha.coreos.com kubeConfigFile: iface:[] ifaceRegex:[] ipMasq:true subnetFile:/run/flannel/subnet.env subnetDir: publicIP: publicIPv6: subnetLeaseRenewMargin:60 healthzIP:0.0.0.0 healthzPort:0 charonExecutablePath: charonViciUri: iptablesResyncSeconds:5 iptablesForwardRules:true netConfPath:/etc/kube-flannel/net-conf.json setNodeNetworkUnavailable:true}
W1202 15:09:12.455819 1 client_config.go:608] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.
I1202 15:09:17.161267 1 kube.go:120] Waiting 10m0s for node controller to sync
I1202 15:09:17.161331 1 kube.go:378] Starting kube subnet manager
I1202 15:09:18.162998 1 kube.go:127] Node controller sync successful
I1202 15:09:18.163040 1 main.go:237] Created subnet manager: Kubernetes Subnet Manager - worker01.fe.me
I1202 15:09:18.163045 1 main.go:240] Installing signal handlers
I1202 15:09:18.163340 1 main.go:459] Found network config - Backend type: vxlan
I1202 15:09:18.163417 1 main.go:651] Determining IP address of default interface
I1202 15:09:18.166539 1 main.go:698] Using interface with name enp0s3 and address 192.168.1.90
I1202 15:09:18.166610 1 main.go:720] Defaulting external address to interface address (192.168.1.90)
I1202 15:09:18.166615 1 main.go:733] Defaulting external v6 address to interface address (
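The log output cuts off above; for a fuller picture of the pod's state, these are the obvious follow-up commands (pod name as reported earlier):

kubectl -n kube-system get pods -o wide | grep flannel
kubectl -n kube-system describe pod kube-flannel-ds-zfs5b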
Your Environment

- Flannel version: latest
- Backend used (e.g. vxlan or udp): vxlan (per the log above)
- Etcd version: 3.5.1
- Kubernetes version (if used): v1.22.4
- Operating System and version: Ubuntu 20.04.3 LTS (focal)

  root@master01:~/k8s# uname -a
  Linux master01.fe.me 5.4.0-90-generic #101-Ubuntu SMP Fri Oct 15 20:00:55 UTC 2021 x86_64 x86_64 x86_64 GNU/Linux
  root@master01:~/k8s# cat /etc/lsb-release
  DISTRIB_ID=Ubuntu
  DISTRIB_RELEASE=20.04
  DISTRIB_CODENAME=focal
  DISTRIB_DESCRIPTION="Ubuntu 20.04.3 LTS"

- Link to your project (optional):