zfs pool definitions
Define pool vms-on-zfs, assuming pool tank already exists.
virsh # pool-define-as --name vms-on-zfs --source-name tank --type zfs
Pool vms-on-zfs defined
Start pool vms-on-zfs
virsh # pool-start vms-on-zfs
Pool vms-on-zfs started
Mark pool vms-on-zfs for autostart.
virsh # pool-autostart vms-on-zfs
Pool vms-on-zfs marked as autostarted
Then use vms-on-zfs in terraform resources.
- for some reason terraform times out on creating images
- and the pool becomes inactive (recovery commands below):
  vms-on-zfs   inactive   yes
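When that happens the pool can be checked and brought back by hand before retrying terraform; a minimal sketch using stock virsh commands:
virsh pool-list --all          # vms-on-zfs shows up as inactive here
virsh pool-info vms-on-zfs     # state, capacity, allocation
virsh pool-start vms-on-zfs    # bring it back online
virsh pool-refresh vms-on-zfs  # re-scan the volumes in the pool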
folder method
zfs create tank/vm-disks
virsh pool-define-as --name default --type dir --target /tank/vm-disks/
Pool default defined
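The dir pool then needs the same start/autostart treatment as the zfs-backed one; a minimal sketch:
virsh pool-build default       # creates the target directory if it is missing
virsh pool-start default
virsh pool-autostart default
virsh pool-list --all          # default should now be active with autostart yes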
NAT gateway node
2: ens3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 52:54:00:8a:d8:25 brd ff:ff:ff:ff:ff:ff
    altname enp0s3
    inet 10.98.98.254/24 brd 10.98.98.255 scope global ens3
       valid_lft forever preferred_lft forever
    inet 24.255.161.241/24 brd 24.255.161.255 scope global ens3
       valid_lft forever preferred_lft forever
    inet6 2006:aaaa:bbbb:3::1:d825/112 scope global
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fe8a:d825/64 scope link
       valid_lft forever preferred_lft forever
nft add table nat
nft -- add chain nat prerouting { type nat hook prerouting priority -100 \; }
nft add chain nat postrouting { type nat hook postrouting priority 100 \; }
nft add rule nat postrouting ip saddr 10.98.98.0/24 snat to 24.255.161.241
nft add rule nat prerouting ip daddr 24.255.161.241 tcp dport { 80, 443, 6443 } dnat to 10.98.98.1

# nft list table nat
table ip nat {
    chain prerouting {
        type nat hook prerouting priority dstnat; policy accept;
    }

    chain postrouting {
        type nat hook postrouting priority srcnat; policy accept;
        ip saddr 10.98.98.0/24 snat to 24.255.161.241
    }
}
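These rules only live in the running kernel, and the NAT also needs forwarding enabled on the gateway. A minimal sketch for making both stick, assuming a Debian-style nftables service that loads /etc/nftables.conf at boot:
# enable routing between the public address and 10.98.98.0/24
sysctl -w net.ipv4.ip_forward=1
sysctl -w net.ipv6.conf.all.forwarding=1

# persist the current ruleset so it is reloaded on boot
nft list ruleset > /etc/nftables.conf
systemctl enable nftables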
k8s nodes
- node1: 10.98.98.1/24 2006:aaaa:bbbb:3::1:f286/112
- node2: 10.98.98.2/24 2006:aaaa:bbbb:3::1:2a4f/112
- node3: 10.98.98.3/24 2006:aaaa:bbbb:3::1:aa0b/112
- node4: 10.98.98.4/24 2006:aaaa:bbbb:3::1:57c1/112
- node5: 10.98.98.5/24 2006:aaaa:bbbb:3::1:2047/112
- node6: 10.98.98.6/24 2006:aaaa:bbbb:3::1:892d/112
- node7: 10.98.98.7/24 2006:aaaa:bbbb:3::1:5cdf/112
- node8: 10.98.98.8/24 2006:aaaa:bbbb:3::1:833c/112
Node preparations
sudo modprobe overlay
sudo modprobe br_netfilter
sudo tee /etc/sysctl.d/kubernetes.conf<<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.ipv6.conf.all.forwarding=1
EOF
sudo sysctl --system

apt -y update
apt-get install -y apt-transport-https ca-certificates curl gpg
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | sudo gpg --dearmor -o /etc/apt/trusted.gpg.d/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/trusted.gpg.d/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
apt update
apt-get install -y kubelet kubeadm kubectl
# pin version
apt-mark hold kubelet kubeadm kubectl

export VERSION=1.28
export OS=Debian_11
echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/ /" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
echo "deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/ /" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.list
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/Release.key | apt-key add -
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | apt-key add -
apt update
apt install cri-o cri-o-runc -y
systemctl enable crio
systemctl restart crio
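Quick sanity check on each node before running kubeadm; a small sketch, assuming crictl came in as a cri-tools dependency of kubeadm and the default CRI-O socket path:
systemctl is-active crio                                   # expect: active
kubeadm version -o short
kubelet --version
crictl --runtime-endpoint unix:///var/run/crio/crio.sock version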
# on node6
export LOAD_BALANCER_DNS=my-lb.my-domain.test.com
export LOAD_BALANCER_PORT=6443
kubeadm init --control-plane-endpoint "${LOAD_BALANCER_DNS}:${LOAD_BALANCER_PORT}" --upload-certs --pod-network-cidr 10.111.0.0/16,fd44:f220:de64::/56 --service-cidr 10.222.0.0/16,fd06:48e0:7314::/112
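After init, copy the admin kubeconfig and join the remaining nodes. The token, CA cert hash, and certificate key below are placeholders; the real values are printed by kubeadm init (or regenerated with kubeadm token create --print-join-command). A minimal sketch:
# on node6
mkdir -p $HOME/.kube
sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# additional control-plane nodes
kubeadm join "${LOAD_BALANCER_DNS}:${LOAD_BALANCER_PORT}" --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash> \
    --control-plane --certificate-key <key>

# worker nodes
kubeadm join "${LOAD_BALANCER_DNS}:${LOAD_BALANCER_PORT}" --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash>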
Remove the taint so pods can be scheduled on the control-plane nodes.
kubectl taint nodes m7.home.arpa node-role.kubernetes.io/control-plane:NoSchedule-
kubectl taint nodes n6.home.arpa node-role.kubernetes.io/control-plane:NoSchedule-
kubectl taint nodes n8.home.arpa node-role.kubernetes.io/control-plane:NoSchedule-
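To confirm the taints are gone, for example:
kubectl describe node n6.home.arpa | grep -i taints   # expect: <none>
kubectl get nodes -o wide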
Cilium in dual-stack mode
helm repo add cilium https://helm.cilium.io/
helm install cilium cilium/cilium --version 1.15.1 --namespace kube-system \
    --set bgpControlPlane.enabled=true --set ipv6.enabled=true
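To check that the agents come up and both address families are handed out to the nodes, for example:
kubectl -n kube-system get pods -l k8s-app=cilium
kubectl get nodes -o custom-columns=NAME:.metadata.name,PODCIDRS:.spec.podCIDRs
# with the cilium CLI installed, `cilium status` gives a fuller summary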