Skip to content

Instantly share code, notes, and snippets.

@intlabs
Last active June 21, 2021 19:32
Show Gist options
  • Select an option

  • Save intlabs/c96b77594469eb871999ec6ca2451c4a to your computer and use it in GitHub Desktop.
home-k8s
#!/bin/bash
# Install Calico via the Tigera operator, then switch it to the eBPF
# dataplane with Direct Server Return (DSR).
#
# Requires: kubectl and calicoctl configured against the target cluster.
set -euxo pipefail

# Deploy the Tigera operator, which owns the Calico lifecycle.
kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml

# Base Calico installation configuration.
# See: https://docs.projectcalico.org/v3.19/reference/installation/api#operator.tigera.io/v1.Installation
cat <<EOF | kubectl apply -f -
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    # Autodetect the node IP as the interface that can reach the gateway.
    nodeAddressAutodetectionV4:
      canReach: 192.168.1.1
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
      - blockSize: 26
        cidr: 172.16.0.0/20
        encapsulation: None
        natOutgoing: Enabled
        nodeSelector: all()
EOF

# Tell Calico how to reach the API server directly; required for the eBPF
# dataplane, which bypasses kube-proxy.
cat <<EOF | kubectl apply -f -
kind: ConfigMap
apiVersion: v1
metadata:
  name: kubernetes-services-endpoint
  namespace: tigera-operator
data:
  KUBERNETES_SERVICE_HOST: "soulard.localdomain"
  KUBERNETES_SERVICE_PORT: "6443"
EOF

# Enable the eBPF dataplane and DSR for external service traffic.
kubectl patch installation.operator.tigera.io default --type merge \
  -p '{"spec":{"calicoNetwork":{"linuxDataplane":"BPF", "hostPorts":null}}}'
calicoctl patch felixconfiguration default \
  --patch='{"spec": {"bpfExternalServiceMode": "DSR"}}'
#!/bin/bash
# Bootstrap Flux (GitOps) against a personal GitHub repository, then
# clone the resulting repo for local editing.
set -euxo pipefail

# Never hardcode the token in the script: require it from the environment.
# (flux reads GITHUB_TOKEN; create a PAT with 'repo' scope.)
: "${GITHUB_TOKEN:?GITHUB_TOKEN must be set to a GitHub PAT with repo scope}"
export GITHUB_TOKEN

flux bootstrap github \
  --owner=intlabs \
  --repository=soulard-fluxcd \
  --path=clusters/soulard \
  --personal

git clone [email protected]:intlabs/soulard-fluxcd.git \
  "${HOME}/Development/github.com/intlabs/soulard-fluxcd"
#!/bin/bash
# Provision a single-node Kubernetes control plane on Ubuntu/Debian:
#   kernel prereqs -> kube packages -> kubeadm init -> Calico CNI -> untaint.
# Assumes containerd is already installed and running.
set -euxo pipefail

# --- Kernel prerequisites -------------------------------------------------
# Load br_netfilter now and on every boot; bridged traffic must traverse
# iptables for kube-proxy/CNI to work.
sudo tee /etc/modules-load.d/k8s.conf <<EOF
br_netfilter
EOF
sudo modprobe br_netfilter
sudo tee /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sudo sysctl --system

# --- Kubernetes and Helm packages ----------------------------------------
sudo apt-get update
sudo apt-get install -y --no-install-recommends \
  apt-transport-https \
  curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
sudo tee /etc/apt/sources.list.d/kubernetes.list <<EOF
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
sudo apt-get install -y --no-install-recommends kubelet kubeadm kubectl conntrack cri-tools ebtables ethtool kubernetes-cni socat ipvsadm
#sudo apt-mark hold kubelet kubeadm kubectl

# Point crictl at containerd so 'crictl' works without flags.
sudo tee /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF

curl https://baltocdn.com/helm/signing.asc | sudo apt-key add -
sudo tee /etc/apt/sources.list.d/helm-stable-debian.list <<EOF
deb https://baltocdn.com/helm/stable/debian/ all main
EOF
sudo apt-get update
sudo apt-get install -y --no-install-recommends \
  helm

# --- kubeadm configuration ------------------------------------------------
sudo mkdir -p /etc/kubernetes
sudo tee /etc/kubernetes/kubeadm-input.yaml <<EOF
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
controlPlaneEndpoint: soulard.localdomain:6443
clusterName: soulard
networking:
  dnsDomain: cluster.local
  podSubnet: 172.16.0.0/20
  serviceSubnet: 172.16.16.0/20
apiServer:
  extraArgs:
    # Allow NodePorts on low ports (80/443) for ingress without a LB.
    service-node-port-range: 80-32767
  certSANs:
    - 192.168.1.2
    - soulard
    - soulard.localdomain
    - soulard.port.direct
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
ipvs:
  # strictARP is required by MetalLB in L2/IPVS setups.
  strictARP: true
...
EOF

# NOTE(review): the original script wrote a richer kubelet config first
# (static CPU manager, kube/system reservations, eviction thresholds) and
# then immediately overwrote it with the line below, making the first write
# dead code. Only the effective configuration is kept; the abandoned flags
# were:
#   --container-runtime=remote --runtime-request-timeout=15m
#   --container-runtime-endpoint=unix:///run/containerd/containerd.sock
#   --cpu-manager-policy=static
#   --kube-reserved="cpu=1,memory=2Gi,ephemeral-storage=1Gi"
#   --system-reserved="cpu=500m,memory=1Gi,ephemeral-storage=1Gi"
#   --eviction-hard="memory.available<500Mi,nodefs.available<10%"
sudo tee /etc/default/kubelet <<EOF
KUBELET_EXTRA_ARGS='--cgroup-driver=systemd'
EOF

# --- Bring up the control plane ------------------------------------------
sudo kubeadm config images pull --config /etc/kubernetes/kubeadm-input.yaml
sudo kubeadm init --upload-certs --config /etc/kubernetes/kubeadm-input.yaml

# --- Calico CNI (operator-based, eBPF dataplane) --------------------------
kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml

# Base Calico installation configuration.
# See: https://docs.projectcalico.org/v3.19/reference/installation/api#operator.tigera.io/v1.Installation
cat <<EOF | kubectl apply -f -
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    nodeAddressAutodetectionV4:
      canReach: 192.168.1.1
    # Note: The ipPools section cannot be modified post-install.
    ipPools:
      - blockSize: 26
        cidr: 172.16.0.0/20
        encapsulation: None
        natOutgoing: Enabled
        nodeSelector: all()
EOF

# Direct API-server endpoint for Calico; needed by the eBPF dataplane.
cat <<EOF | kubectl apply -f -
kind: ConfigMap
apiVersion: v1
metadata:
  name: kubernetes-services-endpoint
  namespace: tigera-operator
data:
  KUBERNETES_SERVICE_HOST: "soulard.localdomain"
  KUBERNETES_SERVICE_PORT: "6443"
EOF

kubectl patch installation.operator.tigera.io default --type merge \
  -p '{"spec":{"calicoNetwork":{"linuxDataplane":"BPF", "hostPorts":null}}}'
calicoctl patch felixconfiguration default \
  --patch='{"spec": {"bpfExternalServiceMode": "DSR"}}'

# Install a kubeconfig for the invoking user.
mkdir -p "${HOME}/.kube"
sudo cp -f /etc/kubernetes/admin.conf "${HOME}/.kube/config"
sudo chown "$(id -u):$(id -g)" "${HOME}/.kube/config"

# NOTE(review): this manifest-based calico.yaml apply looks redundant with
# the operator-based install above (two installation methods for the same
# CNI) — confirm whether it is still needed. Kept as-is; the sed rewrites
# CALICO_IPV4POOL_IPIP to "Never" (indent restored to match the manifest)
# and mirrors images from docker.io to quay.io.
curl https://docs.projectcalico.org/manifests/calico.yaml | \
  sed '/ - name: CALICO_IPV4POOL_IPIP/{n;s/.*/              value: "Never"/}' | \
  sed 's|docker.io|quay.io|g' | \
  sudo kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f -

# --- Wait for DNS, then untaint the master so it can run workloads --------
# NOTE: Wait for dns to be running (4-minute budget).
END=$(($(date +%s) + 240))
until kubectl --namespace=kube-system \
  get pods -l k8s-app=kube-dns --no-headers -o name | grep -q "^pod/coredns"; do
  NOW=$(date +%s)
  [ "${NOW}" -gt "${END}" ] && exit 1
  echo "still waiting for dns"
  sleep 10
done
kubectl -n kube-system wait --timeout=240s --for=condition=Ready pods -l k8s-app=kube-dns

# Remove master node taint (single-node cluster runs workloads on the CP).
kubectl taint nodes --all node-role.kubernetes.io/master-
#!/bin/bash
# Install MetalLB v0.10.2 and configure a BGP address pool peered with
# the local router.
set -euxo pipefail

kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.10.2/manifests/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.10.2/manifests/metallb.yaml

# On first install only: speakers share a memberlist secret.
# BUG FIX: the original used "\$(openssl ...)" — the backslash (a copy/paste
# artifact from docs where the command sits inside a heredoc) made the
# "secret" the literal string '$(openssl rand -base64 128)'. It must expand
# here so the key is actually random.
kubectl create secret generic -n metallb-system memberlist \
  --from-literal=secretkey="$(openssl rand -base64 128)"

kubectl apply -f - <<EOF
---
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    peers:
      - peer-address: 192.168.1.1
        peer-asn: 64512
        my-asn: 64513
    address-pools:
      - name: default
        protocol: bgp
        addresses:
          - 172.16.32.0/20
...
EOF
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment