Dev-Env
#!/bin/bash
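# Dev-Env: install the Moby (Docker) engine on a CBL-Mariner host with tdnf,
# switch it to the systemd cgroup driver with journald logging, and let the
# invoking user talk to the daemon.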
set -ex
tdnf -y update
tdnf install -y openssh-server
tdnf install -y moby-containerd moby-cli moby-buildx moby-engine moby-runc
# tee fails under set -e if /etc/docker does not exist yet, so create it first.
mkdir -p /etc/docker
tee /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "journald"
}
EOF
systemctl enable --now docker
# Under sudo, USER is root; prefer the invoking user when it is known.
usermod -aG docker "${SUDO_USER:-${USER}}"
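# Optional sanity check, a minimal sketch that assumes outbound access to
# Docker Hub; note the new group membership only applies to fresh logins.
docker run --rm hello-world
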
#!/bin/bash
set -ex
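# demo_slide: wait for a keypress between slides, then print a highlighted
# "<section> | <title>" banner. Callers keep slide_number and demo_section as
# locals so every demo section numbers its own slides from zero.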
function demo_slide() {
  set +x
  if [ "${slide_number}" -ne 0 ]; then
    read -r -s -p $'Press enter to continue...'
  fi
  let "slide_number+=1"
  clear
  local cyan
  local normal
  cyan=$(tput setaf 153)
  normal=$(tput sgr0)
  echo "${cyan}${demo_section} | ${*}${normal}"
}
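# Substrate walkthrough: the bare-metal hosts, the Cluster API management
# plane that provisioned them, and the resulting Kubernetes cluster.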
function substrate_build() {
  local slide_number=0
  local demo_section="AFO-NC Airship Mariner Substrate"
  demo_slide "Physical Hosts"
  kubectx "admin@afonc-airship-substrate"
  set -x
  kubectl get -n target-infra bmh --output=custom-columns=SERVER:.metadata.name,POWER:.status.poweredOn,BMC:.spec.bmc.address
  set +x
  demo_slide "Cluster API Cluster"
  set -x
  kubectl get -n capi-system pods
  kubectl get -n capm3-system pods
  kubectl get -n metal3 pods --output=custom-columns=NAME:.metadata.name,HOST_NET:.spec.hostNetwork,NODE:.spec.nodeName
  kubectl get -n target-infra clusters
  clusterctl describe cluster -n target-infra target-cluster
  set +x
  demo_slide "Kubernetes Cluster"
  set -x
  kubectl get nodes -o wide
  set +x
}
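# HCI storage: the available StorageClasses and the LVM CSI driver that
# carves volumes out of the nodes' local disks.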
function substrate_storage() {
  local slide_number=0
  local demo_section="AFO-NC HCI | Storage"
  demo_slide "Storageclasses"
  set -x
  kubectx "admin@afonc-airship-substrate"
  kubectl get storageclasses
  set +x
  demo_slide "Logical Volume Manager"
  set -x
  kubectl get -n csi-driver-lvm statefulset
  kubectl get -n csi-driver-lvm daemonset
  local lvm_ds_pod
  lvm_ds_pod="$(kubectl get -n csi-driver-lvm pods -l app=csi-lvmplugin -o go-template='{{ (index .items 0).metadata.name }}')"
  kubectl exec -it -n csi-driver-lvm "${lvm_ds_pod}" -c lvm -- env | grep -C 3 "CSI_DEVICESPATTERN"
  set +x
}
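# HCI networking: Multus alongside Calico, SR-IOV VF creation on the worker,
# the vfio-pci driver override for DPDK, and the SR-IOV device plugin that
# advertises the VFs as allocatable node resources.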
function substrate_networking() {
  local slide_number=0
  local demo_section="AFO-NC HCI | Networking"
  demo_slide "Multus"
  kubectx "admin@afonc-airship-substrate"
  set -x
  kubectl get -n kube-system ds -l app=multus
  kubectl get pods -n calico-system -l k8s-app=calico-node
  kubectl get -n kube-system ds -l app=sriov-cni
  set +x
  demo_slide "Creating VFs from PF on HCI Node"
  set -x
  substrate_worker_node_ip="$(kubectl get nodes nc-mariner-02 -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')"
  substrate_worker_node_username="deployer"
  kubectl ssh-jump -u "${substrate_worker_node_username}" "${substrate_worker_node_ip}" --args "
    set -x;
    sudo systemctl status sriov-NIC.service;
  "
  set +x
  demo_slide "Overriding iavf driver to vfio-pci"
  set -x
  kubectl ssh-jump -u "${substrate_worker_node_username}" "${substrate_worker_node_ip}" --args "
    set -x;
    ip link show dev eth1;
    ethtool -i eth1;
    driverctl list-overrides;
  "
  set +x
  demo_slide "Exposing VFs to Kubernetes"
  set -x
  kubectl get -n kube-system ds -l app=sriovdp
  kubectl get -n kube-system configmap sriovdp-config -o json | \
    yq eval '.data."config.json"' - | yq -o yaml --prettyPrint eval '{.}' -
  local sriov_device_plugin_pod_node
  local sriov_device_plugin_pod
  sriov_device_plugin_pod_node="$(kubectl get node --selector='node-role.kubernetes.io/worker' -o 'go-template={{ (index .items 0).metadata.name }}')"
  sriov_device_plugin_pod="$(kubectl get -n kube-system pods -l app=sriovdp --field-selector "spec.nodeName=${sriov_device_plugin_pod_node}" -o 'go-template={{ (index .items 0).metadata.name }}')"
  kubectl -n kube-system logs "${sriov_device_plugin_pod}" | grep "device added"
  kubectl get node "${sriov_device_plugin_pod_node}" -o json | \
    yq -o yaml --prettyPrint eval '.status.allocatable' -
  set +x
}
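# HCI hypervisor: KubeVirt and CDI pods, the capability labels published per
# node, and the hugepages that back VM memory.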
function substrate_hypervisor() {
  local substrate_worker_node="$(kubectl get node --selector='node-role.kubernetes.io/worker' -o 'go-template={{ (index .items 0).metadata.name }}')"
  local substrate_worker_node_ip="$(kubectl get nodes "${substrate_worker_node}" -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')"
  local substrate_worker_node_username="deployer"
  local slide_number=0
  local demo_section="AFO-NC HCI | Hypervisor"
  demo_slide "Kubevirt"
  set -x
  kubectx "admin@afonc-airship-substrate"
  kubectl get -n kubevirt pods
  kubectl get -n cdi pods
  kubectl get -n kubevirt kubevirts kubevirt -o json | \
    yq -o yaml --prettyPrint eval '.spec.configuration' -
  set +x
  demo_slide "Capabilities"
  set -x
  kubectl get node "${substrate_worker_node}" -o json | \
    yq -o yaml --prettyPrint eval '.metadata.labels' - | grep "node.kubevirt.io"
  set +x
  demo_slide "Hugepages"
  set -x
  kubectl ssh-jump -u "${substrate_worker_node_username}" "${substrate_worker_node_ip}" --args "
    set -x;
    grep HugePages_ /proc/meminfo;
  "
  kubectl get node "${substrate_worker_node}" -o json | \
    yq -o yaml --prettyPrint eval '.status.allocatable' -
  set +x
}
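# Fusioncore control plane: first the KubeVirt VMs hosting it as seen from
# the substrate, then nodes, etcd, and Calico wiring from inside the
# subcluster.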
function subcluster_fusioncore_controllers() {
  local slide_number=0
  local demo_section="Subcluster | Fusioncore | Control Plane"
  demo_slide "Infra"
  set -x
  kubectx "admin@afonc-airship-substrate"
  kubectl get virtualmachineinstances -l app.kubernetes.io/instance=afo-nc-fusion
  kubectl get pods -l app.kubernetes.io/instance=afo-nc-fusion
  kubectl get svc -l app.kubernetes.io/instance=afo-nc-fusion
  set +x
  demo_slide "Controllers"
  set -x
  kubectx "admin@afonc-subcluster-fusioncore"
  kubectl get nodes
  kubectl get -n kube-system pods -l component=etcd
  set +x
  demo_slide "Networking"
  local fusioncore_controller_node="$(kubectl get node --selector='node-role.kubernetes.io/master' -o 'go-template={{ (index .items 0).metadata.name }}')"
  local fusioncore_controller_node_ip="$(kubectl get nodes "${fusioncore_controller_node}" -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')"
  local fusioncore_controller_node_username="ubuntu"
  set -x
  kubectl ssh-jump -u "${fusioncore_controller_node_username}" "${fusioncore_controller_node_ip}" --args "
    set -x;
    networkctl status enp*;
  "
  calico_node_ds="$(kubectl get -n kube-system ds -l k8s-app=calico-node -o go-template='{{ (index .items 0).metadata.name }}')"
  kubectl get -n kube-system ds "${calico_node_ds}" -o json | \
    yq -o yaml --prettyPrint eval '.spec.template.spec.containers[] | select(.name == "calico-node").env[] | select(.name == "IP_AUTODETECTION_METHOD")' -
  set +x
}
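# Fusioncore worker VM: its KubeVirt domain, the cloud-init network data, and
# the SR-IOV interfaces passed through to the guest (DPDK-bound inside it).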
function subcluster_fusioncore_worker() {
  local slide_number=0
  local demo_section="Subcluster | Fusioncore | Worker"
  demo_slide "VirtualMachines"
  set -x
  kubectx "admin@afonc-airship-substrate"
  kubectl get virtualmachines -l app.kubernetes.io/instance=afo-nc-fusion
  kubectl get virtualmachineinstances -l app.kubernetes.io/instance=afo-nc-fusion
  set +x
  demo_slide "Worker Node | Domain"
  set -x
  kubectl get virtualmachineinstances -l afo-nc.azure.com=afo-nc-fusion-subcluster-worker -o json | \
    yq -o yaml --prettyPrint eval '.items[0].spec.domain | del(.resources) | del(.features)' -
  set +x
  demo_slide "Worker Node | Network Configuration"
  tee /dev/null <<EOF
Interfaces exposed to the VM:
  enp1s0: Calico: Airship Infrastructure
  enp6s0: SR-IOV: UPF N3 interface
  enp7s0: SR-IOV: UPF N6 interface
  enp8s0: SR-IOV: AMF N2 interface
  enp9s0: SR-IOV: Calico CNI
EOF
  set -x
  kubectl get virtualmachineinstances -l afo-nc.azure.com=afo-nc-fusion-subcluster-worker -o json | \
    yq -o yaml --prettyPrint eval '.items[0].spec.volumes' -
  kubectl get secret afo-nc-fusion-subcluster-worker-0-networkdata -o go-template='{{ .data.networkdata | base64decode }}' | \
    yq -o yaml --prettyPrint eval '' -
  set +x
  demo_slide "Worker Node | Network Configuration | DPDK"
  kubectx "admin@afonc-subcluster-fusioncore"
  local fusioncore_worker_node="$(kubectl get node --selector='!node-role.kubernetes.io/master' -o 'go-template={{ (index .items 0).metadata.name }}')"
  local fusioncore_worker_node_ip="$(kubectl get nodes "${fusioncore_worker_node}" -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')"
  local fusioncore_worker_node_username="ubuntu"
  set -x
  kubectl ssh-jump -u "${fusioncore_worker_node_username}" "${fusioncore_worker_node_ip}" --args "
    set -x;
    dpdk-devbind.py --status;
    networkctl list enp*;
  "
  set +x
}
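# 5G core workload: the Helm release, the AMF and UPF pods with their Multus
# network attachments, and the Grafana dashboard.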
function subcluster_fusioncore_workload() {
  local slide_number=0
  local demo_section="Subcluster | Fusioncore | Workload"
  demo_slide "Helm"
  set -x
  kubectx "admin@afonc-subcluster-fusioncore"
  helm -n core ls
  helm get values core -o json | yq -o yaml --prettyPrint eval '' -
  set +x
  demo_slide "AMF"
  set -x
  helm get values core -o json | yq -o yaml --prettyPrint eval '."5g-core".amfV1' -
  kubectl get pods -l app.kubernetes.io/component=amf
  amf_pod="$(kubectl get pods -l app.kubernetes.io/component=amf -o 'go-template={{ (index .items 0).metadata.name }}')"
  kubectl get pods "${amf_pod}" -o json | yq eval '.metadata.annotations."k8s.v1.cni.cncf.io/networks-status"' - | jq
  kubectl logs "${amf_pod}" -c amf --tail=20
  set +x
  demo_slide "UPF"
  set -x
  helm get values core -o json | yq -o yaml --prettyPrint eval '.global.networks' -
  helm get values core -o json | yq -o yaml --prettyPrint eval '.global.cpuManager' -
  kubectl get pods -l app.kubernetes.io/component=upf-pp
  upf_pod="$(kubectl get pods -l app.kubernetes.io/component=upf-pp -o 'go-template={{ (index .items 0).metadata.name }}')"
  kubectl get pods "${upf_pod}" -o json | yq eval '.metadata.annotations."k8s.v1.cni.cncf.io/networks-status"' - | jq
  kubectl logs "${upf_pod}" -c cppe --tail=40
  set +x
  demo_slide "Dashboard"
  echo "http://10.36.3.13:30001/d/fusion-core/fusion-core?orgId=1&refresh=5s&from=now-15m&to=now"
}
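# Validation: drive the vendor test suite from the harness cluster while a
# second tmux pane watches SCTP traffic arrive at the AMF.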
function fusioncore_validate() {
  local slide_number=0
  local demo_section="Fusioncore | Validate"
  demo_slide "Helm"
  set -x
  kubectx "admin@afonc-subcluster-harness"
  helm -n core ls
  helm get values core -o json | yq -o yaml --prettyPrint eval '' -
  set +x
  demo_slide "Test AMF Communication"
  local fusioncore_worker_node="$(kubectl get node --selector='!node-role.kubernetes.io/master' -o 'go-template={{ (index .items 0).metadata.name }}')"
  local fusioncore_worker_node_ip="$(kubectl get nodes "${fusioncore_worker_node}" -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}')"
  local fusioncore_worker_node_username="ubuntu"
  # Note the continuation after the image name: run_tests.py must run inside
  # the test container, not as a separate command on the worker node.
  tmux new-session -d -s fusioncore_amf_hello "kubectl ssh-jump -u ${fusioncore_worker_node_username} ${fusioncore_worker_node_ip} --args \"
    set -x;
    networkctl list enp*;
    sudo docker run --rm \
      --network=host \
      -v /home/ubuntu/opt/metaswitch/testinfra:/tests \
      -v /root/.kube/config:/root/.kube/config \
      --workdir /tests \
      art-docker.metaswitch.com/test-environment-image:0.24.3 \
      ./run_tests.py \
        --technology=5g \
        --reuse-instance=core \
        --tcpdump tests \
        -- \
        -k test_hello_to_amf;
    sleep infinity;
  \""
  amf_pod="$(kubectl --context "admin@afonc-subcluster-fusioncore" get pods -l app.kubernetes.io/component=amf -o 'go-template={{ (index .items 0).metadata.name }}')"
  tmux split-window -d -t fusioncore_amf_hello:0 -p20 -v "set -x; kubectl --context \"admin@afonc-subcluster-fusioncore\" exec -it -n core ${amf_pod} -c amf -- tcpdump -i any sctp"
  tmux select-layout -t fusioncore_amf_hello:0 main-horizontal
  tmux attach-session -t fusioncore_amf_hello
}
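# Run the full demo, section by section.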
substrate_build
substrate_storage
substrate_networking
substrate_hypervisor
subcluster_fusioncore_controllers
subcluster_fusioncore_worker
subcluster_fusioncore_workload
fusioncore_validate