Created November 19, 2025 07:47
Test: tc qdisc on both sides of a veth pair (host vs pod namespace) - demonstrates that AWS VPC CNI's host-side tc and pod-side rate limiting don't conflict
#!/bin/bash
# Test: Can we attach tc qdisc to pod-side of veth while AWS VPC CNI uses host-side?
# Run with: sudo bash test-tc-veth-conflict.sh
set -e
NAMESPACE="test-pod-tc"
VETH_HOST="veth-host-test"
VETH_POD="veth-pod-test"
echo "======================================================================="
echo "TEST: tc qdisc conflict between host and pod namespaces"
echo "======================================================================="
echo ""
echo "Simulating:"
echo " - AWS VPC CNI with tc on HOST side of veth"
echo " - Natra with tc qdisc on POD side of veth"
echo ""
echo "Question: Do they conflict?"
echo ""
# Check kernel and tc version
echo "=== System Info ==="
echo "Kernel: $(uname -r)"
echo "tc version: $(tc -Version 2>&1 || echo 'not found')"
echo ""
# Check available qdiscs
echo "=== Available qdiscs ==="
tc qdisc help 2>&1 | grep -A 50 "Usage:" | head -20 || echo "Could not list qdiscs"
echo ""
# Cleanup function
cleanup() {
    echo ""
    echo "=== Cleanup ==="
    ip netns del $NAMESPACE 2>/dev/null || true
    echo "✓ Cleaned up namespace"
}
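# Note: deleting the namespace also destroys the veth pair (removing one end of
# a veth removes its peer), so no explicit 'ip link del' is needed here.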
# Cleanup on exit
trap cleanup EXIT
# 1. Create network namespace (simulates pod)
echo "=== Step 1: Create network namespace (simulates pod) ==="
ip netns add $NAMESPACE
echo "✓ Created namespace: $NAMESPACE"
echo ""
# 2. Create veth pair (simulates k8s pod networking)
echo "=== Step 2: Create veth pair ==="
ip link add $VETH_HOST type veth peer name $VETH_POD
echo "✓ Created veth pair: $VETH_HOST <-> $VETH_POD"
echo ""
# 3. Move pod side into namespace
echo "=== Step 3: Move pod side into namespace ==="
ip link set $VETH_POD netns $NAMESPACE
echo "✓ Moved $VETH_POD into $NAMESPACE"
echo ""
# 4. Configure interfaces
echo "=== Step 4: Configure interfaces ==="
ip addr add 10.99.0.1/24 dev $VETH_HOST
ip link set $VETH_HOST up
echo "✓ Host side: 10.99.0.1/24"
ip netns exec $NAMESPACE ip addr add 10.99.0.2/24 dev $VETH_POD
ip netns exec $NAMESPACE ip link set $VETH_POD up
ip netns exec $NAMESPACE ip link set lo up
echo "✓ Pod side: 10.99.0.2/24"
echo ""
# 5. Try to load sch_clsact module
echo "=== Step 5a: Try to load clsact kernel module ==="
modprobe sch_clsact 2>/dev/null && echo "✓ Loaded sch_clsact module" || echo "⚠ sch_clsact module not found (it may be built into the kernel)"
echo ""
# 5b. Simulate AWS VPC CNI: Try clsact first, fallback to ingress
echo "=== Step 5b: Simulate AWS VPC CNI (tc on HOST side) ==="
if tc qdisc add dev $VETH_HOST clsact 2>/dev/null; then
    echo "✓ Added clsact qdisc to $VETH_HOST (host side)"
    HOST_QDISC="clsact"
else
    echo "⚠ clsact not available, trying 'ingress' qdisc instead..."
    tc qdisc add dev $VETH_HOST ingress 2>/dev/null || {
        echo "✗ Both clsact and ingress failed!"
        echo ""
        echo "Trying to show what qdiscs ARE available on this interface:"
        tc qdisc show dev $VETH_HOST
        exit 1
    }
    echo "✓ Added ingress qdisc to $VETH_HOST (host side)"
    HOST_QDISC="ingress"
fi
echo ""
echo "Host side qdiscs:"
tc qdisc show dev $VETH_HOST | sed 's/^/ /'
echo ""
# 6. Try to add qdisc to POD side (THE CRITICAL TEST)
echo "=== Step 6: Add tc qdisc to POD side (our rate limiter) ==="
| if [ "$HOST_QDISC" = "clsact" ]; then | |
| # Try clsact on pod side too | |
| if ip netns exec $NAMESPACE tc qdisc add dev $VETH_POD clsact 2>/dev/null; then | |
| echo "✓ Added clsact qdisc to $VETH_POD (pod side)" | |
| POD_QDISC="clsact" | |
| else | |
| echo "⚠ clsact failed on pod side, trying ingress..." | |
| ip netns exec $NAMESPACE tc qdisc add dev $VETH_POD ingress | |
| echo "✓ Added ingress qdisc to $VETH_POD (pod side)" | |
| POD_QDISC="ingress" | |
| fi | |
| else | |
| # Use ingress on pod side | |
| ip netns exec $NAMESPACE tc qdisc add dev $VETH_POD ingress | |
| echo "✓ Added ingress qdisc to $VETH_POD (pod side)" | |
| POD_QDISC="ingress" | |
| fi | |
| echo "" | |
| echo "Pod side qdiscs:" | |
| ip netns exec $NAMESPACE tc qdisc show dev $VETH_POD | sed 's/^/ /' | |
| echo "" | |
| # 7. Try adding a TBF rate limiter to POD side root | |
| echo "=== Step 7: Add TBF rate limiter on POD side root ===" | |
| # Remove the ingress/clsact first if we added it to root | |
| if [ "$POD_QDISC" = "clsact" ]; then | |
| ip netns exec $NAMESPACE tc qdisc del dev $VETH_POD clsact | |
| elif [ "$POD_QDISC" = "ingress" ]; then | |
| ip netns exec $NAMESPACE tc qdisc del dev $VETH_POD ingress | |
| fi | |
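# TBF shapes egress from the pod namespace out through $VETH_POD:
#   rate    - sustained limit (10 Mbit/s)
#   burst   - bucket size, i.e. how much may be sent at once above the rate
#   latency - how long packets may sit in the queue before being dropped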
ip netns exec $NAMESPACE tc qdisc add dev $VETH_POD root tbf rate 10mbit burst 32kbit latency 400ms
echo "✓ Added TBF rate limiter (10mbit) to $VETH_POD root"
echo ""
echo "Pod side qdiscs after TBF:"
ip netns exec $NAMESPACE tc qdisc show dev $VETH_POD | sed 's/^/ /'
echo ""
# 8. Add ingress/clsact back for ingress filtering
echo "=== Step 8: Add $POD_QDISC back for ingress (with TBF on root) ==="
if [ "$POD_QDISC" = "clsact" ]; then
    ip netns exec $NAMESPACE tc qdisc add dev $VETH_POD clsact
    echo "✓ Added clsact alongside TBF"
else
    ip netns exec $NAMESPACE tc qdisc add dev $VETH_POD ingress
    echo "✓ Added ingress alongside TBF"
fi
echo ""
echo "Final pod side qdiscs:"
ip netns exec $NAMESPACE tc qdisc show dev $VETH_POD | sed 's/^/ /'
echo ""
# 9. Verify connectivity
echo "=== Step 9: Test connectivity ==="
if ip netns exec $NAMESPACE ping -c 2 -W 1 10.99.0.1 >/dev/null 2>&1; then
    echo "✓ Ping from pod to host: SUCCESS"
else
    echo "⚠ Ping failed (but qdiscs still attached successfully)"
fi
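# Optional manual check, not run by this script (assumes iperf3 is installed on
# the host): confirm the TBF limit actually throttles pod egress to ~10 Mbit/s.
#   iperf3 -s -B 10.99.0.1 -1 &
#   ip netns exec $NAMESPACE iperf3 -c 10.99.0.1 -t 5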
| echo "" | |
| # 10. Summary | |
| echo "=======================================================================" | |
| echo "RESULTS" | |
| echo "=======================================================================" | |
| echo "" | |
| echo "✅ Host side $HOST_QDISC: ATTACHED" | |
| echo "✅ Pod side $POD_QDISC: ATTACHED" | |
| echo "✅ Pod side TBF rate limiter: ATTACHED" | |
| echo "✅ Both qdiscs coexist: YES" | |
| echo "✅ Connectivity: $(ip netns exec $NAMESPACE ping -c 1 -W 1 10.99.0.1 >/dev/null 2>&1 && echo 'WORKING' || echo 'N/A')" | |
| echo "" | |
| echo "CONCLUSION:" | |
| echo " AWS VPC CNI tc (host side) and Natra tc qdisc (pod side)" | |
| echo " DO NOT CONFLICT! They operate on different interfaces." | |
| echo "" | |
| echo "IMPLICATIONS FOR NATRA:" | |
| echo " ✓ Init container can use standard tc (any kernel!)" | |
| echo " ✓ No need for tcx if we attach from pod side" | |
| echo " ✓ No kernel 6.6+ requirement for init mode" | |
| echo " ✓ CNI mode might still need tcx (attaches to host side)" | |
| echo "" | |
| echo "QDISC USED: $HOST_QDISC (host) + $POD_QDISC (pod)" | |
| echo "" | |
| echo "=======================================================================" | |
| echo "TEST PASSED" | |
| echo "=======================================================================" |