This script benchmarks a TrueNAS ZFS pool with sequential I/O tests (dd) and a random I/O test (fio), and logs all results to a file for later analysis.
#!/bin/sh
# trueNAS_pool_benchmark.sh
# Usage: sh trueNAS_pool_benchmark.sh <pool_name>
# Example: sh trueNAS_pool_benchmark.sh pool-01
POOL=$1
if [ -z "$POOL" ]; then
    echo "Usage: $0 <pool_name>"
    exit 1
fi
LOGFILE="./${POOL}_benchmark_$(date +%Y%m%d_%H%M%S).log"
log_section() {
    echo "" | tee -a "$LOGFILE"
    echo "====================================================================" | tee -a "$LOGFILE"
    echo ">>> $1 ($(date))" | tee -a "$LOGFILE"
    echo "====================================================================" | tee -a "$LOGFILE"
}
echo "=== TrueNAS Pool Benchmark ===" | tee -a $LOGFILE
echo "Pool: $POOL" | tee -a $LOGFILE
echo "Date: $(date)" | tee -a $LOGFILE
echo "Log file: $LOGFILE" | tee -a $LOGFILE
# 1. Zpool info
log_section "ZPOOL STATUS"
zpool status "$POOL" | tee -a "$LOGFILE"
# 2. Zpool iostat (2 seconds, 5 iterations)
log_section "ZPOOL IOSTAT (2s interval, 5 samples)"
zpool iostat -v "$POOL" 2 5 | tee -a "$LOGFILE"
# 3. Sequential write test
DD_TESTFILE="/mnt/$POOL/benchmark_testfile"
log_section "SEQUENTIAL WRITE (dd, 1GiB)"
sync
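# Note: oflag=direct requests O_DIRECT, which some OpenZFS versions only honor as a hint,
# so a 1 GiB test can still be absorbed largely by RAM (see the notes below the results table).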
dd if=/dev/zero of="$DD_TESTFILE" bs=1M count=1024 oflag=direct 2>&1 | tee -a "$LOGFILE"
# 4. Sequential read test
log_section "SEQUENTIAL READ (dd, 1GiB)"
dd if="$DD_TESTFILE" of=/dev/null bs=1M iflag=direct 2>&1 | tee -a "$LOGFILE"
# 5. Random read/write test with fio
FIO_FILES=""
if command -v fio >/dev/null 2>&1; then
log_section "RANDOM READ/WRITE (fio, 128k, 4 jobs, iodepth=16)"
    for i in 0 1 2 3; do
        FIO_FILES="$FIO_FILES /mnt/$POOL/randrw.$i.0"
    done
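    # Note: the libaio engine is Linux-only (i.e. TrueNAS SCALE); on TrueNAS CORE/FreeBSD,
    # --ioengine=posixaio is the usual substitute.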
    fio --name=randrw \
        --rw=randrw \
        --size=1G \
        --bs=128k \
        --numjobs=4 \
        --iodepth=16 \
        --direct=1 \
        --ioengine=libaio \
        --runtime=60 \
        --group_reporting \
        --directory="/mnt/$POOL" 2>&1 | tee -a "$LOGFILE"
else
section "fio NOT FOUND — skipping random I/O test"
fi
# 6. Cleanup
log_section "CLEANUP"
rm -f "$DD_TESTFILE" $FIO_FILES
echo "Benchmark files removed." | tee -a "$LOGFILE"
# 7. Extract summary numbers
log_section "SUMMARY"
SEQ_WRITE=$(grep -A5 "SEQUENTIAL WRITE" "$LOGFILE" | grep copied | tail -1 | awk '{print $(NF-1), $NF}')
SEQ_READ=$(grep -A5 "SEQUENTIAL READ" "$LOGFILE" | grep copied | tail -1 | awk '{print $(NF-1), $NF}')
FIO_READ_BW=$(grep -A20 "RANDOM READ/WRITE" "$LOGFILE" | grep -m1 "read:" | awk -F'BW=' '{print $2}' | awk '{print $1}')
FIO_READ_IOPS=$(grep -A20 "RANDOM READ/WRITE" "$LOGFILE" | grep -m1 "read:" | awk -F'IOPS=|,' '{print $2}')
FIO_WRITE_BW=$(grep -A30 "RANDOM READ/WRITE" "$LOGFILE" | grep -m1 "write:" | awk -F'BW=' '{print $2}' | awk '{print $1}')
FIO_WRITE_IOPS=$(grep -A30 "RANDOM READ/WRITE" "$LOGFILE" | grep -m1 "write:" | awk -F'IOPS=|,' '{print $2}')
echo "Sequential Write (dd): $SEQ_WRITE" | tee -a $LOGFILE
echo "Sequential Read (dd): $SEQ_READ" | tee -a $LOGFILE
[ -n "$FIO_READ_BW" ] && echo "Random Read (fio): $FIO_READ_BW ($FIO_READ_IOPS IOPS)" | tee -a $LOGFILE
[ -n "$FIO_WRITE_BW" ] && echo "Random Write (fio): $FIO_WRITE_BW ($FIO_WRITE_IOPS IOPS)" | tee -a $LOGFILE
echo "" | tee -a $LOGFILE
echo ">>> Benchmark completed. Results saved to $LOGFILE" | tee -a $LOGFILE| Storage | Seq. Read (dd) | Seq. Write (dd) | Random Read (fio) | Random Write (fio) | IOPS (128 KiB) |
|---|---|---|---|---|---|
| RAIDZ1 3-disk pool | 7.3 GB/s | 3.9 GB/s | 1.59 GiB/s | 1.64 GiB/s | 12.7k / 13.1k |
| Single 7200 RPM HDD | 150 MB/s | 150 MB/s | 80 MB/s | 80 MB/s | ~200 / 300 |
| SATA SSD (consumer) | 500 MB/s | 500 MB/s | 400 MB/s | 400 MB/s | 3–5k |
Notes:

- The dd sequential numbers are very high. That is not raw disk speed: with only 1 GiB written and read, the test likely benefits from ARC (the ZFS RAM cache), since the whole working set fits in memory. For true disk speed, bump the test size to something larger than your ARC (e.g. 20–50 GiB); see the sketch below.
- The random I/O test (fio) better reflects real-world workloads with many simultaneous small- and medium-sized operations.
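If you want to take ARC mostly out of the picture, one option is to derive the test size from the ARC's configured maximum rather than guessing. Below is a minimal sketch, assuming TrueNAS SCALE (Linux), where the ARC counters are exposed in `/proc/spl/kstat/zfs/arcstats` (on TrueNAS CORE the same value is available via `sysctl -n kstat.zfs.misc.arcstats.c_max`); it reuses `$POOL` and `$LOGFILE` from the script above. Keep in mind that if the dataset has compression enabled, data from `/dev/zero` compresses to almost nothing, so the write figure can still be optimistic.

```sh
# Size the sequential test at roughly twice the ARC maximum (c_max, in bytes),
# so the working set cannot be held entirely in RAM.
ARC_MAX=$(awk '$1 == "c_max" {print $3}' /proc/spl/kstat/zfs/arcstats)
TEST_MIB=$(( ARC_MAX / 1024 / 1024 * 2 ))

# Same write test as in the script, but larger and with conv=fsync so the
# reported rate includes flushing the data out to disk.
dd if=/dev/zero of="/mnt/$POOL/benchmark_testfile" bs=1M count="$TEST_MIB" conv=fsync 2>&1 | tee -a "$LOGFILE"
```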