Skip to content

Instantly share code, notes, and snippets.

@mccaffers
Created September 25, 2025 16:45
Show Gist options
  • Select an option

  • Save mccaffers/5a392abdd77e0a22bd4bdec6a0b50037 to your computer and use it in GitHub Desktop.

Select an option

Save mccaffers/5a392abdd77e0a22bd4bdec6a0b50037 to your computer and use it in GitHub Desktop.
QuestDB Maximum Performance Configuration. This configuration maximizes performance at the expense of memory usage for QuestDB
# QuestDB Maximum Performance Configuration
# WARNING: This configuration maximizes performance at the expense of memory usage
# Ensure you have sufficient RAM and CPU cores before applying these settings
# ===========================================
# SHARED WORKER CONFIGURATION (CRITICAL)
# ===========================================
# Shared pool: serves any job without a dedicated worker pool.
# Set to number of CPU cores (or slightly less to leave cores for OS)
# Running on a 94 core machine, so we set to 32 to leave room for OS and other tasks
# NOTE(review): "94 core" is unusual — confirm (96?). Also note the dedicated
# pools below (http=20, pg=16, ILP writer=16, ILP io=16) add 68 more threads
# on top of these 32 — confirm the total fits the host.
shared.worker.count=32
# Pin threads to specific CPU cores for optimal performance
# NOTE(review): no shared.worker.affinity list is set here, so no pinning
# actually takes place — add an affinity list or drop this comment.
# ===========================================
# HTTP SERVER - MAXIMUM CONNECTIONS
# ===========================================
# Maximum simultaneous HTTP connections
http.net.connection.limit=1024
# ILP-over-HTTP connection limit
http.ilp.connection.limit=512
# Connection pool sizing
http.connection.pool.initial.capacity=64
http.connection.string.pool.capacity=512
# Buffer sizes - maximize for performance
# (8M send/receive per connection; multiply by connection.limit for worst case)
http.net.connection.sndbuf=8M
http.net.connection.rcvbuf=8M
http.send.buffer.size=8M
http.request.header.buffer.size=128K
# Connection timeouts (ms): 120s idle timeout, 1s queue timeout.
# NOTE(review): presumably reduced from the server default to recycle idle
# connections sooner — confirm against your QuestDB version's defaults.
http.net.connection.timeout=120000
http.net.connection.queue.timeout=1000
# Enable deflate compression (trades CPU time for network bandwidth)
http.allow.deflate.before.send=true
# Dedicated HTTP worker pool (threads serving HTTP requests)
http.worker.count=20
# Text processing pools - maximize for CSV imports
http.text.date.adapter.pool.capacity=64
http.text.timestamp.adapter.pool.capacity=256
http.text.metadata.string.pool.capacity=512
http.text.lexer.string.pool.capacity=256
http.text.roll.buffer.size=8192
http.text.roll.buffer.limit=16M
http.text.utf8.sink.size=16384
# Lines sampled to infer column types during import analysis
http.text.analysis.max.lines=1000
# Query cache - maximize
http.query.cache.enabled=true
http.query.cache.block.count=32
http.query.cache.row.count=128
# ===========================================
# POSTGRESQL WIRE PROTOCOL
# ===========================================
# Maximum simultaneous PG wire connections and pool capacity
pg.net.connection.limit=512
pg.connection.pool.capacity=512
# Socket and protocol buffer sizes (4M per connection, each direction)
pg.net.connection.sndbuf=4M
pg.net.connection.rcvbuf=4M
pg.recv.buffer.size=4M
pg.send.buffer.size=4M
# Idle connection timeout (ms)
pg.net.connection.timeout=120000
# Dedicated PG worker pool
pg.worker.count=16
# PG statement caches - maximize (SELECT/INSERT/UPDATE caches enabled)
pg.select.cache.enabled=true
pg.select.cache.block.count=64
pg.select.cache.row.count=128
pg.insert.cache.enabled=true
pg.insert.cache.block.count=32
pg.insert.cache.row.count=64
pg.update.cache.enabled=true
pg.update.cache.block.count=32
pg.update.cache.row.count=64
# String/parameter pools and limits
pg.character.store.capacity=16384
pg.character.store.pool.capacity=256
pg.binary.param.count.capacity=16
pg.named.statement.limit=256
pg.max.blob.size.on.query=4M
# ===========================================
# CSV IMPORT OPTIMIZATION
# ===========================================
# Parallel COPY import tuning: larger buffers and queue for bulk CSV loads
cairo.sql.copy.buffer.size=16M
cairo.sql.copy.max.index.chunk.size=500M
cairo.sql.copy.queue.capacity=128
# ===========================================
# INFLUXDB LINE PROTOCOL (ILP)
# ===========================================
# HTTP ILP (preferred transport)
line.http.enabled=true
# TCP ILP settings (if using TCP)
line.tcp.enabled=true
line.tcp.net.connection.limit=1024
line.tcp.connection.pool.capacity=512
line.tcp.net.connection.rcvbuf=4M
# Per-connection message buffer and max single-measurement size (both 128K).
# NOTE(review): keep msg.buffer.size >= max.measurement.size — confirm in docs.
line.tcp.msg.buffer.size=131072
line.tcp.max.measurement.size=131072
line.tcp.writer.queue.capacity=512
# Dedicated ILP worker pools (16 writer + 16 network I/O threads)
line.tcp.writer.worker.count=16
line.tcp.io.worker.count=16
# ILP worker spin/yield tuning.
# NOTE(review): presumably lower thresholds make idle workers yield/sleep
# sooner, trading latency for idle CPU — confirm semantics for your version.
line.tcp.writer.worker.sleep.threshold=100
line.tcp.writer.worker.yield.threshold=5
line.tcp.io.worker.sleep.threshold=100
line.tcp.io.worker.yield.threshold=5
line.tcp.maintenance.job.interval=500
line.tcp.min.idle.ms.before.writer.release=100
# Commit cadence: fraction of the interval, with a 500ms default
line.tcp.commit.interval.fraction=0.3
line.tcp.commit.interval.default=500
# Auto-create settings - tables/columns are created from incoming data;
# new auto-created tables are partitioned by HOUR
line.auto.create.new.columns=true
line.auto.create.new.tables=true
line.default.partition.by=HOUR
# ===========================================
# GLOBAL TIMEOUTS AND LIMITS
# ===========================================
# Maximum query run time (seconds)
query.timeout.sec=300
# How often idle resources are checked for release (ms)
cairo.idle.check.interval=60000
# Idle table readers are released after 30s
cairo.inactive.reader.ttl=30000
# NOTE(review): WAL writers are released after 30s but non-WAL writers are
# kept for 300s — confirm this asymmetry is intentional.
cairo.wal.inactive.writer.ttl=30000
cairo.inactive.writer.ttl=300000
# ===========================================
# ADVANCED SETTINGS
# ===========================================
# Commit mode for maximum performance (data loss risk).
# NOTE(review): nosync presumably skips syncing data to disk on commit, so a
# crash or power loss can lose recently committed rows — confirm in docs.
cairo.commit.mode=nosync
# File operation retries
cairo.file.operation.retry.count=10
cairo.max.swap.file.count=100
# Circuit breaker optimization.
# NOTE(review): confirm the throttle unit (iterations between client
# disconnect checks) for your QuestDB version.
circuit.breaker.throttle=500000
circuit.breaker.buffer.size=128
# Keep-alive optimization: 30s timeout, up to 100000 requests per connection
http.keep-alive.timeout=30
http.keep-alive.max=100000
http.server.keep.alive=true
# Enable config reloading (pick up changes without a full restart)
config.reload.enabled=true
# ===========================================
# MEMORY WARNINGS
# ===========================================
# This configuration will use significant memory:
# - Estimate 8-16GB+ RAM minimum
# - Each connection can use 8-16MB
# - Large buffer pools will consume GB of memory
# - JIT compilation uses additional memory
# - Monitor memory usage carefully in production
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment