-- Useful PostgreSQL Queries and Commands
-- forked from rgreenjr/postgres_queries_and_commands.sql

-- show running queries (9.2+)
SELECT pid, age(clock_timestamp(), query_start), usename, query
FROM pg_stat_activity
WHERE state <> 'idle' AND query NOT ILIKE '%pg_stat_activity%'
ORDER BY query_start desc;

-- cancel a running query (9.2+: the pg_stat_activity column is pid, not procpid)
SELECT pg_cancel_backend(pid);

-- kill an idle connection / terminate a stuck backend
SELECT pg_terminate_backend(pid);
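
-- example: terminate every session that has sat 'idle in transaction' for more
-- than 10 minutes (the threshold is an arbitrary illustrative choice)
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE state = 'idle in transaction'
  AND state_change < clock_timestamp() - INTERVAL '10 minutes';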

-- vacuum command
VACUUM (VERBOSE, ANALYZE);
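
-- example: vacuum and analyze a single table ('my_table' is a placeholder name)
VACUUM (VERBOSE, ANALYZE) my_table;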

-- show running queries (pre-9.2: current_query instead of query)
select * from pg_stat_activity where current_query not like '<%';

-- all database users
select * from pg_user;

-- all databases and their sizes
select datname, pg_size_pretty(pg_database_size(datname))
from pg_database
order by pg_database_size(datname) desc;
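
-- all tables and their size, with and without indexes
-- (pg_total_relation_size includes indexes and TOAST; pg_relation_size is the heap alone)
select relname,
       pg_size_pretty(pg_total_relation_size(relid)) as total_size,
       pg_size_pretty(pg_relation_size(relid)) as table_size
from pg_statio_user_tables
order by pg_total_relation_size(relid) desc;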

-- Total number of connections
SELECT count(*) FROM pg_stat_activity;

-- Number of connections by state
SELECT state, count(*) FROM pg_stat_activity GROUP BY state;

-- Connections waiting for a lock
SELECT count(distinct pid) FROM pg_locks WHERE granted = false;
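
-- example (9.6+): see which pids are blocking each waiting session;
-- pg_blocking_pids() is one way to pair blockers with waiters
SELECT pid, pg_blocking_pids(pid) AS blocked_by, query
FROM pg_stat_activity
WHERE cardinality(pg_blocking_pids(pid)) > 0;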

-- Maximum transaction age
SELECT max(now() - xact_start) FROM pg_stat_activity
WHERE state IN ('idle in transaction', 'active');
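
-- example: the sessions behind the oldest open transactions, oldest first
SELECT pid, usename, state, now() - xact_start AS xact_age
FROM pg_stat_activity
WHERE xact_start IS NOT NULL
ORDER BY xact_start
LIMIT 10;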

-- cache hit rate (should not be less than 0.99)
SELECT sum(heap_blks_read) as heap_read,
       sum(heap_blks_hit) as heap_hit,
       sum(heap_blks_hit) / nullif(sum(heap_blks_hit) + sum(heap_blks_read), 0) as ratio
FROM pg_statio_user_tables;

-- table index usage rates (should not be less than 99%)
SELECT relname,
       100 * idx_scan / nullif(seq_scan + idx_scan, 0) AS percent_of_times_index_used,
       n_live_tup AS rows_in_table
FROM pg_stat_user_tables
ORDER BY n_live_tup DESC;
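
-- example: candidate unused indexes (never scanned since the stats were last reset)
SELECT schemaname, relname, indexrelname,
       pg_size_pretty(pg_relation_size(indexrelid)) AS index_size
FROM pg_stat_user_indexes
WHERE idx_scan = 0
ORDER BY pg_relation_size(indexrelid) DESC;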

-- index cache hit rate (should not be less than 0.99)
SELECT sum(idx_blks_read) as idx_read,
       sum(idx_blks_hit) as idx_hit,
       sum(idx_blks_hit) / nullif(sum(idx_blks_hit) + sum(idx_blks_read), 0) as ratio
FROM pg_statio_user_indexes;

-- the next four are shell commands, not SQL

-- Dump a table on a remote host to a file
pg_dump -h HOSTNAME -p 5432 -U USERNAME -d DATABASE -F c --table TABLE > TABLE.csql

-- Restore that dump into an existing database
pg_restore -h HOSTNAME -p 5432 -U USERNAME -d DATABASE -F c --no-owner < TABLE.csql

-- Dump a whole database on a remote host to a plain-text file
pg_dump -U USERNAME -h HOSTNAME DATABASE > dump.sql

-- Import the plain-text dump into an existing database
psql -d newdb -f dump.sql

-- approximate row count for all your tables (n_live_tup is an estimate, not an exact count)
SELECT schemaname, relname, n_live_tup
FROM pg_stat_user_tables
ORDER BY n_live_tup DESC;

-- Number of connections per client address
SELECT count(client_addr), client_addr FROM pg_stat_activity GROUP BY client_addr;

-- Index and table size
SELECT
    t.tablename,
    indexname,
    c.reltuples AS num_rows,
    pg_size_pretty(pg_relation_size(quote_ident(t.tablename)::text)) AS table_size,
    pg_size_pretty(pg_relation_size(quote_ident(indexrelname)::text)) AS index_size,
    CASE WHEN indisunique THEN 'Y' ELSE 'N' END AS "unique",
    idx_scan AS number_of_scans,
    idx_tup_read AS tuples_read,
    idx_tup_fetch AS tuples_fetched
FROM pg_tables t
LEFT OUTER JOIN pg_class c ON t.tablename = c.relname
LEFT OUTER JOIN (
    SELECT c.relname AS ctablename, ipg.relname AS indexname,
           x.indnatts AS number_of_columns,
           idx_scan, idx_tup_read, idx_tup_fetch, indexrelname, indisunique
    FROM pg_index x
    JOIN pg_class c ON c.oid = x.indrelid
    JOIN pg_class ipg ON ipg.oid = x.indexrelid
    JOIN pg_stat_all_indexes psai ON x.indexrelid = psai.indexrelid
) AS foo ON t.tablename = foo.ctablename
WHERE t.schemaname = 'public'
ORDER BY 1, 2;
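
-- example: the ten largest relations (tables and indexes) by on-disk size
SELECT relname, pg_size_pretty(pg_relation_size(oid)) AS size
FROM pg_class
ORDER BY pg_relation_size(oid) DESC
LIMIT 10;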

-- list all queries that have been running for more than 1 hour
SELECT
    pid,
    age(clock_timestamp(), query_start)
FROM pg_stat_activity
WHERE state <> 'idle' AND query NOT ILIKE '%pg_stat_activity%'
  AND age(clock_timestamp(), query_start) > INTERVAL '1 hour'
ORDER BY age(clock_timestamp(), query_start) DESC;

-- Cancel all queries that have been running for more than 1 day
-- (if a backend is stuck and will not cancel, use pg_terminate_backend instead)
DO $$
DECLARE
    r RECORD;
BEGIN
    FOR r IN
        SELECT pid
        FROM pg_stat_activity
        WHERE state <> 'idle' AND query NOT ILIKE '%pg_stat_activity%'
          AND age(clock_timestamp(), query_start) > INTERVAL '1 day'
        ORDER BY age(clock_timestamp(), query_start) DESC
    LOOP
        PERFORM pg_cancel_backend(r.pid);
    END LOOP;
END $$;

--- Requires extensions ---

--- Enable pg_stat_statements: set these in postgresql.conf (a restart is needed
--- for shared_preload_libraries), then run CREATE EXTENSION pg_stat_statements;
--- in the target database.
--- shared_preload_libraries = 'pg_stat_statements'
--- track_activity_query_size = 2048
--- pg_stat_statements.track = all
--- pg_stat_statements.max = 10000

--- List queries by total_time to see which queries spend the most time in the database:
SELECT round(total_time*1000)/1000 AS total_time, query
FROM pg_stat_statements
ORDER BY total_time DESC;

--- List queries with number of calls, total time, total rows and cache hit percentage:
SELECT query, calls, total_time, rows,
       100.0 * shared_blks_hit / nullif(shared_blks_hit + shared_blks_read, 0) AS hit_percent
FROM pg_stat_statements
ORDER BY total_time DESC
LIMIT 5;

--- List queries on a per-execution basis, to sample them over time:
SELECT queryid, query, calls,
       total_time/calls AS avg_time,
       rows/calls AS avg_rows,
       temp_blks_read/calls AS avg_temp_read,
       temp_blks_written/calls AS avg_temp_written
FROM pg_stat_statements
WHERE calls != 0
ORDER BY total_time DESC
LIMIT 10;
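
--- Reset the statistics gathered by pg_stat_statements (handy before sampling a window):
SELECT pg_stat_statements_reset();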