6 changes: 2 additions & 4 deletions README.md
@@ -14,7 +14,7 @@ Questions? Ideas? Contact me: nik@postgres.ai, Nikolay Samokhvalov.

## Credits

**postgres_dba** is based on useful queries created and improved by many developers. Here is incomplete list of them:
**postgres_dba** is based on useful queries created and improved by many developers. Here is an incomplete list of them:
* Jehan-Guillaume (ioguix) de Rorthais https://github.com/ioguix/pgsql-bloat-estimation
* Alexey Lesovsky, Alexey Ermakov, Maxim Boguk, Ilya Kosmodemiansky et al. https://github.com/dataegret/pg-utils
* Josh Berkus, Quinn Weaver et al. from PostgreSQL Experts, Inc. https://github.com/pgexperts/pgx_scripts
@@ -104,9 +104,7 @@ If you are running psql and Postgres server on the same machine, just launch psq
psql -U <username> <dbname>
```

And type `:dba <Enter>` in psql. (Or `\i /path/to/postgres_dba/start.psql` if you haven't added shortcut to your `~/.psqlrc` file).

– it will open interactive menu.
And type `:dba <Enter>` in psql. (Or `\i /path/to/postgres_dba/start.psql` if you haven't added shortcut to your `~/.psqlrc` file). This will open an interactive menu.
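(Reviewer note: the `:dba` shortcut relies on psql variable interpolation. A minimal sketch of the `~/.psqlrc` entry it assumes — the path is a placeholder, and the project's own install step may phrase this differently:)

```
\set dba '\\i /path/to/postgres_dba/start.psql'
```

Typing `:dba` at the psql prompt should then expand the variable into the `\i` meta-command and run the menu script.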

### Connect to Remote Postgres Server
What to do if you need to connect to a remote Postgres server? Usually, Postgres is behind a firewall and/or doesn't listen to a public network interface. So you need to be able to connect to the server using SSH. If you can do it, then just create SSH tunnel (assuming that Postgres listens to default port 5432 on that server:
2 changes: 1 addition & 1 deletion init/generate.sh
@@ -84,5 +84,5 @@ echo " \\ir ./$OUT" >> "$OUT"
echo "\\endif" >> "$OUT"

echo "Done."
cd ->/dev/null
cd - >/dev/null
exit 0
6 changes: 3 additions & 3 deletions matviews/refresh_all.sql
@@ -1,7 +1,7 @@
-- Use this to do the very 1st REFRESH for your matviews
-- In case when there are complex relations between matviews,
-- it might perform multiple iterations and eventually refreshes
-- all matviews (either all w/o data or absolutely all -- it's up to you).
-- all matviews (either all without data or absolutely all -- it's up to you).

-- set this to TRUE here if you need ALL matviews to be refreshed, not only those that already have been refreshed
set postgres_dba.refresh_matviews_with_data = FALSE;
@@ -22,7 +22,7 @@ begin
set postgres_dba.refresh_matviews_with_data = true;
end if;
if current_setting('postgres_dba.refresh_matviews_with_data')::boolean then
raise notice 'Refreshing ALL matviews (run ''set postgres_dba.refresh_matviews_with_data_forced = TRUE;'' to refresh only matviews w/o data).';
raise notice 'Refreshing ALL matviews (run ''set postgres_dba.refresh_matviews_with_data_forced = FALSE;'' to refresh only matviews without data).';
for matview in
select format('"%s"."%s"', schemaname::text, matviewname::text)
from pg_matviews
@@ -32,7 +32,7 @@ begin
execute sql;
end loop;
else
raise notice 'Refreshing only matviews w/o data (run ''set postgres_dba.refresh_matviews_with_data_forced = TRUE;'' to refresh all matviews).';
raise notice 'Refreshing only matviews without data (run ''set postgres_dba.refresh_matviews_with_data_forced = TRUE;'' to refresh all matviews).';
end if;

iter := 1;
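(Reviewer note: for context on what the loop above iterates over, here is an illustrative standalone query — not part of the patch — showing which matviews count as "without data", i.e. never populated:)

```sql
-- matviews that have never been populated ("without data"); the script always refreshes these
select schemaname, matviewname
from pg_matviews
where not ispopulated;

-- for comparison, refreshing a single one by hand ("some_matview" is a placeholder name):
-- refresh materialized view public.some_matview;
```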
2 changes: 1 addition & 1 deletion roles/alter_user_with_random_password.psql
@@ -56,7 +56,7 @@ begin
(case when lower(current_setting('postgres_dba.login')::text) not in ('0', '', 'no', 'false', 'n', 'f') then ' login' else '' end));
raise debug 'SQL: %', sql;
execute sql;
raise debug 'User % altered, password: %', current_setting('postgres_dba.username')::text, pwd;
raise info 'User % altered, password: %', current_setting('postgres_dba.username')::text, pwd;
end;
$$ language plpgsql;

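(Reviewer note: the switch from `raise debug` to `raise info` is what makes the generated password visible — with default client settings DEBUG output is suppressed, while INFO is always delivered to the client. A small self-contained sketch, not from the patch, demonstrating the difference:)

```sql
do $$
begin
  raise debug 'suppressed under the default client_min_messages setting';
  raise info  'always sent to the client -- this is where the password now shows up';
end
$$;
```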
2 changes: 1 addition & 1 deletion sql/0_node.sql
@@ -30,7 +30,7 @@ select
end)::int)::text || ' second')::interval)::text
|| '; paused: ' || :postgres_dba_is_wal_replay_paused()::text || ')'
else
'Master'
'Primary'
end as value
union all
(
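(Reviewer note: the "Primary" label above — formerly "Master" — hinges on the standard recovery checks; an illustrative standalone query, assuming a reasonably modern Postgres:)

```sql
-- true on a replica, false on the primary
select pg_is_in_recovery();

-- on a replica you can additionally check whether WAL replay is paused
-- (this call errors out on a primary, hence the guard in the report itself):
-- select pg_is_wal_replay_paused();
```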
6 changes: 3 additions & 3 deletions sql/b1_table_estimation.sql
@@ -3,7 +3,7 @@
--This SQL is derived from https://github.com/ioguix/pgsql-bloat-estimation/blob/master/table/table_bloat.sql

/*
* WARNING: executed with a non-superuser role, the query inspect only tables you are granted to read.
* WARNING: executed with a non-superuser role, the query inspects only tables you are granted to read.
* This query is compatible with PostgreSQL 9.0 and more
*/

@@ -85,10 +85,10 @@ select
then '~' || pg_size_pretty((real_size - bloat_size)::numeric)
else null
end as "Live",
greatest(last_autovacuum, last_vacuum)::timestamp(0)::text
greatest(last_autovacuum, last_vacuum)::timestamp(0)::text
|| case greatest(last_autovacuum, last_vacuum)
when last_autovacuum then ' (auto)'
else '' end as "Last Vaccuum",
else '' end as "Last Vacuum",
(
select
coalesce(substring(array_to_string(reloptions, ' ') from 'fillfactor=([0-9]+)')::smallint, 100)
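(Reviewer note: the corrected "Last Vacuum" column is derived from the per-table vacuum timestamps in `pg_stat_user_tables`; a minimal illustrative query, not part of the patch, showing the same expression in isolation:)

```sql
select relname,
       greatest(last_autovacuum, last_vacuum)::timestamp(0) as last_vacuum_any,
       last_autovacuum,
       last_vacuum
from pg_stat_user_tables
order by 2 desc nulls last;
```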
8 changes: 4 additions & 4 deletions sql/b2_btree_estimation.sql
@@ -2,7 +2,7 @@

-- enhanced version of https://github.com/ioguix/pgsql-bloat-estimation/blob/master/btree/btree_bloat.sql

-- WARNING: executed with a non-superuser role, the query inspect only index on tables you are granted to read.
-- WARNING: executed with a non-superuser role, the query inspects only indexes on tables you are granted to read.
-- WARNING: rows with is_na = 't' are known to have bad statistics ("name" type is not supported).
-- This query is compatible with PostgreSQL 8.2+

@@ -26,9 +26,9 @@ with step1 as (
/* per tuple header: add IndexAttributeBitMapData if some cols are null-able */
case
when max(coalesce(s.null_frac,0)) = 0 then 2 -- IndexTupleData size
else 2 + (( 32 + 8 - 1 ) / 8) -- IndexTupleData size + IndexAttributeBitMapData size ( max num filed per index + 8 - 1 /8)
else 2 + (( 32 + 8 - 1 ) / 8) -- IndexTupleData size + IndexAttributeBitMapData size ( max num fields per index + 8 - 1 /8)
end as index_tuple_hdr_bm,
/* data len: we remove null values save space using it fractionnal part from stats */
/* data len: we remove null values save space using its fractional part from stats */
sum((1 - coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 1024)) as nulldatawidth,
max(case when a.atttypid = 'pg_catalog.name'::regtype then 1 else 0 end) > 0 as is_na
from pg_attribute as a
@@ -47,7 +47,7 @@ with step1 as (
s.schemaname = i.nspname
and (
(s.tablename = i.tblname and s.attname = pg_catalog.pg_get_indexdef(a.attrelid, a.attnum, true)) -- stats from tbl
OR (s.tablename = i.idxname AND s.attname = a.attname) -- stats from functionnal cols
OR (s.tablename = i.idxname AND s.attname = a.attname) -- stats from functional cols
)
join pg_type as t on a.atttypid = t.oid
where a.attnum > 0
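(Reviewer note: a quick sanity check of the header arithmetic referenced in the corrected comment — illustrative only, the constants come from the comment itself:)

```sql
-- 2-byte IndexTupleData header plus the null bitmap sized for up to 32 index columns;
-- integer division: (32 + 8 - 1) / 8 = 4, so 6 bytes in total when any column is nullable
select 2 + ((32 + 8 - 1) / 8) as index_tuple_hdr_bm;
```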
8 changes: 4 additions & 4 deletions sql/b3_table_pgstattuple.sql
@@ -1,8 +1,8 @@
--Table bloat (requires pgstattuple; expensive)

--https://github.com/dataegret/pg-utils/tree/master/sql
--pgstattuple extension required
--WARNING: without table name/mask query will read all available tables which could cause I/O spikes
-- https://github.com/dataegret/pg-utils/tree/master/sql
-- pgstattuple extension required
-- WARNING: without table name/mask query will read all available tables which could cause I/O spikes
select nspname,
relname,
pg_size_pretty(relation_size + toast_relation_size) as total_size,
@@ -22,7 +22,7 @@ from (
left join pg_namespace n on (n.oid = c.relnamespace)
where nspname not in ('pg_catalog', 'information_schema')
and nspname !~ '^pg_toast' and relkind = 'r'
--put your table name/mask here
-- put your table name/mask here
and relname ~ ''
) t
order by (toast_free_space + relation_size - (relation_size - free_space)*100/fillfactor) desc
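(Reviewer note: for anyone unfamiliar with the extension, the report relies on `pgstattuple()`, which can also be pointed at a single table directly; a hedged sketch where `some_table` is a placeholder:)

```sql
create extension if not exists pgstattuple;

-- exact (and expensive) per-table statistics: live/dead tuples, free space, etc.
select * from pgstattuple('public.some_table');
```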
8 changes: 4 additions & 4 deletions sql/b4_btree_pgstattuple.sql
@@ -1,8 +1,8 @@
--B-tree indexes bloat (requires pgstattuple; expensive)

--https://github.com/dataegret/pg-utils/tree/master/sql
--pgstattuple extension required
--WARNING: without index name/mask query will read all available indexes which could cause I/O spikes
-- https://github.com/dataegret/pg-utils/tree/master/sql
-- pgstattuple extension required
-- WARNING: without index name/mask query will read all available indexes which could cause I/O spikes
with data as (
select
schemaname as schema_name,
@@ -25,7 +25,7 @@ with data as (
join pg_class c_table on p.relid = c_table.oid
where
pg_get_indexdef(p.indexrelid) like '%USING btree%'
--put your index name/mask here
-- put your index name/mask here
and indexrelname ~ ''
)
select
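(Reviewer note: the index-level counterpart is `pgstatindex()` from the same pgstattuple extension; an illustrative single-index check where `some_index` is a placeholder:)

```sql
-- leaf density and fragmentation for one B-tree index
select * from pgstatindex('public.some_index');
```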
8 changes: 1 addition & 7 deletions sql/i2_redundant_indexes.sql
@@ -7,7 +7,7 @@
-- -- so feel free to use it in your clouds (Heroku, AWS RDS, etc)

-- (Keep in mind, that on replicas, the whole picture of index usage
-- is usually very different from master).
-- is usually very different from the primary).

with fk_indexes as (
select
@@ -116,12 +116,6 @@ redundant_indexes_tmp_num as (
*
from redundant_indexes_tmp_cut
order by index_size_bytes desc
), redundant_indexes_grouped as (
select
distinct(num),
*
from redundant_indexes_tmp_cut
order by index_size_bytes desc
)
select
schema_name,
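(Reviewer note: regarding the replica caveat above — index usage counters are tracked per node, so before dropping a "redundant" index it is worth comparing counters on the primary and on each replica. An illustrative query, not part of the patch:)

```sql
-- run on the primary and on every replica; an index unused here may still be used elsewhere
select schemaname, relname, indexrelname, idx_scan, idx_tup_read
from pg_stat_user_indexes
order by idx_scan;
```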
2 changes: 1 addition & 1 deletion sql/i4_invalid_indexes.sql
@@ -7,7 +7,7 @@
-- -- so feel free to use it in your clouds (Heroku, AWS RDS, etc)

-- (Keep in mind, that on replicas, the whole picture of index usage
-- is usually very different from master).
-- is usually very different from the primary).

select
coalesce(nullif(pn.nspname, 'public') || '.', '') || pct.relname as "relation_name",
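(Reviewer note: for context, "invalid" indexes are flagged in the catalog, typically left behind by a failed `CREATE INDEX CONCURRENTLY`; a minimal illustrative check:)

```sql
select indexrelid::regclass as index_name,
       indrelid::regclass  as table_name
from pg_index
where not indisvalid;
```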
4 changes: 2 additions & 2 deletions sql/i5_indexes_migration.sql
@@ -21,9 +21,9 @@

-- It also doesn't do anything except reading system catalogs and
-- printing NOTICEs, so you can easily run it on your
-- production *master* database.
-- production *primary* database.
-- (Keep in mind, that on replicas, the whole picture of index usage
-- is usually very different from master).
-- is usually very different from the primary).

-- TODO: take into account type of index and opclass
-- TODO: schemas
2 changes: 1 addition & 1 deletion sql/l1_lock_trees.sql
@@ -28,7 +28,7 @@ with recursive l as (
select (clock_timestamp() - a.xact_start)::interval(0) as ts_age,
(clock_timestamp() - a.state_change)::interval(0) as change_age,
a.datname,a.usename,a.client_addr,
--w.obj wait_on_object,
-- w.obj wait_on_object,
tree.pid,replace(a.state, 'idle in transaction', 'idletx') state,
lvl,(select count(*) from tree p where p.path ~ ('^'||tree.path) and not p.path=tree.path) blocked,
case when tree.pid=any(tree.dl) then '!>' else repeat(' .', lvl) end||' '||trim(left(regexp_replace(a.query, e'\\s+', ' ', 'g'),100)) query
2 changes: 1 addition & 1 deletion sql/l2_lock_trees.sql
@@ -61,7 +61,7 @@ select
format(
'%s %s%s',
lpad('[' || pid::text || ']', 7, ' '),
repeat('.', level - 1) || case when level > 1 then ' ' end,
repeat('.', level - 1) || case when level > 1 then ' ' else '' end,
left(query, 1000)
) as query
from tree
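(Reviewer note: the `else ''` added above fixes a NULL-propagation bug — a CASE with no ELSE branch yields NULL, and concatenating NULL swallows the whole string. A tiny sketch demonstrating it:)

```sql
select 'pid' || case when false then ' ' end          as without_else,  -- NULL: the query text disappears
       'pid' || case when false then ' ' else '' end  as with_else;     -- 'pid': the concatenation survives
```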
2 changes: 1 addition & 1 deletion sql/p1_alignment_padding.sql
@@ -155,7 +155,7 @@ with recursive constants as (
r1.shifts,
r2.cols as alt_cols,
r2.types as alt_types,
r2.shifts as alt_shits,
r2.shifts as alt_shifts,
r1.pads,
r1.curleft,
r2.pads as alt_pads,
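(Reviewer note: since this report is about alignment padding, here is a small illustrative demonstration of the effect it estimates; exact byte counts depend on the platform, hence no hard numbers in the comments:)

```sql
-- the same two values, different column order: the boolean-first row is padded
-- up to the bigint's 8-byte alignment boundary and ends up a few bytes larger
select pg_column_size(row(true, 1::bigint)) as bool_then_bigint,
       pg_column_size(row(1::bigint, true)) as bigint_then_bool;
```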
1 change: 0 additions & 1 deletion sql/s1_pg_stat_statements_top_total.sql
@@ -30,7 +30,6 @@ select
round(max(max_plan_time)::numeric, 2)
) as min_max_plan_t,
\else
sum(calls) as calls,
round(sum(total_time)::numeric, 2) as total_time,
round((sum(mean_time * calls) / sum(calls))::numeric, 2) as mean_time,
format(
4 changes: 2 additions & 2 deletions sql/s2_pg_stat_statements_report.sql
@@ -46,7 +46,7 @@ with pg_stat_statements_slice as (
select
(select datname from pg_database where oid = p.dbid) as database,
(select rolname from pg_roles where oid = p.userid) as username,
--select shortest query, replace \n\n-- strings to avoid email clients format text as footer
-- select shortest query, replace \n\n-- strings to avoid email clients formatting text as footer
substring(
translate(
replace(
@@ -205,7 +205,7 @@ with pg_stat_statements_slice as (
select
(select datname from pg_database where oid = p.dbid) as database,
(select rolname from pg_roles where oid = p.userid) as username,
--select shortest query, replace \n\n-- strings to avoid email clients format text as footer
-- select shortest query, replace \n\n-- strings to avoid email clients formatting text as footer
substring(
translate(
replace(
1 change: 0 additions & 1 deletion sql/t1_tuning.sql
@@ -110,7 +110,6 @@ where
'autovacuum_vacuum_scale_factor',
'autovacuum_work_mem',
'autovacuum_naptime',
'random_page_cost',
'seq_page_cost'
)
order by category, name;
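(Reviewer note: with the duplicated `'random_page_cost'` entry removed, the settings list is unique again; an illustrative way to eyeball the current values of a few of the settings named above:)

```sql
select name, setting, unit, source
from pg_settings
where name in ('autovacuum_naptime', 'autovacuum_work_mem',
               'random_page_cost', 'seq_page_cost')
order by name;
```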
2 changes: 1 addition & 1 deletion sql/v2_autovacuum_progress_and_queue.sql
@@ -30,7 +30,7 @@ with table_opts as (
from pg_stat_progress_vacuum
)
select
--vacuum_settings.oid,
-- vacuum_settings.oid,
coalesce(
coalesce(nullif(vacuum_settings.nspname, 'public') || '.', '') || vacuum_settings.relname, -- current DB
format('[something in "%I"]', p.datname)
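(Reviewer note: for reference, the progress data this report builds on comes from `pg_stat_progress_vacuum`; an illustrative standalone query, not part of the patch:)

```sql
select pid,
       datname,
       relid::regclass as relation,
       phase,
       heap_blks_scanned,
       heap_blks_total
from pg_stat_progress_vacuum;
```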