diff --git a/Dockerfile b/Dockerfile index 9c663ab..05aee50 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,68 +1,21 @@ ARG ALPINE_VERSION=3.22 ARG DENO_VERSION=2.4.5 -FROM alpine:${ALPINE_VERSION} AS pg-builder +ARG PG_IMAGE=ghcr.io/query-doctor/postgres:pg14-timescale-2.16 -# Install build dependencies -RUN apk add --no-cache \ - build-base \ - git \ - openssh-client \ - readline-dev \ - zlib-dev \ - flex \ - bison \ - perl \ - bash \ - cmake \ - openssl-dev \ - krb5-dev \ - linux-headers - -# Clone PostgreSQL 17 from official source -RUN git clone --depth 1 --branch REL_17_STABLE https://github.com/postgres/postgres.git /postgres - -WORKDIR /postgres - -# Copy and apply the patch -COPY patches/pg17/zero_cost_plan.patch /tmp -RUN git apply /tmp/*.patch - -# Build PostgreSQL with debug flags -RUN ./configure \ - --without-icu \ - --with-openssl \ - --prefix=/usr/local/pgsql - -RUN make -j$(nproc) all -RUN make install - -RUN cd contrib && make -j$(nproc) && make install - -# Clone and build TimescaleDB -ARG TIMESCALEDB_VERSION=2.24.0 -WORKDIR /timescaledb -RUN git clone --depth 1 --branch ${TIMESCALEDB_VERSION} https://github.com/timescale/timescaledb.git . 
- -# Bootstrap and build TimescaleDB -RUN ./bootstrap -DREGRESS_CHECKS=OFF -DPG_CONFIG=/usr/local/pgsql/bin/pg_config -RUN cd build && make -j$(nproc) -RUN cd build && make install - -# Adapted from https://github.com/dojyorin/deno_docker_image/blob/master/src/alpine.dockerfile FROM denoland/deno:alpine-${DENO_VERSION} AS deno RUN apk add --no-cache \ perl \ curl \ make \ - git \ - postgresql-client + postgresql-client \ + git -# Download, build, and install pgBadger ARG PGBADGER_VERSION=13.2 WORKDIR /tmp -RUN curl -L https://github.com/darold/pgbadger/archive/v${PGBADGER_VERSION}.tar.gz | tar -xzf - && \ +RUN curl -L https://github.com/darold/pgbadger/archive/v${PGBADGER_VERSION}.tar.gz | \ + tar -xzf - && \ cd pgbadger-${PGBADGER_VERSION} && \ perl Makefile.PL && \ make && \ make install && \ @@ -95,33 +48,33 @@ RUN deno compile \ src/main.ts FROM alpine:${ALPINE_VERSION} +ARG PG_IMAGE ENV LD_LIBRARY_PATH="/usr/local/lib" RUN apk add -uU --no-cache \ - postgresql-client \ readline \ zlib \ bash \ su-exec \ openssl \ + ossp-uuid \ + postgresql-client \ krb5 -COPY --from=deno --chmod=755 --chown=root:root /usr/bin/pg_dump /usr/bin/pg_dump COPY --from=build --chmod=755 --chown=root:root /app/analyzer /app/analyzer COPY --from=cc --chmod=755 --chown=root:root /lib/*-linux-gnu/* /usr/local/lib/ COPY --from=sym --chmod=755 --chown=root:root /tmp/lib /lib COPY --from=sym --chmod=755 --chown=root:root /tmp/lib /lib64 -# Copy PostgreSQL installation from builder -COPY --from=pg-builder /usr/local/pgsql /usr/local/pgsql +COPY --from=${PG_IMAGE} /usr/local/pgsql /usr/local/pgsql -# Setup postgres user and directories RUN mkdir -p /var/lib/postgresql/data \ && chown -R postgres:postgres /var/lib/postgresql \ && chown -R postgres:postgres /usr/local/pgsql \ && chmod 1777 /tmp WORKDIR /app +# making sure we use the binaries from the installed postgresql17 client ENV PG_DUMP_BINARY=/usr/bin/pg_dump ENV PG_RESTORE_BINARY=/usr/bin/pg_restore ENV 
PATH="/usr/local/pgsql/bin:$PATH" @@ -131,15 +84,16 @@ RUN sed -i 's|nobody:/|nobody:/home|' /etc/passwd && chown nobody:nobody /home ENV POSTGRES_URL=postgresql://postgres@localhost/postgres?host=/tmp -EXPOSE 5432 +RUN su-exec postgres initdb -D $PGDATA || true && \ + echo "shared_preload_libraries = 'timescaledb,pg_stat_statements'" >> $PGDATA/postgresql.conf && \ + echo "listen_addresses = ''" >> $PGDATA/postgresql.conf && \ + echo "unix_socket_directories = '/tmp'" >> $PGDATA/postgresql.conf + +USER postgres + +EXPOSE 2345 -# Development command - starts both PostgreSQL and the analyzer CMD ["/bin/bash", "-c", "\ - su-exec postgres initdb -D $PGDATA || true && \ - echo \"shared_preload_libraries = 'timescaledb,pg_stat_statements'\" >> $PGDATA/postgresql.conf && \ - echo \"max_locks_per_transaction = 256\" >> $PGDATA/postgresql.conf && \ - echo \"listen_addresses = ''\" >> $PGDATA/postgresql.conf && \ - echo \"unix_socket_directories = '/tmp'\" >> $PGDATA/postgresql.conf && \ - su-exec postgres pg_ctl -D $PGDATA -l $PGDATA/logfile start || (cat $PGDATA/logfile && exit 1) && \ - until su-exec postgres pg_isready -h /tmp; do sleep 0.5; done && \ + pg_ctl -D $PGDATA -l $PGDATA/logfile start || (cat $PGDATA/logfile && exit 1) && \ + until pg_isready -h /tmp; do sleep 0.5; done && \ /app/analyzer"] diff --git a/Dockerfile.dev b/Dockerfile.dev deleted file mode 100644 index 5145a71..0000000 --- a/Dockerfile.dev +++ /dev/null @@ -1,31 +0,0 @@ -FROM denoland/deno:alpine - -# Install pgbadger dependencies -RUN apk add --no-cache \ - perl \ - wget \ - make \ - git - -# Download, build, and install pgBadger -ARG PGBADGER_VERSION=13.1 -WORKDIR /tmp -RUN wget https://github.com/darold/pgbadger/archive/v${PGBADGER_VERSION}.tar.gz && \ - tar -xzf v${PGBADGER_VERSION}.tar.gz && \ - cd pgbadger-${PGBADGER_VERSION} && \ - perl Makefile.PL && \ - make && \ - make install && \ - rm -rf /tmp/pgbadger* - -# RUN curl -L 
https://github.com/supabase-community/postgres-language-server/releases/download//postgrestools_aarch64-apple-darwin -o postgrestools -# RUN chmod +x postgrestools - -WORKDIR /app - -# Copy dependency files -COPY deno.json deno.lock* ./ - -RUN deno install --frozen-lockfile -# Development command -CMD ["deno", "run", "dev"] diff --git a/patches/pg14/generic_plan_backport.patch b/patches/pg14/generic_plan_backport.patch deleted file mode 100644 index 23617c4..0000000 --- a/patches/pg14/generic_plan_backport.patch +++ /dev/null @@ -1,334 +0,0 @@ -commit 33aae1f840b94292d91446728c3d4c8c23ae54b6 -Author: Xetera -Date: Tue Jan 13 16:54:28 2026 +0300 - - experimental: backport generic plan to pg14 - -diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c -index 70551522dac..bc60cae80fc 100644 ---- a/src/backend/commands/explain.c -+++ b/src/backend/commands/explain.c -@@ -190,6 +190,8 @@ ExplainQuery(ParseState *pstate, ExplainStmt *stmt, - es->wal = defGetBoolean(opt); - else if (strcmp(opt->defname, "settings") == 0) - es->settings = defGetBoolean(opt); -+ else if (strcmp(opt->defname, "generic_plan") == 0) -+ es->generic = defGetBoolean(opt); - else if (strcmp(opt->defname, "timing") == 0) - { - timing_set = true; -@@ -227,6 +229,7 @@ ExplainQuery(ParseState *pstate, ExplainStmt *stmt, - parser_errposition(pstate, opt->location))); - } - -+ /* check that WAL is used with EXPLAIN ANALYZE */ - if (es->wal && !es->analyze) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), -@@ -241,6 +244,12 @@ ExplainQuery(ParseState *pstate, ExplainStmt *stmt, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("EXPLAIN option TIMING requires ANALYZE"))); - -+ /* check that GENERIC_PLAN is not used with EXPLAIN ANALYZE */ -+ if (es->generic && es->analyze) -+ ereport(ERROR, -+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE), -+ errmsg("EXPLAIN options ANALYZE and GENERIC_PLAN cannot be used together"))); -+ - /* if the summary was not set explicitly, set 
default value */ - es->summary = (summary_set) ? es->summary : es->analyze; - -@@ -572,6 +581,8 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es, - eflags = 0; /* default run-to-completion flags */ - else - eflags = EXEC_FLAG_EXPLAIN_ONLY; -+ if (es->generic) -+ eflags |= EXEC_FLAG_EXPLAIN_GENERIC; - if (into) - eflags |= GetIntoRelEFlags(into); - -diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c -index 5d6410480cd..e0983cadaed 100644 ---- a/src/backend/executor/execMain.c -+++ b/src/backend/executor/execMain.c -@@ -910,7 +910,7 @@ InitPlan(QueryDesc *queryDesc, int eflags) - * prepared to handle REWIND efficiently; otherwise there is no need. - */ - sp_eflags = eflags -- & (EXEC_FLAG_EXPLAIN_ONLY | EXEC_FLAG_WITH_NO_DATA); -+ & ~(EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK); - if (bms_is_member(i, plannedstmt->rewindPlanIDs)) - sp_eflags |= EXEC_FLAG_REWIND; - -diff --git a/src/backend/executor/execPartition.c b/src/backend/executor/execPartition.c -index 5386e665a94..0a5216a7f1b 100644 ---- a/src/backend/executor/execPartition.c -+++ b/src/backend/executor/execPartition.c -@@ -1719,7 +1719,7 @@ ExecCreatePartitionPruneState(PlanState *planstate, - * Initialize pruning contexts as needed. 
- */ - pprune->initial_pruning_steps = pinfo->initial_pruning_steps; -- if (pinfo->initial_pruning_steps) -+ if (pinfo->initial_pruning_steps && !(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_GENERIC)) - { - ExecInitPruningContext(&pprune->initial_context, - pinfo->initial_pruning_steps, -@@ -1728,7 +1728,7 @@ ExecCreatePartitionPruneState(PlanState *planstate, - prunestate->do_initial_prune = true; - } - pprune->exec_pruning_steps = pinfo->exec_pruning_steps; -- if (pinfo->exec_pruning_steps) -+ if (pinfo->exec_pruning_steps && !(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_GENERIC)) - { - ExecInitPruningContext(&pprune->exec_context, - pinfo->exec_pruning_steps, -diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c -index fcb79c12fb5..35c9255377a 100644 ---- a/src/backend/parser/analyze.c -+++ b/src/backend/parser/analyze.c -@@ -27,6 +27,7 @@ - #include "access/sysattr.h" - #include "catalog/pg_proc.h" - #include "catalog/pg_type.h" -+#include "commands/defrem.h" - #include "miscadmin.h" - #include "nodes/makefuncs.h" - #include "nodes/nodeFuncs.h" -@@ -2898,10 +2899,38 @@ static Query * - transformExplainStmt(ParseState *pstate, ExplainStmt *stmt) - { - Query *result; -+ bool generic_plan = false; -+ Oid *paramTypes = NULL; -+ int numParams = 0; -+ -+ /* -+ * If we have no external source of parameter definitions, and the -+ * GENERIC_PLAN option is specified, then accept variable parameter -+ * definitions (similarly to PREPARE, for example). 
-+ */ -+ if (pstate->p_paramref_hook == NULL) -+ { -+ ListCell *lc; -+ -+ foreach(lc, stmt->options) -+ { -+ DefElem *opt = (DefElem *) lfirst(lc); -+ -+ if (strcmp(opt->defname, "generic_plan") == 0) -+ generic_plan = defGetBoolean(opt); -+ /* don't "break", as we want the last value */ -+ } -+ if (generic_plan) -+ setup_parse_variable_parameters(pstate, ¶mTypes, &numParams); -+ } - - /* transform contained query, allowing SELECT INTO */ - stmt->query = (Node *) transformOptionalSelectInto(pstate, stmt->query); - -+ /* make sure all is well with parameter types */ -+ if (generic_plan) -+ check_variable_parameters(pstate, (Query *) stmt->query); -+ - /* represent the command as a utility Query */ - result = makeNode(Query); - result->commandType = CMD_UTILITY; -diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c -index 4f0416d6b30..2ef37dea1b5 100644 ---- a/src/bin/psql/tab-complete.c -+++ b/src/bin/psql/tab-complete.c -@@ -3270,9 +3270,9 @@ psql_completion(const char *text, int start, int end) - * one word, so the above test is correct. 
- */ - if (ends_with(prev_wd, '(') || ends_with(prev_wd, ',')) -- COMPLETE_WITH("ANALYZE", "VERBOSE", "COSTS", "SETTINGS", -+ COMPLETE_WITH("ANALYZE", "VERBOSE", "COSTS", "SETTINGS", "GENERIC_PLAN", - "BUFFERS", "WAL", "TIMING", "SUMMARY", "FORMAT"); -- else if (TailMatches("ANALYZE|VERBOSE|COSTS|SETTINGS|BUFFERS|WAL|TIMING|SUMMARY")) -+ else if (TailMatches("ANALYZE|VERBOSE|COSTS|SETTINGS|GENERIC_PLAN|BUFFERS|WAL|TIMING|SUMMARY")) - COMPLETE_WITH("ON", "OFF"); - else if (TailMatches("FORMAT")) - COMPLETE_WITH("TEXT", "XML", "JSON", "YAML"); -diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h -index e94d9e49cf6..8175402bd36 100644 ---- a/src/include/commands/explain.h -+++ b/src/include/commands/explain.h -@@ -46,6 +46,7 @@ typedef struct ExplainState - bool timing; /* print detailed node timing */ - bool summary; /* print total planning and execution timing */ - bool settings; /* print modified settings */ -+ bool generic; /* generate a generic plan */ - ExplainFormat format; /* output format */ - /* state for output formatting --- not reset for each new plan tree */ - int indent; /* current indentation level */ -diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h -index 4bb1744286a..dc4ddf5a022 100644 ---- a/src/include/executor/executor.h -+++ b/src/include/executor/executor.h -@@ -36,6 +36,11 @@ - * of startup should occur. However, error checks (such as permission checks) - * should be performed. - * -+ * EXPLAIN_GENERIC can only be used together with EXPLAIN_ONLY. It indicates -+ * that a generic plan is being shown using EXPLAIN (GENERIC_PLAN), which -+ * means that missing parameter values must be tolerated. Currently, the only -+ * effect is to suppress execution-time partition pruning. -+ * - * REWIND indicates that the plan node should try to efficiently support - * rescans without parameter changes. 
(Nodes must support ExecReScan calls - * in any case, but if this flag was not given, they are at liberty to do it -@@ -52,13 +57,18 @@ - * AfterTriggerBeginQuery/AfterTriggerEndQuery. This does not necessarily - * mean that the plan can't queue any AFTER triggers; just that the caller - * is responsible for there being a trigger context for them to be queued in. -+ * -+ * WITH_NO_DATA indicates that we are performing REFRESH MATERIALIZED VIEW -+ * ... WITH NO DATA. Currently, the only effect is to suppress errors about -+ * scanning unpopulated materialized views. - */ --#define EXEC_FLAG_EXPLAIN_ONLY 0x0001 /* EXPLAIN, no ANALYZE */ --#define EXEC_FLAG_REWIND 0x0002 /* need efficient rescan */ --#define EXEC_FLAG_BACKWARD 0x0004 /* need backward scan */ --#define EXEC_FLAG_MARK 0x0008 /* need mark/restore */ --#define EXEC_FLAG_SKIP_TRIGGERS 0x0010 /* skip AfterTrigger calls */ --#define EXEC_FLAG_WITH_NO_DATA 0x0020 /* rel scannability doesn't matter */ -+#define EXEC_FLAG_EXPLAIN_ONLY 0x0001 /* EXPLAIN, no ANALYZE */ -+#define EXEC_FLAG_EXPLAIN_GENERIC 0x0002 /* EXPLAIN (GENERIC_PLAN) */ -+#define EXEC_FLAG_REWIND 0x0004 /* need efficient rescan */ -+#define EXEC_FLAG_BACKWARD 0x0008 /* need backward scan */ -+#define EXEC_FLAG_MARK 0x0010 /* need mark/restore */ -+#define EXEC_FLAG_SKIP_TRIGGERS 0x0020 /* skip AfterTrigger setup */ -+#define EXEC_FLAG_WITH_NO_DATA 0x0040 /* REFRESH ... 
WITH NO DATA */ - - - /* Hook for plugins to get control in ExecutorStart() */ -diff --git a/src/test/regress/expected/explain.out b/src/test/regress/expected/explain.out -index cda28098baa..8cfc7a6b7de 100644 ---- a/src/test/regress/expected/explain.out -+++ b/src/test/regress/expected/explain.out -@@ -290,6 +290,48 @@ select explain_filter_to_json('explain (settings, format json) select * from int - (1 row) - - rollback; -+-- GENERIC_PLAN option -+select explain_filter('explain (generic_plan) select unique1 from tenk1 where thousand = $1'); -+ explain_filter -+--------------------------------------------------------------------------------- -+ Bitmap Heap Scan on tenk1 (cost=N.N..N.N rows=N width=N) -+ Recheck Cond: (thousand = $N) -+ -> Bitmap Index Scan on tenk1_thous_tenthous (cost=N.N..N.N rows=N width=N) -+ Index Cond: (thousand = $N) -+(4 rows) -+ -+-- should fail -+select explain_filter('explain (analyze, generic_plan) select unique1 from tenk1 where thousand = $1'); -+ERROR: EXPLAIN options ANALYZE and GENERIC_PLAN cannot be used together -+CONTEXT: PL/pgSQL function explain_filter(text) line 5 at FOR over EXECUTE statement -+-- Test EXPLAIN (GENERIC_PLAN) with partition pruning -+-- partitions should be pruned at plan time, based on constants, -+-- but there should be no pruning based on parameter placeholders -+create table gen_part ( -+ key1 integer not null, -+ key2 integer not null -+) partition by list (key1); -+create table gen_part_1 -+ partition of gen_part for values in (1) -+ partition by range (key2); -+create table gen_part_1_1 -+ partition of gen_part_1 for values from (1) to (2); -+create table gen_part_1_2 -+ partition of gen_part_1 for values from (2) to (3); -+create table gen_part_2 -+ partition of gen_part for values in (2); -+-- should scan gen_part_1_1 and gen_part_1_2, but not gen_part_2 -+select explain_filter('explain (generic_plan) select key1, key2 from gen_part where key1 = 1 and key2 = $1'); -+ explain_filter 
-+--------------------------------------------------------------------------- -+ Append (cost=N.N..N.N rows=N width=N) -+ -> Seq Scan on gen_part_1_1 gen_part_1 (cost=N.N..N.N rows=N width=N) -+ Filter: ((key1 = N) AND (key2 = $N)) -+ -> Seq Scan on gen_part_1_2 gen_part_2 (cost=N.N..N.N rows=N width=N) -+ Filter: ((key1 = N) AND (key2 = $N)) -+(5 rows) -+ -+drop table gen_part; - -- - -- Test production of per-worker data - -- -diff --git a/src/test/regress/sql/explain.sql b/src/test/regress/sql/explain.sql -index 3f9ae9843a2..820d90f979f 100644 ---- a/src/test/regress/sql/explain.sql -+++ b/src/test/regress/sql/explain.sql -@@ -75,6 +75,32 @@ select true as "OK" - select explain_filter_to_json('explain (settings, format json) select * from int8_tbl i8') #> '{0,Settings,plan_cache_mode}'; - rollback; - -+-- GENERIC_PLAN option -+ -+select explain_filter('explain (generic_plan) select unique1 from tenk1 where thousand = $1'); -+-- should fail -+select explain_filter('explain (analyze, generic_plan) select unique1 from tenk1 where thousand = $1'); -+ -+-- Test EXPLAIN (GENERIC_PLAN) with partition pruning -+-- partitions should be pruned at plan time, based on constants, -+-- but there should be no pruning based on parameter placeholders -+create table gen_part ( -+ key1 integer not null, -+ key2 integer not null -+) partition by list (key1); -+create table gen_part_1 -+ partition of gen_part for values in (1) -+ partition by range (key2); -+create table gen_part_1_1 -+ partition of gen_part_1 for values from (1) to (2); -+create table gen_part_1_2 -+ partition of gen_part_1 for values from (2) to (3); -+create table gen_part_2 -+ partition of gen_part for values in (2); -+-- should scan gen_part_1_1 and gen_part_1_2, but not gen_part_2 -+select explain_filter('explain (generic_plan) select key1, key2 from gen_part where key1 = 1 and key2 = $1'); -+drop table gen_part; -+ - -- - -- Test production of per-worker data - -- -diff --git 
a/src/backend/parser/parse_param.c b/src/backend/parser/parse_param.c -index 68a55343939..55125702485 100644 ---- a/src/backend/parser/parse_param.c -+++ b/src/backend/parser/parse_param.c -@@ -92,6 +92,22 @@ parse_variable_parameters(ParseState *pstate, - pstate->p_coerce_param_hook = variable_coerce_param_hook; - } - -+/* -+ * Set up to process a query containing references to variable parameters. -+ */ -+void -+setup_parse_variable_parameters(ParseState *pstate, -+ Oid **paramTypes, int *numParams) -+{ -+ VarParamState *parstate = palloc_object(VarParamState); -+ -+ parstate->paramTypes = paramTypes; -+ parstate->numParams = numParams; -+ pstate->p_ref_hook_state = parstate; -+ pstate->p_paramref_hook = variable_paramref_hook; -+ pstate->p_coerce_param_hook = variable_coerce_param_hook; -+} -+ - /* - * Transform a ParamRef using fixed parameter types. - */ -diff --git a/src/include/parser/parse_param.h b/src/include/parser/parse_param.h -index b42fff296ce..fee8aa557ca 100644 ---- a/src/include/parser/parse_param.h -+++ b/src/include/parser/parse_param.h -@@ -19,6 +19,8 @@ extern void parse_fixed_parameters(ParseState *pstate, - Oid *paramTypes, int numParams); - extern void parse_variable_parameters(ParseState *pstate, - Oid **paramTypes, int *numParams); -+extern void setup_parse_variable_parameters(ParseState *pstate, -+ Oid **paramTypes, int *numParams); - extern void check_variable_parameters(ParseState *pstate, Query *query); - extern bool query_contains_extern_params(Query *query); - diff --git a/patches/pg14/zero_cost_plan.patch b/patches/pg14/zero_cost_plan.patch deleted file mode 100644 index 7e8a0de..0000000 --- a/patches/pg14/zero_cost_plan.patch +++ /dev/null @@ -1,54 +0,0 @@ -commit b19fbdf7db9fff706c43d1dedd49c0d45e7a0291 -Author: Xetera -Date: Mon Jan 12 20:18:33 2026 +0300 - - fix: use stats for block size in planner - -diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c -index 66f0f84386c..eed9368f11a 100644 ---- 
a/src/backend/access/table/tableam.c -+++ b/src/backend/access/table/tableam.c -@@ -675,7 +675,7 @@ table_block_relation_estimate_size(Relation rel, int32 *attr_widths, - double density; - - /* it should have storage, so we can call the smgr */ -- curpages = RelationGetNumberOfBlocks(rel); -+ curpages = (BlockNumber) rel->rd_rel->relpages; - - /* coerce values in pg_class to more desirable types */ - relpages = (BlockNumber) rel->rd_rel->relpages; -diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c -index b5917309a5e..532f0c39b4e 100644 ---- a/src/backend/commands/analyze.c -+++ b/src/backend/commands/analyze.c -@@ -203,7 +203,7 @@ analyze_rel(Oid relid, RangeVar *relation, - /* Regular table, so we'll use the regular row acquisition function */ - acquirefunc = acquire_sample_rows; - /* Also get regular table's size */ -- relpages = RelationGetNumberOfBlocks(onerel); -+ relpages = onerel->rd_rel->relpages; - } - else if (onerel->rd_rel->relkind == RELKIND_FOREIGN_TABLE) - { -diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c -index e7856b54bbb..5e73448046e 100644 ---- a/src/backend/optimizer/util/plancat.c -+++ b/src/backend/optimizer/util/plancat.c -@@ -417,7 +417,7 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, - */ - if (info->indpred == NIL) - { -- info->pages = RelationGetNumberOfBlocks(indexRelation); -+ info->pages = indexRelation->rd_rel->relpages; - info->tuples = rel->tuples; - } - else -@@ -1022,7 +1022,7 @@ estimate_rel_size(Relation rel, int32 *attr_widths, - */ - - /* it has storage, ok to call the smgr */ -- curpages = RelationGetNumberOfBlocks(rel); -+ curpages = rel->rd_rel->relpages; - - /* report estimated # pages */ - *pages = curpages; diff --git a/patches/pg17/zero_cost_plan.patch b/patches/pg17/zero_cost_plan.patch deleted file mode 100644 index 1f85748..0000000 --- a/patches/pg17/zero_cost_plan.patch +++ /dev/null @@ -1,79 +0,0 @@ -commit 
af216329e97ae69911a017c6ac1b8723dc5df028 -Author: Xetera -Date: Mon Sep 29 23:49:25 2025 +0300 - - patch: use pg_class stats for all page estimates in the planner - -diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c -index e57a0b7ea3..a85697cd0b 100644 ---- a/src/backend/access/table/tableam.c -+++ b/src/backend/access/table/tableam.c -@@ -663,7 +663,8 @@ table_block_relation_estimate_size(Relation rel, int32 *attr_widths, - double density; - - /* it should have storage, so we can call the smgr */ -- curpages = RelationGetNumberOfBlocks(rel); -+ // curpages = RelationGetNumberOfBlocks(rel); -+ curpages = (BlockNumber) rel->rd_rel->relpages; - - /* coerce values in pg_class to more desirable types */ - relpages = (BlockNumber) rel->rd_rel->relpages; -diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c -index c590a2adc3..c70ffa70cd 100644 ---- a/src/backend/commands/analyze.c -+++ b/src/backend/commands/analyze.c -@@ -193,7 +193,8 @@ analyze_rel(Oid relid, RangeVar *relation, - /* Regular table, so we'll use the regular row acquisition function */ - acquirefunc = acquire_sample_rows; - /* Also get regular table's size */ -- relpages = RelationGetNumberOfBlocks(onerel); -+ // relpages = RelationGetNumberOfBlocks(onerel); -+ relpages = onerel->rd_rel->relpages; - } - else if (onerel->rd_rel->relkind == RELKIND_FOREIGN_TABLE) - { -@@ -1177,7 +1178,8 @@ acquire_sample_rows(Relation onerel, int elevel, - - Assert(targrows > 0); - -- totalblocks = RelationGetNumberOfBlocks(onerel); -+ // totalblocks = RelationGetNumberOfBlocks(onerel); -+ totalblocks = onerel->rd_rel->relpages; - - /* Need a cutoff xmin for HeapTupleSatisfiesVacuum */ - OldestXmin = GetOldestNonRemovableTransactionId(onerel); -@@ -1423,7 +1425,8 @@ acquire_inherited_sample_rows(Relation onerel, int elevel, - { - /* Regular table, so use the regular row acquisition function */ - acquirefunc = acquire_sample_rows; -- relpages = 
RelationGetNumberOfBlocks(childrel); -+ // relpages = RelationGetNumberOfBlocks(childrel); -+ relpages = childrel->rd_rel->relpages; - } - else if (childrel->rd_rel->relkind == RELKIND_FOREIGN_TABLE) - { -diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c -index 86655f05dc..53284f6df5 100644 ---- a/src/backend/optimizer/util/plancat.c -+++ b/src/backend/optimizer/util/plancat.c -@@ -472,7 +472,8 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, - { - if (info->indpred == NIL) - { -- info->pages = RelationGetNumberOfBlocks(indexRelation); -+ // info->pages = RelationGetNumberOfBlocks(indexRelation); -+ info->pages = indexRelation->rd_rel->relpages; - info->tuples = rel->tuples; - } - else -@@ -1079,7 +1080,8 @@ estimate_rel_size(Relation rel, int32 *attr_widths, - */ - - /* it has storage, ok to call the smgr */ -- curpages = RelationGetNumberOfBlocks(rel); -+ // curpages = RelationGetNumberOfBlocks(rel); -+ curpages = rel->rd_rel->relpages; - - /* report estimated # pages */ - *pages = curpages; - diff --git a/pg14.Dockerfile b/pg14.Dockerfile deleted file mode 100644 index 8a184b8..0000000 --- a/pg14.Dockerfile +++ /dev/null @@ -1,153 +0,0 @@ -ARG ALPINE_VERSION=3.22 -ARG DENO_VERSION=2.4.5 -FROM alpine:${ALPINE_VERSION} AS pg-builder - -ARG PG_BRANCH=REL_14_STABLE - -# Install build dependencies -RUN apk add --no-cache \ - build-base \ - git \ - openssh-client \ - readline-dev \ - zlib-dev \ - flex \ - bison \ - perl \ - bash \ - cmake \ - openssl-dev \ - krb5-dev \ - # needed for the uuid-ossp extension - ossp-uuid-dev \ - # required for timescaledb - linux-headers - -RUN git clone --depth 1 --branch ${PG_BRANCH} https://github.com/postgres/postgres.git /postgres - -WORKDIR /postgres - -# apply our custom patches on top of postgres -COPY patches/pg14/*.patch /tmp -RUN git apply /tmp/*.patch - -# Build PostgreSQL with debug flags -RUN ./configure \ - --with-uuid=ossp \ - --without-icu \ - 
--with-openssl \ - --prefix=/usr/local/pgsql - -RUN make -j$(nproc) all -RUN make install - -RUN cd contrib && make -j$(nproc) && make install - -# Clone and build TimescaleDB -ARG TIMESCALEDB_VERSION=2.16.1 -WORKDIR /timescaledb -RUN git clone --depth 1 --branch ${TIMESCALEDB_VERSION} https://github.com/timescale/timescaledb.git . - -# Bootstrap and build TimescaleDB -RUN ./bootstrap -DREGRESS_CHECKS=OFF -DPG_CONFIG=/usr/local/pgsql/bin/pg_config -RUN cd build && make -j$(nproc) -RUN cd build && make install - -# Adapted from https://github.com/dojyorin/deno_docker_image/blob/master/src/alpine.dockerfile -FROM denoland/deno:alpine-${DENO_VERSION} AS deno - -RUN apk add --no-cache \ - perl \ - curl \ - make \ - postgresql-client \ - git - -# RUN apk add --no-cache \ -# postgresql14-client --repository=http://dl-cdn.alpinelinux.org/alpine/v3.20/community - -# Download, build, and install pgBadger -ARG PGBADGER_VERSION=13.2 -WORKDIR /tmp - -RUN curl -L https://github.com/darold/pgbadger/archive/v${PGBADGER_VERSION}.tar.gz | tar -xzf - && \ - cd pgbadger-${PGBADGER_VERSION} && \ - perl Makefile.PL && \ - make && \ - make install && \ - rm -rf /tmp/pgbadger* - -FROM gcr.io/distroless/cc-debian12:latest AS cc - -FROM alpine:${ALPINE_VERSION} AS sym - -COPY --from=cc --chmod=755 --chown=root:root /lib/*-linux-gnu/ld-linux-* /usr/local/lib/ -RUN mkdir -p -m 755 /tmp/lib -RUN ln -s /usr/local/lib/ld-linux-* /tmp/lib/ - -FROM denoland/deno:alpine-${DENO_VERSION} AS build - -COPY deno.json deno.lock* ./ -RUN deno install --frozen-lockfile - -COPY . . 
- -RUN deno compile \ - --allow-run \ - --allow-read \ - --allow-write \ - --allow-env \ - --allow-net \ - --allow-sys \ - -o /app/analyzer \ - src/main.ts - -FROM alpine:${ALPINE_VERSION} -ENV LD_LIBRARY_PATH="/usr/local/lib" - -RUN apk add -uU --no-cache \ - readline \ - zlib \ - bash \ - su-exec \ - openssl \ - ossp-uuid-dev \ - postgresql-client \ - krb5 - -COPY --from=build --chmod=755 --chown=root:root /app/analyzer /app/analyzer -COPY --from=cc --chmod=755 --chown=root:root /lib/*-linux-gnu/* /usr/local/lib/ -COPY --from=sym --chmod=755 --chown=root:root /tmp/lib /lib -COPY --from=sym --chmod=755 --chown=root:root /tmp/lib /lib64 - -COPY --from=pg-builder /usr/local/pgsql /usr/local/pgsql - -RUN mkdir -p /var/lib/postgresql/data \ - && chown -R postgres:postgres /var/lib/postgresql \ - && chown -R postgres:postgres /usr/local/pgsql \ - && chmod 1777 /tmp - -WORKDIR /app -# making sure we use the binaries from the installed postgresql17 client -ENV PG_DUMP_BINARY=/usr/bin/pg_dump -ENV PG_RESTORE_BINARY=/usr/bin/pg_restore -ENV PATH="/usr/local/pgsql/bin:$PATH" -ENV PGDATA=/var/lib/postgresql/data - -RUN sed -i 's|nobody:/|nobody:/home|' /etc/passwd && chown nobody:nobody /home - -ENV POSTGRES_URL=postgresql://postgres@localhost/postgres?host=/tmp - -RUN su-exec postgres initdb -D $PGDATA || true && \ - echo "shared_preload_libraries = 'timescaledb,pg_stat_statements'" >> $PGDATA/postgresql.conf && \ - echo "listen_addresses = ''" >> $PGDATA/postgresql.conf && \ - echo "unix_socket_directories = '/tmp'" >> $PGDATA/postgresql.conf - -USER postgres - -EXPOSE 2345 - -CMD ["/bin/bash", "-c", "\ - pg_ctl -D $PGDATA -l $PGDATA/logfile start || (cat $PGDATA/logfile && exit 1) && \ - until pg_isready -h /tmp; do sleep 0.5; done && \ - /app/analyzer"]