From de8d65509d0361a8648d31a11bef8090f2d2ca87 Mon Sep 17 00:00:00 2001 From: Alexey Kondratov Date: Mon, 12 Apr 2021 14:50:13 +0300 Subject: [PATCH 01/65] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 6dd309a..57a7c5c 100644 --- a/README.md +++ b/README.md @@ -143,6 +143,8 @@ export PG_BRANCH=REL_12_STABLE export TEST_CASE=all export MODE=paranoia +./make_dockerfile.sh + docker-compose build docker-compose run tests ``` From dd6fdc06513698567e374c02bd2e631d2519807f Mon Sep 17 00:00:00 2001 From: Alexey Kondratov Date: Thu, 22 Apr 2021 23:08:30 +0300 Subject: [PATCH 02/65] Resolve issue#5: store update_lsn of each block into two independent slots. Previously we thought that 1 MB can track changes page-to-page in the 1 GB of data files. However, recently it became evident that our ptrack map or basic hash table behaves more like a Bloom filter with a number of hash functions k = 1. See more here: https://en.wikipedia.org/wiki/Bloom_filter#Probability_of_false_positives. Such filter has naturally more collisions. By storing update_lsn of each block in the additional slot we perform as a Bloom filter with k = 2, which significatly reduces collision rate. --- engine.c | 95 +++++++++++++++++++++++++++++++++----------------------- engine.h | 8 ++--- ptrack.c | 31 +++++++++++++----- ptrack.h | 2 +- 4 files changed, 85 insertions(+), 51 deletions(-) diff --git a/engine.c b/engine.c index 35cc14c..c8085a0 100644 --- a/engine.c +++ b/engine.c @@ -156,6 +156,8 @@ ptrackMapInit(void) sprintf(ptrack_path, "%s/%s", DataDir, PTRACK_PATH); sprintf(ptrack_mmap_path, "%s/%s", DataDir, PTRACK_MMAP_PATH); +ptrack_map_reinit: + /* Remove old PTRACK_MMAP_PATH file, if exists */ if (ptrack_file_exists(ptrack_mmap_path)) durable_unlink(ptrack_mmap_path, LOG); @@ -175,18 +177,15 @@ ptrackMapInit(void) if (stat(ptrack_path, &stat_buf) == 0) { copy_file(ptrack_path, ptrack_mmap_path); - is_new_map = false; /* flag to check checksum */ + is_new_map = false; /* flag to check map file format and checksum */ ptrack_fd = BasicOpenFile(ptrack_mmap_path, O_RDWR | PG_BINARY); - if (ptrack_fd < 0) - elog(ERROR, "ptrack init: failed to open map file \"%s\": %m", ptrack_mmap_path); } else - { /* Create new file for PTRACK_MMAP_PATH */ ptrack_fd = BasicOpenFile(ptrack_mmap_path, O_RDWR | O_CREAT | PG_BINARY); - if (ptrack_fd < 0) - elog(ERROR, "ptrack init: failed to open map file \"%s\": %m", ptrack_mmap_path); - } + + if (ptrack_fd < 0) + elog(ERROR, "ptrack init: failed to open map file \"%s\": %m", ptrack_mmap_path); #ifdef WIN32 { @@ -227,7 +226,19 @@ ptrackMapInit(void) elog(ERROR, "ptrack init: wrong map format of file \"%s\"", ptrack_path); /* Check ptrack version inside old ptrack map */ - /* No-op for now, but may be used for future compatibility checks */ + if (ptrack_map->version_num != PTRACK_VERSION_NUM) + { + ereport(WARNING, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("ptrack init: map format version %d in the file \"%s\" is incompatible with loaded version %d", + ptrack_map->version_num, ptrack_path, PTRACK_VERSION_NUM), + errdetail("Deleting file \"%s\" and reinitializing ptrack map.", ptrack_path))); + + /* Delete and try again */ + durable_unlink(ptrack_path, LOG); + is_new_map = true; + goto ptrack_map_reinit; + } /* Check CRC */ INIT_CRC32C(crc); @@ -641,48 +652,56 @@ void ptrack_mark_block(RelFileNodeBackend smgr_rnode, ForkNumber forknum, BlockNumber blocknum) { + PtBlockId bid; size_t hash; + size_t slot1; + size_t slot2; XLogRecPtr 
new_lsn; - PtBlockId bid; /* * We use pg_atomic_uint64 here only for alignment purposes, because - * pg_atomic_uint64 is forcely aligned on 8 bytes during the MSVC build. + * pg_atomic_uint64 is forcedly aligned on 8 bytes during the MSVC build. */ pg_atomic_uint64 old_lsn; pg_atomic_uint64 old_init_lsn; - if (ptrack_map_size != 0 && (ptrack_map != NULL) && - smgr_rnode.backend == InvalidBackendId) /* do not track temporary - * relations */ - { - bid.relnode = smgr_rnode.node; - bid.forknum = forknum; - bid.blocknum = blocknum; - hash = BID_HASH_FUNC(bid); - - if (RecoveryInProgress()) - new_lsn = GetXLogReplayRecPtr(NULL); - else - new_lsn = GetXLogInsertRecPtr(); + if (ptrack_map_size == 0 + || ptrack_map == NULL + || smgr_rnode.backend != InvalidBackendId) /* do not track temporary + * relations */ + return; - old_lsn.value = pg_atomic_read_u64(&ptrack_map->entries[hash]); + bid.relnode = smgr_rnode.node; + bid.forknum = forknum; + bid.blocknum = blocknum; - /* Atomically assign new init LSN value */ - old_init_lsn.value = pg_atomic_read_u64(&ptrack_map->init_lsn); + hash = BID_HASH_FUNC(bid); + slot1 = hash % PtrackContentNblocks; + slot2 = ((hash << 32) | (hash >> 32)) % PtrackContentNblocks; - if (old_init_lsn.value == InvalidXLogRecPtr) - { - elog(DEBUG1, "ptrack_mark_block: init_lsn " UINT64_FORMAT " <- " UINT64_FORMAT, old_init_lsn.value, new_lsn); - - while (old_init_lsn.value < new_lsn && - !pg_atomic_compare_exchange_u64(&ptrack_map->init_lsn, (uint64 *) &old_init_lsn.value, new_lsn)); - } + if (RecoveryInProgress()) + new_lsn = GetXLogReplayRecPtr(NULL); + else + new_lsn = GetXLogInsertRecPtr(); - elog(DEBUG3, "ptrack_mark_block: map[%zu]=" UINT64_FORMAT " <- " UINT64_FORMAT, hash, old_lsn.value, new_lsn); + /* Atomically assign new init LSN value */ + old_init_lsn.value = pg_atomic_read_u64(&ptrack_map->init_lsn); + if (old_init_lsn.value == InvalidXLogRecPtr) + { + elog(DEBUG1, "ptrack_mark_block: init_lsn " UINT64_FORMAT " <- " UINT64_FORMAT, old_init_lsn.value, new_lsn); - /* Atomically assign new LSN value */ - while (old_lsn.value < new_lsn && - !pg_atomic_compare_exchange_u64(&ptrack_map->entries[hash], (uint64 *) &old_lsn.value, new_lsn)); - elog(DEBUG3, "ptrack_mark_block: map[%zu]=" UINT64_FORMAT, hash, pg_atomic_read_u64(&ptrack_map->entries[hash])); + while (old_init_lsn.value < new_lsn && + !pg_atomic_compare_exchange_u64(&ptrack_map->init_lsn, (uint64 *) &old_init_lsn.value, new_lsn)); } + + /* Atomically assign new LSN value to the first slot */ + old_lsn.value = pg_atomic_read_u64(&ptrack_map->entries[slot1]); + elog(DEBUG3, "ptrack_mark_block: map[%zu]=" UINT64_FORMAT " <- " UINT64_FORMAT, slot1, old_lsn.value, new_lsn); + while (old_lsn.value < new_lsn && + !pg_atomic_compare_exchange_u64(&ptrack_map->entries[slot1], (uint64 *) &old_lsn.value, new_lsn)); + elog(DEBUG3, "ptrack_mark_block: map[%zu]=" UINT64_FORMAT, hash, pg_atomic_read_u64(&ptrack_map->entries[slot1])); + + /* And to the second */ + old_lsn.value = pg_atomic_read_u64(&ptrack_map->entries[slot2]); + while (old_lsn.value < new_lsn && + !pg_atomic_compare_exchange_u64(&ptrack_map->entries[slot2], (uint64 *) &old_lsn.value, new_lsn)); } diff --git a/engine.h b/engine.h index 34cf15f..e46f803 100644 --- a/engine.h +++ b/engine.h @@ -50,7 +50,7 @@ typedef struct PtrackMapHdr { /* * Three magic bytes (+ \0) to be sure, that we are reading ptrack.map - * with a right PtrackMapHdr strucutre. + * with a right PtrackMapHdr structure. 
*/ char magic[PTRACK_MAGIC_SIZE]; @@ -72,7 +72,6 @@ typedef struct PtrackMapHdr typedef PtrackMapHdr * PtrackMap; -/* TODO: check MAXALIGN usage below */ /* Number of elements in ptrack map (LSN array) */ #define PtrackContentNblocks \ ((ptrack_map_size - offsetof(PtrackMapHdr, entries) - sizeof(pg_crc32c)) / sizeof(pg_atomic_uint64)) @@ -84,9 +83,10 @@ typedef PtrackMapHdr * PtrackMap; /* CRC32 value offset in order to directly access it in the mmap'ed memory chunk */ #define PtrackCrcOffset (PtrackActualSize - sizeof(pg_crc32c)) -/* Map block address 'bid' to map slot */ +/* Block address 'bid' to hash. To get slot position in map should be divided + * with '% PtrackContentNblocks' */ #define BID_HASH_FUNC(bid) \ - (size_t)(DatumGetUInt64(hash_any_extended((unsigned char *)&bid, sizeof(bid), 0)) % PtrackContentNblocks) + (size_t)(DatumGetUInt64(hash_any_extended((unsigned char *)&bid, sizeof(bid), 0))) /* * Per process pointer to shared ptrack_map diff --git a/ptrack.c b/ptrack.c index d897ecf..4992726 100644 --- a/ptrack.c +++ b/ptrack.c @@ -420,10 +420,9 @@ PG_FUNCTION_INFO_V1(ptrack_get_pagemapset); Datum ptrack_get_pagemapset(PG_FUNCTION_ARGS) { + PtScanCtx *ctx; FuncCallContext *funcctx; - PtScanCtx *ctx; MemoryContext oldcontext; - XLogRecPtr update_lsn; datapagemap_t pagemap; char gather_path[MAXPGPATH]; @@ -486,6 +485,12 @@ ptrack_get_pagemapset(PG_FUNCTION_ARGS) while (true) { + size_t hash; + size_t slot1; + size_t slot2; + XLogRecPtr update_lsn1; + XLogRecPtr update_lsn2; + /* Stop traversal if there are no more segments */ if (ctx->bid.blocknum > ctx->relsize) { @@ -525,15 +530,25 @@ ptrack_get_pagemapset(PG_FUNCTION_ARGS) } } - update_lsn = pg_atomic_read_u64(&ptrack_map->entries[BID_HASH_FUNC(ctx->bid)]); + hash = BID_HASH_FUNC(ctx->bid); + slot1 = hash % PtrackContentNblocks; + slot2 = ((hash << 32) | (hash >> 32)) % PtrackContentNblocks; + + update_lsn1 = pg_atomic_read_u64(&ptrack_map->entries[slot1]); + update_lsn2 = pg_atomic_read_u64(&ptrack_map->entries[slot2]); + + if (update_lsn1 != InvalidXLogRecPtr) + elog(DEBUG3, "ptrack: update_lsn1 %X/%X of blckno %u of file %s", + (uint32) (update_lsn1 >> 32), (uint32) update_lsn1, + ctx->bid.blocknum, ctx->relpath); - if (update_lsn != InvalidXLogRecPtr) - elog(DEBUG3, "ptrack: update_lsn %X/%X of blckno %u of file %s", - (uint32) (update_lsn >> 32), (uint32) update_lsn, + if (update_lsn2 != InvalidXLogRecPtr) + elog(DEBUG3, "ptrack: update_lsn2 %X/%X of blckno %u of file %s", + (uint32) (update_lsn1 >> 32), (uint32) update_lsn2, ctx->bid.blocknum, ctx->relpath); - /* Block has been changed since specified LSN. Mark it in the bitmap */ - if (update_lsn >= ctx->lsn) + /* Block has been changed since specified LSN. Mark it in the bitmap */ + if (update_lsn1 >= ctx->lsn && update_lsn2 >= ctx->lsn) datapagemap_add(&pagemap, ctx->bid.blocknum % ((BlockNumber) RELSEG_SIZE)); ctx->bid.blocknum += 1; diff --git a/ptrack.h b/ptrack.h index 7e6b6e5..4375963 100644 --- a/ptrack.h +++ b/ptrack.h @@ -24,7 +24,7 @@ /* Ptrack version as a string */ #define PTRACK_VERSION "2.1" /* Ptrack version as a number */ -#define PTRACK_VERSION_NUM 210 +#define PTRACK_VERSION_NUM 220 /* * Structure identifying block on the disk. From 829f96cf53131a0d57a31ff929ea8fd46fbeb09b Mon Sep 17 00:00:00 2001 From: Alexey Kondratov Date: Fri, 23 Apr 2021 00:12:27 +0300 Subject: [PATCH 03/65] Resolve issue#1: add ptrack_get_change_stat(). 
Also bump extversion to 2.2 --- .gitignore | 2 -- Makefile | 18 +++--------------- ptrack--2.1--2.2.sql | 29 +++++++++++++++++++++++++++++ ptrack.sql => ptrack--2.1.sql | 2 ++ ptrack.control | 2 +- ptrack.h | 2 +- t/001_basic.pl | 6 +++++- 7 files changed, 41 insertions(+), 20 deletions(-) create mode 100644 ptrack--2.1--2.2.sql rename ptrack.sql => ptrack--2.1.sql (94%) diff --git a/.gitignore b/.gitignore index b46b4ef..4990aa6 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,4 @@ .deps *.so *.o -ptrack--2.0.sql Dockerfile - diff --git a/Makefile b/Makefile index 8544f90..ba9ce1d 100644 --- a/Makefile +++ b/Makefile @@ -2,13 +2,11 @@ MODULE_big = ptrack OBJS = ptrack.o datapagemap.o engine.o $(WIN32RES) -EXTENSION = ptrack -EXTVERSION = 2.1 -DATA = ptrack.sql ptrack--2.0--2.1.sql -DATA_built = $(EXTENSION)--$(EXTVERSION).sql PGFILEDESC = "ptrack - block-level incremental backup engine" -EXTRA_CLEAN = $(EXTENSION)--$(EXTVERSION).sql +EXTENSION = ptrack +EXTVERSION = 2.2 +DATA = ptrack--2.1.sql ptrack--2.0--2.1.sql ptrack--2.1--2.2.sql TAP_TESTS = 1 @@ -22,13 +20,3 @@ top_builddir = ../.. include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif - -$(EXTENSION)--$(EXTVERSION).sql: ptrack.sql - cat $^ > $@ - -# temp-install: EXTRA_INSTALL=contrib/ptrack - -# check-tap: temp-install -# $(prove_check) - -# check: check-tap diff --git a/ptrack--2.1--2.2.sql b/ptrack--2.1--2.2.sql new file mode 100644 index 0000000..d666fc3 --- /dev/null +++ b/ptrack--2.1--2.2.sql @@ -0,0 +1,29 @@ +/* ptrack/ptrack--2.1--2.2.sql */ + +-- Complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION ptrack UPDATE;" to load this file.\ quit + +CREATE FUNCTION ptrack_get_change_stat(start_lsn pg_lsn) + RETURNS TABLE ( + files bigint, + pages bigint, + "size, MB" numeric + ) AS +$func$ +DECLARE +block_size bigint; +BEGIN + block_size := (SELECT setting FROM pg_settings WHERE name = 'block_size'); + + RETURN QUERY + SELECT changed_files, + changed_pages, + block_size*changed_pages/(1024.0*1024) + FROM + (SELECT count(path) AS changed_files, + sum( + length(replace(right((pagemap)::text, -1)::varbit::text, '0', '')) + ) AS changed_pages + FROM ptrack_get_pagemapset(start_lsn)) s; +END +$func$ LANGUAGE plpgsql; diff --git a/ptrack.sql b/ptrack--2.1.sql similarity index 94% rename from ptrack.sql rename to ptrack--2.1.sql index 80ae927..c963964 100644 --- a/ptrack.sql +++ b/ptrack--2.1.sql @@ -1,3 +1,5 @@ +/* ptrack/ptrack--2.1.sql */ + -- Complain if script is sourced in psql, rather than via CREATE EXTENSION \echo Use "CREATE EXTENSION ptrack" to load this file. 
\quit diff --git a/ptrack.control b/ptrack.control index d2d8792..ec0af9d 100644 --- a/ptrack.control +++ b/ptrack.control @@ -1,5 +1,5 @@ # ptrack extension comment = 'block-level incremental backup engine' -default_version = '2.1' +default_version = '2.2' module_pathname = '$libdir/ptrack' relocatable = true diff --git a/ptrack.h b/ptrack.h index 4375963..d205115 100644 --- a/ptrack.h +++ b/ptrack.h @@ -22,7 +22,7 @@ #include "utils/relcache.h" /* Ptrack version as a string */ -#define PTRACK_VERSION "2.1" +#define PTRACK_VERSION "2.2" /* Ptrack version as a number */ #define PTRACK_VERSION_NUM 220 diff --git a/t/001_basic.pl b/t/001_basic.pl index 1abc788..bac81f2 100644 --- a/t/001_basic.pl +++ b/t/001_basic.pl @@ -10,7 +10,7 @@ use TestLib; use Test::More; -plan tests => 23; +plan tests => 24; my $node; my $res; @@ -115,6 +115,10 @@ qr/$rel_oid/, 'ptrack pagemapset should contain new relation oid'); +# Check change stats +$res_stdout = $node->safe_psql("postgres", "SELECT pages FROM ptrack_get_change_stat('$flush_lsn')"); +is($res_stdout > 0, 1, 'should be able to get aggregated stats of changes'); + # We should be able to change ptrack map size (but loose all changes) $node->append_conf( 'postgresql.conf', q{ From 3026be92c398eeeb7bc8edf65ca0deef25c82c17 Mon Sep 17 00:00:00 2001 From: Alexey Kondratov Date: Wed, 12 May 2021 20:02:26 +0300 Subject: [PATCH 04/65] Add new function ptrack_get_change_file_stat(start_lsn pg_lsn) --- README.md | 8 +++++++- ptrack--2.1--2.2.sql | 25 +++++++++++++++++++++++++ t/001_basic.pl | 5 ++++- 3 files changed, 36 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 57a7c5c..06e4b18 100644 --- a/README.md +++ b/README.md @@ -65,7 +65,9 @@ To disable `ptrack` and clean up all remaining service files set `ptrack.map_siz * ptrack_version() — returns ptrack version string. * ptrack_init_lsn() — returns LSN of the last ptrack map initialization. - * ptrack_get_pagemapset('LSN') — returns a set of changed data files with bitmaps of changed blocks since specified LSN. + * ptrack_get_pagemapset(start_lsn pg_lsn) — returns a set of changed data files with bitmaps of changed blocks since specified `start_lsn`. + * ptrack_get_change_stat(start_lsn pg_lsn) — returns statistic of changes (number of files, pages and size in MB) since specified `start_lsn`. + * ptrack_get_change_file_stat(start_lsn pg_lsn) — returns per file statistic of changes (number of pages and size in MB) since specified `start_lsn`. Usage example: @@ -102,6 +104,10 @@ Usually, you have to only install new version of `ptrack` and do `ALTER EXTENSIO * Do `ALTER EXTENSION 'ptrack' UPDATE;`. * Restart your server. +#### Upgrading from 2.1.* to 2.2.*: + +Since version 2.2 we use a different algorithm for tracking changed pages. Thus, data recorded in the `ptrack.map` using pre 2.2 versions of `ptrack` is incompatible with newer versions. After extension upgrade and server restart old `ptrack.map` will be discarded with `WARNING` and initialized from the scratch. + ## Limitations 1. You can only use `ptrack` safely with `wal_level >= 'replica'`. Otherwise, you can lose tracking of some changes if crash-recovery occurs, since [certain commands are designed not to write WAL at all if wal_level is minimal](https://www.postgresql.org/docs/12/populate.html#POPULATE-PITR), but we only durably flush `ptrack` map at checkpoint time. 
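A quick way to sanity-check these prerequisites on a running server is plain SQL (a minimal sketch, not part of the patch series; it assumes only the settings and functions documented above):

```sql
-- Verify the prerequisites described in the limitation above.
SHOW wal_level;            -- must be 'replica' or higher for safe ptrack usage
SHOW ptrack.map_size;      -- in MB; -1 means ptrack is turned off, 0 also cleans up service files
SELECT ptrack_version();   -- extension version string, e.g. '2.2'
SELECT ptrack_init_lsn();  -- LSN of the last ptrack map initialization
```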
diff --git a/ptrack--2.1--2.2.sql b/ptrack--2.1--2.2.sql index d666fc3..2a0d97f 100644 --- a/ptrack--2.1--2.2.sql +++ b/ptrack--2.1--2.2.sql @@ -27,3 +27,28 @@ BEGIN FROM ptrack_get_pagemapset(start_lsn)) s; END $func$ LANGUAGE plpgsql; + +CREATE FUNCTION ptrack_get_change_file_stat(start_lsn pg_lsn) + RETURNS TABLE ( + file_path text, + pages int, + "size, MB" numeric + ) AS +$func$ +DECLARE +block_size bigint; +BEGIN + block_size := (SELECT setting FROM pg_settings WHERE name = 'block_size'); + + RETURN QUERY + SELECT s.path, + changed_pages, + block_size*changed_pages/(1024.0*1024) + FROM + (SELECT path, + length(replace(right((pagemap)::text, -1)::varbit::text, '0', '')) + AS changed_pages + FROM ptrack_get_pagemapset(start_lsn)) s + ORDER BY (changed_pages, s.path) DESC; +END +$func$ LANGUAGE plpgsql; diff --git a/t/001_basic.pl b/t/001_basic.pl index bac81f2..37285d9 100644 --- a/t/001_basic.pl +++ b/t/001_basic.pl @@ -10,7 +10,7 @@ use TestLib; use Test::More; -plan tests => 24; +plan tests => 25; my $node; my $res; @@ -119,6 +119,9 @@ $res_stdout = $node->safe_psql("postgres", "SELECT pages FROM ptrack_get_change_stat('$flush_lsn')"); is($res_stdout > 0, 1, 'should be able to get aggregated stats of changes'); +$res_stdout = $node->safe_psql("postgres", "SELECT count(*) FROM ptrack_get_change_file_stat('$flush_lsn')"); +is($res_stdout > 0, 1, 'should be able to get per file stats of changes'); + # We should be able to change ptrack map size (but loose all changes) $node->append_conf( 'postgresql.conf', q{ From cf8e30962cf87afd9388f31428c895dd5b15278b Mon Sep 17 00:00:00 2001 From: Alexey Kondratov Date: Wed, 12 May 2021 20:33:42 +0300 Subject: [PATCH 05/65] Slightly optimize ptrack_get_pagemapset Probe the second slot only if the first one succeded. --- ptrack.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/ptrack.c b/ptrack.c index 4992726..f2701af 100644 --- a/ptrack.c +++ b/ptrack.c @@ -532,24 +532,29 @@ ptrack_get_pagemapset(PG_FUNCTION_ARGS) hash = BID_HASH_FUNC(ctx->bid); slot1 = hash % PtrackContentNblocks; - slot2 = ((hash << 32) | (hash >> 32)) % PtrackContentNblocks; update_lsn1 = pg_atomic_read_u64(&ptrack_map->entries[slot1]); - update_lsn2 = pg_atomic_read_u64(&ptrack_map->entries[slot2]); if (update_lsn1 != InvalidXLogRecPtr) elog(DEBUG3, "ptrack: update_lsn1 %X/%X of blckno %u of file %s", (uint32) (update_lsn1 >> 32), (uint32) update_lsn1, ctx->bid.blocknum, ctx->relpath); - if (update_lsn2 != InvalidXLogRecPtr) - elog(DEBUG3, "ptrack: update_lsn2 %X/%X of blckno %u of file %s", - (uint32) (update_lsn1 >> 32), (uint32) update_lsn2, - ctx->bid.blocknum, ctx->relpath); + /* Only probe the second slot if the first one is marked */ + if (update_lsn1 >= ctx->lsn) + { + slot2 = ((hash << 32) | (hash >> 32)) % PtrackContentNblocks; + update_lsn2 = pg_atomic_read_u64(&ptrack_map->entries[slot2]); - /* Block has been changed since specified LSN. Mark it in the bitmap */ - if (update_lsn1 >= ctx->lsn && update_lsn2 >= ctx->lsn) - datapagemap_add(&pagemap, ctx->bid.blocknum % ((BlockNumber) RELSEG_SIZE)); + if (update_lsn2 != InvalidXLogRecPtr) + elog(DEBUG3, "ptrack: update_lsn2 %X/%X of blckno %u of file %s", + (uint32) (update_lsn1 >> 32), (uint32) update_lsn2, + ctx->bid.blocknum, ctx->relpath); + + /* Block has been changed since specified LSN. 
Mark it in the bitmap */ + if (update_lsn2 >= ctx->lsn) + datapagemap_add(&pagemap, ctx->bid.blocknum % ((BlockNumber) RELSEG_SIZE)); + } ctx->bid.blocknum += 1; } From fbfba8c73bce64e176dadb3b66cb0576a20ebe59 Mon Sep 17 00:00:00 2001 From: Alexey Kondratov Date: Thu, 13 May 2021 18:56:41 +0300 Subject: [PATCH 06/65] Do a proper cleanup when ptrack.map version is incompatible --- engine.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/engine.c b/engine.c index c8085a0..86a1b60 100644 --- a/engine.c +++ b/engine.c @@ -234,8 +234,9 @@ ptrackMapInit(void) ptrack_map->version_num, ptrack_path, PTRACK_VERSION_NUM), errdetail("Deleting file \"%s\" and reinitializing ptrack map.", ptrack_path))); - /* Delete and try again */ - durable_unlink(ptrack_path, LOG); + /* Clean up everything and try again */ + ptrackCleanFilesAndMap(); + is_new_map = true; goto ptrack_map_reinit; } From ab17447196d46eaf5eef4b2edf07a155a9a8b11a Mon Sep 17 00:00:00 2001 From: Alexey Kondratov Date: Thu, 13 May 2021 20:19:34 +0300 Subject: [PATCH 07/65] Correct some typos --- engine.c | 6 +++--- ptrack.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/engine.c b/engine.c index 86a1b60..89217a9 100644 --- a/engine.c +++ b/engine.c @@ -390,7 +390,7 @@ ptrackCheckpoint(void) /* * We are writing ptrack map values to file, but we want to simply map it * into the memory with mmap after a crash/restart. That way, we have to - * write values taking into account all paddings/allignments. + * write values taking into account all paddings/alignments. * * Write both magic and varsion_num at once. */ @@ -447,7 +447,7 @@ ptrackCheckpoint(void) * going to overflow. */ /* - * We should not have any allignment issues here, since sizeof() + * We should not have any alignment issues here, since sizeof() * takes into account all paddings for us. */ ptrack_write_chunk(ptrack_tmp_fd, &crc, (char *) buf, writesz); @@ -458,7 +458,7 @@ ptrackCheckpoint(void) } } - /* Write if anythig left */ + /* Write if anything left */ if ((i + 1) % PTRACK_BUF_SIZE != 0) { size_t writesz = sizeof(pg_atomic_uint64) * j; diff --git a/ptrack.c b/ptrack.c index f2701af..1928499 100644 --- a/ptrack.c +++ b/ptrack.c @@ -137,7 +137,7 @@ _PG_fini(void) /* * Ptrack follow up for copydir() routine. It parses database OID - * and tablespace OID from path string. We do not need to recoursively + * and tablespace OID from path string. We do not need to recursively * walk subdirs here, copydir() will do it for us if needed. */ static void From 9c132a3a2f319ed712d11e9158e6dbda27f085b3 Mon Sep 17 00:00:00 2001 From: Alexey Kondratov Date: Fri, 14 May 2021 00:30:15 +0300 Subject: [PATCH 08/65] Refactor stats API and remove ptrack_get_change_file_stat --- README.md | 1 - ptrack--2.1--2.2.sql | 41 +++++++++++------------------------------ ptrack.c | 19 +++++++++++++------ t/001_basic.pl | 5 +---- 4 files changed, 25 insertions(+), 41 deletions(-) diff --git a/README.md b/README.md index 06e4b18..39ea00b 100644 --- a/README.md +++ b/README.md @@ -67,7 +67,6 @@ To disable `ptrack` and clean up all remaining service files set `ptrack.map_siz * ptrack_init_lsn() — returns LSN of the last ptrack map initialization. * ptrack_get_pagemapset(start_lsn pg_lsn) — returns a set of changed data files with bitmaps of changed blocks since specified `start_lsn`. * ptrack_get_change_stat(start_lsn pg_lsn) — returns statistic of changes (number of files, pages and size in MB) since specified `start_lsn`. 
- * ptrack_get_change_file_stat(start_lsn pg_lsn) — returns per file statistic of changes (number of pages and size in MB) since specified `start_lsn`. Usage example: diff --git a/ptrack--2.1--2.2.sql b/ptrack--2.1--2.2.sql index 2a0d97f..b09c15e 100644 --- a/ptrack--2.1--2.2.sql +++ b/ptrack--2.1--2.2.sql @@ -3,10 +3,18 @@ -- Complain if script is sourced in psql, rather than via ALTER EXTENSION \echo Use "ALTER EXTENSION ptrack UPDATE;" to load this file.\ quit +DROP FUNCTION ptrack_get_pagemapset(start_lsn pg_lsn); +CREATE FUNCTION ptrack_get_pagemapset(start_lsn pg_lsn) +RETURNS TABLE (path text, + pagecount bigint, + pagemap bytea) +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT VOLATILE; + CREATE FUNCTION ptrack_get_change_stat(start_lsn pg_lsn) RETURNS TABLE ( files bigint, - pages bigint, + pages numeric, "size, MB" numeric ) AS $func$ @@ -18,37 +26,10 @@ BEGIN RETURN QUERY SELECT changed_files, changed_pages, - block_size*changed_pages/(1024.0*1024) + block_size * changed_pages / (1024.0 * 1024) FROM (SELECT count(path) AS changed_files, - sum( - length(replace(right((pagemap)::text, -1)::varbit::text, '0', '')) - ) AS changed_pages + sum(pagecount) AS changed_pages FROM ptrack_get_pagemapset(start_lsn)) s; END $func$ LANGUAGE plpgsql; - -CREATE FUNCTION ptrack_get_change_file_stat(start_lsn pg_lsn) - RETURNS TABLE ( - file_path text, - pages int, - "size, MB" numeric - ) AS -$func$ -DECLARE -block_size bigint; -BEGIN - block_size := (SELECT setting FROM pg_settings WHERE name = 'block_size'); - - RETURN QUERY - SELECT s.path, - changed_pages, - block_size*changed_pages/(1024.0*1024) - FROM - (SELECT path, - length(replace(right((pagemap)::text, -1)::varbit::text, '0', '')) - AS changed_pages - FROM ptrack_get_pagemapset(start_lsn)) s - ORDER BY (changed_pages, s.path) DESC; -END -$func$ LANGUAGE plpgsql; diff --git a/ptrack.c b/ptrack.c index 1928499..40630e7 100644 --- a/ptrack.c +++ b/ptrack.c @@ -424,6 +424,7 @@ ptrack_get_pagemapset(PG_FUNCTION_ARGS) FuncCallContext *funcctx; MemoryContext oldcontext; datapagemap_t pagemap; + int64 pagecount = 0; char gather_path[MAXPGPATH]; /* Exit immediately if there is no map */ @@ -444,12 +445,13 @@ ptrack_get_pagemapset(PG_FUNCTION_ARGS) /* Make tuple descriptor */ #if PG_VERSION_NUM >= 120000 - tupdesc = CreateTemplateTupleDesc(2); + tupdesc = CreateTemplateTupleDesc(3); #else - tupdesc = CreateTemplateTupleDesc(2, false); + tupdesc = CreateTemplateTupleDesc(3, false); #endif TupleDescInitEntry(tupdesc, (AttrNumber) 1, "path", TEXTOID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 2, "pagemap", BYTEAOID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber) 2, "pagecount", INT8OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber) 3, "pagemap", BYTEAOID, -1, 0); funcctx->tuple_desc = BlessTupleDesc(tupdesc); funcctx->user_fctx = ctx; @@ -497,8 +499,8 @@ ptrack_get_pagemapset(PG_FUNCTION_ARGS) /* We completed a segment and there is a bitmap to return */ if (pagemap.bitmap != NULL) { - Datum values[2]; - bool nulls[2] = {false}; + Datum values[3]; + bool nulls[3] = {false}; char pathname[MAXPGPATH]; bytea *result = NULL; Size result_sz = pagemap.bitmapsize + VARHDRSZ; @@ -512,11 +514,13 @@ ptrack_get_pagemapset(PG_FUNCTION_ARGS) strcpy(pathname, ctx->relpath); values[0] = CStringGetTextDatum(pathname); - values[1] = PointerGetDatum(result); + values[1] = Int64GetDatum(pagecount); + values[2] = PointerGetDatum(result); pfree(pagemap.bitmap); pagemap.bitmap = NULL; pagemap.bitmapsize = 0; + pagecount = 0; htup = 
heap_form_tuple(funcctx->tuple_desc, values, nulls); if (htup) @@ -553,7 +557,10 @@ ptrack_get_pagemapset(PG_FUNCTION_ARGS) /* Block has been changed since specified LSN. Mark it in the bitmap */ if (update_lsn2 >= ctx->lsn) + { + pagecount += 1; datapagemap_add(&pagemap, ctx->bid.blocknum % ((BlockNumber) RELSEG_SIZE)); + } } ctx->bid.blocknum += 1; diff --git a/t/001_basic.pl b/t/001_basic.pl index 37285d9..bac81f2 100644 --- a/t/001_basic.pl +++ b/t/001_basic.pl @@ -10,7 +10,7 @@ use TestLib; use Test::More; -plan tests => 25; +plan tests => 24; my $node; my $res; @@ -119,9 +119,6 @@ $res_stdout = $node->safe_psql("postgres", "SELECT pages FROM ptrack_get_change_stat('$flush_lsn')"); is($res_stdout > 0, 1, 'should be able to get aggregated stats of changes'); -$res_stdout = $node->safe_psql("postgres", "SELECT count(*) FROM ptrack_get_change_file_stat('$flush_lsn')"); -is($res_stdout > 0, 1, 'should be able to get per file stats of changes'); - # We should be able to change ptrack map size (but loose all changes) $node->append_conf( 'postgresql.conf', q{ From d7b58240ca31a4302a44ce1056cabe114ddb06ec Mon Sep 17 00:00:00 2001 From: Roman Zharkov Date: Thu, 13 May 2021 14:15:33 +0600 Subject: [PATCH 09/65] [refer #PGPRO-4978] Update the .gitignore file. tags: ptrack --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 4990aa6..50591c5 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,4 @@ *.so *.o Dockerfile +/tmp_check/ From 7d3b7f6954027a68b2c460e0fc25fbfe39439865 Mon Sep 17 00:00:00 2001 From: Sokolov Yura Date: Sun, 23 May 2021 14:17:37 +0300 Subject: [PATCH 10/65] Remove erroneous cast to size_t in BID_HASH_FUNC It were mistakenly remain after change of BID_HASH_FUNC meaning --- engine.c | 6 +++--- engine.h | 2 +- ptrack.c | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/engine.c b/engine.c index 89217a9..f656146 100644 --- a/engine.c +++ b/engine.c @@ -654,7 +654,7 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode, ForkNumber forknum, BlockNumber blocknum) { PtBlockId bid; - size_t hash; + uint64 hash; size_t slot1; size_t slot2; XLogRecPtr new_lsn; @@ -676,8 +676,8 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode, bid.blocknum = blocknum; hash = BID_HASH_FUNC(bid); - slot1 = hash % PtrackContentNblocks; - slot2 = ((hash << 32) | (hash >> 32)) % PtrackContentNblocks; + slot1 = (size_t)(hash % PtrackContentNblocks); + slot2 = (size_t)(((hash << 32) | (hash >> 32)) % PtrackContentNblocks); if (RecoveryInProgress()) new_lsn = GetXLogReplayRecPtr(NULL); diff --git a/engine.h b/engine.h index e46f803..3386cc2 100644 --- a/engine.h +++ b/engine.h @@ -86,7 +86,7 @@ typedef PtrackMapHdr * PtrackMap; /* Block address 'bid' to hash. 
To get slot position in map should be divided * with '% PtrackContentNblocks' */ #define BID_HASH_FUNC(bid) \ - (size_t)(DatumGetUInt64(hash_any_extended((unsigned char *)&bid, sizeof(bid), 0))) + (DatumGetUInt64(hash_any_extended((unsigned char *)&bid, sizeof(bid), 0))) /* * Per process pointer to shared ptrack_map diff --git a/ptrack.c b/ptrack.c index 40630e7..66f5676 100644 --- a/ptrack.c +++ b/ptrack.c @@ -487,7 +487,7 @@ ptrack_get_pagemapset(PG_FUNCTION_ARGS) while (true) { - size_t hash; + uint64 hash; size_t slot1; size_t slot2; XLogRecPtr update_lsn1; @@ -535,7 +535,7 @@ ptrack_get_pagemapset(PG_FUNCTION_ARGS) } hash = BID_HASH_FUNC(ctx->bid); - slot1 = hash % PtrackContentNblocks; + slot1 = (size_t)(hash % PtrackContentNblocks); update_lsn1 = pg_atomic_read_u64(&ptrack_map->entries[slot1]); @@ -547,7 +547,7 @@ ptrack_get_pagemapset(PG_FUNCTION_ARGS) /* Only probe the second slot if the first one is marked */ if (update_lsn1 >= ctx->lsn) { - slot2 = ((hash << 32) | (hash >> 32)) % PtrackContentNblocks; + slot2 = (size_t)(((hash << 32) | (hash >> 32)) % PtrackContentNblocks); update_lsn2 = pg_atomic_read_u64(&ptrack_map->entries[slot2]); if (update_lsn2 != InvalidXLogRecPtr) From 0f9045f23d32770314638409cf9820418490604b Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 26 May 2021 08:23:42 +0300 Subject: [PATCH 11/65] fix debug message after hash type change --- engine.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine.c b/engine.c index f656146..bef0b2b 100644 --- a/engine.c +++ b/engine.c @@ -699,10 +699,10 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode, elog(DEBUG3, "ptrack_mark_block: map[%zu]=" UINT64_FORMAT " <- " UINT64_FORMAT, slot1, old_lsn.value, new_lsn); while (old_lsn.value < new_lsn && !pg_atomic_compare_exchange_u64(&ptrack_map->entries[slot1], (uint64 *) &old_lsn.value, new_lsn)); - elog(DEBUG3, "ptrack_mark_block: map[%zu]=" UINT64_FORMAT, hash, pg_atomic_read_u64(&ptrack_map->entries[slot1])); /* And to the second */ old_lsn.value = pg_atomic_read_u64(&ptrack_map->entries[slot2]); + elog(DEBUG3, "ptrack_mark_block: map[%zu]=" UINT64_FORMAT " <- " UINT64_FORMAT, slot2, old_lsn.value, new_lsn); while (old_lsn.value < new_lsn && !pg_atomic_compare_exchange_u64(&ptrack_map->entries[slot2], (uint64 *) &old_lsn.value, new_lsn)); } From a5ed3cd65ef3e5e373c42a899660a8f173434d0f Mon Sep 17 00:00:00 2001 From: Alexey Kondratov Date: Mon, 14 Jun 2021 19:00:58 +0300 Subject: [PATCH 12/65] Do not run some tests in repeat in CI --- .travis.yml | 4 ---- codecov.yml | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index 7119087..b3698e1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -33,7 +33,3 @@ env: - PG_VERSION=11 PG_BRANCH=REL_11_STABLE TEST_CASE=tap MODE=legacy - PG_VERSION=11 PG_BRANCH=REL_11_STABLE TEST_CASE=all - PG_VERSION=11 PG_BRANCH=REL_11_STABLE TEST_CASE=all MODE=paranoia - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE TEST_CASE=test_ptrack_multiple_segments TEST_REPEATS=5 MODE=paranoia - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE TEST_CASE=test_ptrack_eat_my_data TEST_REPEATS=4 - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE TEST_CASE=test_basic_ptrack_truncate_replica TEST_REPEATS=5 MODE=paranoia - - PG_VERSION=11 PG_BRANCH=REL_11_STABLE TEST_CASE=test_basic_ptrack_truncate_replica TEST_REPEATS=5 MODE=paranoia diff --git a/codecov.yml b/codecov.yml index 5ba398d..fe3b308 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,6 +1,6 @@ codecov: notify: - after_n_builds: 16 # keep in sync with .travis.yml 
number of builds + after_n_builds: 12 # keep in sync with .travis.yml number of builds # datapagemap.c/.h are copied from Postgres, so let's remove it # from report. Otherwise, we would have to remove some currently From 6f1a27b626408096774b8539209cb88a9dfb96ee Mon Sep 17 00:00:00 2001 From: Alexey Kondratov Date: Tue, 15 Jun 2021 23:42:57 +0300 Subject: [PATCH 13/65] Add info about recent changes into README.md --- README.md | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 39ea00b..0d6d232 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,7 @@ postgres=# CREATE EXTENSION ptrack; ## Configuration -The only one configurable option is `ptrack.map_size` (in MB). Default is `-1`, which means `ptrack` is turned off. To completely avoid false positives it is recommended to set `ptrack.map_size` to `1 / 1000` of expected `PGDATA` size (i.e. `1000` for a 1 TB database), since a single 8 byte `ptrack` map record tracks changes in a standard 8 KB PostgreSQL page. +The only one configurable option is `ptrack.map_size` (in MB). Default is `-1`, which means `ptrack` is turned off. In order to reduce number of false positives it is recommended to set `ptrack.map_size` to `1 / 1000` of expected `PGDATA` size (i.e. `1000` for a 1 TB database). To disable `ptrack` and clean up all remaining service files set `ptrack.map_size` to `0`. @@ -65,7 +65,7 @@ To disable `ptrack` and clean up all remaining service files set `ptrack.map_siz * ptrack_version() — returns ptrack version string. * ptrack_init_lsn() — returns LSN of the last ptrack map initialization. - * ptrack_get_pagemapset(start_lsn pg_lsn) — returns a set of changed data files with bitmaps of changed blocks since specified `start_lsn`. + * ptrack_get_pagemapset(start_lsn pg_lsn) — returns a set of changed data files with a number of changed blocks and their bitmaps since specified `start_lsn`. * ptrack_get_change_stat(start_lsn pg_lsn) — returns statistic of changes (number of files, pages and size in MB) since specified `start_lsn`. 
Usage example: @@ -74,7 +74,7 @@ Usage example: postgres=# SELECT ptrack_version(); ptrack_version ---------------- - 2.1 + 2.2 (1 row) postgres=# SELECT ptrack_init_lsn(); @@ -83,13 +83,21 @@ postgres=# SELECT ptrack_init_lsn(); 0/1814408 (1 row) -postgres=# SELECT ptrack_get_pagemapset('0/186F4C8'); - ptrack_get_pagemapset -------------------------------------------- - (global/1262,"\\x0100000000000000000000") - (global/2672,"\\x0200000000000000000000") - (global/2671,"\\x0200000000000000000000") -(3 rows) +postgres=# SELECT * FROM ptrack_get_pagemapset('0/185C8C0'); + path | pagecount | pagemap +---------------------+-----------+---------------------------------------- + base/16384/1255 | 3 | \x001000000005000000000000 + base/16384/2674 | 3 | \x0000000900010000000000000000 + base/16384/2691 | 1 | \x00004000000000000000000000 + base/16384/2608 | 1 | \x000000000000000400000000000000000000 + base/16384/2690 | 1 | \x000400000000000000000000 +(5 rows) + +postgres=# SELECT * FROM ptrack_get_change_stat('0/285C8C8'); + files | pages | size, MB +-------+-------+------------------------ + 20 | 25 | 0.19531250000000000000 +(1 row) ``` ## Upgrading From 2a4e3352ffba46fdd5411ac7112fe6603ad5b72d Mon Sep 17 00:00:00 2001 From: Victor Wagner Date: Mon, 2 Aug 2021 11:08:05 +0300 Subject: [PATCH 14/65] Support PostgresNode->new along with get_new_node in tests PostgreSQL recently dropped nonstandard way of create PostgesNode object - function get_new_node exported from the PostgresNode module. Now it is recommended to use PostgresNode->new constructor call This commit supports both ways of node creation - if get_new_node exists, use it, otherwice all constructor directly. --- t/001_basic.pl | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/t/001_basic.pl b/t/001_basic.pl index bac81f2..f73348d 100644 --- a/t/001_basic.pl +++ b/t/001_basic.pl @@ -18,7 +18,13 @@ my $res_stderr; # Initialize node -$node = get_new_node('node'); +# Older version of PostgresNode.pm use get_new_node function. +# Newer use standard perl object constructor syntax +if (PostgresNode->can('get_new_node')) { + $node = get_new_node('node'); +} else { + $node = PostgresNode->new("node"); +} $node->init; $node->start; From 3d6ccc610cb00c9750869ec3aace5204661899cb Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Fri, 15 Oct 2021 15:04:00 +0300 Subject: [PATCH 15/65] PostgreSQL-14 support (#13) * PostgreSQL-14 support (patch for version 13 works for 14 and is just copied, readme update) * fixing travis tests (python version change) * test builds of postgres master branch --- .travis.yml | 13 ++ README.md | 10 +- patches/REL_14_STABLE-ptrack-core.diff | 309 +++++++++++++++++++++++++ patches/master-ptrack-core.diff | 309 +++++++++++++++++++++++++ run_tests.sh | 6 +- 5 files changed, 639 insertions(+), 8 deletions(-) create mode 100644 patches/REL_14_STABLE-ptrack-core.diff create mode 100644 patches/master-ptrack-core.diff diff --git a/.travis.yml b/.travis.yml index b3698e1..c3edfdf 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,6 +21,14 @@ notifications: # keep in sync with codecov.yml number of builds env: + - PG_VERSION=15 PG_BRANCH=master TEST_CASE=tap + - PG_VERSION=15 PG_BRANCH=master TEST_CASE=tap MODE=legacy + - PG_VERSION=15 PG_BRANCH=master TEST_CASE=all + - PG_VERSION=15 PG_BRANCH=master TEST_CASE=all MODE=paranoia + - PG_VERSION=14 PG_BRANCH=REL_14_STABLE TEST_CASE=tap + - PG_VERSION=14 PG_BRANCH=REL_14_STABLE TEST_CASE=tap MODE=legacy + - PG_VERSION=14 PG_BRANCH=REL_14_STABLE TEST_CASE=all + - PG_VERSION=14 PG_BRANCH=REL_14_STABLE TEST_CASE=all MODE=paranoia - PG_VERSION=13 PG_BRANCH=REL_13_STABLE TEST_CASE=tap - PG_VERSION=13 PG_BRANCH=REL_13_STABLE TEST_CASE=tap MODE=legacy - PG_VERSION=13 PG_BRANCH=REL_13_STABLE TEST_CASE=all @@ -33,3 +41,8 @@ env: - PG_VERSION=11 PG_BRANCH=REL_11_STABLE TEST_CASE=tap MODE=legacy - PG_VERSION=11 PG_BRANCH=REL_11_STABLE TEST_CASE=all - PG_VERSION=11 PG_BRANCH=REL_11_STABLE TEST_CASE=all MODE=paranoia + +jobs: + allow_failures: + - if: env(PG_BRANCH) = master + diff --git a/README.md b/README.md index 0d6d232..b6b35fd 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ It is designed to allow false positives (i.e. block/page is marked in the `ptrac Currently, `ptrack` codebase is split between small PostgreSQL core patch and extension. All public SQL API methods and main engine are placed in the `ptrack` extension, while the core patch contains only certain hooks and modifies binary utilities to ignore `ptrack.map.*` files. -This extension is compatible with PostgreSQL [11](https://github.com/postgrespro/ptrack/blob/master/patches/REL_11_STABLE-ptrack-core.diff), [12](https://github.com/postgrespro/ptrack/blob/master/patches/REL_12_STABLE-ptrack-core.diff), [13](https://github.com/postgrespro/ptrack/blob/master/patches/REL_13_STABLE-ptrack-core.diff). +This extension is compatible with PostgreSQL [11](https://github.com/postgrespro/ptrack/blob/master/patches/REL_11_STABLE-ptrack-core.diff), [12](https://github.com/postgrespro/ptrack/blob/master/patches/REL_12_STABLE-ptrack-core.diff), [13](https://github.com/postgrespro/ptrack/blob/master/patches/REL_13_STABLE-ptrack-core.diff), [14](https://github.com/postgrespro/ptrack/blob/master/patches/REL_14_STABLE-ptrack-core.diff). 
## Installation @@ -25,13 +25,13 @@ git clone https://github.com/postgrespro/ptrack.git 2) Get latest PostgreSQL sources: ```shell -git clone https://github.com/postgres/postgres.git -b REL_12_STABLE && cd postgres +git clone https://github.com/postgres/postgres.git -b REL_14_STABLE && cd postgres ``` 3) Apply PostgreSQL core patch: ```shell -git apply -3 ../ptrack/patches/REL_12_STABLE-ptrack-core.diff +git apply -3 ../ptrack/patches/REL_14_STABLE-ptrack-core.diff ``` 4) Compile and install PostgreSQL @@ -151,8 +151,8 @@ Feel free to [send pull requests](https://github.com/postgrespro/ptrack/compare) Everything is tested automatically with [travis-ci.com](https://travis-ci.com/postgrespro/ptrack) and [codecov.io](https://codecov.io/gh/postgrespro/ptrack), but you can also run tests locally via `Docker`: ```sh -export PG_VERSION=12 -export PG_BRANCH=REL_12_STABLE +export PG_VERSION=14 +export PG_BRANCH=REL_14_STABLE export TEST_CASE=all export MODE=paranoia diff --git a/patches/REL_14_STABLE-ptrack-core.diff b/patches/REL_14_STABLE-ptrack-core.diff new file mode 100644 index 0000000..3491700 --- /dev/null +++ b/patches/REL_14_STABLE-ptrack-core.diff @@ -0,0 +1,309 @@ +commit a14ac459d71528c64df00c693e9c71ac70d3ba29 +Author: anastasia +Date: Mon Oct 19 14:53:06 2020 +0300 + + add ptrack 2.0 + +diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c +index 50ae1f16d0..721b926ad2 100644 +--- a/src/backend/replication/basebackup.c ++++ b/src/backend/replication/basebackup.c +@@ -233,6 +233,13 @@ static const struct exclude_list_item excludeFiles[] = + {"postmaster.pid", false}, + {"postmaster.opts", false}, + ++ /* ++ * Skip all transient ptrack files, but do copy ptrack.map, since it may ++ * be successfully used immediately after backup. TODO: check, test? ++ */ ++ {"ptrack.map.mmap", false}, ++ {"ptrack.map.tmp", false}, ++ + /* end of list */ + {NULL, false} + }; +@@ -248,6 +255,11 @@ static const struct exclude_list_item noChecksumFiles[] = { + {"pg_filenode.map", false}, + {"pg_internal.init", true}, + {"PG_VERSION", false}, ++ ++ {"ptrack.map.mmap", false}, ++ {"ptrack.map", false}, ++ {"ptrack.map.tmp", false}, ++ + #ifdef EXEC_BACKEND + {"config_exec_params", true}, + #endif +diff --git a/src/backend/storage/file/copydir.c b/src/backend/storage/file/copydir.c +index 0cf598dd0c..c9c44a4ae7 100644 +--- a/src/backend/storage/file/copydir.c ++++ b/src/backend/storage/file/copydir.c +@@ -27,6 +27,8 @@ + #include "storage/copydir.h" + #include "storage/fd.h" + ++copydir_hook_type copydir_hook = NULL; ++ + /* + * copydir: copy a directory + * +@@ -78,6 +80,9 @@ copydir(char *fromdir, char *todir, bool recurse) + } + FreeDir(xldir); + ++ if (copydir_hook) ++ copydir_hook(todir); ++ + /* + * Be paranoid here and fsync all files to ensure the copy is really done. + * But if fsync is disabled, we're done. +diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c +index 0eacd461cd..c2ef404a1a 100644 +--- a/src/backend/storage/smgr/md.c ++++ b/src/backend/storage/smgr/md.c +@@ -87,6 +87,8 @@ typedef struct _MdfdVec + + static MemoryContext MdCxt; /* context for all MdfdVec objects */ + ++mdextend_hook_type mdextend_hook = NULL; ++mdwrite_hook_type mdwrite_hook = NULL; + + /* Populate a file tag describing an md.c segment file. 
*/ + #define INIT_MD_FILETAG(a,xx_rnode,xx_forknum,xx_segno) \ +@@ -435,6 +437,9 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, + register_dirty_segment(reln, forknum, v); + + Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE)); ++ ++ if (mdextend_hook) ++ mdextend_hook(reln->smgr_rnode, forknum, blocknum); + } + + /* +@@ -721,6 +726,9 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, + + if (!skipFsync && !SmgrIsTemp(reln)) + register_dirty_segment(reln, forknum, v); ++ ++ if (mdwrite_hook) ++ mdwrite_hook(reln->smgr_rnode, forknum, blocknum); + } + + /* +diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c +index 3ded2cdd71..3a596a59f7 100644 +--- a/src/backend/storage/sync/sync.c ++++ b/src/backend/storage/sync/sync.c +@@ -75,6 +75,8 @@ static MemoryContext pendingOpsCxt; /* context for the above */ + static CycleCtr sync_cycle_ctr = 0; + static CycleCtr checkpoint_cycle_ctr = 0; + ++ProcessSyncRequests_hook_type ProcessSyncRequests_hook = NULL; ++ + /* Intervals for calling AbsorbSyncRequests */ + #define FSYNCS_PER_ABSORB 10 + #define UNLINKS_PER_ABSORB 10 +@@ -420,6 +422,9 @@ ProcessSyncRequests(void) + CheckpointStats.ckpt_longest_sync = longest; + CheckpointStats.ckpt_agg_sync_time = total_elapsed; + ++ if (ProcessSyncRequests_hook) ++ ProcessSyncRequests_hook(); ++ + /* Flag successful completion of ProcessSyncRequests */ + sync_in_progress = false; + } +diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c +index 1683629ee3..d2fc154576 100644 +--- a/src/backend/utils/misc/guc.c ++++ b/src/backend/utils/misc/guc.c +@@ -620,7 +620,6 @@ static char *recovery_target_xid_string; + static char *recovery_target_name_string; + static char *recovery_target_lsn_string; + +- + /* should be static, but commands/variable.c needs to get at this */ + char *role_string; + +diff --git a/src/bin/pg_checksums/pg_checksums.c b/src/bin/pg_checksums/pg_checksums.c +index ffdc23945c..7ae95866ce 100644 +--- a/src/bin/pg_checksums/pg_checksums.c ++++ b/src/bin/pg_checksums/pg_checksums.c +@@ -114,6 +114,11 @@ static const struct exclude_list_item skip[] = { + {"pg_filenode.map", false}, + {"pg_internal.init", true}, + {"PG_VERSION", false}, ++ ++ {"ptrack.map.mmap", false}, ++ {"ptrack.map", false}, ++ {"ptrack.map.tmp", false}, ++ + #ifdef EXEC_BACKEND + {"config_exec_params", true}, + #endif +diff --git a/src/bin/pg_resetwal/pg_resetwal.c b/src/bin/pg_resetwal/pg_resetwal.c +index 233441837f..cf7bd073bf 100644 +--- a/src/bin/pg_resetwal/pg_resetwal.c ++++ b/src/bin/pg_resetwal/pg_resetwal.c +@@ -84,6 +84,7 @@ static void RewriteControlFile(void); + static void FindEndOfXLOG(void); + static void KillExistingXLOG(void); + static void KillExistingArchiveStatus(void); ++static void KillExistingPtrack(void); + static void WriteEmptyXLOG(void); + static void usage(void); + +@@ -513,6 +514,7 @@ main(int argc, char *argv[]) + RewriteControlFile(); + KillExistingXLOG(); + KillExistingArchiveStatus(); ++ KillExistingPtrack(); + WriteEmptyXLOG(); + + printf(_("Write-ahead log reset\n")); +@@ -1102,6 +1104,53 @@ KillExistingArchiveStatus(void) + } + } + ++/* ++ * Remove existing ptrack files ++ */ ++static void ++KillExistingPtrack(void) ++{ ++#define PTRACKDIR "global" ++ ++ DIR *xldir; ++ struct dirent *xlde; ++ char path[MAXPGPATH + sizeof(PTRACKDIR)]; ++ ++ xldir = opendir(PTRACKDIR); ++ if (xldir == NULL) ++ { ++ pg_log_error("could not open directory \"%s\": %m", PTRACKDIR); ++ exit(1); ++ } ++ ++ 
while (errno = 0, (xlde = readdir(xldir)) != NULL) ++ { ++ if (strcmp(xlde->d_name, "ptrack.map.mmap") == 0 || ++ strcmp(xlde->d_name, "ptrack.map") == 0 || ++ strcmp(xlde->d_name, "ptrack.map.tmp") == 0) ++ { ++ snprintf(path, sizeof(path), "%s/%s", PTRACKDIR, xlde->d_name); ++ if (unlink(path) < 0) ++ { ++ pg_log_error("could not delete file \"%s\": %m", path); ++ exit(1); ++ } ++ } ++ } ++ ++ if (errno) ++ { ++ pg_log_error("could not read directory \"%s\": %m", PTRACKDIR); ++ exit(1); ++ } ++ ++ if (closedir(xldir)) ++ { ++ pg_log_error("could not close directory \"%s\": %m", PTRACKDIR); ++ exit(1); ++ } ++} ++ + + /* + * Write an empty XLOG file, containing only the checkpoint record +diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c +index fbb97b5cf1..6cd7f2ae3e 100644 +--- a/src/bin/pg_rewind/filemap.c ++++ b/src/bin/pg_rewind/filemap.c +@@ -124,6 +124,10 @@ static const struct exclude_list_item excludeFiles[] = + {"postmaster.pid", false}, + {"postmaster.opts", false}, + ++ {"ptrack.map.mmap", false}, ++ {"ptrack.map", false}, ++ {"ptrack.map.tmp", false}, ++ + /* end of list */ + {NULL, false} + }; +diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h +index 72e3352398..5c2e016501 100644 +--- a/src/include/miscadmin.h ++++ b/src/include/miscadmin.h +@@ -388,7 +388,7 @@ typedef enum ProcessingMode + NormalProcessing /* normal processing */ + } ProcessingMode; + +-extern ProcessingMode Mode; ++extern PGDLLIMPORT ProcessingMode Mode; + + #define IsBootstrapProcessingMode() (Mode == BootstrapProcessing) + #define IsInitProcessingMode() (Mode == InitProcessing) +diff --git a/src/include/port/pg_crc32c.h b/src/include/port/pg_crc32c.h +index 3c6f906683..a7355f7ad1 100644 +--- a/src/include/port/pg_crc32c.h ++++ b/src/include/port/pg_crc32c.h +@@ -69,8 +69,11 @@ extern pg_crc32c pg_comp_crc32c_armv8(pg_crc32c crc, const void *data, size_t le + #define FIN_CRC32C(crc) ((crc) ^= 0xFFFFFFFF) + + extern pg_crc32c pg_comp_crc32c_sb8(pg_crc32c crc, const void *data, size_t len); +-extern pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); +- ++extern ++#ifndef FRONTEND ++PGDLLIMPORT ++#endif ++pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); + #ifdef USE_SSE42_CRC32C_WITH_RUNTIME_CHECK + extern pg_crc32c pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len); + #endif +diff --git a/src/include/storage/copydir.h b/src/include/storage/copydir.h +index 5d28f59c1d..0d3f04d8af 100644 +--- a/src/include/storage/copydir.h ++++ b/src/include/storage/copydir.h +@@ -13,6 +13,9 @@ + #ifndef COPYDIR_H + #define COPYDIR_H + ++typedef void (*copydir_hook_type) (const char *path); ++extern PGDLLIMPORT copydir_hook_type copydir_hook; ++ + extern void copydir(char *fromdir, char *todir, bool recurse); + extern void copy_file(char *fromfile, char *tofile); + +diff --git a/src/include/storage/md.h b/src/include/storage/md.h +index 07fd1bb7d0..5294811bc8 100644 +--- a/src/include/storage/md.h ++++ b/src/include/storage/md.h +@@ -19,6 +19,13 @@ + #include "storage/smgr.h" + #include "storage/sync.h" + ++typedef void (*mdextend_hook_type) (RelFileNodeBackend smgr_rnode, ++ ForkNumber forknum, BlockNumber blocknum); ++extern PGDLLIMPORT mdextend_hook_type mdextend_hook; ++typedef void (*mdwrite_hook_type) (RelFileNodeBackend smgr_rnode, ++ ForkNumber forknum, BlockNumber blocknum); ++extern PGDLLIMPORT mdwrite_hook_type mdwrite_hook; ++ + /* md storage manager functionality */ + extern void mdinit(void); + extern void 
mdopen(SMgrRelation reln); +diff --git a/src/include/storage/sync.h b/src/include/storage/sync.h +index e16ab8e711..88da9686eb 100644 +--- a/src/include/storage/sync.h ++++ b/src/include/storage/sync.h +@@ -50,6 +50,9 @@ typedef struct FileTag + uint32 segno; + } FileTag; + ++typedef void (*ProcessSyncRequests_hook_type) (void); ++extern PGDLLIMPORT ProcessSyncRequests_hook_type ProcessSyncRequests_hook; ++ + extern void InitSync(void); + extern void SyncPreCheckpoint(void); + extern void SyncPostCheckpoint(void); diff --git a/patches/master-ptrack-core.diff b/patches/master-ptrack-core.diff new file mode 100644 index 0000000..3491700 --- /dev/null +++ b/patches/master-ptrack-core.diff @@ -0,0 +1,309 @@ +commit a14ac459d71528c64df00c693e9c71ac70d3ba29 +Author: anastasia +Date: Mon Oct 19 14:53:06 2020 +0300 + + add ptrack 2.0 + +diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c +index 50ae1f16d0..721b926ad2 100644 +--- a/src/backend/replication/basebackup.c ++++ b/src/backend/replication/basebackup.c +@@ -233,6 +233,13 @@ static const struct exclude_list_item excludeFiles[] = + {"postmaster.pid", false}, + {"postmaster.opts", false}, + ++ /* ++ * Skip all transient ptrack files, but do copy ptrack.map, since it may ++ * be successfully used immediately after backup. TODO: check, test? ++ */ ++ {"ptrack.map.mmap", false}, ++ {"ptrack.map.tmp", false}, ++ + /* end of list */ + {NULL, false} + }; +@@ -248,6 +255,11 @@ static const struct exclude_list_item noChecksumFiles[] = { + {"pg_filenode.map", false}, + {"pg_internal.init", true}, + {"PG_VERSION", false}, ++ ++ {"ptrack.map.mmap", false}, ++ {"ptrack.map", false}, ++ {"ptrack.map.tmp", false}, ++ + #ifdef EXEC_BACKEND + {"config_exec_params", true}, + #endif +diff --git a/src/backend/storage/file/copydir.c b/src/backend/storage/file/copydir.c +index 0cf598dd0c..c9c44a4ae7 100644 +--- a/src/backend/storage/file/copydir.c ++++ b/src/backend/storage/file/copydir.c +@@ -27,6 +27,8 @@ + #include "storage/copydir.h" + #include "storage/fd.h" + ++copydir_hook_type copydir_hook = NULL; ++ + /* + * copydir: copy a directory + * +@@ -78,6 +80,9 @@ copydir(char *fromdir, char *todir, bool recurse) + } + FreeDir(xldir); + ++ if (copydir_hook) ++ copydir_hook(todir); ++ + /* + * Be paranoid here and fsync all files to ensure the copy is really done. + * But if fsync is disabled, we're done. +diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c +index 0eacd461cd..c2ef404a1a 100644 +--- a/src/backend/storage/smgr/md.c ++++ b/src/backend/storage/smgr/md.c +@@ -87,6 +87,8 @@ typedef struct _MdfdVec + + static MemoryContext MdCxt; /* context for all MdfdVec objects */ + ++mdextend_hook_type mdextend_hook = NULL; ++mdwrite_hook_type mdwrite_hook = NULL; + + /* Populate a file tag describing an md.c segment file. 
*/ + #define INIT_MD_FILETAG(a,xx_rnode,xx_forknum,xx_segno) \ +@@ -435,6 +437,9 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, + register_dirty_segment(reln, forknum, v); + + Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE)); ++ ++ if (mdextend_hook) ++ mdextend_hook(reln->smgr_rnode, forknum, blocknum); + } + + /* +@@ -721,6 +726,9 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, + + if (!skipFsync && !SmgrIsTemp(reln)) + register_dirty_segment(reln, forknum, v); ++ ++ if (mdwrite_hook) ++ mdwrite_hook(reln->smgr_rnode, forknum, blocknum); + } + + /* +diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c +index 3ded2cdd71..3a596a59f7 100644 +--- a/src/backend/storage/sync/sync.c ++++ b/src/backend/storage/sync/sync.c +@@ -75,6 +75,8 @@ static MemoryContext pendingOpsCxt; /* context for the above */ + static CycleCtr sync_cycle_ctr = 0; + static CycleCtr checkpoint_cycle_ctr = 0; + ++ProcessSyncRequests_hook_type ProcessSyncRequests_hook = NULL; ++ + /* Intervals for calling AbsorbSyncRequests */ + #define FSYNCS_PER_ABSORB 10 + #define UNLINKS_PER_ABSORB 10 +@@ -420,6 +422,9 @@ ProcessSyncRequests(void) + CheckpointStats.ckpt_longest_sync = longest; + CheckpointStats.ckpt_agg_sync_time = total_elapsed; + ++ if (ProcessSyncRequests_hook) ++ ProcessSyncRequests_hook(); ++ + /* Flag successful completion of ProcessSyncRequests */ + sync_in_progress = false; + } +diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c +index 1683629ee3..d2fc154576 100644 +--- a/src/backend/utils/misc/guc.c ++++ b/src/backend/utils/misc/guc.c +@@ -620,7 +620,6 @@ static char *recovery_target_xid_string; + static char *recovery_target_name_string; + static char *recovery_target_lsn_string; + +- + /* should be static, but commands/variable.c needs to get at this */ + char *role_string; + +diff --git a/src/bin/pg_checksums/pg_checksums.c b/src/bin/pg_checksums/pg_checksums.c +index ffdc23945c..7ae95866ce 100644 +--- a/src/bin/pg_checksums/pg_checksums.c ++++ b/src/bin/pg_checksums/pg_checksums.c +@@ -114,6 +114,11 @@ static const struct exclude_list_item skip[] = { + {"pg_filenode.map", false}, + {"pg_internal.init", true}, + {"PG_VERSION", false}, ++ ++ {"ptrack.map.mmap", false}, ++ {"ptrack.map", false}, ++ {"ptrack.map.tmp", false}, ++ + #ifdef EXEC_BACKEND + {"config_exec_params", true}, + #endif +diff --git a/src/bin/pg_resetwal/pg_resetwal.c b/src/bin/pg_resetwal/pg_resetwal.c +index 233441837f..cf7bd073bf 100644 +--- a/src/bin/pg_resetwal/pg_resetwal.c ++++ b/src/bin/pg_resetwal/pg_resetwal.c +@@ -84,6 +84,7 @@ static void RewriteControlFile(void); + static void FindEndOfXLOG(void); + static void KillExistingXLOG(void); + static void KillExistingArchiveStatus(void); ++static void KillExistingPtrack(void); + static void WriteEmptyXLOG(void); + static void usage(void); + +@@ -513,6 +514,7 @@ main(int argc, char *argv[]) + RewriteControlFile(); + KillExistingXLOG(); + KillExistingArchiveStatus(); ++ KillExistingPtrack(); + WriteEmptyXLOG(); + + printf(_("Write-ahead log reset\n")); +@@ -1102,6 +1104,53 @@ KillExistingArchiveStatus(void) + } + } + ++/* ++ * Remove existing ptrack files ++ */ ++static void ++KillExistingPtrack(void) ++{ ++#define PTRACKDIR "global" ++ ++ DIR *xldir; ++ struct dirent *xlde; ++ char path[MAXPGPATH + sizeof(PTRACKDIR)]; ++ ++ xldir = opendir(PTRACKDIR); ++ if (xldir == NULL) ++ { ++ pg_log_error("could not open directory \"%s\": %m", PTRACKDIR); ++ exit(1); ++ } ++ ++ 
while (errno = 0, (xlde = readdir(xldir)) != NULL) ++ { ++ if (strcmp(xlde->d_name, "ptrack.map.mmap") == 0 || ++ strcmp(xlde->d_name, "ptrack.map") == 0 || ++ strcmp(xlde->d_name, "ptrack.map.tmp") == 0) ++ { ++ snprintf(path, sizeof(path), "%s/%s", PTRACKDIR, xlde->d_name); ++ if (unlink(path) < 0) ++ { ++ pg_log_error("could not delete file \"%s\": %m", path); ++ exit(1); ++ } ++ } ++ } ++ ++ if (errno) ++ { ++ pg_log_error("could not read directory \"%s\": %m", PTRACKDIR); ++ exit(1); ++ } ++ ++ if (closedir(xldir)) ++ { ++ pg_log_error("could not close directory \"%s\": %m", PTRACKDIR); ++ exit(1); ++ } ++} ++ + + /* + * Write an empty XLOG file, containing only the checkpoint record +diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c +index fbb97b5cf1..6cd7f2ae3e 100644 +--- a/src/bin/pg_rewind/filemap.c ++++ b/src/bin/pg_rewind/filemap.c +@@ -124,6 +124,10 @@ static const struct exclude_list_item excludeFiles[] = + {"postmaster.pid", false}, + {"postmaster.opts", false}, + ++ {"ptrack.map.mmap", false}, ++ {"ptrack.map", false}, ++ {"ptrack.map.tmp", false}, ++ + /* end of list */ + {NULL, false} + }; +diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h +index 72e3352398..5c2e016501 100644 +--- a/src/include/miscadmin.h ++++ b/src/include/miscadmin.h +@@ -388,7 +388,7 @@ typedef enum ProcessingMode + NormalProcessing /* normal processing */ + } ProcessingMode; + +-extern ProcessingMode Mode; ++extern PGDLLIMPORT ProcessingMode Mode; + + #define IsBootstrapProcessingMode() (Mode == BootstrapProcessing) + #define IsInitProcessingMode() (Mode == InitProcessing) +diff --git a/src/include/port/pg_crc32c.h b/src/include/port/pg_crc32c.h +index 3c6f906683..a7355f7ad1 100644 +--- a/src/include/port/pg_crc32c.h ++++ b/src/include/port/pg_crc32c.h +@@ -69,8 +69,11 @@ extern pg_crc32c pg_comp_crc32c_armv8(pg_crc32c crc, const void *data, size_t le + #define FIN_CRC32C(crc) ((crc) ^= 0xFFFFFFFF) + + extern pg_crc32c pg_comp_crc32c_sb8(pg_crc32c crc, const void *data, size_t len); +-extern pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); +- ++extern ++#ifndef FRONTEND ++PGDLLIMPORT ++#endif ++pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); + #ifdef USE_SSE42_CRC32C_WITH_RUNTIME_CHECK + extern pg_crc32c pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len); + #endif +diff --git a/src/include/storage/copydir.h b/src/include/storage/copydir.h +index 5d28f59c1d..0d3f04d8af 100644 +--- a/src/include/storage/copydir.h ++++ b/src/include/storage/copydir.h +@@ -13,6 +13,9 @@ + #ifndef COPYDIR_H + #define COPYDIR_H + ++typedef void (*copydir_hook_type) (const char *path); ++extern PGDLLIMPORT copydir_hook_type copydir_hook; ++ + extern void copydir(char *fromdir, char *todir, bool recurse); + extern void copy_file(char *fromfile, char *tofile); + +diff --git a/src/include/storage/md.h b/src/include/storage/md.h +index 07fd1bb7d0..5294811bc8 100644 +--- a/src/include/storage/md.h ++++ b/src/include/storage/md.h +@@ -19,6 +19,13 @@ + #include "storage/smgr.h" + #include "storage/sync.h" + ++typedef void (*mdextend_hook_type) (RelFileNodeBackend smgr_rnode, ++ ForkNumber forknum, BlockNumber blocknum); ++extern PGDLLIMPORT mdextend_hook_type mdextend_hook; ++typedef void (*mdwrite_hook_type) (RelFileNodeBackend smgr_rnode, ++ ForkNumber forknum, BlockNumber blocknum); ++extern PGDLLIMPORT mdwrite_hook_type mdwrite_hook; ++ + /* md storage manager functionality */ + extern void mdinit(void); + extern void 
mdopen(SMgrRelation reln); +diff --git a/src/include/storage/sync.h b/src/include/storage/sync.h +index e16ab8e711..88da9686eb 100644 +--- a/src/include/storage/sync.h ++++ b/src/include/storage/sync.h +@@ -50,6 +50,9 @@ typedef struct FileTag + uint32 segno; + } FileTag; + ++typedef void (*ProcessSyncRequests_hook_type) (void); ++extern PGDLLIMPORT ProcessSyncRequests_hook_type ProcessSyncRequests_hook; ++ + extern void InitSync(void); + extern void SyncPreCheckpoint(void); + extern void SyncPostCheckpoint(void); diff --git a/run_tests.sh b/run_tests.sh index 90654cc..b2ab300 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -105,7 +105,7 @@ else # Setup python environment echo "############### Setting up python env" - virtualenv pyenv + virtualenv --python=/usr/bin/python3 pyenv source pyenv/bin/activate pip install testgres==1.8.2 @@ -118,10 +118,10 @@ else if [ "$TEST_CASE" = "all" ]; then # Run all pg_probackup ptrack tests - python -m unittest -v tests.ptrack || status=$? + python3 -m unittest -v tests.ptrack || status=$? else for i in `seq $TEST_REPEATS`; do - python -m unittest -v tests.ptrack.PtrackTest.$TEST_CASE || status=$? + python3 -m unittest -v tests.ptrack.PtrackTest.$TEST_CASE || status=$? done fi From 3dbc4fa6f70ea74c46dd0de80d35726efded93fa Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Wed, 20 Oct 2021 16:07:38 +0300 Subject: [PATCH 16/65] PGPRO-5646: remove compiler warning (gcc 11.2.0) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In function ‘ptrack_write_chunk’, inlined from ‘ptrackCheckpoint’ at engine.c:397:2: engine.c:78:13: warning: ‘write’ reading 8 bytes from a region of size 4 [-Wstringop-overread] 78 | if (write(fd, chunk, size) != size) | ^~~~~~~~~~~~~~~~~~~~~~ In file included from engine.c:47: engine.c: In function ‘ptrackCheckpoint’: engine.h:55:25: note: source object ‘magic’ of size 4 55 | char magic[PTRACK_MAGIC_SIZE]; | ^~~~~ In file included from engine.c:22: /usr/include/unistd.h:378:16: note: in a call to function ‘write’ declared with attribute ‘access (read_only, 2, 3)’ 378 | extern ssize_t write (int __fd, const void *__buf, size_t __n) __wur | ^~~~~ In C a pointer to the first field of a structure and a pointer to the structure itself are always equal. Add a compile-time assertion check in case the field magic is not the first field in the structure PtrackMapHdr in the future. Thanks to Maksim Orlov for the review. --- engine.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/engine.c b/engine.c index bef0b2b..5685e9a 100644 --- a/engine.c +++ b/engine.c @@ -394,7 +394,17 @@ ptrackCheckpoint(void) * * Write both magic and varsion_num at once. */ - ptrack_write_chunk(ptrack_tmp_fd, &crc, (char *) &ptrack_map->magic, + + /* + * Previously we read from the field magic, now we read from the beginning + * of the structure PtrackMapHdr. Make sure nothing has changed since then. + */ + StaticAssertStmt( + offsetof(PtrackMapHdr, magic) == 0, + "old write format for PtrackMapHdr.magic and PtrackMapHdr.version_num " + "is not upward-compatible"); + + ptrack_write_chunk(ptrack_tmp_fd, &crc, (char *) ptrack_map, offsetof(PtrackMapHdr, init_lsn)); init_lsn = pg_atomic_read_u64(&ptrack_map->init_lsn); From 437a46942f785fb7f805192115f25dfd7600b398 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Fri, 22 Oct 2021 15:30:26 +0300 Subject: [PATCH 17/65] [travis] fix probackup tests --- run_tests.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/run_tests.sh b/run_tests.sh index b2ab300..c47617f 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -116,6 +116,7 @@ else export PG_PROBACKUP_PARANOIA=ON fi + export PG_PROBACKUP_PTRACK=ON if [ "$TEST_CASE" = "all" ]; then # Run all pg_probackup ptrack tests python3 -m unittest -v tests.ptrack || status=$? From b5d2c58e7426c9e3398c8587e43d2dc8144d94f9 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Sun, 24 Oct 2021 04:49:14 +0300 Subject: [PATCH 18/65] Travis fixes (fix test_ptrack_vacuum_full test) (#14) * [travis] fix test_ptrack_vacuum_full Remove unused PG_VERSION (PG_MAJOR) variable Reworking working directories (helps to run tap test) Remove unused amcheck install --- .travis.yml | 40 ++++++++--------- Dockerfile.in | 12 ++--- README.md | 1 - docker-compose.yml | 20 +++++++-- make_dockerfile.sh | 7 --- run_tests.sh | 110 +++++++++++++++++++-------------------------- 6 files changed, 89 insertions(+), 101 deletions(-) diff --git a/.travis.yml b/.travis.yml index c3edfdf..b6bd63a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -21,26 +21,26 @@ notifications: # keep in sync with codecov.yml number of builds env: - - PG_VERSION=15 PG_BRANCH=master TEST_CASE=tap - - PG_VERSION=15 PG_BRANCH=master TEST_CASE=tap MODE=legacy - - PG_VERSION=15 PG_BRANCH=master TEST_CASE=all - - PG_VERSION=15 PG_BRANCH=master TEST_CASE=all MODE=paranoia - - PG_VERSION=14 PG_BRANCH=REL_14_STABLE TEST_CASE=tap - - PG_VERSION=14 PG_BRANCH=REL_14_STABLE TEST_CASE=tap MODE=legacy - - PG_VERSION=14 PG_BRANCH=REL_14_STABLE TEST_CASE=all - - PG_VERSION=14 PG_BRANCH=REL_14_STABLE TEST_CASE=all MODE=paranoia - - PG_VERSION=13 PG_BRANCH=REL_13_STABLE TEST_CASE=tap - - PG_VERSION=13 PG_BRANCH=REL_13_STABLE TEST_CASE=tap MODE=legacy - - PG_VERSION=13 PG_BRANCH=REL_13_STABLE TEST_CASE=all - - PG_VERSION=13 PG_BRANCH=REL_13_STABLE TEST_CASE=all MODE=paranoia - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE TEST_CASE=tap - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE TEST_CASE=tap MODE=legacy - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE TEST_CASE=all - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE TEST_CASE=all MODE=paranoia - - PG_VERSION=11 PG_BRANCH=REL_11_STABLE TEST_CASE=tap - - PG_VERSION=11 PG_BRANCH=REL_11_STABLE TEST_CASE=tap MODE=legacy - - PG_VERSION=11 PG_BRANCH=REL_11_STABLE TEST_CASE=all - - PG_VERSION=11 PG_BRANCH=REL_11_STABLE TEST_CASE=all MODE=paranoia + - PG_BRANCH=master TEST_CASE=tap + - PG_BRANCH=master TEST_CASE=tap MODE=legacy +# - PG_BRANCH=master TEST_CASE=all + - PG_BRANCH=master TEST_CASE=all MODE=paranoia + - PG_BRANCH=REL_14_STABLE TEST_CASE=tap + - PG_BRANCH=REL_14_STABLE TEST_CASE=tap MODE=legacy +# - PG_BRANCH=REL_14_STABLE TEST_CASE=all + - PG_BRANCH=REL_14_STABLE TEST_CASE=all MODE=paranoia + - PG_BRANCH=REL_13_STABLE TEST_CASE=tap + - PG_BRANCH=REL_13_STABLE TEST_CASE=tap MODE=legacy +# - PG_BRANCH=REL_13_STABLE TEST_CASE=all + - PG_BRANCH=REL_13_STABLE TEST_CASE=all MODE=paranoia + - PG_BRANCH=REL_12_STABLE TEST_CASE=tap + - PG_BRANCH=REL_12_STABLE TEST_CASE=tap MODE=legacy +# - PG_BRANCH=REL_12_STABLE TEST_CASE=all + - PG_BRANCH=REL_12_STABLE TEST_CASE=all MODE=paranoia + - PG_BRANCH=REL_11_STABLE TEST_CASE=tap + - PG_BRANCH=REL_11_STABLE TEST_CASE=tap MODE=legacy +# - PG_BRANCH=REL_11_STABLE TEST_CASE=all + - PG_BRANCH=REL_11_STABLE TEST_CASE=all MODE=paranoia jobs: allow_failures: diff --git 
a/Dockerfile.in b/Dockerfile.in index 39541da..c2b0ffd 100644 --- a/Dockerfile.in +++ b/Dockerfile.in @@ -5,21 +5,21 @@ RUN apt-get update RUN apt-get -yq install python python-pip python-virtualenv # Environment -ENV PG_MAJOR=${PG_VERSION} PG_BRANCH=${PG_BRANCH} -ENV LANG=C.UTF-8 PGHOME=/pg/testdir/pgbin +ENV PG_BRANCH=${PG_BRANCH} +ENV LANG=C.UTF-8 PGHOME=/testdir/pgbin ENV MODE=${MODE} TEST_CASE=${TEST_CASE} TEST_REPEATS=${TEST_REPEATS} # Make directories -RUN mkdir -p /pg/testdir +RUN mkdir -p /testdir COPY run_tests.sh /run.sh RUN chmod 755 /run.sh -COPY . /pg/testdir -WORKDIR /pg/testdir +COPY . /testdir/ptrack +WORKDIR /testdir # Grant privileges -RUN chown -R postgres:postgres /pg/testdir +RUN chown -R postgres:postgres /testdir USER postgres ENTRYPOINT /run.sh diff --git a/README.md b/README.md index b6b35fd..898df12 100644 --- a/README.md +++ b/README.md @@ -151,7 +151,6 @@ Feel free to [send pull requests](https://github.com/postgrespro/ptrack/compare) Everything is tested automatically with [travis-ci.com](https://travis-ci.com/postgrespro/ptrack) and [codecov.io](https://codecov.io/gh/postgrespro/ptrack), but you can also run tests locally via `Docker`: ```sh -export PG_VERSION=14 export PG_BRANCH=REL_14_STABLE export TEST_CASE=all export MODE=paranoia diff --git a/docker-compose.yml b/docker-compose.yml index 544e59f..fc65455 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,3 +1,17 @@ -tests: - privileged: true - build: . +version: "3.7" +services: + tests: + build: + context: . + + cap_add: + - SYS_PTRACE + + security_opt: + - seccomp=unconfined + + # don't work + #sysctls: + # kernel.yama.ptrace_scope: 0 + privileged: true + diff --git a/make_dockerfile.sh b/make_dockerfile.sh index 52543e8..409a5b9 100755 --- a/make_dockerfile.sh +++ b/make_dockerfile.sh @@ -1,10 +1,5 @@ #!/usr/bin/env sh -if [ -z ${PG_VERSION+x} ]; then - echo PG_VERSION is not set! - exit 1 -fi - if [ -z ${PG_BRANCH+x} ]; then echo PG_BRANCH is not set! 
exit 1 @@ -28,11 +23,9 @@ else echo TEST_REPEATS=${TEST_REPEATS} fi -echo PG_VERSION=${PG_VERSION} echo PG_BRANCH=${PG_BRANCH} sed \ - -e 's/${PG_VERSION}/'${PG_VERSION}/g \ -e 's/${PG_BRANCH}/'${PG_BRANCH}/g \ -e 's/${MODE}/'${MODE}/g \ -e 's/${TEST_CASE}/'${TEST_CASE}/g \ diff --git a/run_tests.sh b/run_tests.sh index c47617f..c52d9ed 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -1,55 +1,47 @@ #!/usr/bin/env bash # -# Copyright (c) 2019-2020, Postgres Professional +# Copyright (c) 2019-2021, Postgres Professional # -PG_SRC=$PWD/postgres +PTRACK_SRC=${PWD}/ptrack +PG_SRC=${PWD}/postgres +PBK_SRC=${PWD}/pg_probackup status=0 -# curl "https://ftp.postgresql.org/pub/source/v$PG_VERSION/postgresql-$PG_VERSION.tar.bz2" -o postgresql.tar.bz2 -# echo "$PG_SHA256 *postgresql.tar.bz2" | sha256sum -c - - -# mkdir $PG_SRC - -# tar \ -# --extract \ -# --file postgresql.tar.bz2 \ -# --directory $PG_SRC \ -# --strip-components 1 - +######################################################### # Clone Postgres echo "############### Getting Postgres sources" -git clone https://github.com/postgres/postgres.git -b $PG_BRANCH --depth=1 +git clone https://github.com/postgres/postgres.git --depth=1 --branch=${PG_BRANCH} ${PG_SRC} # Clone pg_probackup echo "############### Getting pg_probackup sources" -git clone https://github.com/postgrespro/pg_probackup.git --depth=1 -b master -# git clone https://github.com/ololobus/pg_probackup.git --depth=1 -b ptrack-tests +git clone https://github.com/postgrespro/pg_probackup.git --depth=1 --branch=master ${PBK_SRC} +######################################################### # Compile and install Postgres -cd postgres # Go to postgres dir +cd ${PG_SRC} # Go to postgres dir echo "############### Applying ptrack patch" -git apply -v -3 ../patches/$PG_BRANCH-ptrack-core.diff +git apply --verbose --3way ${PTRACK_SRC}/patches/${PG_BRANCH}-ptrack-core.diff -if [ "$MODE" = "paranoia" ]; then +if [ "${MODE}" = "paranoia" ]; then echo "############### Paranoia mode: applying turn-off-hint-bits.diff" - git apply -v -3 ../patches/turn-off-hint-bits.diff + git apply --verbose --3way ${PTRACK_SRC}/patches/turn-off-hint-bits.diff fi echo "############### Compiling Postgres" -if [ "$TEST_CASE" = "tap" ] && [ "$MODE" = "legacy" ]; then - ./configure CFLAGS='-DEXEC_BACKEND' --disable-atomics --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests +if [ "${TEST_CASE}" = "tap" ] && [ "${MODE}" = "legacy" ]; then + ./configure CFLAGS='-DEXEC_BACKEND' --disable-atomics --prefix=${PGHOME} --enable-debug --enable-cassert --enable-depend --enable-tap-tests --quiet else - ./configure --prefix=$PGHOME --enable-debug --enable-cassert --enable-depend --enable-tap-tests + ./configure --prefix=${PGHOME} --enable-debug --enable-cassert --enable-depend --enable-tap-tests --quiet fi -make -s -j$(nproc) install -make -s -j$(nproc) -C contrib/ install +make --quiet --jobs=$(nproc) install +make --quiet --jobs=$(nproc) --directory=contrib/ install # Override default Postgres instance -export PATH=$PGHOME/bin:$PATH -export LD_LIBRARY_PATH=$PGHOME/lib +export PATH=${PGHOME}/bin:${PATH} +export LD_LIBRARY_PATH=${PGHOME}/lib export PG_CONFIG=$(which pg_config) # Show pg_config path (just in case) @@ -60,48 +52,37 @@ which pg_config echo "############### pg_config" pg_config -# Get amcheck if missing -if [ ! 
-d "contrib/amcheck" ]; then - echo "############### Getting missing amcheck" - git clone https://github.com/petergeoghegan/amcheck.git --depth=1 contrib/amcheck - make USE_PGXS=1 -C contrib/amcheck install -fi - -# Get back to testdir -cd .. - +######################################################### # Build and install ptrack extension echo "############### Compiling and installing ptrack extension" +cp --recursive ${PTRACK_SRC} ${PG_SRC}/contrib/ptrack +make USE_PGXS=1 PG_CPPFLAGS="-coverage" SHLIB_LINK="-coverage" --directory=${PG_SRC}/contrib/ptrack/ install -# XXX: Hackish way to make possible to run tap tests -mkdir $PG_SRC/contrib/ptrack -cp * $PG_SRC/contrib/ptrack/ -cp -R t $PG_SRC/contrib/ptrack/ - -make USE_PGXS=1 PG_CPPFLAGS="-coverage" SHLIB_LINK="-coverage" -C $PG_SRC/contrib/ptrack/ install - -if [ "$TEST_CASE" = "tap" ]; then +if [ "${TEST_CASE}" = "tap" ]; then # Run tap tests echo "############### Running tap tests" - if [ "$MODE" = "legacy" ]; then + if [ "${MODE}" = "legacy" ]; then # There is a known issue with attaching shared memory segment using the same # address each time, when EXEC_BACKEND mechanism is turned on. It happens due # to the ASLR address space randomization, so we are trying to attach a segment # to the already occupied location. That way we simply turning off ASLR here. # # Postgres comment: https://github.com/postgres/postgres/blob/5cbfce562f7cd2aab0cdc4694ce298ec3567930e/src/backend/postmaster/postmaster.c#L4929 - setarch x86_64 --addr-no-randomize make -C postgres/contrib/ptrack check || status=$? + setarch x86_64 --addr-no-randomize make --directory=${PG_SRC}/contrib/ptrack check || status=$? else - make -C postgres/contrib/ptrack check || status=$? + make --directory=${PG_SRC}/contrib/ptrack check || status=$? fi else + # Set kernel params (used for debugging -- probackup tests) + echo "############### setting kernel params" + sudo sh -c 'echo 0 > /proc/sys/kernel/yama/ptrace_scope' # Build and install pg_probackup echo "############### Compiling and installing pg_probackup" - cd pg_probackup # Go to pg_probackup dir - make USE_PGXS=1 top_srcdir=$PG_SRC install + cd ${PBK_SRC} # Go to pg_probackup dir + make USE_PGXS=1 top_srcdir=${PG_SRC} install # Setup python environment echo "############### Setting up python env" @@ -110,35 +91,36 @@ else pip install testgres==1.8.2 echo "############### Testing" - if [ "$MODE" = "basic" ]; then + export PG_PROBACKUP_PTRACK=ON + if [ "${MODE}" = "basic" ]; then export PG_PROBACKUP_TEST_BASIC=ON - elif [ "$MODE" = "paranoia" ]; then + elif [ "${MODE}" = "paranoia" ]; then export PG_PROBACKUP_PARANOIA=ON fi - export PG_PROBACKUP_PTRACK=ON - if [ "$TEST_CASE" = "all" ]; then + if [ "${TEST_CASE}" = "all" ]; then # Run all pg_probackup ptrack tests - python3 -m unittest -v tests.ptrack || status=$? + PBK_TEST_CASE=tests.ptrack else - for i in `seq $TEST_REPEATS`; do - python3 -m unittest -v tests.ptrack.PtrackTest.$TEST_CASE || status=$? - done + PBK_TEST_CASE=tests.ptrack.PtrackTest.${TEST_CASE} fi + for i in `seq ${TEST_REPEATS}`; do + python3 -m unittest -v ${PBK_TEST_CASE} || status=$? + done # Exit virtualenv deactivate - - # Get back to testdir - cd .. 
- fi +######################################################### +# codecov +echo "############### Codecov" +cd ${PTRACK_SRC} # Generate *.gcov files -gcov $PG_SRC/contrib/ptrack/*.c $PG_SRC/contrib/ptrack/*.h +gcov ${PG_SRC}/contrib/ptrack/*.c ${PG_SRC}/contrib/ptrack/*.h # Send coverage stats to Codecov bash <(curl -s https://codecov.io/bash) # Something went wrong, exit with code 1 -if [ $status -ne 0 ]; then exit 1; fi +if [ ${status} -ne 0 ]; then exit 1; fi From 838a560926ec978fc9673a019195e2142274265d Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Wed, 8 Dec 2021 20:39:26 +0300 Subject: [PATCH 19/65] PGPRO-5983: fix Perl tests due to changes in PostgreSQL 15devel The standard PostgreSQL Perl modules for testing have been moved and some have been renamed. Try to maintain both old and new modules by checking if we can load new modules and loading the necessary modules at compile time. We cannot load these modules at runtime because they can have INIT blocks. Also use Perl's 'eval' function to call functions from these modules so we don't get compilation errors due to conditionally loading modules. --- t/001_basic.pl | 47 +++++++++++++++++++++++++++++++++++++---------- 1 file changed, 37 insertions(+), 10 deletions(-) diff --git a/t/001_basic.pl b/t/001_basic.pl index f73348d..48e0f66 100644 --- a/t/001_basic.pl +++ b/t/001_basic.pl @@ -6,25 +6,52 @@ use strict; use warnings; -use PostgresNode; -use TestLib; use Test::More; +my $pg_15_modules; + +BEGIN +{ + $pg_15_modules = eval + { + require PostgreSQL::Test::Cluster; + require PostgreSQL::Test::Utils; + return 1; + }; + + unless (defined $pg_15_modules) + { + $pg_15_modules = 0; + + require PostgresNode; + require TestLib; + } +} + plan tests => 24; +note('PostgreSQL 15 modules are used: ' . ($pg_15_modules ? 'yes' : 'no')); + my $node; my $res; my $res_stdout; my $res_stderr; -# Initialize node -# Older version of PostgresNode.pm use get_new_node function. -# Newer use standard perl object constructor syntax -if (PostgresNode->can('get_new_node')) { - $node = get_new_node('node'); -} else { - $node = PostgresNode->new("node"); -} +# Create node. +# Older versions of PostgreSQL modules use get_new_node function. +# Newer use standard perl object constructor syntax. 
+eval +{ + if ($pg_15_modules) + { + $node = PostgreSQL::Test::Cluster->new("node"); + } + else + { + $node = PostgresNode::get_new_node("node"); + } +}; + $node->init; $node->start; From a34e6c334a9e7b51527404954417bd545bf998c6 Mon Sep 17 00:00:00 2001 From: vegebird Date: Wed, 26 Jan 2022 18:10:16 +0800 Subject: [PATCH 20/65] remove additional and useless loop for each relation file when do ptrack_get_pagemapset query reported by https://github.com/postgrespro/ptrack/issues/12 --- ptrack.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/ptrack.c b/ptrack.c index 66f5676..e59ca2a 100644 --- a/ptrack.c +++ b/ptrack.c @@ -368,6 +368,14 @@ ptrack_filelist_getnext(PtScanCtx * ctx) return ptrack_filelist_getnext(ctx); } + if (fst.st_size == 0) + { + elog(WARNING, "ptrack: skip empty file %s", fullpath); + + /* But try the next one */ + return ptrack_filelist_getnext(ctx); + } + if (pfl->segno > 0) { ctx->relsize = pfl->segno * RELSEG_SIZE + fst.st_size / BLCKSZ; @@ -494,7 +502,7 @@ ptrack_get_pagemapset(PG_FUNCTION_ARGS) XLogRecPtr update_lsn2; /* Stop traversal if there are no more segments */ - if (ctx->bid.blocknum > ctx->relsize) + if (ctx->bid.blocknum + 1 > ctx->relsize) { /* We completed a segment and there is a bitmap to return */ if (pagemap.bitmap != NULL) @@ -526,12 +534,9 @@ ptrack_get_pagemapset(PG_FUNCTION_ARGS) if (htup) SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(htup)); } - else - { - /* We have just processed unchanged file, let's pick next */ - if (ptrack_filelist_getnext(ctx) < 0) - SRF_RETURN_DONE(funcctx); - } + + if (ptrack_filelist_getnext(ctx) < 0) + SRF_RETURN_DONE(funcctx); } hash = BID_HASH_FUNC(ctx->bid); From a0d5a205918a3cb3d12941dee65f42b2c7ca6fc0 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" <16117281+kulaginm@users.noreply.github.com> Date: Mon, 28 Feb 2022 11:40:05 +0300 Subject: [PATCH 21/65] [PGPRO-5691] move mmapped ptrack map into shared postgres memory (#19) --- Makefile | 4 +- README.md | 23 +-- engine.c | 327 +++++++++++++++++-------------------------- engine.h | 11 +- ptrack--2.2--2.3.sql | 5 + ptrack.c | 60 +++++++- ptrack.control | 2 +- ptrack.h | 7 +- run_tests.sh | 1 + t/001_basic.pl | 3 +- 10 files changed, 215 insertions(+), 228 deletions(-) create mode 100644 ptrack--2.2--2.3.sql diff --git a/Makefile b/Makefile index ba9ce1d..4083df2 100644 --- a/Makefile +++ b/Makefile @@ -5,8 +5,8 @@ OBJS = ptrack.o datapagemap.o engine.o $(WIN32RES) PGFILEDESC = "ptrack - block-level incremental backup engine" EXTENSION = ptrack -EXTVERSION = 2.2 -DATA = ptrack--2.1.sql ptrack--2.0--2.1.sql ptrack--2.1--2.2.sql +EXTVERSION = 2.3 +DATA = ptrack--2.1.sql ptrack--2.0--2.1.sql ptrack--2.1--2.2.sql ptrack--2.2--2.3.sql TAP_TESTS = 1 diff --git a/README.md b/README.md index 898df12..8042ce3 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,7 @@ postgres=# CREATE EXTENSION ptrack; ## Configuration -The only one configurable option is `ptrack.map_size` (in MB). Default is `-1`, which means `ptrack` is turned off. In order to reduce number of false positives it is recommended to set `ptrack.map_size` to `1 / 1000` of expected `PGDATA` size (i.e. `1000` for a 1 TB database). +The only one configurable option is `ptrack.map_size` (in MB). Default is `0`, which means `ptrack` is turned off. In order to reduce number of false positives it is recommended to set `ptrack.map_size` to `1 / 1000` of expected `PGDATA` size (i.e. `1000` for a 1 TB database). 
To disable `ptrack` and clean up all remaining service files set `ptrack.map_size` to `0`. @@ -74,7 +74,7 @@ Usage example: postgres=# SELECT ptrack_version(); ptrack_version ---------------- - 2.2 + 2.3 (1 row) postgres=# SELECT ptrack_init_lsn(); @@ -115,15 +115,23 @@ Usually, you have to only install new version of `ptrack` and do `ALTER EXTENSIO Since version 2.2 we use a different algorithm for tracking changed pages. Thus, data recorded in the `ptrack.map` using pre 2.2 versions of `ptrack` is incompatible with newer versions. After extension upgrade and server restart old `ptrack.map` will be discarded with `WARNING` and initialized from the scratch. +#### Upgrading from 2.2.* to 2.3.*: + +* Stop your server +* Update ptrack binaries +* Remove global/ptrack.map.mmap if it exist in server data directory +* Start server +* Do `ALTER EXTENSION 'ptrack' UPDATE;`. + ## Limitations 1. You can only use `ptrack` safely with `wal_level >= 'replica'`. Otherwise, you can lose tracking of some changes if crash-recovery occurs, since [certain commands are designed not to write WAL at all if wal_level is minimal](https://www.postgresql.org/docs/12/populate.html#POPULATE-PITR), but we only durably flush `ptrack` map at checkpoint time. 2. The only one production-ready backup utility, that fully supports `ptrack` is [pg_probackup](https://github.com/postgrespro/pg_probackup). -3. Currently, you cannot resize `ptrack` map in runtime, only on postmaster start. Also, you will loose all tracked changes, so it is recommended to do so in the maintainance window and accompany this operation with full backup. See [TODO](#TODO) for details. +3. You cannot resize `ptrack` map in runtime, only on postmaster start. Also, you will loose all tracked changes, so it is recommended to do so in the maintainance window and accompany this operation with full backup. -4. You will need up to `ptrack.map_size * 3` of additional disk space, since `ptrack` uses two additional temporary files for durability purpose. See [Architecture section](#Architecture) for details. +4. You will need up to `ptrack.map_size * 2` of additional disk space, since `ptrack` uses additional temporary file for durability purpose. See [Architecture section](#Architecture) for details. ## Benchmarks @@ -131,11 +139,10 @@ Briefly, an overhead of using `ptrack` on TPS usually does not exceed a couple o ## Architecture -We use a single shared hash table in `ptrack`, which is mapped in memory from the file on disk using `mmap`. Due to the fixed size of the map there may be false positives (when some block is marked as changed without being actually modified), but not false negative results. However, these false postives may be completely eliminated by setting a high enough `ptrack.map_size`. +We use a single shared hash table in `ptrack`. Due to the fixed size of the map there may be false positives (when some block is marked as changed without being actually modified), but not false negative results. However, these false postives may be completely eliminated by setting a high enough `ptrack.map_size`. -All reads/writes are made using atomic operations on `uint64` entries, so the map is completely lockless during the normal PostgreSQL operation. 
Because we do not use locks for read/write access and cannot control `mmap` eviction back to disk, `ptrack` keeps a map (`ptrack.map`) since the last checkpoint intact and uses up to 2 additional temporary files: +All reads/writes are made using atomic operations on `uint64` entries, so the map is completely lockless during the normal PostgreSQL operation. Because we do not use locks for read/write access, `ptrack` keeps a map (`ptrack.map`) since the last checkpoint intact and uses up to 1 additional temporary file: -* working copy `ptrack.map.mmap` for doing `mmap` on it (there is a [TODO](#TODO) item); * temporary file `ptrack.map.tmp` to durably replace `ptrack.map` during checkpoint. Map is written on disk at the end of checkpoint atomically block by block involving the CRC32 checksum calculation that is checked on the next whole map re-read after crash-recovery or restart. @@ -165,8 +172,6 @@ Available test modes (`MODE`) are `basic` (default) and `paranoia` (per-block ch ### TODO -* Use POSIX `shm_open()` instead of `open()` to do not create an additional working copy of `ptrack` map file. * Should we introduce `ptrack.map_path` to allow `ptrack` service files storage outside of `PGDATA`? Doing that we will avoid patching PostgreSQL binary utilities to ignore `ptrack.map.*` files. * Can we resize `ptrack` map on restart but keep the previously tracked changes? -* Can we resize `ptrack` map dynamicaly? * Can we write a formal proof, that we never loose any modified page with `ptrack`? With TLA+? diff --git a/engine.c b/engine.c index 5685e9a..42fa65a 100644 --- a/engine.c +++ b/engine.c @@ -2,14 +2,14 @@ * engine.c * Block level incremental backup engine core * - * Copyright (c) 2019-2020, Postgres Professional + * Copyright (c) 2019-2022, Postgres Professional * * IDENTIFICATION * ptrack/engine.c * * INTERFACE ROUTINES (PostgreSQL side) * ptrackMapInit() --- allocate new shared ptrack_map - * ptrackMapAttach() --- attach to the existing ptrack_map + * ptrackCleanFiles() --- remove ptrack files * assign_ptrack_map_size() --- ptrack_map_size GUC assign callback * ptrack_walkdir() --- walk directory and mark all blocks of all * data files in ptrack_map @@ -88,160 +88,110 @@ ptrack_write_chunk(int fd, pg_crc32c *crc, char *chunk, size_t size) } /* - * Delete ptrack file and free the memory when ptrack is disabled. + * Delete ptrack files when ptrack is disabled. * - * This is performed by postmaster at start or by checkpointer, + * This is performed by postmaster at start, * so that there are no concurrent delete issues. 
*/ -static void -ptrackCleanFilesAndMap(void) +void +ptrackCleanFiles(void) { char ptrack_path[MAXPGPATH]; - char ptrack_mmap_path[MAXPGPATH]; char ptrack_path_tmp[MAXPGPATH]; sprintf(ptrack_path, "%s/%s", DataDir, PTRACK_PATH); - sprintf(ptrack_mmap_path, "%s/%s", DataDir, PTRACK_MMAP_PATH); sprintf(ptrack_path_tmp, "%s/%s", DataDir, PTRACK_PATH_TMP); - elog(DEBUG1, "ptrack: clean files and map"); + elog(DEBUG1, "ptrack: clean map files"); if (ptrack_file_exists(ptrack_path_tmp)) durable_unlink(ptrack_path_tmp, LOG); if (ptrack_file_exists(ptrack_path)) durable_unlink(ptrack_path, LOG); - - if (ptrack_map != NULL) - { -#ifdef WIN32 - if (!UnmapViewOfFile(ptrack_map)) -#else - if (!munmap(ptrack_map, sizeof(ptrack_map))) -#endif - elog(LOG, "could not unmap ptrack_map"); - - ptrack_map = NULL; - } - - if (ptrack_file_exists(ptrack_mmap_path)) - durable_unlink(ptrack_mmap_path, LOG); } /* - * Copy PTRACK_PATH file to special temporary file PTRACK_MMAP_PATH used for mapping, - * or create new file, if there was no PTRACK_PATH file on disk. - * - * Map the content of PTRACK_MMAP_PATH file into memory structure 'ptrack_map' using mmap. + * Read ptrack map file into shared memory pointed by ptrack_map. + * This function is called only at startup, + * so data is read directly (without synchronization). */ -void -ptrackMapInit(void) +static bool +ptrackMapReadFromFile(const char *ptrack_path) { - int ptrack_fd; - pg_crc32c crc; - pg_crc32c *file_crc; - char ptrack_path[MAXPGPATH]; - char ptrack_mmap_path[MAXPGPATH]; - struct stat stat_buf; - bool is_new_map = true; - - elog(DEBUG1, "ptrack init"); - - /* We do it at server start, so the map must be not allocated yet. */ - Assert(ptrack_map == NULL); + elog(DEBUG1, "ptrack read map"); - if (ptrack_map_size == 0) - return; + /* Do actual file read */ + { + int ptrack_fd; + size_t readed; - sprintf(ptrack_path, "%s/%s", DataDir, PTRACK_PATH); - sprintf(ptrack_mmap_path, "%s/%s", DataDir, PTRACK_MMAP_PATH); + ptrack_fd = BasicOpenFile(ptrack_path, O_RDWR | PG_BINARY); -ptrack_map_reinit: + if (ptrack_fd < 0) + elog(ERROR, "ptrack read map: failed to open map file \"%s\": %m", ptrack_path); - /* Remove old PTRACK_MMAP_PATH file, if exists */ - if (ptrack_file_exists(ptrack_mmap_path)) - durable_unlink(ptrack_mmap_path, LOG); + readed = 0; + do + { + ssize_t last_readed; - if (stat(ptrack_path, &stat_buf) == 0 && - stat_buf.st_size != PtrackActualSize) - { - elog(WARNING, "ptrack init: unexpected \"%s\" file size %zu != " UINT64_FORMAT ", deleting", - ptrack_path, (Size) stat_buf.st_size, PtrackActualSize); - durable_unlink(ptrack_path, LOG); + /* + * Try to read as much as possible + * (linux guaranteed only 0x7ffff000 bytes in one read + * operation, see read(2)) + */ + last_readed = read(ptrack_fd, (char *) ptrack_map + readed, PtrackActualSize - readed); + + if (last_readed > 0) + { + readed += last_readed; + } + else if (last_readed == 0) + { + /* + * We don't try to read more that PtrackActualSize and + * file size was already checked in ptrackMapInit() + */ + elog(ERROR, "ptrack read map: unexpected end of file while reading map file \"%s\", expected to read %zu, but read only %zu bytes", + ptrack_path, PtrackActualSize, readed); + } + else if (last_readed < 0 && errno != EINTR) + { + ereport(WARNING, + (errcode_for_file_access(), + errmsg("ptrack read map: could not read map file \"%s\": %m", ptrack_path))); + close(ptrack_fd); + return false; + } + } while (readed < PtrackActualSize); + + close(ptrack_fd); } - /* - * If on-disk PTRACK_PATH 
file is present and has expected size, copy it - * to read and restore state. - */ - if (stat(ptrack_path, &stat_buf) == 0) + /* Check PTRACK_MAGIC */ + if (strcmp(ptrack_map->magic, PTRACK_MAGIC) != 0) { - copy_file(ptrack_path, ptrack_mmap_path); - is_new_map = false; /* flag to check map file format and checksum */ - ptrack_fd = BasicOpenFile(ptrack_mmap_path, O_RDWR | PG_BINARY); + elog(WARNING, "ptrack read map: wrong map format of file \"%s\"", ptrack_path); + return false; } - else - /* Create new file for PTRACK_MMAP_PATH */ - ptrack_fd = BasicOpenFile(ptrack_mmap_path, O_RDWR | O_CREAT | PG_BINARY); - - if (ptrack_fd < 0) - elog(ERROR, "ptrack init: failed to open map file \"%s\": %m", ptrack_mmap_path); -#ifdef WIN32 + /* Check ptrack version inside old ptrack map */ + if (ptrack_map->version_num != PTRACK_MAP_FILE_VERSION_NUM) { - HANDLE mh = CreateFileMapping((HANDLE) _get_osfhandle(ptrack_fd), - NULL, - PAGE_READWRITE, - 0, - (DWORD) PtrackActualSize, - NULL); - - if (mh == NULL) - elog(ERROR, "ptrack init: failed to create file mapping: %m"); - - ptrack_map = (PtrackMap) MapViewOfFile(mh, FILE_MAP_ALL_ACCESS, 0, 0, 0); - if (ptrack_map == NULL) - { - CloseHandle(mh); - elog(ERROR, "ptrack init: failed to mmap ptrack file: %m"); - } + ereport(WARNING, + (errcode(ERRCODE_DATA_CORRUPTED), + errmsg("ptrack read map: map format version %d in the file \"%s\" is incompatible with file format of extension %d", + ptrack_map->version_num, ptrack_path, PTRACK_MAP_FILE_VERSION_NUM), + errdetail("Deleting file \"%s\" and reinitializing ptrack map.", ptrack_path))); + return false; } -#else - if (ftruncate(ptrack_fd, PtrackActualSize) < 0) - elog(ERROR, "ptrack init: failed to truncate file: %m"); - - ptrack_map = (PtrackMap) mmap(NULL, PtrackActualSize, - PROT_READ | PROT_WRITE, MAP_SHARED, - ptrack_fd, 0); - if (ptrack_map == MAP_FAILED) - elog(ERROR, "ptrack init: failed to mmap file: %m"); -#endif - if (!is_new_map) + /* Check CRC */ { - XLogRecPtr init_lsn; - - /* Check PTRACK_MAGIC */ - if (strcmp(ptrack_map->magic, PTRACK_MAGIC) != 0) - elog(ERROR, "ptrack init: wrong map format of file \"%s\"", ptrack_path); - - /* Check ptrack version inside old ptrack map */ - if (ptrack_map->version_num != PTRACK_VERSION_NUM) - { - ereport(WARNING, - (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("ptrack init: map format version %d in the file \"%s\" is incompatible with loaded version %d", - ptrack_map->version_num, ptrack_path, PTRACK_VERSION_NUM), - errdetail("Deleting file \"%s\" and reinitializing ptrack map.", ptrack_path))); - - /* Clean up everything and try again */ - ptrackCleanFilesAndMap(); - - is_new_map = true; - goto ptrack_map_reinit; - } + pg_crc32c crc; + pg_crc32c *file_crc; - /* Check CRC */ INIT_CRC32C(crc); COMP_CRC32C(crc, (char *) ptrack_map, PtrackCrcOffset); FIN_CRC32C(crc); @@ -252,88 +202,85 @@ ptrackMapInit(void) * Read ptrack map values without atomics during initialization, since * postmaster is the only user right now. */ - init_lsn = ptrack_map->init_lsn.value; - elog(DEBUG1, "ptrack init: crc %u, file_crc %u, init_lsn %X/%X", - crc, *file_crc, (uint32) (init_lsn >> 32), (uint32) init_lsn); + elog(DEBUG1, "ptrack read map: crc %u, file_crc %u, init_lsn %X/%X", + crc, *file_crc, (uint32) (ptrack_map->init_lsn.value >> 32), (uint32) ptrack_map->init_lsn.value); - /* TODO: Handle this error. 
Probably we can just recreate the file */ if (!EQ_CRC32C(*file_crc, crc)) { - ereport(ERROR, + ereport(WARNING, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("ptrack init: incorrect checksum of file \"%s\"", ptrack_path), - errhint("Delete \"%s\" and start the server again.", ptrack_path))); + errmsg("ptrack read map: incorrect checksum of file \"%s\"", ptrack_path), + errdetail("Deleting file \"%s\" and reinitializing ptrack map.", ptrack_path))); + return false; } } - else - { - memcpy(ptrack_map->magic, PTRACK_MAGIC, PTRACK_MAGIC_SIZE); - ptrack_map->version_num = PTRACK_VERSION_NUM; - } + return true; } /* - * Map must be already initialized by postmaster at start. - * mmap working copy of ptrack_map. + * Read PTRACK_PATH file into already allocated shared memory, check header and checksum + * or create new file, if there was no PTRACK_PATH file on disk. */ void -ptrackMapAttach(void) +ptrackMapInit(void) { - char ptrack_mmap_path[MAXPGPATH]; - int ptrack_fd; + char ptrack_path[MAXPGPATH]; struct stat stat_buf; + bool is_new_map = true; - elog(DEBUG1, "ptrack attach"); - - /* We do it at process start, so the map must be not allocated yet. */ - Assert(ptrack_map == NULL); + elog(DEBUG1, "ptrack init"); if (ptrack_map_size == 0) return; - sprintf(ptrack_mmap_path, "%s/%s", DataDir, PTRACK_MMAP_PATH); - if (!ptrack_file_exists(ptrack_mmap_path)) - { - elog(WARNING, "ptrack attach: '%s' file doesn't exist ", ptrack_mmap_path); - return; - } - - if (stat(ptrack_mmap_path, &stat_buf) == 0 && - stat_buf.st_size != PtrackActualSize) - elog(ERROR, "ptrack attach: ptrack_map_size doesn't match size of the file \"%s\"", ptrack_mmap_path); - - ptrack_fd = BasicOpenFile(ptrack_mmap_path, O_RDWR | PG_BINARY); - if (ptrack_fd < 0) - elog(ERROR, "ptrack attach: failed to open ptrack map file \"%s\": %m", ptrack_mmap_path); + sprintf(ptrack_path, "%s/%s", DataDir, PTRACK_PATH); - elog(DEBUG1, "ptrack attach: before mmap"); -#ifdef WIN32 + if (stat(ptrack_path, &stat_buf) == 0) { - HANDLE mh = CreateFileMapping((HANDLE) _get_osfhandle(ptrack_fd), - NULL, - PAGE_READWRITE, - 0, - (DWORD) PtrackActualSize, - NULL); - - if (mh == NULL) - elog(ERROR, "ptrack attach: failed to create file mapping: %m"); - - ptrack_map = (PtrackMap) MapViewOfFile(mh, FILE_MAP_ALL_ACCESS, 0, 0, 0); - if (ptrack_map == NULL) + elog(DEBUG3, "ptrack init: map \"%s\" detected, trying to load", ptrack_path); + if (stat_buf.st_size != PtrackActualSize) { - CloseHandle(mh); - elog(ERROR, "ptrack attach: failed to mmap ptrack file: %m"); + elog(WARNING, "ptrack init: unexpected \"%s\" file size %zu != " UINT64_FORMAT ", deleting", + ptrack_path, (Size) stat_buf.st_size, PtrackActualSize); + durable_unlink(ptrack_path, LOG); + } + else if (ptrackMapReadFromFile(ptrack_path)) + { + is_new_map = false; + } + else + { + /* + * ptrackMapReadFromFile failed + * this can be crc mismatch, version mismatch and other errors + * We treat it as non fatal and create new map in memory, + * that will be written on disk on checkpoint + */ + elog(WARNING, "ptrack init: broken map file \"%s\", deleting", + ptrack_path); + durable_unlink(ptrack_path, LOG); } } -#else - ptrack_map = (PtrackMap) mmap(NULL, PtrackActualSize, - PROT_READ | PROT_WRITE, MAP_SHARED, - ptrack_fd, 0); - if (ptrack_map == MAP_FAILED) - elog(ERROR, "ptrack attach: failed to mmap ptrack file: %m"); -#endif + + /* + * Initialyze memory for new map + */ + if (is_new_map) + { + memcpy(ptrack_map->magic, PTRACK_MAGIC, PTRACK_MAGIC_SIZE); + ptrack_map->version_num = 
PTRACK_MAP_FILE_VERSION_NUM; + ptrack_map->init_lsn.value = InvalidXLogRecPtr; + /* + * Fill entries with InvalidXLogRecPtr + * (InvalidXLogRecPtr is actually 0) + */ + memset(ptrack_map->entries, 0, PtrackContentNblocks * sizeof(pg_atomic_uint64)); + /* + * Last part of memory representation of ptrack_map (crc) is actually unused + * so leave it as it is + */ + } } /* @@ -365,7 +312,6 @@ ptrackCheckpoint(void) /* Delete ptrack_map and all related files, if ptrack was switched off */ if (ptrack_map_size == 0) { - ptrackCleanFilesAndMap(); return; } else if (ptrack_map == NULL) @@ -392,7 +338,7 @@ ptrackCheckpoint(void) * into the memory with mmap after a crash/restart. That way, we have to * write values taking into account all paddings/alignments. * - * Write both magic and varsion_num at once. + * Write both magic and version_num at once. */ /* @@ -519,20 +465,10 @@ assign_ptrack_map_size(int newval, void *extra) elog(DEBUG1, "assign_ptrack_map_size: MyProc %d newval %d ptrack_map_size " UINT64_FORMAT, MyProcPid, newval, ptrack_map_size); - /* - * XXX: for some reason assign_ptrack_map_size is called twice during the - * postmaster boot! First, it is always called with bootValue, so we use - * -1 as default value and no-op here. Next, it is called with the actual - * value from config. That way, we use 0 as an option for user to turn - * off ptrack and clean up all files. - */ - if (newval == -1) - return; - /* Delete ptrack_map and all related files, if ptrack was switched off. */ if (newval == 0) { - ptrackCleanFilesAndMap(); + ptrack_map_size = 0; return; } @@ -550,15 +486,6 @@ assign_ptrack_map_size(int newval, void *extra) elog(DEBUG1, "assign_ptrack_map_size: ptrack_map_size set to " UINT64_FORMAT, ptrack_map_size); - - /* Init map on postmaster start */ - if (!IsUnderPostmaster) - { - if (ptrack_map == NULL) - ptrackMapInit(); - } - else - ptrackMapAttach(); } } diff --git a/engine.h b/engine.h index 3386cc2..5daf69a 100644 --- a/engine.h +++ b/engine.h @@ -4,6 +4,7 @@ * header for ptrack map for tracking updates of relation's pages * * + * Copyright (c) 2019-2022, Postgres Professional * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * @@ -23,9 +24,6 @@ /* #include "utils/relcache.h" */ #include "access/hash.h" - -/* Working copy of ptrack.map */ -#define PTRACK_MMAP_PATH "global/ptrack.map.mmap" /* Persistent copy of ptrack.map to restore after crash */ #define PTRACK_PATH "global/ptrack.map" /* Used for atomical crash-safe update of ptrack.map */ @@ -36,6 +34,9 @@ * buffer size for disk writes. On fast NVMe SSD it gives * around 20% increase in ptrack checkpoint speed compared * to PTRACK_BUF_SIZE == 1000, i.e. 8 KB writes. + * (PTRACK_BUS_SIZE is a count of pg_atomic_uint64) + * + * NOTE: but POSIX defines _POSIX_SSIZE_MAX as 32767 (bytes) */ #define PTRACK_BUF_SIZE ((uint64) 8000) @@ -80,7 +81,7 @@ typedef PtrackMapHdr * PtrackMap; #define PtrackActualSize \ (offsetof(PtrackMapHdr, entries) + PtrackContentNblocks * sizeof(pg_atomic_uint64) + sizeof(pg_crc32c)) -/* CRC32 value offset in order to directly access it in the mmap'ed memory chunk */ +/* CRC32 value offset in order to directly access it in the shared memory chunk */ #define PtrackCrcOffset (PtrackActualSize - sizeof(pg_crc32c)) /* Block address 'bid' to hash. 
To get slot position in map should be divided @@ -102,7 +103,7 @@ extern int ptrack_map_size_tmp; extern void ptrackCheckpoint(void); extern void ptrackMapInit(void); -extern void ptrackMapAttach(void); +extern void ptrackCleanFiles(void); extern void assign_ptrack_map_size(int newval, void *extra); diff --git a/ptrack--2.2--2.3.sql b/ptrack--2.2--2.3.sql new file mode 100644 index 0000000..1a97e79 --- /dev/null +++ b/ptrack--2.2--2.3.sql @@ -0,0 +1,5 @@ +/* ptrack/ptrack--2.2--2.3.sql */ + +-- Complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION ptrack UPDATE;" to load this file.\ quit + diff --git a/ptrack.c b/ptrack.c index 66f5676..10b4d40 100644 --- a/ptrack.c +++ b/ptrack.c @@ -2,14 +2,13 @@ * ptrack.c * Block level incremental backup engine * - * Copyright (c) 2019-2020, Postgres Professional + * Copyright (c) 2019-2022, Postgres Professional * * IDENTIFICATION * ptrack/ptrack.c * * INTERFACE ROUTINES (PostgreSQL side) * ptrackMapInit() --- allocate new shared ptrack_map - * ptrackMapAttach() --- attach to the existing ptrack_map * assign_ptrack_map_size() --- ptrack_map_size GUC assign callback * ptrack_walkdir() --- walk directory and mark all blocks of all * data files in ptrack_map @@ -17,7 +16,7 @@ * * Currently ptrack has following public API methods: * - * # ptrack_version --- returns ptrack version string (2.0 currently). + * # ptrack_version --- returns ptrack version string (2.3 currently). * # ptrack_get_pagemapset('LSN') --- returns a set of changed data files with * bitmaps of changed blocks since specified LSN. * # ptrack_init_lsn --- returns LSN of the last ptrack map initialization. @@ -42,6 +41,7 @@ #include "replication/basebackup.h" #endif #include "storage/copydir.h" +#include "storage/ipc.h" #include "storage/lmgr.h" #if PG_VERSION_NUM >= 120000 #include "storage/md.h" @@ -59,9 +59,10 @@ PG_MODULE_MAGIC; PtrackMap ptrack_map = NULL; -uint64 ptrack_map_size; +uint64 ptrack_map_size = 0; int ptrack_map_size_tmp; +static shmem_startup_hook_type prev_shmem_startup_hook = NULL; static copydir_hook_type prev_copydir_hook = NULL; static mdwrite_hook_type prev_mdwrite_hook = NULL; static mdextend_hook_type prev_mdextend_hook = NULL; @@ -70,6 +71,7 @@ static ProcessSyncRequests_hook_type prev_ProcessSyncRequests_hook = NULL; void _PG_init(void); void _PG_fini(void); +static void ptrack_shmem_startup_hook(void); static void ptrack_copydir_hook(const char *path); static void ptrack_mdwrite_hook(RelFileNodeBackend smgr_rnode, ForkNumber forkno, BlockNumber blkno); @@ -103,15 +105,23 @@ _PG_init(void) "Sets the size of ptrack map in MB used for incremental backup (0 disabled).", NULL, &ptrack_map_size_tmp, - -1, - -1, 32 * 1024, /* limit to 32 GB */ - PGC_POSTMASTER, 0, + 0, 32 * 1024, /* limit to 32 GB */ + PGC_POSTMASTER, + GUC_UNIT_MB, NULL, assign_ptrack_map_size, NULL); + /* Request server shared memory */ + if (ptrack_map_size != 0) + RequestAddinShmemSpace(PtrackActualSize); + else + ptrackCleanFiles(); + /* Install hooks */ + prev_shmem_startup_hook = shmem_startup_hook; + shmem_startup_hook = ptrack_shmem_startup_hook; prev_copydir_hook = copydir_hook; copydir_hook = ptrack_copydir_hook; prev_mdwrite_hook = mdwrite_hook; @@ -129,12 +139,48 @@ void _PG_fini(void) { /* Uninstall hooks */ + shmem_startup_hook = prev_shmem_startup_hook; copydir_hook = prev_copydir_hook; mdwrite_hook = prev_mdwrite_hook; mdextend_hook = prev_mdextend_hook; ProcessSyncRequests_hook = prev_ProcessSyncRequests_hook; } +/* + * 
ptrack_shmem_startup hook: allocate or attach to shared memory. + */ +static void +ptrack_shmem_startup_hook(void) +{ + bool map_found; + + if (prev_shmem_startup_hook) + prev_shmem_startup_hook(); + + /* + * Create or attach to the shared memory state + */ + LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); + + if (ptrack_map_size != 0) + { + ptrack_map = ShmemInitStruct("ptrack map", + PtrackActualSize, + &map_found); + if (!map_found) + { + ptrackMapInit(); + elog(DEBUG1, "Shared memory for ptrack is ready"); + } + } + else + { + ptrack_map = NULL; + } + + LWLockRelease(AddinShmemInitLock); +} + /* * Ptrack follow up for copydir() routine. It parses database OID * and tablespace OID from path string. We do not need to recursively diff --git a/ptrack.control b/ptrack.control index ec0af9d..85ede4c 100644 --- a/ptrack.control +++ b/ptrack.control @@ -1,5 +1,5 @@ # ptrack extension comment = 'block-level incremental backup engine' -default_version = '2.2' +default_version = '2.3' module_pathname = '$libdir/ptrack' relocatable = true diff --git a/ptrack.h b/ptrack.h index d205115..a5c1f01 100644 --- a/ptrack.h +++ b/ptrack.h @@ -4,6 +4,7 @@ * header for ptrack map for tracking updates of relation's pages * * + * Copyright (c) 2019-2022, Postgres Professional * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * @@ -22,9 +23,11 @@ #include "utils/relcache.h" /* Ptrack version as a string */ -#define PTRACK_VERSION "2.2" +#define PTRACK_VERSION "2.3" /* Ptrack version as a number */ -#define PTRACK_VERSION_NUM 220 +#define PTRACK_VERSION_NUM 230 +/* Last ptrack version that changed map file format */ +#define PTRACK_MAP_FILE_VERSION_NUM 220 /* * Structure identifying block on the disk. diff --git a/run_tests.sh b/run_tests.sh index c52d9ed..1b4a693 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -56,6 +56,7 @@ pg_config # Build and install ptrack extension echo "############### Compiling and installing ptrack extension" cp --recursive ${PTRACK_SRC} ${PG_SRC}/contrib/ptrack +make USE_PGXS=1 --directory=${PG_SRC}/contrib/ptrack/ clean make USE_PGXS=1 PG_CPPFLAGS="-coverage" SHLIB_LINK="-coverage" --directory=${PG_SRC}/contrib/ptrack/ install if [ "${TEST_CASE}" = "tap" ]; then diff --git a/t/001_basic.pl b/t/001_basic.pl index 48e0f66..bdb1eca 100644 --- a/t/001_basic.pl +++ b/t/001_basic.pl @@ -28,7 +28,7 @@ BEGIN } } -plan tests => 24; +plan tests => 23; note('PostgreSQL 15 modules are used: ' . ($pg_15_modules ? 'yes' : 'no')); @@ -182,7 +182,6 @@ BEGIN # Check that we have lost everything ok(! -f $node->data_dir . "/global/ptrack.map", "ptrack.map should be cleaned up"); ok(! -f $node->data_dir . "/global/ptrack.map.tmp", "ptrack.map.tmp should be cleaned up"); -ok(! -f $node->data_dir . "/global/ptrack.map.mmap", "ptrack.map.mmap should be cleaned up"); ($res, $res_stdout, $res_stderr) = $node->psql("postgres", "SELECT ptrack_get_pagemapset('0/0')"); is($res, 3, 'errors out if ptrack is disabled'); From 8d066377e938ac7742d0eaa1efb0f9ac9134c7db Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 20 May 2022 16:42:09 +0300 Subject: [PATCH 22/65] Limit maximum size on 32bit builds && fix elog message. 
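
On a 32-bit build size_t is only 4 bytes, so the map size (computed in uint64)
has to be capped (hence the 256 MB GUC maximum below) and explicitly narrowed
before it is passed for a %zu conversion in elog(). A minimal standalone sketch
of the same point (illustrative only, not part of this patch; names are made up):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint64_t map_bytes = (uint64_t) 256 * 1024 * 1024;  /* 256 MB cap */

        /* a value that does not fit into size_t cannot be printed with %zu */
        if (map_bytes > (uint64_t) SIZE_MAX)
            return 1;

        /* narrow explicitly, as the patched elog() call does */
        printf("ptrack map size %zu bytes\n", (size_t) map_bytes);
        return 0;
    }
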
--- engine.c | 2 +- ptrack.c | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/engine.c b/engine.c index 42fa65a..89f4792 100644 --- a/engine.c +++ b/engine.c @@ -154,7 +154,7 @@ ptrackMapReadFromFile(const char *ptrack_path) * file size was already checked in ptrackMapInit() */ elog(ERROR, "ptrack read map: unexpected end of file while reading map file \"%s\", expected to read %zu, but read only %zu bytes", - ptrack_path, PtrackActualSize, readed); + ptrack_path, (size_t)PtrackActualSize, readed); } else if (last_readed < 0 && errno != EINTR) { diff --git a/ptrack.c b/ptrack.c index 10b4d40..3037bcc 100644 --- a/ptrack.c +++ b/ptrack.c @@ -106,7 +106,11 @@ _PG_init(void) NULL, &ptrack_map_size_tmp, 0, +#if SIZEOF_SIZE_T == 8 0, 32 * 1024, /* limit to 32 GB */ +#else + 0, 256, /* limit to 256 MB */ +#endif PGC_POSTMASTER, GUC_UNIT_MB, NULL, From 2f040bb5d6ddbedccec742ffcd73214671d6de61 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Fri, 3 Jun 2022 10:45:20 +0300 Subject: [PATCH 23/65] [PGPRO-6731] Fix '\ quit' typo in extension upgrade scripts --- ptrack--2.1--2.2.sql | 2 +- ptrack--2.2--2.3.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ptrack--2.1--2.2.sql b/ptrack--2.1--2.2.sql index b09c15e..da897b6 100644 --- a/ptrack--2.1--2.2.sql +++ b/ptrack--2.1--2.2.sql @@ -1,7 +1,7 @@ /* ptrack/ptrack--2.1--2.2.sql */ -- Complain if script is sourced in psql, rather than via ALTER EXTENSION -\echo Use "ALTER EXTENSION ptrack UPDATE;" to load this file.\ quit +\echo Use "ALTER EXTENSION ptrack UPDATE;" to load this file. \quit DROP FUNCTION ptrack_get_pagemapset(start_lsn pg_lsn); CREATE FUNCTION ptrack_get_pagemapset(start_lsn pg_lsn) diff --git a/ptrack--2.2--2.3.sql b/ptrack--2.2--2.3.sql index 1a97e79..6c5f574 100644 --- a/ptrack--2.2--2.3.sql +++ b/ptrack--2.2--2.3.sql @@ -1,5 +1,5 @@ /* ptrack/ptrack--2.2--2.3.sql */ -- Complain if script is sourced in psql, rather than via ALTER EXTENSION -\echo Use "ALTER EXTENSION ptrack UPDATE;" to load this file.\ quit +\echo Use "ALTER EXTENSION ptrack UPDATE;" to load this file. 
\quit From 820380ae477bd5567872bde01984dcf84879ca66 Mon Sep 17 00:00:00 2001 From: vegebird Date: Mon, 27 Jun 2022 19:38:44 +0800 Subject: [PATCH 24/65] Update ptrack.c Co-authored-by: Sokolov Yura --- ptrack.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ptrack.c b/ptrack.c index e59ca2a..0911339 100644 --- a/ptrack.c +++ b/ptrack.c @@ -502,7 +502,7 @@ ptrack_get_pagemapset(PG_FUNCTION_ARGS) XLogRecPtr update_lsn2; /* Stop traversal if there are no more segments */ - if (ctx->bid.blocknum + 1 > ctx->relsize) + if (ctx->bid.blocknum >= ctx->relsize) { /* We completed a segment and there is a bitmap to return */ if (pagemap.bitmap != NULL) From 23dbeaeebf02bb4f5f39093b697fb750e2922453 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 27 Jun 2022 14:44:55 +0300 Subject: [PATCH 25/65] remove useless guc change Fixes #8 --- patches/REL_12_STABLE-ptrack-core.diff | 12 ------------ patches/REL_13_STABLE-ptrack-core.diff | 12 ------------ patches/REL_14_STABLE-ptrack-core.diff | 12 ------------ 3 files changed, 36 deletions(-) diff --git a/patches/REL_12_STABLE-ptrack-core.diff b/patches/REL_12_STABLE-ptrack-core.diff index d8c00e0..738b8e7 100644 --- a/patches/REL_12_STABLE-ptrack-core.diff +++ b/patches/REL_12_STABLE-ptrack-core.diff @@ -107,18 +107,6 @@ index aff3e885f36..4fffa5df17c 100644 /* Flag successful completion of ProcessSyncRequests */ sync_in_progress = false; } -diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c -index a70e79c4891..712f985f3e8 100644 ---- a/src/backend/utils/misc/guc.c -+++ b/src/backend/utils/misc/guc.c -@@ -581,7 +581,6 @@ static char *recovery_target_xid_string; - static char *recovery_target_name_string; - static char *recovery_target_lsn_string; - -- - /* should be static, but commands/variable.c needs to get at this */ - char *role_string; - diff --git a/src/bin/pg_checksums/pg_checksums.c b/src/bin/pg_checksums/pg_checksums.c index 03c3da3d730..fdfe5c1318e 100644 --- a/src/bin/pg_checksums/pg_checksums.c diff --git a/patches/REL_13_STABLE-ptrack-core.diff b/patches/REL_13_STABLE-ptrack-core.diff index 3491700..f61fb48 100644 --- a/patches/REL_13_STABLE-ptrack-core.diff +++ b/patches/REL_13_STABLE-ptrack-core.diff @@ -113,18 +113,6 @@ index 3ded2cdd71..3a596a59f7 100644 /* Flag successful completion of ProcessSyncRequests */ sync_in_progress = false; } -diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c -index 1683629ee3..d2fc154576 100644 ---- a/src/backend/utils/misc/guc.c -+++ b/src/backend/utils/misc/guc.c -@@ -620,7 +620,6 @@ static char *recovery_target_xid_string; - static char *recovery_target_name_string; - static char *recovery_target_lsn_string; - -- - /* should be static, but commands/variable.c needs to get at this */ - char *role_string; - diff --git a/src/bin/pg_checksums/pg_checksums.c b/src/bin/pg_checksums/pg_checksums.c index ffdc23945c..7ae95866ce 100644 --- a/src/bin/pg_checksums/pg_checksums.c diff --git a/patches/REL_14_STABLE-ptrack-core.diff b/patches/REL_14_STABLE-ptrack-core.diff index 3491700..f61fb48 100644 --- a/patches/REL_14_STABLE-ptrack-core.diff +++ b/patches/REL_14_STABLE-ptrack-core.diff @@ -113,18 +113,6 @@ index 3ded2cdd71..3a596a59f7 100644 /* Flag successful completion of ProcessSyncRequests */ sync_in_progress = false; } -diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c -index 1683629ee3..d2fc154576 100644 ---- a/src/backend/utils/misc/guc.c -+++ b/src/backend/utils/misc/guc.c -@@ -620,7 +620,6 @@ static char 
*recovery_target_xid_string; - static char *recovery_target_name_string; - static char *recovery_target_lsn_string; - -- - /* should be static, but commands/variable.c needs to get at this */ - char *role_string; - diff --git a/src/bin/pg_checksums/pg_checksums.c b/src/bin/pg_checksums/pg_checksums.c index ffdc23945c..7ae95866ce 100644 --- a/src/bin/pg_checksums/pg_checksums.c From fe9a17c437722f053ba5d80d7fb23ad8a275417a Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Wed, 29 Jun 2022 20:06:58 +0300 Subject: [PATCH 26/65] PGPRO-6867: include required headers for PostgreSQL 15 in engine.c - It loooks like we now need to explicitly include storage/fd.h for the functions durable_unlink, BasicOpenFile etc. - We need to include access/xlogrecovery.h for the function GetXLogReplayRecPtr which was moved here in the commit 70e81861fadd9112fa2d425c762e163910a4ee52. --- engine.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/engine.c b/engine.c index 89f4792..ff4588e 100644 --- a/engine.c +++ b/engine.c @@ -29,6 +29,10 @@ #include "access/htup_details.h" #include "access/parallel.h" #include "access/xlog.h" +#if PG_VERSION_NUM >= 150000 +#include "access/xlogrecovery.h" +#include "storage/fd.h" +#endif #include "catalog/pg_tablespace.h" #include "miscadmin.h" #include "port/pg_crc32c.h" From 5714dd1dc3461a8b726a2bec82766427e52cfcdd Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Mon, 4 Jul 2022 17:38:18 +0300 Subject: [PATCH 27/65] [PGPRO-6817] ptrack has been ported to version 15. Has been tested on 15beta2 and 16devel tags: ptrack --- patches/master-ptrack-core.diff | 116 ++++++++++---------------------- ptrack.c | 22 ++++++ 2 files changed, 56 insertions(+), 82 deletions(-) diff --git a/patches/master-ptrack-core.diff b/patches/master-ptrack-core.diff index 3491700..20316e5 100644 --- a/patches/master-ptrack-core.diff +++ b/patches/master-ptrack-core.diff @@ -1,14 +1,8 @@ -commit a14ac459d71528c64df00c693e9c71ac70d3ba29 -Author: anastasia -Date: Mon Oct 19 14:53:06 2020 +0300 - - add ptrack 2.0 - diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c -index 50ae1f16d0..721b926ad2 100644 +index 5244823ff85..160889b4a04 100644 --- a/src/backend/replication/basebackup.c +++ b/src/backend/replication/basebackup.c -@@ -233,6 +233,13 @@ static const struct exclude_list_item excludeFiles[] = +@@ -197,6 +197,13 @@ static const struct exclude_list_item excludeFiles[] = {"postmaster.pid", false}, {"postmaster.opts", false}, @@ -22,7 +16,7 @@ index 50ae1f16d0..721b926ad2 100644 /* end of list */ {NULL, false} }; -@@ -248,6 +255,11 @@ static const struct exclude_list_item noChecksumFiles[] = { +@@ -212,6 +219,11 @@ static const struct exclude_list_item noChecksumFiles[] = { {"pg_filenode.map", false}, {"pg_internal.init", true}, {"PG_VERSION", false}, @@ -35,7 +29,7 @@ index 50ae1f16d0..721b926ad2 100644 {"config_exec_params", true}, #endif diff --git a/src/backend/storage/file/copydir.c b/src/backend/storage/file/copydir.c -index 0cf598dd0c..c9c44a4ae7 100644 +index 658fd95ba95..eee38eba176 100644 --- a/src/backend/storage/file/copydir.c +++ b/src/backend/storage/file/copydir.c @@ -27,6 +27,8 @@ @@ -58,7 +52,7 @@ index 0cf598dd0c..c9c44a4ae7 100644 * Be paranoid here and fsync all files to ensure the copy is really done. * But if fsync is disabled, we're done. 
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c -index 0eacd461cd..c2ef404a1a 100644 +index 43edaf5d873..bbaf7500944 100644 --- a/src/backend/storage/smgr/md.c +++ b/src/backend/storage/smgr/md.c @@ -87,6 +87,8 @@ typedef struct _MdfdVec @@ -70,7 +64,7 @@ index 0eacd461cd..c2ef404a1a 100644 /* Populate a file tag describing an md.c segment file. */ #define INIT_MD_FILETAG(a,xx_rnode,xx_forknum,xx_segno) \ -@@ -435,6 +437,9 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, +@@ -467,6 +469,9 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, register_dirty_segment(reln, forknum, v); Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE)); @@ -80,7 +74,7 @@ index 0eacd461cd..c2ef404a1a 100644 } /* -@@ -721,6 +726,9 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, +@@ -756,6 +761,9 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, if (!skipFsync && !SmgrIsTemp(reln)) register_dirty_segment(reln, forknum, v); @@ -91,10 +85,10 @@ index 0eacd461cd..c2ef404a1a 100644 /* diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c -index 3ded2cdd71..3a596a59f7 100644 +index e1fb6310038..76d75680b31 100644 --- a/src/backend/storage/sync/sync.c +++ b/src/backend/storage/sync/sync.c -@@ -75,6 +75,8 @@ static MemoryContext pendingOpsCxt; /* context for the above */ +@@ -81,6 +81,8 @@ static MemoryContext pendingOpsCxt; /* context for the above */ static CycleCtr sync_cycle_ctr = 0; static CycleCtr checkpoint_cycle_ctr = 0; @@ -103,7 +97,7 @@ index 3ded2cdd71..3a596a59f7 100644 /* Intervals for calling AbsorbSyncRequests */ #define FSYNCS_PER_ABSORB 10 #define UNLINKS_PER_ABSORB 10 -@@ -420,6 +422,9 @@ ProcessSyncRequests(void) +@@ -477,6 +479,9 @@ ProcessSyncRequests(void) CheckpointStats.ckpt_longest_sync = longest; CheckpointStats.ckpt_agg_sync_time = total_elapsed; @@ -113,23 +107,11 @@ index 3ded2cdd71..3a596a59f7 100644 /* Flag successful completion of ProcessSyncRequests */ sync_in_progress = false; } -diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c -index 1683629ee3..d2fc154576 100644 ---- a/src/backend/utils/misc/guc.c -+++ b/src/backend/utils/misc/guc.c -@@ -620,7 +620,6 @@ static char *recovery_target_xid_string; - static char *recovery_target_name_string; - static char *recovery_target_lsn_string; - -- - /* should be static, but commands/variable.c needs to get at this */ - char *role_string; - diff --git a/src/bin/pg_checksums/pg_checksums.c b/src/bin/pg_checksums/pg_checksums.c -index ffdc23945c..7ae95866ce 100644 +index 21dfe1b6ee5..266ac1ef40a 100644 --- a/src/bin/pg_checksums/pg_checksums.c +++ b/src/bin/pg_checksums/pg_checksums.c -@@ -114,6 +114,11 @@ static const struct exclude_list_item skip[] = { +@@ -118,6 +118,11 @@ static const struct exclude_list_item skip[] = { {"pg_filenode.map", false}, {"pg_internal.init", true}, {"PG_VERSION", false}, @@ -142,10 +124,10 @@ index ffdc23945c..7ae95866ce 100644 {"config_exec_params", true}, #endif diff --git a/src/bin/pg_resetwal/pg_resetwal.c b/src/bin/pg_resetwal/pg_resetwal.c -index 233441837f..cf7bd073bf 100644 +index d4772a29650..3318f64359d 100644 --- a/src/bin/pg_resetwal/pg_resetwal.c +++ b/src/bin/pg_resetwal/pg_resetwal.c -@@ -84,6 +84,7 @@ static void RewriteControlFile(void); +@@ -85,6 +85,7 @@ static void RewriteControlFile(void); static void FindEndOfXLOG(void); static void KillExistingXLOG(void); static void KillExistingArchiveStatus(void); @@ -153,7 +135,7 
@@ index 233441837f..cf7bd073bf 100644 static void WriteEmptyXLOG(void); static void usage(void); -@@ -513,6 +514,7 @@ main(int argc, char *argv[]) +@@ -488,6 +489,7 @@ main(int argc, char *argv[]) RewriteControlFile(); KillExistingXLOG(); KillExistingArchiveStatus(); @@ -161,8 +143,8 @@ index 233441837f..cf7bd073bf 100644 WriteEmptyXLOG(); printf(_("Write-ahead log reset\n")); -@@ -1102,6 +1104,53 @@ KillExistingArchiveStatus(void) - } +@@ -1036,6 +1038,41 @@ KillExistingArchiveStatus(void) + pg_fatal("could not close directory \"%s\": %m", ARCHSTATDIR); } +/* @@ -173,16 +155,13 @@ index 233441837f..cf7bd073bf 100644 +{ +#define PTRACKDIR "global" + -+ DIR *xldir; ++ DIR *xldir; + struct dirent *xlde; -+ char path[MAXPGPATH + sizeof(PTRACKDIR)]; ++ char path[MAXPGPATH + sizeof(PTRACKDIR)]; + + xldir = opendir(PTRACKDIR); + if (xldir == NULL) -+ { -+ pg_log_error("could not open directory \"%s\": %m", PTRACKDIR); -+ exit(1); -+ } ++ pg_fatal("could not open directory \"%s\": %m", PTRACKDIR); + + while (errno = 0, (xlde = readdir(xldir)) != NULL) + { @@ -192,34 +171,25 @@ index 233441837f..cf7bd073bf 100644 + { + snprintf(path, sizeof(path), "%s/%s", PTRACKDIR, xlde->d_name); + if (unlink(path) < 0) -+ { -+ pg_log_error("could not delete file \"%s\": %m", path); -+ exit(1); -+ } ++ pg_fatal("could not delete file \"%s\": %m", path); + } + } + + if (errno) -+ { -+ pg_log_error("could not read directory \"%s\": %m", PTRACKDIR); -+ exit(1); -+ } ++ pg_fatal("could not read directory \"%s\": %m", PTRACKDIR); + + if (closedir(xldir)) -+ { -+ pg_log_error("could not close directory \"%s\": %m", PTRACKDIR); -+ exit(1); -+ } ++ pg_fatal("could not close directory \"%s\": %m", PTRACKDIR); +} + /* * Write an empty XLOG file, containing only the checkpoint record diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c -index fbb97b5cf1..6cd7f2ae3e 100644 +index 62529310415..b496f54fb06 100644 --- a/src/bin/pg_rewind/filemap.c +++ b/src/bin/pg_rewind/filemap.c -@@ -124,6 +124,10 @@ static const struct exclude_list_item excludeFiles[] = +@@ -157,6 +157,10 @@ static const struct exclude_list_item excludeFiles[] = {"postmaster.pid", false}, {"postmaster.opts", false}, @@ -230,39 +200,21 @@ index fbb97b5cf1..6cd7f2ae3e 100644 /* end of list */ {NULL, false} }; -diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h -index 72e3352398..5c2e016501 100644 ---- a/src/include/miscadmin.h -+++ b/src/include/miscadmin.h -@@ -388,7 +388,7 @@ typedef enum ProcessingMode - NormalProcessing /* normal processing */ - } ProcessingMode; - --extern ProcessingMode Mode; -+extern PGDLLIMPORT ProcessingMode Mode; - - #define IsBootstrapProcessingMode() (Mode == BootstrapProcessing) - #define IsInitProcessingMode() (Mode == InitProcessing) diff --git a/src/include/port/pg_crc32c.h b/src/include/port/pg_crc32c.h -index 3c6f906683..a7355f7ad1 100644 +index d7668651ba8..33994a27f5f 100644 --- a/src/include/port/pg_crc32c.h +++ b/src/include/port/pg_crc32c.h -@@ -69,8 +69,11 @@ extern pg_crc32c pg_comp_crc32c_armv8(pg_crc32c crc, const void *data, size_t le +@@ -69,7 +69,7 @@ extern pg_crc32c pg_comp_crc32c_armv8(pg_crc32c crc, const void *data, size_t le #define FIN_CRC32C(crc) ((crc) ^= 0xFFFFFFFF) extern pg_crc32c pg_comp_crc32c_sb8(pg_crc32c crc, const void *data, size_t len); -extern pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); -- -+extern -+#ifndef FRONTEND -+PGDLLIMPORT -+#endif -+pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); ++extern 
PGDLLIMPORT pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); + #ifdef USE_SSE42_CRC32C_WITH_RUNTIME_CHECK extern pg_crc32c pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len); - #endif diff --git a/src/include/storage/copydir.h b/src/include/storage/copydir.h -index 5d28f59c1d..0d3f04d8af 100644 +index 50a26edeb06..af1602f5154 100644 --- a/src/include/storage/copydir.h +++ b/src/include/storage/copydir.h @@ -13,6 +13,9 @@ @@ -276,7 +228,7 @@ index 5d28f59c1d..0d3f04d8af 100644 extern void copy_file(char *fromfile, char *tofile); diff --git a/src/include/storage/md.h b/src/include/storage/md.h -index 07fd1bb7d0..5294811bc8 100644 +index ffffa40db71..3ff98e0bf01 100644 --- a/src/include/storage/md.h +++ b/src/include/storage/md.h @@ -19,6 +19,13 @@ @@ -287,17 +239,17 @@ index 07fd1bb7d0..5294811bc8 100644 + ForkNumber forknum, BlockNumber blocknum); +extern PGDLLIMPORT mdextend_hook_type mdextend_hook; +typedef void (*mdwrite_hook_type) (RelFileNodeBackend smgr_rnode, -+ ForkNumber forknum, BlockNumber blocknum); ++ ForkNumber forknum, BlockNumber blocknum); +extern PGDLLIMPORT mdwrite_hook_type mdwrite_hook; + /* md storage manager functionality */ extern void mdinit(void); extern void mdopen(SMgrRelation reln); diff --git a/src/include/storage/sync.h b/src/include/storage/sync.h -index e16ab8e711..88da9686eb 100644 +index 9737e1eb67c..914ad86328f 100644 --- a/src/include/storage/sync.h +++ b/src/include/storage/sync.h -@@ -50,6 +50,9 @@ typedef struct FileTag +@@ -55,6 +55,9 @@ typedef struct FileTag uint32 segno; } FileTag; diff --git a/ptrack.c b/ptrack.c index 96f946b..c81eb17 100644 --- a/ptrack.c +++ b/ptrack.c @@ -81,6 +81,10 @@ static void ptrack_ProcessSyncRequests_hook(void); static void ptrack_gather_filelist(List **filelist, char *path, Oid spcOid, Oid dbOid); static int ptrack_filelist_getnext(PtScanCtx * ctx); +#if PG_VERSION_NUM >= 150000 +static shmem_request_hook_type prev_shmem_request_hook = NULL; +static void ptrack_shmem_request(void); +#endif /* * Module load callback @@ -119,7 +123,14 @@ _PG_init(void) /* Request server shared memory */ if (ptrack_map_size != 0) + { +#if PG_VERSION_NUM >= 150000 + prev_shmem_request_hook = shmem_request_hook; + shmem_request_hook = ptrack_shmem_request; +#else RequestAddinShmemSpace(PtrackActualSize); +#endif + } else ptrackCleanFiles(); @@ -136,6 +147,17 @@ _PG_init(void) ProcessSyncRequests_hook = ptrack_ProcessSyncRequests_hook; } +#if PG_VERSION_NUM >= 150000 +static void +ptrack_shmem_request(void) +{ + if (prev_shmem_request_hook) + prev_shmem_request_hook(); + + RequestAddinShmemSpace(PtrackActualSize); +} +#endif + /* * Module unload callback */ From 6951c582a7f9edb3772b4c7a6c085df8060e42ad Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 16 Aug 2022 14:22:24 +0300 Subject: [PATCH 28/65] [PGPRO-6817] update for rel 15 beta. 
Tags: ptrack --- patches/REL_15_STABLE-ptrack-core.diff | 261 +++++++++++++++++++++++++ 1 file changed, 261 insertions(+) create mode 100644 patches/REL_15_STABLE-ptrack-core.diff diff --git a/patches/REL_15_STABLE-ptrack-core.diff b/patches/REL_15_STABLE-ptrack-core.diff new file mode 100644 index 0000000..18fcb6e --- /dev/null +++ b/patches/REL_15_STABLE-ptrack-core.diff @@ -0,0 +1,261 @@ +diff --git a/src/backend/backup/basebackup.c b/src/backend/backup/basebackup.c +index cc16c4b331f..69b1af16cf5 100644 +--- a/src/backend/backup/basebackup.c ++++ b/src/backend/backup/basebackup.c +@@ -197,6 +197,13 @@ static const struct exclude_list_item excludeFiles[] = + {"postmaster.pid", false}, + {"postmaster.opts", false}, + ++ /* ++ * Skip all transient ptrack files, but do copy ptrack.map, since it may ++ * be successfully used immediately after backup. TODO: check, test? ++ */ ++ {"ptrack.map.mmap", false}, ++ {"ptrack.map.tmp", false}, ++ + /* end of list */ + {NULL, false} + }; +@@ -212,6 +219,11 @@ static const struct exclude_list_item noChecksumFiles[] = { + {"pg_filenode.map", false}, + {"pg_internal.init", true}, + {"PG_VERSION", false}, ++ ++ {"ptrack.map.mmap", false}, ++ {"ptrack.map", false}, ++ {"ptrack.map.tmp", false}, ++ + #ifdef EXEC_BACKEND + {"config_exec_params", true}, + #endif +diff --git a/src/backend/storage/file/copydir.c b/src/backend/storage/file/copydir.c +index 658fd95ba95..eee38eba176 100644 +--- a/src/backend/storage/file/copydir.c ++++ b/src/backend/storage/file/copydir.c +@@ -27,6 +27,8 @@ + #include "storage/copydir.h" + #include "storage/fd.h" + ++copydir_hook_type copydir_hook = NULL; ++ + /* + * copydir: copy a directory + * +@@ -78,6 +80,9 @@ copydir(char *fromdir, char *todir, bool recurse) + } + FreeDir(xldir); + ++ if (copydir_hook) ++ copydir_hook(todir); ++ + /* + * Be paranoid here and fsync all files to ensure the copy is really done. + * But if fsync is disabled, we're done. +diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c +index a0fc60b32a3..7f091951c0d 100644 +--- a/src/backend/storage/smgr/md.c ++++ b/src/backend/storage/smgr/md.c +@@ -87,6 +87,8 @@ typedef struct _MdfdVec + + static MemoryContext MdCxt; /* context for all MdfdVec objects */ + ++mdextend_hook_type mdextend_hook = NULL; ++mdwrite_hook_type mdwrite_hook = NULL; + + /* Populate a file tag describing an md.c segment file. 
*/ + #define INIT_MD_FILETAG(a,xx_rnode,xx_forknum,xx_segno) \ +@@ -484,6 +486,9 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, + register_dirty_segment(reln, forknum, v); + + Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE)); ++ ++ if (mdextend_hook) ++ mdextend_hook(reln->smgr_rnode, forknum, blocknum); + } + + /* +@@ -773,6 +778,9 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, + + if (!skipFsync && !SmgrIsTemp(reln)) + register_dirty_segment(reln, forknum, v); ++ ++ if (mdwrite_hook) ++ mdwrite_hook(reln->smgr_rnode, forknum, blocknum); + } + + /* +diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c +index e1fb6310038..76d75680b31 100644 +--- a/src/backend/storage/sync/sync.c ++++ b/src/backend/storage/sync/sync.c +@@ -81,6 +81,8 @@ static MemoryContext pendingOpsCxt; /* context for the above */ + static CycleCtr sync_cycle_ctr = 0; + static CycleCtr checkpoint_cycle_ctr = 0; + ++ProcessSyncRequests_hook_type ProcessSyncRequests_hook = NULL; ++ + /* Intervals for calling AbsorbSyncRequests */ + #define FSYNCS_PER_ABSORB 10 + #define UNLINKS_PER_ABSORB 10 +@@ -477,6 +479,9 @@ ProcessSyncRequests(void) + CheckpointStats.ckpt_longest_sync = longest; + CheckpointStats.ckpt_agg_sync_time = total_elapsed; + ++ if (ProcessSyncRequests_hook) ++ ProcessSyncRequests_hook(); ++ + /* Flag successful completion of ProcessSyncRequests */ + sync_in_progress = false; + } +diff --git a/src/bin/pg_checksums/pg_checksums.c b/src/bin/pg_checksums/pg_checksums.c +index 21dfe1b6ee5..266ac1ef40a 100644 +--- a/src/bin/pg_checksums/pg_checksums.c ++++ b/src/bin/pg_checksums/pg_checksums.c +@@ -118,6 +118,11 @@ static const struct exclude_list_item skip[] = { + {"pg_filenode.map", false}, + {"pg_internal.init", true}, + {"PG_VERSION", false}, ++ ++ {"ptrack.map.mmap", false}, ++ {"ptrack.map", false}, ++ {"ptrack.map.tmp", false}, ++ + #ifdef EXEC_BACKEND + {"config_exec_params", true}, + #endif +diff --git a/src/bin/pg_resetwal/pg_resetwal.c b/src/bin/pg_resetwal/pg_resetwal.c +index d4772a29650..3318f64359d 100644 +--- a/src/bin/pg_resetwal/pg_resetwal.c ++++ b/src/bin/pg_resetwal/pg_resetwal.c +@@ -85,6 +85,7 @@ static void RewriteControlFile(void); + static void FindEndOfXLOG(void); + static void KillExistingXLOG(void); + static void KillExistingArchiveStatus(void); ++static void KillExistingPtrack(void); + static void WriteEmptyXLOG(void); + static void usage(void); + +@@ -488,6 +489,7 @@ main(int argc, char *argv[]) + RewriteControlFile(); + KillExistingXLOG(); + KillExistingArchiveStatus(); ++ KillExistingPtrack(); + WriteEmptyXLOG(); + + printf(_("Write-ahead log reset\n")); +@@ -1036,6 +1038,41 @@ KillExistingArchiveStatus(void) + pg_fatal("could not close directory \"%s\": %m", ARCHSTATDIR); + } + ++/* ++ * Remove existing ptrack files ++ */ ++static void ++KillExistingPtrack(void) ++{ ++#define PTRACKDIR "global" ++ ++ DIR *xldir; ++ struct dirent *xlde; ++ char path[MAXPGPATH + sizeof(PTRACKDIR)]; ++ ++ xldir = opendir(PTRACKDIR); ++ if (xldir == NULL) ++ pg_fatal("could not open directory \"%s\": %m", PTRACKDIR); ++ ++ while (errno = 0, (xlde = readdir(xldir)) != NULL) ++ { ++ if (strcmp(xlde->d_name, "ptrack.map.mmap") == 0 || ++ strcmp(xlde->d_name, "ptrack.map") == 0 || ++ strcmp(xlde->d_name, "ptrack.map.tmp") == 0) ++ { ++ snprintf(path, sizeof(path), "%s/%s", PTRACKDIR, xlde->d_name); ++ if (unlink(path) < 0) ++ pg_fatal("could not delete file \"%s\": %m", path); ++ } ++ } ++ ++ if (errno) ++ 
pg_fatal("could not read directory \"%s\": %m", PTRACKDIR); ++ ++ if (closedir(xldir)) ++ pg_fatal("could not close directory \"%s\": %m", PTRACKDIR); ++} ++ + + /* + * Write an empty XLOG file, containing only the checkpoint record +diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c +index 62529310415..b496f54fb06 100644 +--- a/src/bin/pg_rewind/filemap.c ++++ b/src/bin/pg_rewind/filemap.c +@@ -157,6 +157,10 @@ static const struct exclude_list_item excludeFiles[] = + {"postmaster.pid", false}, + {"postmaster.opts", false}, + ++ {"ptrack.map.mmap", false}, ++ {"ptrack.map", false}, ++ {"ptrack.map.tmp", false}, ++ + /* end of list */ + {NULL, false} + }; +diff --git a/src/include/port/pg_crc32c.h b/src/include/port/pg_crc32c.h +index d7668651ba8..33994a27f5f 100644 +--- a/src/include/port/pg_crc32c.h ++++ b/src/include/port/pg_crc32c.h +@@ -69,7 +69,7 @@ extern pg_crc32c pg_comp_crc32c_armv8(pg_crc32c crc, const void *data, size_t le + #define FIN_CRC32C(crc) ((crc) ^= 0xFFFFFFFF) + + extern pg_crc32c pg_comp_crc32c_sb8(pg_crc32c crc, const void *data, size_t len); +-extern pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); ++extern PGDLLIMPORT pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); + + #ifdef USE_SSE42_CRC32C_WITH_RUNTIME_CHECK + extern pg_crc32c pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len); +diff --git a/src/include/storage/copydir.h b/src/include/storage/copydir.h +index 50a26edeb06..af1602f5154 100644 +--- a/src/include/storage/copydir.h ++++ b/src/include/storage/copydir.h +@@ -13,6 +13,9 @@ + #ifndef COPYDIR_H + #define COPYDIR_H + ++typedef void (*copydir_hook_type) (const char *path); ++extern PGDLLIMPORT copydir_hook_type copydir_hook; ++ + extern void copydir(char *fromdir, char *todir, bool recurse); + extern void copy_file(char *fromfile, char *tofile); + +diff --git a/src/include/storage/md.h b/src/include/storage/md.h +index ffffa40db71..3ff98e0bf01 100644 +--- a/src/include/storage/md.h ++++ b/src/include/storage/md.h +@@ -19,6 +19,13 @@ + #include "storage/smgr.h" + #include "storage/sync.h" + ++typedef void (*mdextend_hook_type) (RelFileNodeBackend smgr_rnode, ++ ForkNumber forknum, BlockNumber blocknum); ++extern PGDLLIMPORT mdextend_hook_type mdextend_hook; ++typedef void (*mdwrite_hook_type) (RelFileNodeBackend smgr_rnode, ++ ForkNumber forknum, BlockNumber blocknum); ++extern PGDLLIMPORT mdwrite_hook_type mdwrite_hook; ++ + /* md storage manager functionality */ + extern void mdinit(void); + extern void mdopen(SMgrRelation reln); +diff --git a/src/include/storage/sync.h b/src/include/storage/sync.h +index 9737e1eb67c..914ad86328f 100644 +--- a/src/include/storage/sync.h ++++ b/src/include/storage/sync.h +@@ -55,6 +55,9 @@ typedef struct FileTag + uint32 segno; + } FileTag; + ++typedef void (*ProcessSyncRequests_hook_type) (void); ++extern PGDLLIMPORT ProcessSyncRequests_hook_type ProcessSyncRequests_hook; ++ + extern void InitSync(void); + extern void SyncPreCheckpoint(void); + extern void SyncPostCheckpoint(void); From 76cf46c2af0c3db3f029e50e31aae5cea4f208ad Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 16 Aug 2022 15:59:01 +0300 Subject: [PATCH 29/65] [PGPRO-7068] live with postgresql master branch on 2022.08.16 Tags: ptrack --- datapagemap.h | 1 - engine.c | 10 +++++----- patches/master-ptrack-core.diff | 34 ++++++++++++++++----------------- ptrack.c | 20 +++++++++---------- ptrack.h | 18 +++++++++++++++++ 5 files changed, 50 insertions(+), 33 deletions(-) 
diff --git a/datapagemap.h b/datapagemap.h index 455705f..9b730da 100644 --- a/datapagemap.h +++ b/datapagemap.h @@ -9,7 +9,6 @@ #ifndef DATAPAGEMAP_H #define DATAPAGEMAP_H -#include "storage/relfilenode.h" #include "storage/block.h" struct datapagemap diff --git a/engine.c b/engine.c index ff4588e..2cd1908 100644 --- a/engine.c +++ b/engine.c @@ -516,22 +516,22 @@ ptrack_mark_file(Oid dbOid, Oid tablespaceOid, /* Mark of non-temporary relation */ rnode.backend = InvalidBackendId; - rnode.node.dbNode = dbOid; - rnode.node.spcNode = tablespaceOid; + nodeDb(nodeOf(rnode)) = dbOid; + nodeSpc(nodeOf(rnode)) = tablespaceOid; if (!parse_filename_for_nontemp_relation(filename, &oidchars, &forknum)) return; memcpy(oidbuf, filename, oidchars); oidbuf[oidchars] = '\0'; - rnode.node.relNode = atooid(oidbuf); + nodeRel(nodeOf(rnode)) = atooid(oidbuf); /* Compute number of blocks based on file size */ if (stat(filepath, &stat_buf) == 0) nblocks = stat_buf.st_size / BLCKSZ; elog(DEBUG1, "ptrack_mark_file %s, nblocks %u rnode db %u spc %u rel %u, forknum %d", - filepath, nblocks, rnode.node.dbNode, rnode.node.spcNode, rnode.node.relNode, forknum); + filepath, nblocks, nodeDb(nodeOf(rnode)), nodeSpc(nodeOf(rnode)), nodeRel(nodeOf(rnode)), forknum); for (blkno = 0; blkno < nblocks; blkno++) ptrack_mark_block(rnode, forknum, blkno); @@ -612,7 +612,7 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode, * relations */ return; - bid.relnode = smgr_rnode.node; + bid.relnode = nodeOf(smgr_rnode); bid.forknum = forknum; bid.blocknum = blocknum; diff --git a/patches/master-ptrack-core.diff b/patches/master-ptrack-core.diff index 20316e5..f1371a6 100644 --- a/patches/master-ptrack-core.diff +++ b/patches/master-ptrack-core.diff @@ -1,7 +1,7 @@ -diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c -index 5244823ff85..160889b4a04 100644 ---- a/src/backend/replication/basebackup.c -+++ b/src/backend/replication/basebackup.c +diff --git a/src/backend/backup/basebackup.c b/src/backend/backup/basebackup.c +index 715428029b3..81f3218540a 100644 +--- a/src/backend/backup/basebackup.c ++++ b/src/backend/backup/basebackup.c @@ -197,6 +197,13 @@ static const struct exclude_list_item excludeFiles[] = {"postmaster.pid", false}, {"postmaster.opts", false}, @@ -52,7 +52,7 @@ index 658fd95ba95..eee38eba176 100644 * Be paranoid here and fsync all files to ensure the copy is really done. * But if fsync is disabled, we're done. diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c -index 43edaf5d873..bbaf7500944 100644 +index 3deac496eed..07c4ee2ba03 100644 --- a/src/backend/storage/smgr/md.c +++ b/src/backend/storage/smgr/md.c @@ -87,6 +87,8 @@ typedef struct _MdfdVec @@ -63,29 +63,29 @@ index 43edaf5d873..bbaf7500944 100644 +mdwrite_hook_type mdwrite_hook = NULL; /* Populate a file tag describing an md.c segment file. 
*/ - #define INIT_MD_FILETAG(a,xx_rnode,xx_forknum,xx_segno) \ -@@ -467,6 +469,9 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, + #define INIT_MD_FILETAG(a,xx_rlocator,xx_forknum,xx_segno) \ +@@ -484,6 +486,9 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, register_dirty_segment(reln, forknum, v); Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE)); + + if (mdextend_hook) -+ mdextend_hook(reln->smgr_rnode, forknum, blocknum); ++ mdextend_hook(reln->smgr_rlocator, forknum, blocknum); } /* -@@ -756,6 +761,9 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, +@@ -773,6 +778,9 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, if (!skipFsync && !SmgrIsTemp(reln)) register_dirty_segment(reln, forknum, v); + + if (mdwrite_hook) -+ mdwrite_hook(reln->smgr_rnode, forknum, blocknum); ++ mdwrite_hook(reln->smgr_rlocator, forknum, blocknum); } /* diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c -index e1fb6310038..76d75680b31 100644 +index 9d6a9e91090..990d0722229 100644 --- a/src/backend/storage/sync/sync.c +++ b/src/backend/storage/sync/sync.c @@ -81,6 +81,8 @@ static MemoryContext pendingOpsCxt; /* context for the above */ @@ -108,7 +108,7 @@ index e1fb6310038..76d75680b31 100644 sync_in_progress = false; } diff --git a/src/bin/pg_checksums/pg_checksums.c b/src/bin/pg_checksums/pg_checksums.c -index 21dfe1b6ee5..266ac1ef40a 100644 +index 324ccf77834..e82cae5f325 100644 --- a/src/bin/pg_checksums/pg_checksums.c +++ b/src/bin/pg_checksums/pg_checksums.c @@ -118,6 +118,11 @@ static const struct exclude_list_item skip[] = { @@ -186,7 +186,7 @@ index d4772a29650..3318f64359d 100644 /* * Write an empty XLOG file, containing only the checkpoint record diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c -index 62529310415..b496f54fb06 100644 +index 269ed6446e6..6318a8c1f55 100644 --- a/src/bin/pg_rewind/filemap.c +++ b/src/bin/pg_rewind/filemap.c @@ -157,6 +157,10 @@ static const struct exclude_list_item excludeFiles[] = @@ -228,17 +228,17 @@ index 50a26edeb06..af1602f5154 100644 extern void copy_file(char *fromfile, char *tofile); diff --git a/src/include/storage/md.h b/src/include/storage/md.h -index ffffa40db71..3ff98e0bf01 100644 +index 10aa1b0109b..1415675824e 100644 --- a/src/include/storage/md.h +++ b/src/include/storage/md.h @@ -19,6 +19,13 @@ #include "storage/smgr.h" #include "storage/sync.h" -+typedef void (*mdextend_hook_type) (RelFileNodeBackend smgr_rnode, ++typedef void (*mdextend_hook_type) (RelFileLocatorBackend smgr_rlocator, + ForkNumber forknum, BlockNumber blocknum); +extern PGDLLIMPORT mdextend_hook_type mdextend_hook; -+typedef void (*mdwrite_hook_type) (RelFileNodeBackend smgr_rnode, ++typedef void (*mdwrite_hook_type) (RelFileLocatorBackend smgr_rlocator, + ForkNumber forknum, BlockNumber blocknum); +extern PGDLLIMPORT mdwrite_hook_type mdwrite_hook; + @@ -246,7 +246,7 @@ index ffffa40db71..3ff98e0bf01 100644 extern void mdinit(void); extern void mdopen(SMgrRelation reln); diff --git a/src/include/storage/sync.h b/src/include/storage/sync.h -index 9737e1eb67c..914ad86328f 100644 +index 049af878dec..7689d49a24e 100644 --- a/src/include/storage/sync.h +++ b/src/include/storage/sync.h @@ -55,6 +55,9 @@ typedef struct FileTag diff --git a/ptrack.c b/ptrack.c index c81eb17..a3503c7 100644 --- a/ptrack.c +++ b/ptrack.c @@ -53,8 +53,8 @@ #include "utils/pg_lsn.h" #include "datapagemap.h" -#include "engine.h" #include "ptrack.h" 
+#include "engine.h" PG_MODULE_MAGIC; @@ -356,16 +356,16 @@ ptrack_gather_filelist(List **filelist, char *path, Oid spcOid, Oid dbOid) memcpy(oidbuf, de->d_name, oidchars); oidbuf[oidchars] = '\0'; - pfl->relnode.relNode = atooid(oidbuf); - pfl->relnode.dbNode = dbOid; - pfl->relnode.spcNode = spcOid == InvalidOid ? DEFAULTTABLESPACE_OID : spcOid; - pfl->path = GetRelationPath(dbOid, pfl->relnode.spcNode, - pfl->relnode.relNode, InvalidBackendId, pfl->forknum); + nodeRel(pfl->relnode) = atooid(oidbuf); + nodeDb(pfl->relnode) = dbOid; + nodeSpc(pfl->relnode) = spcOid == InvalidOid ? DEFAULTTABLESPACE_OID : spcOid; + pfl->path = GetRelationPath(dbOid, nodeSpc(pfl->relnode), + nodeRel(pfl->relnode), InvalidBackendId, pfl->forknum); *filelist = lappend(*filelist, pfl); elog(DEBUG3, "ptrack: added file %s of rel %u to file list", - pfl->path, pfl->relnode.relNode); + pfl->path, nodeRel(pfl->relnode)); } } else if (S_ISDIR(fst.st_mode)) @@ -426,9 +426,9 @@ ptrack_filelist_getnext(PtScanCtx * ctx) ctx->relpath = pfl->path; } - ctx->bid.relnode.spcNode = pfl->relnode.spcNode; - ctx->bid.relnode.dbNode = pfl->relnode.dbNode; - ctx->bid.relnode.relNode = pfl->relnode.relNode; + nodeSpc(ctx->bid.relnode) = nodeSpc(pfl->relnode); + nodeDb(ctx->bid.relnode) = nodeDb(pfl->relnode); + nodeRel(ctx->bid.relnode) = nodeRel(pfl->relnode); ctx->bid.forknum = pfl->forknum; ctx->bid.blocknum = 0; diff --git a/ptrack.h b/ptrack.h index a5c1f01..e42bfb8 100644 --- a/ptrack.h +++ b/ptrack.h @@ -18,7 +18,11 @@ #include "access/xlogdefs.h" #include "storage/block.h" #include "storage/buf.h" +#if PG_VERSION_NUM >= 160000 +#include "storage/relfilelocator.h" +#else #include "storage/relfilenode.h" +#endif #include "storage/smgr.h" #include "utils/relcache.h" @@ -29,6 +33,20 @@ /* Last ptrack version that changed map file format */ #define PTRACK_MAP_FILE_VERSION_NUM 220 +#if PG_VERSION_NUM >= 160000 +#define RelFileNode RelFileLocator +#define RelFileNodeBackend RelFileLocatorBackend +#define nodeDb(node) (node).dbOid +#define nodeSpc(node) (node).spcOid +#define nodeRel(node) (node).relNumber +#define nodeOf(ndbck) (ndbck).locator +#else +#define nodeDb(node) (node).dbNode +#define nodeSpc(node) (node).spcNode +#define nodeRel(node) (node).relNode +#define nodeOf(ndbck) (ndbck).node +#endif + /* * Structure identifying block on the disk. */ From 4a3d24b374a50c4b3c2acbfd62926fe27600ec6f Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Fri, 19 Aug 2022 12:44:13 +0300 Subject: [PATCH 30/65] PGPRO-6817: include common/cfs_common.h for Postgres Pro Enterprise The header file replication/basebackup.h has been moved to backup/basebackup.h in PostgreSQL 15+, see the commmits f88798c098d2afd5223e1ca5c9d107cc18864fcc in PostgreSQL 15 and a8c012869763c711abc9085f54b2a100b60a85fa in PostgreSQL 16devel. So it's easier to include the header file common/cfs_common.h directly. --- ptrack.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ptrack.c b/ptrack.c index c81eb17..2b71878 100644 --- a/ptrack.c +++ b/ptrack.c @@ -38,7 +38,7 @@ #include "nodes/pg_list.h" #ifdef PGPRO_EE /* For file_is_in_cfs_tablespace() only. 
*/ -#include "replication/basebackup.h" +#include "common/cfs_common.h" #endif #include "storage/copydir.h" #include "storage/ipc.h" From 1accef575cb19994e5b706f8d6b2c6b5dff9467f Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Fri, 19 Aug 2022 12:44:13 +0300 Subject: [PATCH 31/65] PGPRO-6817: include common/cfs_common.h for Postgres Pro Enterprise The header file replication/basebackup.h has been moved to backup/basebackup.h in PostgreSQL 15+, see the commmits f88798c098d2afd5223e1ca5c9d107cc18864fcc in PostgreSQL 15 and a8c012869763c711abc9085f54b2a100b60a85fa in PostgreSQL 16devel. So it's easier to include the header file common/cfs_common.h directly. --- ptrack.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ptrack.c b/ptrack.c index a3503c7..3152315 100644 --- a/ptrack.c +++ b/ptrack.c @@ -38,7 +38,7 @@ #include "nodes/pg_list.h" #ifdef PGPRO_EE /* For file_is_in_cfs_tablespace() only. */ -#include "replication/basebackup.h" +#include "common/cfs_common.h" #endif #include "storage/copydir.h" #include "storage/ipc.h" From db6c52296f3e0727353e8d80c85519f8212a43de Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Sat, 15 Oct 2022 18:55:36 +0300 Subject: [PATCH 32/65] [PGPRO-7068] include pg_crc32.c explicitely Tags: ptrack --- ptrack.c | 1 + 1 file changed, 1 insertion(+) diff --git a/ptrack.c b/ptrack.c index 3152315..d0cbb4a 100644 --- a/ptrack.c +++ b/ptrack.c @@ -40,6 +40,7 @@ /* For file_is_in_cfs_tablespace() only. */ #include "common/cfs_common.h" #endif +#include "port/pg_crc32c.h" #include "storage/copydir.h" #include "storage/ipc.h" #include "storage/lmgr.h" From 2ae320a30d617128d96db32a861c6d5a46397f33 Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Tue, 4 Oct 2022 12:18:52 +0300 Subject: [PATCH 33/65] [PBCKP-278] ptrack adapted to hadling cfs relations tags: cfs, ptrack --- engine.c | 76 +++++++++++++++++++ engine.h | 6 ++ ptrack.c | 42 ++++++++--- ptrack.h | 3 + t/002_cfs_compatibility.pl | 148 +++++++++++++++++++++++++++++++++++++ 5 files changed, 264 insertions(+), 11 deletions(-) create mode 100644 t/002_cfs_compatibility.pl diff --git a/engine.c b/engine.c index 2cd1908..23c3ab8 100644 --- a/engine.c +++ b/engine.c @@ -36,6 +36,10 @@ #include "catalog/pg_tablespace.h" #include "miscadmin.h" #include "port/pg_crc32c.h" +#ifdef PGPRO_EE +/* For file_is_in_cfs_tablespace() only. */ +#include "common/cfs_common.h" +#endif #include "storage/copydir.h" #if PG_VERSION_NUM >= 120000 #include "storage/md.h" @@ -91,6 +95,44 @@ ptrack_write_chunk(int fd, pg_crc32c *crc, char *chunk, size_t size) } } +/* + * Determines whether given file path is a path to a cfm file. + */ +bool +is_cfm_file_path(const char *filepath) { + ssize_t len = strlen(filepath); + + // For this length checks we assume that the filename is at least + // 1 character longer than the corresponding extension ".cfm": + // strlen(".cfm") == 4 therefore we assume that the filename can't be + // shorter than 5 bytes, for example: "5.cfm". + return strlen(filepath) >= 5 && strcmp(&filepath[len-4], ".cfm") == 0; +} + +#ifdef PGPRO_EE +/* + * Determines the relation file size specified by fullpath as if it + * was not compressed. 
+ */ +off_t +get_cfs_relation_file_decompressed_size(RelFileNodeBackend rnode, const char *fullpath, ForkNumber forknum) { + File fd; + int compressor; + off_t size; + + compressor = md_get_compressor_internal(rnode.node, rnode.backend, forknum); + fd = PathNameOpenFile(fullpath, O_RDWR | PG_BINARY, compressor); + + if(fd < 0) + return (off_t)-1; + + size = FileSize(fd); + FileClose(fd); + + return size; +} +#endif + /* * Delete ptrack files when ptrack is disabled. * @@ -498,8 +540,13 @@ assign_ptrack_map_size(int newval, void *extra) * For use in functions that copy directories bypassing buffer manager. */ static void +#ifdef PGPRO_EE +ptrack_mark_file(Oid dbOid, Oid tablespaceOid, + const char *filepath, const char *filename, bool is_cfs) +#else ptrack_mark_file(Oid dbOid, Oid tablespaceOid, const char *filepath, const char *filename) +#endif { RelFileNodeBackend rnode; ForkNumber forknum; @@ -508,6 +555,9 @@ ptrack_mark_file(Oid dbOid, Oid tablespaceOid, struct stat stat_buf; int oidchars; char oidbuf[OIDCHARS + 1]; +#ifdef PGPRO_EE + off_t rel_size; +#endif /* Do not track temporary relations */ if (looks_like_temp_rel_name(filename)) @@ -526,6 +576,21 @@ ptrack_mark_file(Oid dbOid, Oid tablespaceOid, oidbuf[oidchars] = '\0'; nodeRel(nodeOf(rnode)) = atooid(oidbuf); +#ifdef PGPRO_EE + // if current tablespace is cfs-compressed and md_get_compressor_internal + // returns the type of the compressing algorithm for filepath, then it + // needs to be de-compressed to obtain its size + if(is_cfs && md_get_compressor_internal(rnode.node, rnode.backend, forknum) != 0) { + rel_size = get_cfs_relation_file_decompressed_size(rnode, filepath, forknum); + + if(rel_size == (off_t)-1) { + elog(WARNING, "ptrack: could not open cfs-compressed relation file: %s", filepath); + return; + } + + nblocks = rel_size / BLCKSZ; + } else +#endif /* Compute number of blocks based on file size */ if (stat(filepath, &stat_buf) == 0) nblocks = stat_buf.st_size / BLCKSZ; @@ -546,6 +611,9 @@ ptrack_walkdir(const char *path, Oid tablespaceOid, Oid dbOid) { DIR *dir; struct dirent *de; +#ifdef PGPRO_EE + bool is_cfs; +#endif /* Do not walk during bootstrap and if ptrack is disabled */ if (ptrack_map_size == 0 @@ -554,6 +622,10 @@ ptrack_walkdir(const char *path, Oid tablespaceOid, Oid dbOid) || InitializingParallelWorker) return; +#ifdef PGPRO_EE + is_cfs = file_is_in_cfs_tablespace(path); +#endif + dir = AllocateDir(path); while ((de = ReadDirExtended(dir, path, LOG)) != NULL) @@ -581,7 +653,11 @@ ptrack_walkdir(const char *path, Oid tablespaceOid, Oid dbOid) } if (S_ISREG(fst.st_mode)) +#ifdef PGPRO_EE + ptrack_mark_file(dbOid, tablespaceOid, subpath, de->d_name, is_cfs); +#else ptrack_mark_file(dbOid, tablespaceOid, subpath, de->d_name); +#endif } FreeDir(dir); /* we ignore any error here */ diff --git a/engine.h b/engine.h index 5daf69a..4797a54 100644 --- a/engine.h +++ b/engine.h @@ -111,4 +111,10 @@ extern void ptrack_walkdir(const char *path, Oid tablespaceOid, Oid dbOid); extern void ptrack_mark_block(RelFileNodeBackend smgr_rnode, ForkNumber forkno, BlockNumber blkno); +extern bool is_cfm_file_path(const char *path); +#ifdef PGPRO_EE +extern off_t get_cfs_relation_file_decompressed_size(RelFileNodeBackend rnode, + const char *fullpath, ForkNumber forknum); +#endif + #endif /* PTRACK_ENGINE_H */ diff --git a/ptrack.c b/ptrack.c index d0cbb4a..933227a 100644 --- a/ptrack.c +++ b/ptrack.c @@ -251,14 +251,6 @@ ptrack_copydir_hook(const char *path) elog(DEBUG1, "ptrack_copydir_hook: spcOid %u, dbOid %u", spcOid, 
dbOid); -#ifdef PGPRO_EE - /* - * Currently, we do not track files from compressed tablespaces in ptrack. - */ - if (file_is_in_cfs_tablespace(path)) - elog(DEBUG1, "ptrack_copydir_hook: skipping changes tracking in the CFS tablespace %u", spcOid); - else -#endif ptrack_walkdir(path, spcOid, dbOid); if (prev_copydir_hook) @@ -302,6 +294,11 @@ ptrack_gather_filelist(List **filelist, char *path, Oid spcOid, Oid dbOid) { DIR *dir; struct dirent *de; +#ifdef PGPRO_EE + bool is_cfs; + + is_cfs = file_is_in_cfs_tablespace(path); +#endif dir = AllocateDir(path); @@ -315,7 +312,8 @@ ptrack_gather_filelist(List **filelist, char *path, Oid spcOid, Oid dbOid) if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0 || - looks_like_temp_rel_name(de->d_name)) + looks_like_temp_rel_name(de->d_name) || + is_cfm_file_path(de->d_name)) continue; snprintf(subpath, sizeof(subpath), "%s/%s", path, de->d_name); @@ -362,6 +360,10 @@ ptrack_gather_filelist(List **filelist, char *path, Oid spcOid, Oid dbOid) nodeSpc(pfl->relnode) = spcOid == InvalidOid ? DEFAULTTABLESPACE_OID : spcOid; pfl->path = GetRelationPath(dbOid, nodeSpc(pfl->relnode), nodeRel(pfl->relnode), InvalidBackendId, pfl->forknum); +#ifdef PGPRO_EE + pfl->is_cfs_compressed = is_cfs + && md_get_compressor_internal(pfl->relnode, InvalidBackendId, pfl->forknum) != 0; +#endif *filelist = lappend(*filelist, pfl); @@ -403,6 +405,10 @@ ptrack_filelist_getnext(PtScanCtx * ctx) ListCell *cell; char *fullpath; struct stat fst; + off_t rel_st_size = 0; +#ifdef PGPRO_EE + RelFileNodeBackend rnodebackend; +#endif /* No more file in the list */ if (list_length(ctx->filelist) == 0) @@ -449,14 +455,28 @@ ptrack_filelist_getnext(PtScanCtx * ctx) return ptrack_filelist_getnext(ctx); } +#ifdef PGPRO_EE + rnodebackend.node = ctx->bid.relnode; + rnodebackend.backend = InvalidBackendId; + + if(pfl->is_cfs_compressed) { + rel_st_size = get_cfs_relation_file_decompressed_size(rnodebackend, fullpath, pfl->forknum); + + // Could not open fullpath for some reason, trying the next file. + if(rel_st_size == -1) + return ptrack_filelist_getnext(ctx); + } else +#endif + rel_st_size = fst.st_size; + if (pfl->segno > 0) { - ctx->relsize = pfl->segno * RELSEG_SIZE + fst.st_size / BLCKSZ; + ctx->relsize = pfl->segno * RELSEG_SIZE + rel_st_size / BLCKSZ; ctx->bid.blocknum = pfl->segno * RELSEG_SIZE; } else /* Estimate relsize as size of first segment in blocks */ - ctx->relsize = fst.st_size / BLCKSZ; + ctx->relsize = rel_st_size / BLCKSZ; elog(DEBUG3, "ptrack: got file %s with size %u from the file list", pfl->path, ctx->relsize); diff --git a/ptrack.h b/ptrack.h index e42bfb8..eec03bb 100644 --- a/ptrack.h +++ b/ptrack.h @@ -78,6 +78,9 @@ typedef struct PtrackFileList_i ForkNumber forknum; int segno; char *path; +#ifdef PGPRO_EE + bool is_cfs_compressed; +#endif } PtrackFileList_i; #endif /* PTRACK_H */ diff --git a/t/002_cfs_compatibility.pl b/t/002_cfs_compatibility.pl new file mode 100644 index 0000000..31e31be --- /dev/null +++ b/t/002_cfs_compatibility.pl @@ -0,0 +1,148 @@ +use strict; +use warnings; +use Test::More; + +my $pg_15_modules; + +BEGIN +{ + $pg_15_modules = eval + { + require PostgreSQL::Test::Cluster; + require PostgreSQL::Test::Utils; + return 1; + }; + + unless (defined $pg_15_modules) + { + $pg_15_modules = 0; + + require PostgresNode; + require TestLib; + } +} + +note('PostgreSQL 15 modules are used: ' . ($pg_15_modules ? 'yes' : 'no')); + +my $node; +my $res_stdout; +my $res_stderr; + +# Create node. 
+# Older versions of PostgreSQL modules use get_new_node function. +# Newer use standard perl object constructor syntax. +eval +{ + if ($pg_15_modules) + { + $node = PostgreSQL::Test::Cluster->new("node"); + } + else + { + $node = PostgresNode::get_new_node("node"); + } +}; + +note "Test for handling a ptrack map in compressed relations"; + +my $psql_stdout; + +# Starting the node +$node->init; + +# Could not load ptrack module after postmaster start + +my $cfs_tblspc1 = $node->basedir."/cfs_tblspc1"; +my $cfs_tblspc2 = $node->basedir."/cfs_tblspc2"; +mkdir $cfs_tblspc1 or die; +mkdir $cfs_tblspc2 or die; +my $no_cfs_tblspc1 = $node->basedir."/no_cfs_tblspc1"; +my $no_cfs_tblspc2 = $node->basedir."/no_cfs_tblspc2"; +mkdir $no_cfs_tblspc1 or die; +mkdir $no_cfs_tblspc2 or die; + +$node->append_conf('postgresql.conf', qq{ + shared_preload_libraries = 'ptrack' + ptrack.map_size = 16 + wal_level = 'replica' +}); + +$node->start; + +# check cfs availability first +my $cfs_available = $node->safe_psql('postgres', + "select count(oid) from pg_proc where proname = 'cfs_version'"); + +if($cfs_available eq "0") { + $node->stop; + plan skip_all => "CFS is not supported by this PostgreSQL build"; +} else { + plan tests => 2; +} + +# Creating content +$node->safe_psql('postgres', qq| + create tablespace cfs_tblspc1 location '$cfs_tblspc1' with (compression=true); + create tablespace cfs_tblspc2 location '$cfs_tblspc2' with (compression=true); + create tablespace no_cfs_tblspc1 location '$no_cfs_tblspc1'; + create tablespace no_cfs_tblspc2 location '$no_cfs_tblspc2'; + + create database testing_cfs tablespace cfs_tblspc1; + create database testing_no_cfs tablespace no_cfs_tblspc1; +|); + +$node->safe_psql('testing_cfs', qq{ + create table testing(i int, text varchar); + insert into testing select 1, '1111111111111111111111111' from generate_series(1,10000000); +}); + +$node->safe_psql('testing_no_cfs', qq{ + create table testing_no(i int, text varchar); + insert into testing_no select 1, '1111111111111111111111111' from generate_series(1,10000000); +}); + +# creating ptrack +$node->safe_psql('postgres', "create extension ptrack"); + +# obtaining init lsn for further usage in ptrack_get_pagemapset +my $init_lsn = $node->safe_psql('postgres', 'select ptrack_init_lsn()'); + +# forcing copydir() hook by altering dbs tablespaces +$node->safe_psql('postgres', "alter database testing_cfs set tablespace cfs_tblspc2;"); +$node->safe_psql('postgres', "alter database testing_no_cfs set tablespace no_cfs_tblspc2;"); + +# obtaining relpath for cfs table +my $cfs_relpath = $node->safe_psql('testing_cfs', "select pg_relation_filepath('testing');"); + +# obtaining relpath for no-cfs table +my $no_cfs_relpath = $node->safe_psql('testing_no_cfs', "select pg_relation_filepath('testing_no');"); + +# select the pagecount sums and compare them (should be equal) +my $pagecount_sum_cfs = $node->safe_psql('postgres', + "select sum(pagecount) from ptrack_get_pagemapset('$init_lsn'::pg_lsn) where path like '%$cfs_relpath%';"); +my $pagecount_sum_no_cfs = $node->safe_psql('postgres', + "select sum(pagecount) from ptrack_get_pagemapset('$init_lsn'::pg_lsn) where path like '%$no_cfs_relpath%';"); + +is($pagecount_sum_cfs, $pagecount_sum_no_cfs, "pagecount sums don't match"); + +# forcing copydir() hook by altering dbs tablespaces back +$node->safe_psql('postgres', "alter database testing_cfs set tablespace cfs_tblspc1;"); +$node->safe_psql('postgres', "alter database testing_no_cfs set tablespace no_cfs_tblspc1;"); + +# obtaining 
new relpath for cfs table +$cfs_relpath = $node->safe_psql('testing_cfs', "select pg_relation_filepath('testing');"); + +# obtaining new relpath for no-cfs table +$no_cfs_relpath = $node->safe_psql('testing_no_cfs', "select pg_relation_filepath('testing_no');"); + +# select the pagecount sums and compare them (again, they should be equal) +$pagecount_sum_cfs = $node->safe_psql('postgres', + "select sum(pagecount) from ptrack_get_pagemapset('$init_lsn'::pg_lsn) where path like '%$cfs_relpath%';"); +$pagecount_sum_no_cfs = $node->safe_psql('postgres', + "select sum(pagecount) from ptrack_get_pagemapset('$init_lsn'::pg_lsn) where path like '%$no_cfs_relpath%';"); + +is($pagecount_sum_cfs, $pagecount_sum_no_cfs, "pagecount sums don't match"); + + +$node->stop; + From b2f2fdf7a46ad8d4487d8bc1e6859596a596861b Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Fri, 11 Nov 2022 12:36:47 +0300 Subject: [PATCH 34/65] [PBCKP-334] Unified segno code for any fork In fact, any fork (not only MAIN_FORKNUM) can have multiple segments. tags: ptrack --- ptrack.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/ptrack.c b/ptrack.c index 2b71878..15d5d86 100644 --- a/ptrack.c +++ b/ptrack.c @@ -345,15 +345,11 @@ ptrack_gather_filelist(List **filelist, char *path, Oid spcOid, Oid dbOid) if (!parse_filename_for_nontemp_relation(de->d_name, &oidchars, &pfl->forknum)) continue; - /* Parse segno for main fork */ - if (pfl->forknum == MAIN_FORKNUM) - { - segpath = strstr(de->d_name, "."); - pfl->segno = segpath != NULL ? atoi(segpath + 1) : 0; - } - else - pfl->segno = 0; + /* Parse segno */ + segpath = strstr(de->d_name, "."); + pfl->segno = segpath != NULL ? atoi(segpath + 1) : 0; + /* Fill the pfl in */ memcpy(oidbuf, de->d_name, oidchars); oidbuf[oidchars] = '\0'; pfl->relnode.relNode = atooid(oidbuf); From 93195400e1a91908648442a7bc3a1dc657121a5a Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Fri, 2 Dec 2022 20:32:50 +0300 Subject: [PATCH 35/65] [PBCKP-278] CFS is supported only by EE12+ --- engine.c | 14 +++++++------- engine.h | 3 +++ ptrack.c | 8 ++++---- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/engine.c b/engine.c index 23c3ab8..72b7e7b 100644 --- a/engine.c +++ b/engine.c @@ -109,7 +109,7 @@ is_cfm_file_path(const char *filepath) { return strlen(filepath) >= 5 && strcmp(&filepath[len-4], ".cfm") == 0; } -#ifdef PGPRO_EE +#if CFS_SUPPORT /* * Determines the relation file size specified by fullpath as if it * was not compressed. @@ -540,7 +540,7 @@ assign_ptrack_map_size(int newval, void *extra) * For use in functions that copy directories bypassing buffer manager. 
*/ static void -#ifdef PGPRO_EE +#if CFS_SUPPORT ptrack_mark_file(Oid dbOid, Oid tablespaceOid, const char *filepath, const char *filename, bool is_cfs) #else @@ -555,7 +555,7 @@ ptrack_mark_file(Oid dbOid, Oid tablespaceOid, struct stat stat_buf; int oidchars; char oidbuf[OIDCHARS + 1]; -#ifdef PGPRO_EE +#if CFS_SUPPORT off_t rel_size; #endif @@ -576,7 +576,7 @@ ptrack_mark_file(Oid dbOid, Oid tablespaceOid, oidbuf[oidchars] = '\0'; nodeRel(nodeOf(rnode)) = atooid(oidbuf); -#ifdef PGPRO_EE +#if CFS_SUPPORT // if current tablespace is cfs-compressed and md_get_compressor_internal // returns the type of the compressing algorithm for filepath, then it // needs to be de-compressed to obtain its size @@ -611,7 +611,7 @@ ptrack_walkdir(const char *path, Oid tablespaceOid, Oid dbOid) { DIR *dir; struct dirent *de; -#ifdef PGPRO_EE +#if CFS_SUPPORT bool is_cfs; #endif @@ -622,7 +622,7 @@ ptrack_walkdir(const char *path, Oid tablespaceOid, Oid dbOid) || InitializingParallelWorker) return; -#ifdef PGPRO_EE +#if CFS_SUPPORT is_cfs = file_is_in_cfs_tablespace(path); #endif @@ -653,7 +653,7 @@ ptrack_walkdir(const char *path, Oid tablespaceOid, Oid dbOid) } if (S_ISREG(fst.st_mode)) -#ifdef PGPRO_EE +#if CFS_SUPPORT ptrack_mark_file(dbOid, tablespaceOid, subpath, de->d_name, is_cfs); #else ptrack_mark_file(dbOid, tablespaceOid, subpath, de->d_name); diff --git a/engine.h b/engine.h index 4797a54..d76dc25 100644 --- a/engine.h +++ b/engine.h @@ -44,6 +44,9 @@ #define PTRACK_MAGIC "ptk" #define PTRACK_MAGIC_SIZE 4 +/* CFS support macro */ +#define CFS_SUPPORT (defined(PGPRO_EE) && PG_VERSION_NUM >= 120000) + /* * Header of ptrack map. */ diff --git a/ptrack.c b/ptrack.c index 933227a..ab64990 100644 --- a/ptrack.c +++ b/ptrack.c @@ -294,7 +294,7 @@ ptrack_gather_filelist(List **filelist, char *path, Oid spcOid, Oid dbOid) { DIR *dir; struct dirent *de; -#ifdef PGPRO_EE +#if CFS_SUPPORT bool is_cfs; is_cfs = file_is_in_cfs_tablespace(path); @@ -360,7 +360,7 @@ ptrack_gather_filelist(List **filelist, char *path, Oid spcOid, Oid dbOid) nodeSpc(pfl->relnode) = spcOid == InvalidOid ? 
DEFAULTTABLESPACE_OID : spcOid; pfl->path = GetRelationPath(dbOid, nodeSpc(pfl->relnode), nodeRel(pfl->relnode), InvalidBackendId, pfl->forknum); -#ifdef PGPRO_EE +#if CFS_SUPPORT pfl->is_cfs_compressed = is_cfs && md_get_compressor_internal(pfl->relnode, InvalidBackendId, pfl->forknum) != 0; #endif @@ -406,7 +406,7 @@ ptrack_filelist_getnext(PtScanCtx * ctx) char *fullpath; struct stat fst; off_t rel_st_size = 0; -#ifdef PGPRO_EE +#if CFS_SUPPORT RelFileNodeBackend rnodebackend; #endif @@ -455,7 +455,7 @@ ptrack_filelist_getnext(PtScanCtx * ctx) return ptrack_filelist_getnext(ctx); } -#ifdef PGPRO_EE +#if CFS_SUPPORT rnodebackend.node = ctx->bid.relnode; rnodebackend.backend = InvalidBackendId; From 2810f4c10bc1f0eb77e234ca279afb95ac68fad1 Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Fri, 2 Dec 2022 21:29:03 +0300 Subject: [PATCH 36/65] [PBCKP-278] Full 16devel compatibility (so far) --- engine.c | 4 ++-- ptrack.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/engine.c b/engine.c index 72b7e7b..bed5e42 100644 --- a/engine.c +++ b/engine.c @@ -120,7 +120,7 @@ get_cfs_relation_file_decompressed_size(RelFileNodeBackend rnode, const char *fu int compressor; off_t size; - compressor = md_get_compressor_internal(rnode.node, rnode.backend, forknum); + compressor = md_get_compressor_internal(nodeOf(rnode), rnode.backend, forknum); fd = PathNameOpenFile(fullpath, O_RDWR | PG_BINARY, compressor); if(fd < 0) @@ -580,7 +580,7 @@ ptrack_mark_file(Oid dbOid, Oid tablespaceOid, // if current tablespace is cfs-compressed and md_get_compressor_internal // returns the type of the compressing algorithm for filepath, then it // needs to be de-compressed to obtain its size - if(is_cfs && md_get_compressor_internal(rnode.node, rnode.backend, forknum) != 0) { + if(is_cfs && md_get_compressor_internal(nodeOf(rnode), rnode.backend, forknum) != 0) { rel_size = get_cfs_relation_file_decompressed_size(rnode, filepath, forknum); if(rel_size == (off_t)-1) { diff --git a/ptrack.c b/ptrack.c index ab64990..bde2d49 100644 --- a/ptrack.c +++ b/ptrack.c @@ -456,7 +456,7 @@ ptrack_filelist_getnext(PtScanCtx * ctx) } #if CFS_SUPPORT - rnodebackend.node = ctx->bid.relnode; + nodeOf(rnodebackend) = ctx->bid.relnode; rnodebackend.backend = InvalidBackendId; if(pfl->is_cfs_compressed) { From d3b427bbecd5720f442c0a54aa75940d44365d40 Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Mon, 5 Dec 2022 17:28:54 +0300 Subject: [PATCH 37/65] [PBCKP-278] minor macro fix --- engine.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/engine.h b/engine.h index d76dc25..3ba868c 100644 --- a/engine.h +++ b/engine.h @@ -45,7 +45,9 @@ #define PTRACK_MAGIC_SIZE 4 /* CFS support macro */ -#define CFS_SUPPORT (defined(PGPRO_EE) && PG_VERSION_NUM >= 120000) +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 120000 +#define CFS_SUPPORT 1 +#endif /* * Header of ptrack map. From 611b1a01ed8cef04aa0a3f1811738dc444935477 Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Wed, 14 Dec 2022 11:53:01 +0300 Subject: [PATCH 38/65] Remove AssertArg See the commit b1099eca8f38ff5cfaf0901bb91cb6a22f909bc6 (Remove AssertArg and AssertState) in PostgreSQL 16. --- engine.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine.c b/engine.c index bed5e42..329f13a 100644 --- a/engine.c +++ b/engine.c @@ -63,7 +63,7 @@ ptrack_file_exists(const char *path) { struct stat st; - AssertArg(path != NULL); + Assert(path != NULL); if (stat(path, &st) == 0) return S_ISDIR(st.st_mode) ? 
false : true; From 920f4eca271e5a4555d7c6e24ae1ff891f93007d Mon Sep 17 00:00:00 2001 From: waaeer Date: Thu, 29 Dec 2022 13:20:37 +0300 Subject: [PATCH 39/65] Bump year in license --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 057f651..c1393f3 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ PostgreSQL License -Copyright (c) 2019-2020, Postgres Professional +Copyright (c) 2019-2023, Postgres Professional Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is From ffa2b016453e3e7c0504fb37ccb60f0351494a89 Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Tue, 10 Jan 2023 10:45:01 +0300 Subject: [PATCH 40/65] [PBCKP-278] test fix --- t/002_cfs_compatibility.pl | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/t/002_cfs_compatibility.pl b/t/002_cfs_compatibility.pl index 31e31be..1772f3e 100644 --- a/t/002_cfs_compatibility.pl +++ b/t/002_cfs_compatibility.pl @@ -65,6 +65,7 @@ BEGIN shared_preload_libraries = 'ptrack' ptrack.map_size = 16 wal_level = 'replica' + autovacuum = 'off' }); $node->start; @@ -119,9 +120,9 @@ BEGIN # select the pagecount sums and compare them (should be equal) my $pagecount_sum_cfs = $node->safe_psql('postgres', - "select sum(pagecount) from ptrack_get_pagemapset('$init_lsn'::pg_lsn) where path like '%$cfs_relpath%';"); + "select sum(pagecount) from ptrack_get_pagemapset('$init_lsn'::pg_lsn) where path like '%$cfs_relpath';"); my $pagecount_sum_no_cfs = $node->safe_psql('postgres', - "select sum(pagecount) from ptrack_get_pagemapset('$init_lsn'::pg_lsn) where path like '%$no_cfs_relpath%';"); + "select sum(pagecount) from ptrack_get_pagemapset('$init_lsn'::pg_lsn) where path like '%$no_cfs_relpath';"); is($pagecount_sum_cfs, $pagecount_sum_no_cfs, "pagecount sums don't match"); @@ -137,9 +138,9 @@ BEGIN # select the pagecount sums and compare them (again, they should be equal) $pagecount_sum_cfs = $node->safe_psql('postgres', - "select sum(pagecount) from ptrack_get_pagemapset('$init_lsn'::pg_lsn) where path like '%$cfs_relpath%';"); + "select sum(pagecount) from ptrack_get_pagemapset('$init_lsn'::pg_lsn) where path like '%$cfs_relpath';"); $pagecount_sum_no_cfs = $node->safe_psql('postgres', - "select sum(pagecount) from ptrack_get_pagemapset('$init_lsn'::pg_lsn) where path like '%$no_cfs_relpath%';"); + "select sum(pagecount) from ptrack_get_pagemapset('$init_lsn'::pg_lsn) where path like '%$no_cfs_relpath';"); is($pagecount_sum_cfs, $pagecount_sum_no_cfs, "pagecount sums don't match"); From 6cfe1f79e715fe1c38600b80ee3548a10b7b55a9 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 16 Jan 2023 11:08:47 +0300 Subject: [PATCH 41/65] Version upgrade to 2.4 --- Makefile | 5 +++-- README.md | 9 ++++++++- ptrack--2.3--2.4.sql | 5 +++++ ptrack.c | 2 +- ptrack.control | 2 +- ptrack.h | 4 ++-- 6 files changed, 20 insertions(+), 7 deletions(-) create mode 100644 ptrack--2.3--2.4.sql diff --git a/Makefile b/Makefile index 4083df2..7100bec 100644 --- a/Makefile +++ b/Makefile @@ -5,8 +5,9 @@ OBJS = ptrack.o datapagemap.o engine.o $(WIN32RES) PGFILEDESC = "ptrack - block-level incremental backup engine" EXTENSION = ptrack -EXTVERSION = 2.3 -DATA = ptrack--2.1.sql ptrack--2.0--2.1.sql ptrack--2.1--2.2.sql ptrack--2.2--2.3.sql +EXTVERSION = 2.4 +DATA = ptrack--2.1.sql ptrack--2.0--2.1.sql ptrack--2.1--2.2.sql ptrack--2.2--2.3.sql \ + ptrack--2.3--2.4.sql TAP_TESTS = 1 diff --git a/README.md 
b/README.md index 8042ce3..ece43af 100644 --- a/README.md +++ b/README.md @@ -74,7 +74,7 @@ Usage example: postgres=# SELECT ptrack_version(); ptrack_version ---------------- - 2.3 + 2.4 (1 row) postgres=# SELECT ptrack_init_lsn(); @@ -123,6 +123,13 @@ Since version 2.2 we use a different algorithm for tracking changed pages. Thus, * Start server * Do `ALTER EXTENSION 'ptrack' UPDATE;`. +#### Upgrading from 2.3.* to 2.4.*: + +* Stop your server +* Update ptrack binaries +* Start server +* Do `ALTER EXTENSION 'ptrack' UPDATE;`. + ## Limitations 1. You can only use `ptrack` safely with `wal_level >= 'replica'`. Otherwise, you can lose tracking of some changes if crash-recovery occurs, since [certain commands are designed not to write WAL at all if wal_level is minimal](https://www.postgresql.org/docs/12/populate.html#POPULATE-PITR), but we only durably flush `ptrack` map at checkpoint time. diff --git a/ptrack--2.3--2.4.sql b/ptrack--2.3--2.4.sql new file mode 100644 index 0000000..780bba5 --- /dev/null +++ b/ptrack--2.3--2.4.sql @@ -0,0 +1,5 @@ +/* ptrack/ptrack--2.3--2.4.sql */ + +-- Complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION ptrack UPDATE;" to load this file. \quit + diff --git a/ptrack.c b/ptrack.c index beb82c6..8715734 100644 --- a/ptrack.c +++ b/ptrack.c @@ -16,7 +16,7 @@ * * Currently ptrack has following public API methods: * - * # ptrack_version --- returns ptrack version string (2.3 currently). + * # ptrack_version --- returns ptrack version string (2.4 currently). * # ptrack_get_pagemapset('LSN') --- returns a set of changed data files with * bitmaps of changed blocks since specified LSN. * # ptrack_init_lsn --- returns LSN of the last ptrack map initialization. diff --git a/ptrack.control b/ptrack.control index 85ede4c..7e3a2b7 100644 --- a/ptrack.control +++ b/ptrack.control @@ -1,5 +1,5 @@ # ptrack extension comment = 'block-level incremental backup engine' -default_version = '2.3' +default_version = '2.4' module_pathname = '$libdir/ptrack' relocatable = true diff --git a/ptrack.h b/ptrack.h index eec03bb..e56f60b 100644 --- a/ptrack.h +++ b/ptrack.h @@ -27,9 +27,9 @@ #include "utils/relcache.h" /* Ptrack version as a string */ -#define PTRACK_VERSION "2.3" +#define PTRACK_VERSION "2.4" /* Ptrack version as a number */ -#define PTRACK_VERSION_NUM 230 +#define PTRACK_VERSION_NUM 240 /* Last ptrack version that changed map file format */ #define PTRACK_MAP_FILE_VERSION_NUM 220 From eab35bcc17700f71392cce41d9de63461f14bdb8 Mon Sep 17 00:00:00 2001 From: Koval Dmitry Date: Fri, 25 Nov 2022 16:25:42 +0300 Subject: [PATCH 42/65] [PGPRO-7470] Function pgwin32_is_junction removed due to commit 5fc88c5d Tags: ptrack --- ptrack.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ptrack.c b/ptrack.c index 8715734..c9ee56a 100644 --- a/ptrack.c +++ b/ptrack.c @@ -376,7 +376,7 @@ ptrack_gather_filelist(List **filelist, char *path, Oid spcOid, Oid dbOid) ptrack_gather_filelist(filelist, subpath, spcOid, InvalidOid); } /* TODO: is it enough to properly check symlink support? 
*/ -#ifndef WIN32 +#if !defined(WIN32) || (PG_VERSION_NUM >= 160000) else if (S_ISLNK(fst.st_mode)) #else else if (pgwin32_is_junction(subpath)) From 42ea77db5b8ed28b2804105a3655fb658b543723 Mon Sep 17 00:00:00 2001 From: Daniel Shelepanov Date: Tue, 21 Feb 2023 12:04:56 +0300 Subject: [PATCH 43/65] [PBKP-531] hotfix (#33) [PBKP-531] hotfix PGPROEE11 now supports ptrack for CFS --------- Co-authored-by: Daniel Shelepanov --- engine.c | 11 ++++++++++- engine.h | 2 +- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/engine.c b/engine.c index 329f13a..afaae1f 100644 --- a/engine.c +++ b/engine.c @@ -117,16 +117,25 @@ is_cfm_file_path(const char *filepath) { off_t get_cfs_relation_file_decompressed_size(RelFileNodeBackend rnode, const char *fullpath, ForkNumber forknum) { File fd; - int compressor; off_t size; +#if PG_VERSION_NUM >= 120000 + int compressor; compressor = md_get_compressor_internal(nodeOf(rnode), rnode.backend, forknum); fd = PathNameOpenFile(fullpath, O_RDWR | PG_BINARY, compressor); +#else + fd = PathNameOpenFile(fullpath, O_RDWR | PG_BINARY | PG_COMPRESSION); +#endif if(fd < 0) return (off_t)-1; +#if PG_VERSION_NUM >= 120000 size = FileSize(fd); +#else + size = FileSeek(fd, 0, SEEK_END); +#endif + FileClose(fd); return size; diff --git a/engine.h b/engine.h index 3ba868c..56777fc 100644 --- a/engine.h +++ b/engine.h @@ -45,7 +45,7 @@ #define PTRACK_MAGIC_SIZE 4 /* CFS support macro */ -#if defined(PGPRO_EE) && PG_VERSION_NUM >= 120000 +#if defined(PGPRO_EE) && PG_VERSION_NUM >= 110000 #define CFS_SUPPORT 1 #endif From 528c9db58c6d6f24313aa971e2ea93f9ed6286e4 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 21 Mar 2023 13:48:52 +0300 Subject: [PATCH 44/65] [PBCKP-564] Don't spam with "skip empty file" message And don't add empty files to file list at all. Fixes #35 --- ptrack.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/ptrack.c b/ptrack.c index c9ee56a..22a2acf 100644 --- a/ptrack.c +++ b/ptrack.c @@ -322,7 +322,7 @@ ptrack_gather_filelist(List **filelist, char *path, Oid spcOid, Oid dbOid) if (sret < 0) { - ereport(LOG, + ereport(WARNING, (errcode_for_file_access(), errmsg("ptrack: could not stat file \"%s\": %m", subpath))); continue; @@ -330,6 +330,14 @@ ptrack_gather_filelist(List **filelist, char *path, Oid spcOid, Oid dbOid) if (S_ISREG(fst.st_mode)) { + if (fst.st_size == 0) + { + elog(DEBUG3, "ptrack: skip empty file %s", subpath); + + /* But try the next one */ + continue; + } + /* Regular file inside database directory, otherwise skip it */ if (dbOid != InvalidOid || spcOid == GLOBALTABLESPACE_OID) { @@ -406,6 +414,8 @@ ptrack_filelist_getnext(PtScanCtx * ctx) RelFileNodeBackend rnodebackend; #endif +get_next: + /* No more file in the list */ if (list_length(ctx->filelist) == 0) return -1; @@ -440,15 +450,15 @@ ptrack_filelist_getnext(PtScanCtx * ctx) elog(WARNING, "ptrack: cannot stat file %s", fullpath); /* But try the next one */ - return ptrack_filelist_getnext(ctx); + goto get_next; } if (fst.st_size == 0) { - elog(WARNING, "ptrack: skip empty file %s", fullpath); + elog(DEBUG3, "ptrack: skip empty file %s", fullpath); /* But try the next one */ - return ptrack_filelist_getnext(ctx); + goto get_next; } #if CFS_SUPPORT @@ -460,7 +470,7 @@ ptrack_filelist_getnext(PtScanCtx * ctx) // Could not open fullpath for some reason, trying the next file. 
if(rel_st_size == -1) - return ptrack_filelist_getnext(ctx); + goto get_next; } else #endif rel_st_size = fst.st_size; From cca3fec45c9d8ceb3d3bbb8a4fc10776c4a1848c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 24 Apr 2023 18:03:30 +0300 Subject: [PATCH 45/65] PBCKP-544 remove crc32c source patch for windows - For Pg <= 14 it is better to link ptrack with pgport.lib by patching Mkvcbuild.pm - Pg >= 15 reacts on presence of PG_LIBS_INTERNAL += $(libpq_pgport) --- Makefile | 4 +++ patches/REL_11_STABLE-ptrack-core.diff | 31 +++++++++------------ patches/REL_12_STABLE-ptrack-core.diff | 31 +++++++++------------ patches/REL_13_STABLE-ptrack-core.diff | 37 +++++++++----------------- patches/REL_14_STABLE-ptrack-core.diff | 37 +++++++++----------------- patches/REL_15_STABLE-ptrack-core.diff | 13 --------- patches/master-ptrack-core.diff | 17 ++---------- 7 files changed, 58 insertions(+), 112 deletions(-) diff --git a/Makefile b/Makefile index 7100bec..e3d25a4 100644 --- a/Makefile +++ b/Makefile @@ -11,6 +11,10 @@ DATA = ptrack--2.1.sql ptrack--2.0--2.1.sql ptrack--2.1--2.2.sql ptrack--2.2--2. TAP_TESTS = 1 +# This line to link with pgport.lib on Windows compilation +# with Mkvcbuild.pm on PGv15+ +PG_LIBS_INTERNAL += $(libpq_pgport) + ifdef USE_PGXS PG_CONFIG ?= pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) diff --git a/patches/REL_11_STABLE-ptrack-core.diff b/patches/REL_11_STABLE-ptrack-core.diff index a8207f5..e78977c 100644 --- a/patches/REL_11_STABLE-ptrack-core.diff +++ b/patches/REL_11_STABLE-ptrack-core.diff @@ -207,24 +207,6 @@ index 80241455357..50dca7bf6f4 100644 #define IsBootstrapProcessingMode() (Mode == BootstrapProcessing) #define IsInitProcessingMode() (Mode == InitProcessing) -diff --git a/src/include/port/pg_crc32c.h b/src/include/port/pg_crc32c.h -index 9a26295c8e8..dc72b27a10d 100644 ---- a/src/include/port/pg_crc32c.h -+++ b/src/include/port/pg_crc32c.h -@@ -69,8 +69,11 @@ extern pg_crc32c pg_comp_crc32c_armv8(pg_crc32c crc, const void *data, size_t le - #define FIN_CRC32C(crc) ((crc) ^= 0xFFFFFFFF) - - extern pg_crc32c pg_comp_crc32c_sb8(pg_crc32c crc, const void *data, size_t len); --extern pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); -- -+extern -+#ifndef FRONTEND -+PGDLLIMPORT -+#endif -+pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); - #ifdef USE_SSE42_CRC32C_WITH_RUNTIME_CHECK - extern pg_crc32c pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len); - #endif diff --git a/src/include/storage/copydir.h b/src/include/storage/copydir.h index 4fef3e21072..e55430879c3 100644 --- a/src/include/storage/copydir.h @@ -261,3 +243,16 @@ index 0298ed1a2bc..24c684771d0 100644 extern void mdinit(void); extern void mdclose(SMgrRelation reln, ForkNumber forknum); extern void mdcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo); +diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm +index b52baa90988..74870c048db 100644 +--- a/src/tools/msvc/Mkvcbuild.pm ++++ b/src/tools/msvc/Mkvcbuild.pm +@@ -33,7 +33,7 @@ my @unlink_on_exit; + # Set of variables for modules in contrib/ and src/test/modules/ + my $contrib_defines = { 'refint' => 'REFINT_VERBOSE' }; + my @contrib_uselibpq = ('dblink', 'oid2name', 'postgres_fdw', 'vacuumlo'); +-my @contrib_uselibpgport = ('oid2name', 'pg_standby', 'vacuumlo'); ++my @contrib_uselibpgport = ('oid2name', 'pg_standby', 'vacuumlo', 'ptrack'); + my @contrib_uselibpgcommon = ('oid2name', 'pg_standby', 'vacuumlo'); + my $contrib_extralibs = undef; + my 
$contrib_extraincludes = { 'dblink' => ['src/backend'] }; diff --git a/patches/REL_12_STABLE-ptrack-core.diff b/patches/REL_12_STABLE-ptrack-core.diff index 738b8e7..e3feb67 100644 --- a/patches/REL_12_STABLE-ptrack-core.diff +++ b/patches/REL_12_STABLE-ptrack-core.diff @@ -225,24 +225,6 @@ index 61a24c2e3c6..cbd46d0cb02 100644 #define IsBootstrapProcessingMode() (Mode == BootstrapProcessing) #define IsInitProcessingMode() (Mode == InitProcessing) -diff --git a/src/include/port/pg_crc32c.h b/src/include/port/pg_crc32c.h -index fbd079d2439..01682035e0b 100644 ---- a/src/include/port/pg_crc32c.h -+++ b/src/include/port/pg_crc32c.h -@@ -69,8 +69,11 @@ extern pg_crc32c pg_comp_crc32c_armv8(pg_crc32c crc, const void *data, size_t le - #define FIN_CRC32C(crc) ((crc) ^= 0xFFFFFFFF) - - extern pg_crc32c pg_comp_crc32c_sb8(pg_crc32c crc, const void *data, size_t len); --extern pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); -- -+extern -+#ifndef FRONTEND -+PGDLLIMPORT -+#endif -+pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); - #ifdef USE_SSE42_CRC32C_WITH_RUNTIME_CHECK - extern pg_crc32c pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len); - #endif diff --git a/src/include/storage/copydir.h b/src/include/storage/copydir.h index 525cc6203e1..9481e1c5a88 100644 --- a/src/include/storage/copydir.h @@ -289,3 +271,16 @@ index 16428c5f5fb..6b0cd8f8eea 100644 extern void InitSync(void); extern void SyncPreCheckpoint(void); extern void SyncPostCheckpoint(void); +diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm +index 1bdc33d7168..83b1190775f 100644 +--- a/src/tools/msvc/Mkvcbuild.pm ++++ b/src/tools/msvc/Mkvcbuild.pm +@@ -33,7 +33,7 @@ my @unlink_on_exit; + # Set of variables for modules in contrib/ and src/test/modules/ + my $contrib_defines = { 'refint' => 'REFINT_VERBOSE' }; + my @contrib_uselibpq = ('dblink', 'oid2name', 'postgres_fdw', 'vacuumlo'); +-my @contrib_uselibpgport = ('oid2name', 'pg_standby', 'vacuumlo'); ++my @contrib_uselibpgport = ('oid2name', 'pg_standby', 'vacuumlo', 'ptrack'); + my @contrib_uselibpgcommon = ('oid2name', 'pg_standby', 'vacuumlo'); + my $contrib_extralibs = undef; + my $contrib_extraincludes = { 'dblink' => ['src/backend'] }; diff --git a/patches/REL_13_STABLE-ptrack-core.diff b/patches/REL_13_STABLE-ptrack-core.diff index f61fb48..5b73162 100644 --- a/patches/REL_13_STABLE-ptrack-core.diff +++ b/patches/REL_13_STABLE-ptrack-core.diff @@ -1,9 +1,3 @@ -commit a14ac459d71528c64df00c693e9c71ac70d3ba29 -Author: anastasia -Date: Mon Oct 19 14:53:06 2020 +0300 - - add ptrack 2.0 - diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c index 50ae1f16d0..721b926ad2 100644 --- a/src/backend/replication/basebackup.c @@ -231,24 +225,6 @@ index 72e3352398..5c2e016501 100644 #define IsBootstrapProcessingMode() (Mode == BootstrapProcessing) #define IsInitProcessingMode() (Mode == InitProcessing) -diff --git a/src/include/port/pg_crc32c.h b/src/include/port/pg_crc32c.h -index 3c6f906683..a7355f7ad1 100644 ---- a/src/include/port/pg_crc32c.h -+++ b/src/include/port/pg_crc32c.h -@@ -69,8 +69,11 @@ extern pg_crc32c pg_comp_crc32c_armv8(pg_crc32c crc, const void *data, size_t le - #define FIN_CRC32C(crc) ((crc) ^= 0xFFFFFFFF) - - extern pg_crc32c pg_comp_crc32c_sb8(pg_crc32c crc, const void *data, size_t len); --extern pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); -- -+extern -+#ifndef FRONTEND -+PGDLLIMPORT -+#endif -+pg_crc32c 
(*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); - #ifdef USE_SSE42_CRC32C_WITH_RUNTIME_CHECK - extern pg_crc32c pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len); - #endif diff --git a/src/include/storage/copydir.h b/src/include/storage/copydir.h index 5d28f59c1d..0d3f04d8af 100644 --- a/src/include/storage/copydir.h @@ -295,3 +271,16 @@ index e16ab8e711..88da9686eb 100644 extern void InitSync(void); extern void SyncPreCheckpoint(void); extern void SyncPostCheckpoint(void); +diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm +index 67b2ea9ee9b..e9a282d5647 100644 +--- a/src/tools/msvc/Mkvcbuild.pm ++++ b/src/tools/msvc/Mkvcbuild.pm +@@ -34,7 +34,7 @@ my @unlink_on_exit; + # Set of variables for modules in contrib/ and src/test/modules/ + my $contrib_defines = { 'refint' => 'REFINT_VERBOSE' }; + my @contrib_uselibpq = ('dblink', 'oid2name', 'postgres_fdw', 'vacuumlo'); +-my @contrib_uselibpgport = ('oid2name', 'pg_standby', 'vacuumlo'); ++my @contrib_uselibpgport = ('oid2name', 'pg_standby', 'vacuumlo', 'ptrack'); + my @contrib_uselibpgcommon = ('oid2name', 'pg_standby', 'vacuumlo'); + my $contrib_extralibs = undef; + my $contrib_extraincludes = { 'dblink' => ['src/backend'] }; diff --git a/patches/REL_14_STABLE-ptrack-core.diff b/patches/REL_14_STABLE-ptrack-core.diff index f61fb48..88ffcdc 100644 --- a/patches/REL_14_STABLE-ptrack-core.diff +++ b/patches/REL_14_STABLE-ptrack-core.diff @@ -1,9 +1,3 @@ -commit a14ac459d71528c64df00c693e9c71ac70d3ba29 -Author: anastasia -Date: Mon Oct 19 14:53:06 2020 +0300 - - add ptrack 2.0 - diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c index 50ae1f16d0..721b926ad2 100644 --- a/src/backend/replication/basebackup.c @@ -231,24 +225,6 @@ index 72e3352398..5c2e016501 100644 #define IsBootstrapProcessingMode() (Mode == BootstrapProcessing) #define IsInitProcessingMode() (Mode == InitProcessing) -diff --git a/src/include/port/pg_crc32c.h b/src/include/port/pg_crc32c.h -index 3c6f906683..a7355f7ad1 100644 ---- a/src/include/port/pg_crc32c.h -+++ b/src/include/port/pg_crc32c.h -@@ -69,8 +69,11 @@ extern pg_crc32c pg_comp_crc32c_armv8(pg_crc32c crc, const void *data, size_t le - #define FIN_CRC32C(crc) ((crc) ^= 0xFFFFFFFF) - - extern pg_crc32c pg_comp_crc32c_sb8(pg_crc32c crc, const void *data, size_t len); --extern pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); -- -+extern -+#ifndef FRONTEND -+PGDLLIMPORT -+#endif -+pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); - #ifdef USE_SSE42_CRC32C_WITH_RUNTIME_CHECK - extern pg_crc32c pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len); - #endif diff --git a/src/include/storage/copydir.h b/src/include/storage/copydir.h index 5d28f59c1d..0d3f04d8af 100644 --- a/src/include/storage/copydir.h @@ -295,3 +271,16 @@ index e16ab8e711..88da9686eb 100644 extern void InitSync(void); extern void SyncPreCheckpoint(void); extern void SyncPostCheckpoint(void); +diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm +index 9b6539fb15d..4b2bcdb6b88 100644 +--- a/src/tools/msvc/Mkvcbuild.pm ++++ b/src/tools/msvc/Mkvcbuild.pm +@@ -38,7 +38,7 @@ my @unlink_on_exit; + my $contrib_defines = { 'refint' => 'REFINT_VERBOSE' }; + my @contrib_uselibpq = + ('dblink', 'oid2name', 'postgres_fdw', 'vacuumlo', 'libpq_pipeline'); +-my @contrib_uselibpgport = ('libpq_pipeline', 'oid2name', 'vacuumlo'); ++my @contrib_uselibpgport = ('libpq_pipeline', 'oid2name', 'vacuumlo', 
'ptrack'); + my @contrib_uselibpgcommon = ('libpq_pipeline', 'oid2name', 'vacuumlo'); + my $contrib_extralibs = { 'libpq_pipeline' => ['ws2_32.lib'] }; + my $contrib_extraincludes = { 'dblink' => ['src/backend'] }; diff --git a/patches/REL_15_STABLE-ptrack-core.diff b/patches/REL_15_STABLE-ptrack-core.diff index 18fcb6e..2adc5f3 100644 --- a/patches/REL_15_STABLE-ptrack-core.diff +++ b/patches/REL_15_STABLE-ptrack-core.diff @@ -200,19 +200,6 @@ index 62529310415..b496f54fb06 100644 /* end of list */ {NULL, false} }; -diff --git a/src/include/port/pg_crc32c.h b/src/include/port/pg_crc32c.h -index d7668651ba8..33994a27f5f 100644 ---- a/src/include/port/pg_crc32c.h -+++ b/src/include/port/pg_crc32c.h -@@ -69,7 +69,7 @@ extern pg_crc32c pg_comp_crc32c_armv8(pg_crc32c crc, const void *data, size_t le - #define FIN_CRC32C(crc) ((crc) ^= 0xFFFFFFFF) - - extern pg_crc32c pg_comp_crc32c_sb8(pg_crc32c crc, const void *data, size_t len); --extern pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); -+extern PGDLLIMPORT pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); - - #ifdef USE_SSE42_CRC32C_WITH_RUNTIME_CHECK - extern pg_crc32c pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len); diff --git a/src/include/storage/copydir.h b/src/include/storage/copydir.h index 50a26edeb06..af1602f5154 100644 --- a/src/include/storage/copydir.h diff --git a/patches/master-ptrack-core.diff b/patches/master-ptrack-core.diff index f1371a6..c24ae4d 100644 --- a/patches/master-ptrack-core.diff +++ b/patches/master-ptrack-core.diff @@ -200,19 +200,6 @@ index 269ed6446e6..6318a8c1f55 100644 /* end of list */ {NULL, false} }; -diff --git a/src/include/port/pg_crc32c.h b/src/include/port/pg_crc32c.h -index d7668651ba8..33994a27f5f 100644 ---- a/src/include/port/pg_crc32c.h -+++ b/src/include/port/pg_crc32c.h -@@ -69,7 +69,7 @@ extern pg_crc32c pg_comp_crc32c_armv8(pg_crc32c crc, const void *data, size_t le - #define FIN_CRC32C(crc) ((crc) ^= 0xFFFFFFFF) - - extern pg_crc32c pg_comp_crc32c_sb8(pg_crc32c crc, const void *data, size_t len); --extern pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); -+extern PGDLLIMPORT pg_crc32c (*pg_comp_crc32c) (pg_crc32c crc, const void *data, size_t len); - - #ifdef USE_SSE42_CRC32C_WITH_RUNTIME_CHECK - extern pg_crc32c pg_comp_crc32c_sse42(pg_crc32c crc, const void *data, size_t len); diff --git a/src/include/storage/copydir.h b/src/include/storage/copydir.h index 50a26edeb06..af1602f5154 100644 --- a/src/include/storage/copydir.h @@ -224,8 +211,8 @@ index 50a26edeb06..af1602f5154 100644 +typedef void (*copydir_hook_type) (const char *path); +extern PGDLLIMPORT copydir_hook_type copydir_hook; + - extern void copydir(char *fromdir, char *todir, bool recurse); - extern void copy_file(char *fromfile, char *tofile); + extern void copydir(const char *fromdir, const char *todir, bool recurse); + extern void copy_file(const char *fromfile, const char *tofile); diff --git a/src/include/storage/md.h b/src/include/storage/md.h index 10aa1b0109b..1415675824e 100644 From f1f72f89c378eda730cf1ae14061320ab9587c0e Mon Sep 17 00:00:00 2001 From: "v.shepard" Date: Mon, 26 Jun 2023 21:58:12 +0200 Subject: [PATCH 46/65] PBCKP-649 remove cfs --- engine.c | 87 +--------------------- engine.h | 11 --- ptrack.c | 32 +------- ptrack.h | 4 +- t/002_cfs_compatibility.pl | 149 ------------------------------------- 5 files changed, 3 insertions(+), 280 deletions(-) delete mode 100644 t/002_cfs_compatibility.pl diff --git 
a/engine.c b/engine.c index afaae1f..15760d8 100644 --- a/engine.c +++ b/engine.c @@ -36,10 +36,6 @@ #include "catalog/pg_tablespace.h" #include "miscadmin.h" #include "port/pg_crc32c.h" -#ifdef PGPRO_EE -/* For file_is_in_cfs_tablespace() only. */ -#include "common/cfs_common.h" -#endif #include "storage/copydir.h" #if PG_VERSION_NUM >= 120000 #include "storage/md.h" @@ -95,53 +91,6 @@ ptrack_write_chunk(int fd, pg_crc32c *crc, char *chunk, size_t size) } } -/* - * Determines whether given file path is a path to a cfm file. - */ -bool -is_cfm_file_path(const char *filepath) { - ssize_t len = strlen(filepath); - - // For this length checks we assume that the filename is at least - // 1 character longer than the corresponding extension ".cfm": - // strlen(".cfm") == 4 therefore we assume that the filename can't be - // shorter than 5 bytes, for example: "5.cfm". - return strlen(filepath) >= 5 && strcmp(&filepath[len-4], ".cfm") == 0; -} - -#if CFS_SUPPORT -/* - * Determines the relation file size specified by fullpath as if it - * was not compressed. - */ -off_t -get_cfs_relation_file_decompressed_size(RelFileNodeBackend rnode, const char *fullpath, ForkNumber forknum) { - File fd; - off_t size; - -#if PG_VERSION_NUM >= 120000 - int compressor; - compressor = md_get_compressor_internal(nodeOf(rnode), rnode.backend, forknum); - fd = PathNameOpenFile(fullpath, O_RDWR | PG_BINARY, compressor); -#else - fd = PathNameOpenFile(fullpath, O_RDWR | PG_BINARY | PG_COMPRESSION); -#endif - - if(fd < 0) - return (off_t)-1; - -#if PG_VERSION_NUM >= 120000 - size = FileSize(fd); -#else - size = FileSeek(fd, 0, SEEK_END); -#endif - - FileClose(fd); - - return size; -} -#endif - /* * Delete ptrack files when ptrack is disabled. * @@ -549,13 +498,8 @@ assign_ptrack_map_size(int newval, void *extra) * For use in functions that copy directories bypassing buffer manager. 
*/ static void -#if CFS_SUPPORT -ptrack_mark_file(Oid dbOid, Oid tablespaceOid, - const char *filepath, const char *filename, bool is_cfs) -#else ptrack_mark_file(Oid dbOid, Oid tablespaceOid, const char *filepath, const char *filename) -#endif { RelFileNodeBackend rnode; ForkNumber forknum; @@ -564,9 +508,6 @@ ptrack_mark_file(Oid dbOid, Oid tablespaceOid, struct stat stat_buf; int oidchars; char oidbuf[OIDCHARS + 1]; -#if CFS_SUPPORT - off_t rel_size; -#endif /* Do not track temporary relations */ if (looks_like_temp_rel_name(filename)) @@ -585,21 +526,6 @@ ptrack_mark_file(Oid dbOid, Oid tablespaceOid, oidbuf[oidchars] = '\0'; nodeRel(nodeOf(rnode)) = atooid(oidbuf); -#if CFS_SUPPORT - // if current tablespace is cfs-compressed and md_get_compressor_internal - // returns the type of the compressing algorithm for filepath, then it - // needs to be de-compressed to obtain its size - if(is_cfs && md_get_compressor_internal(nodeOf(rnode), rnode.backend, forknum) != 0) { - rel_size = get_cfs_relation_file_decompressed_size(rnode, filepath, forknum); - - if(rel_size == (off_t)-1) { - elog(WARNING, "ptrack: could not open cfs-compressed relation file: %s", filepath); - return; - } - - nblocks = rel_size / BLCKSZ; - } else -#endif /* Compute number of blocks based on file size */ if (stat(filepath, &stat_buf) == 0) nblocks = stat_buf.st_size / BLCKSZ; @@ -620,9 +546,6 @@ ptrack_walkdir(const char *path, Oid tablespaceOid, Oid dbOid) { DIR *dir; struct dirent *de; -#if CFS_SUPPORT - bool is_cfs; -#endif /* Do not walk during bootstrap and if ptrack is disabled */ if (ptrack_map_size == 0 @@ -631,10 +554,6 @@ ptrack_walkdir(const char *path, Oid tablespaceOid, Oid dbOid) || InitializingParallelWorker) return; -#if CFS_SUPPORT - is_cfs = file_is_in_cfs_tablespace(path); -#endif - dir = AllocateDir(path); while ((de = ReadDirExtended(dir, path, LOG)) != NULL) @@ -662,11 +581,7 @@ ptrack_walkdir(const char *path, Oid tablespaceOid, Oid dbOid) } if (S_ISREG(fst.st_mode)) -#if CFS_SUPPORT - ptrack_mark_file(dbOid, tablespaceOid, subpath, de->d_name, is_cfs); -#else - ptrack_mark_file(dbOid, tablespaceOid, subpath, de->d_name); -#endif + ptrack_mark_file(dbOid, tablespaceOid, subpath, de->d_name); } FreeDir(dir); /* we ignore any error here */ diff --git a/engine.h b/engine.h index 56777fc..5daf69a 100644 --- a/engine.h +++ b/engine.h @@ -44,11 +44,6 @@ #define PTRACK_MAGIC "ptk" #define PTRACK_MAGIC_SIZE 4 -/* CFS support macro */ -#if defined(PGPRO_EE) && PG_VERSION_NUM >= 110000 -#define CFS_SUPPORT 1 -#endif - /* * Header of ptrack map. */ @@ -116,10 +111,4 @@ extern void ptrack_walkdir(const char *path, Oid tablespaceOid, Oid dbOid); extern void ptrack_mark_block(RelFileNodeBackend smgr_rnode, ForkNumber forkno, BlockNumber blkno); -extern bool is_cfm_file_path(const char *path); -#ifdef PGPRO_EE -extern off_t get_cfs_relation_file_decompressed_size(RelFileNodeBackend rnode, - const char *fullpath, ForkNumber forknum); -#endif - #endif /* PTRACK_ENGINE_H */ diff --git a/ptrack.c b/ptrack.c index 22a2acf..4a01759 100644 --- a/ptrack.c +++ b/ptrack.c @@ -36,10 +36,6 @@ #include "funcapi.h" #include "miscadmin.h" #include "nodes/pg_list.h" -#ifdef PGPRO_EE -/* For file_is_in_cfs_tablespace() only. 
*/ -#include "common/cfs_common.h" -#endif #include "port/pg_crc32c.h" #include "storage/copydir.h" #include "storage/ipc.h" @@ -294,12 +290,6 @@ ptrack_gather_filelist(List **filelist, char *path, Oid spcOid, Oid dbOid) { DIR *dir; struct dirent *de; -#if CFS_SUPPORT - bool is_cfs; - - is_cfs = file_is_in_cfs_tablespace(path); -#endif - dir = AllocateDir(path); while ((de = ReadDirExtended(dir, path, LOG)) != NULL) @@ -312,8 +302,7 @@ ptrack_gather_filelist(List **filelist, char *path, Oid spcOid, Oid dbOid) if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0 || - looks_like_temp_rel_name(de->d_name) || - is_cfm_file_path(de->d_name)) + looks_like_temp_rel_name(de->d_name)) continue; snprintf(subpath, sizeof(subpath), "%s/%s", path, de->d_name); @@ -364,10 +353,6 @@ ptrack_gather_filelist(List **filelist, char *path, Oid spcOid, Oid dbOid) nodeSpc(pfl->relnode) = spcOid == InvalidOid ? DEFAULTTABLESPACE_OID : spcOid; pfl->path = GetRelationPath(dbOid, nodeSpc(pfl->relnode), nodeRel(pfl->relnode), InvalidBackendId, pfl->forknum); -#if CFS_SUPPORT - pfl->is_cfs_compressed = is_cfs - && md_get_compressor_internal(pfl->relnode, InvalidBackendId, pfl->forknum) != 0; -#endif *filelist = lappend(*filelist, pfl); @@ -410,9 +395,6 @@ ptrack_filelist_getnext(PtScanCtx * ctx) char *fullpath; struct stat fst; off_t rel_st_size = 0; -#if CFS_SUPPORT - RelFileNodeBackend rnodebackend; -#endif get_next: @@ -461,18 +443,6 @@ ptrack_filelist_getnext(PtScanCtx * ctx) goto get_next; } -#if CFS_SUPPORT - nodeOf(rnodebackend) = ctx->bid.relnode; - rnodebackend.backend = InvalidBackendId; - - if(pfl->is_cfs_compressed) { - rel_st_size = get_cfs_relation_file_decompressed_size(rnodebackend, fullpath, pfl->forknum); - - // Could not open fullpath for some reason, trying the next file. - if(rel_st_size == -1) - goto get_next; - } else -#endif rel_st_size = fst.st_size; if (pfl->segno > 0) diff --git a/ptrack.h b/ptrack.h index e56f60b..ecc398b 100644 --- a/ptrack.h +++ b/ptrack.h @@ -78,9 +78,7 @@ typedef struct PtrackFileList_i ForkNumber forknum; int segno; char *path; -#ifdef PGPRO_EE - bool is_cfs_compressed; -#endif + } PtrackFileList_i; #endif /* PTRACK_H */ diff --git a/t/002_cfs_compatibility.pl b/t/002_cfs_compatibility.pl deleted file mode 100644 index 1772f3e..0000000 --- a/t/002_cfs_compatibility.pl +++ /dev/null @@ -1,149 +0,0 @@ -use strict; -use warnings; -use Test::More; - -my $pg_15_modules; - -BEGIN -{ - $pg_15_modules = eval - { - require PostgreSQL::Test::Cluster; - require PostgreSQL::Test::Utils; - return 1; - }; - - unless (defined $pg_15_modules) - { - $pg_15_modules = 0; - - require PostgresNode; - require TestLib; - } -} - -note('PostgreSQL 15 modules are used: ' . ($pg_15_modules ? 'yes' : 'no')); - -my $node; -my $res_stdout; -my $res_stderr; - -# Create node. -# Older versions of PostgreSQL modules use get_new_node function. -# Newer use standard perl object constructor syntax. 
-eval -{ - if ($pg_15_modules) - { - $node = PostgreSQL::Test::Cluster->new("node"); - } - else - { - $node = PostgresNode::get_new_node("node"); - } -}; - -note "Test for handling a ptrack map in compressed relations"; - -my $psql_stdout; - -# Starting the node -$node->init; - -# Could not load ptrack module after postmaster start - -my $cfs_tblspc1 = $node->basedir."/cfs_tblspc1"; -my $cfs_tblspc2 = $node->basedir."/cfs_tblspc2"; -mkdir $cfs_tblspc1 or die; -mkdir $cfs_tblspc2 or die; -my $no_cfs_tblspc1 = $node->basedir."/no_cfs_tblspc1"; -my $no_cfs_tblspc2 = $node->basedir."/no_cfs_tblspc2"; -mkdir $no_cfs_tblspc1 or die; -mkdir $no_cfs_tblspc2 or die; - -$node->append_conf('postgresql.conf', qq{ - shared_preload_libraries = 'ptrack' - ptrack.map_size = 16 - wal_level = 'replica' - autovacuum = 'off' -}); - -$node->start; - -# check cfs availability first -my $cfs_available = $node->safe_psql('postgres', - "select count(oid) from pg_proc where proname = 'cfs_version'"); - -if($cfs_available eq "0") { - $node->stop; - plan skip_all => "CFS is not supported by this PostgreSQL build"; -} else { - plan tests => 2; -} - -# Creating content -$node->safe_psql('postgres', qq| - create tablespace cfs_tblspc1 location '$cfs_tblspc1' with (compression=true); - create tablespace cfs_tblspc2 location '$cfs_tblspc2' with (compression=true); - create tablespace no_cfs_tblspc1 location '$no_cfs_tblspc1'; - create tablespace no_cfs_tblspc2 location '$no_cfs_tblspc2'; - - create database testing_cfs tablespace cfs_tblspc1; - create database testing_no_cfs tablespace no_cfs_tblspc1; -|); - -$node->safe_psql('testing_cfs', qq{ - create table testing(i int, text varchar); - insert into testing select 1, '1111111111111111111111111' from generate_series(1,10000000); -}); - -$node->safe_psql('testing_no_cfs', qq{ - create table testing_no(i int, text varchar); - insert into testing_no select 1, '1111111111111111111111111' from generate_series(1,10000000); -}); - -# creating ptrack -$node->safe_psql('postgres', "create extension ptrack"); - -# obtaining init lsn for further usage in ptrack_get_pagemapset -my $init_lsn = $node->safe_psql('postgres', 'select ptrack_init_lsn()'); - -# forcing copydir() hook by altering dbs tablespaces -$node->safe_psql('postgres', "alter database testing_cfs set tablespace cfs_tblspc2;"); -$node->safe_psql('postgres', "alter database testing_no_cfs set tablespace no_cfs_tblspc2;"); - -# obtaining relpath for cfs table -my $cfs_relpath = $node->safe_psql('testing_cfs', "select pg_relation_filepath('testing');"); - -# obtaining relpath for no-cfs table -my $no_cfs_relpath = $node->safe_psql('testing_no_cfs', "select pg_relation_filepath('testing_no');"); - -# select the pagecount sums and compare them (should be equal) -my $pagecount_sum_cfs = $node->safe_psql('postgres', - "select sum(pagecount) from ptrack_get_pagemapset('$init_lsn'::pg_lsn) where path like '%$cfs_relpath';"); -my $pagecount_sum_no_cfs = $node->safe_psql('postgres', - "select sum(pagecount) from ptrack_get_pagemapset('$init_lsn'::pg_lsn) where path like '%$no_cfs_relpath';"); - -is($pagecount_sum_cfs, $pagecount_sum_no_cfs, "pagecount sums don't match"); - -# forcing copydir() hook by altering dbs tablespaces back -$node->safe_psql('postgres', "alter database testing_cfs set tablespace cfs_tblspc1;"); -$node->safe_psql('postgres', "alter database testing_no_cfs set tablespace no_cfs_tblspc1;"); - -# obtaining new relpath for cfs table -$cfs_relpath = $node->safe_psql('testing_cfs', "select 
pg_relation_filepath('testing');"); - -# obtaining new relpath for no-cfs table -$no_cfs_relpath = $node->safe_psql('testing_no_cfs', "select pg_relation_filepath('testing_no');"); - -# select the pagecount sums and compare them (again, they should be equal) -$pagecount_sum_cfs = $node->safe_psql('postgres', - "select sum(pagecount) from ptrack_get_pagemapset('$init_lsn'::pg_lsn) where path like '%$cfs_relpath';"); -$pagecount_sum_no_cfs = $node->safe_psql('postgres', - "select sum(pagecount) from ptrack_get_pagemapset('$init_lsn'::pg_lsn) where path like '%$no_cfs_relpath';"); - -is($pagecount_sum_cfs, $pagecount_sum_no_cfs, "pagecount sums don't match"); - - -$node->stop; - From 2bddc22b76ae7b27f57612ee256503b98140826c Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 24 Jul 2023 20:32:25 +0300 Subject: [PATCH 47/65] remove from tail of filelist on modern postgres Modern postgres actually uses array as List implementation. Therefore, removing element from head is quite expensive. --- ptrack.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ptrack.c b/ptrack.c index 4a01759..9285b0d 100644 --- a/ptrack.c +++ b/ptrack.c @@ -402,12 +402,21 @@ ptrack_filelist_getnext(PtScanCtx * ctx) if (list_length(ctx->filelist) == 0) return -1; +#ifdef foreach_current_index + /* Get first file from the head */ + cell = list_tail(ctx->filelist); + pfl = (PtrackFileList_i *) lfirst(cell); + + /* Remove this file from the list */ + ctx->filelist = list_delete_last(ctx->filelist); +#else /* Get first file from the head */ cell = list_head(ctx->filelist); pfl = (PtrackFileList_i *) lfirst(cell); /* Remove this file from the list */ ctx->filelist = list_delete_first(ctx->filelist); +#endif if (pfl->segno > 0) { From f8a7400a023faf9d8a1351528947493c811db636 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 24 Jul 2023 21:09:07 +0300 Subject: [PATCH 48/65] don't elog(DEBUG3) on production build `elog` is surprisingly expective, since it calls `errstart` function --- ptrack.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ptrack.c b/ptrack.c index 9285b0d..63c8446 100644 --- a/ptrack.c +++ b/ptrack.c @@ -622,10 +622,12 @@ ptrack_get_pagemapset(PG_FUNCTION_ARGS) update_lsn1 = pg_atomic_read_u64(&ptrack_map->entries[slot1]); +#if USE_ASSERT_CHECKING if (update_lsn1 != InvalidXLogRecPtr) elog(DEBUG3, "ptrack: update_lsn1 %X/%X of blckno %u of file %s", (uint32) (update_lsn1 >> 32), (uint32) update_lsn1, ctx->bid.blocknum, ctx->relpath); +#endif /* Only probe the second slot if the first one is marked */ if (update_lsn1 >= ctx->lsn) @@ -633,10 +635,12 @@ ptrack_get_pagemapset(PG_FUNCTION_ARGS) slot2 = (size_t)(((hash << 32) | (hash >> 32)) % PtrackContentNblocks); update_lsn2 = pg_atomic_read_u64(&ptrack_map->entries[slot2]); +#if USE_ASSERT_CHECKING if (update_lsn2 != InvalidXLogRecPtr) elog(DEBUG3, "ptrack: update_lsn2 %X/%X of blckno %u of file %s", (uint32) (update_lsn1 >> 32), (uint32) update_lsn2, ctx->bid.blocknum, ctx->relpath); +#endif /* Block has been changed since specified LSN. 
Mark it in the bitmap */ if (update_lsn2 >= ctx->lsn) From 23b56d167ceaa4f0d421b337b467e360f64a36b6 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Mon, 24 Jul 2023 22:45:54 +0300 Subject: [PATCH 49/65] don't elog(DEBUG1) in production in ptrack_mark_block either --- engine.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/engine.c b/engine.c index 15760d8..1efc627 100644 --- a/engine.c +++ b/engine.c @@ -629,7 +629,9 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode, old_init_lsn.value = pg_atomic_read_u64(&ptrack_map->init_lsn); if (old_init_lsn.value == InvalidXLogRecPtr) { +#if USE_ASSERT_CHECKING elog(DEBUG1, "ptrack_mark_block: init_lsn " UINT64_FORMAT " <- " UINT64_FORMAT, old_init_lsn.value, new_lsn); +#endif while (old_init_lsn.value < new_lsn && !pg_atomic_compare_exchange_u64(&ptrack_map->init_lsn, (uint64 *) &old_init_lsn.value, new_lsn)); @@ -637,13 +639,17 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode, /* Atomically assign new LSN value to the first slot */ old_lsn.value = pg_atomic_read_u64(&ptrack_map->entries[slot1]); +#if USE_ASSERT_CHECKING elog(DEBUG3, "ptrack_mark_block: map[%zu]=" UINT64_FORMAT " <- " UINT64_FORMAT, slot1, old_lsn.value, new_lsn); +#endif while (old_lsn.value < new_lsn && !pg_atomic_compare_exchange_u64(&ptrack_map->entries[slot1], (uint64 *) &old_lsn.value, new_lsn)); /* And to the second */ old_lsn.value = pg_atomic_read_u64(&ptrack_map->entries[slot2]); +#if USE_ASSERT_CHECKING elog(DEBUG3, "ptrack_mark_block: map[%zu]=" UINT64_FORMAT " <- " UINT64_FORMAT, slot2, old_lsn.value, new_lsn); +#endif while (old_lsn.value < new_lsn && !pg_atomic_compare_exchange_u64(&ptrack_map->entries[slot2], (uint64 *) &old_lsn.value, new_lsn)); } From 9654c326fb3b11075e2aef4342f6e1b8bd37215e Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Tue, 25 Jul 2023 14:51:16 +0300 Subject: [PATCH 50/65] weird compatibility with private edition. Well, it looks ugly. But in reality one should not see files > 4GB in data folder. If one puts such file there, ptrack already will produce garbage. --- ptrack.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ptrack.c b/ptrack.c index 63c8446..704e2ee 100644 --- a/ptrack.c +++ b/ptrack.c @@ -394,7 +394,7 @@ ptrack_filelist_getnext(PtScanCtx * ctx) ListCell *cell; char *fullpath; struct stat fst; - off_t rel_st_size = 0; + uint32_t rel_st_size = 0; get_next: @@ -444,7 +444,9 @@ ptrack_filelist_getnext(PtScanCtx * ctx) goto get_next; } - if (fst.st_size == 0) + rel_st_size = fst.st_size; + + if (rel_st_size == 0) { elog(DEBUG3, "ptrack: skip empty file %s", fullpath); @@ -452,8 +454,6 @@ ptrack_filelist_getnext(PtScanCtx * ctx) goto get_next; } - rel_st_size = fst.st_size; - if (pfl->segno > 0) { ctx->relsize = pfl->segno * RELSEG_SIZE + rel_st_size / BLCKSZ; From 061898e022d31467ea61479d427027ac9b344c62 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Wed, 2 Aug 2023 19:43:11 +0300 Subject: [PATCH 51/65] fix for mdzeroextend in REL_16/master mdzeroextend were added to speedup relation extending by using fallocate in commit: 4d330a61bb1 Add smgrzeroextend(), FileZero(), FileFallocate() It should be properly handled to mark such pages as dirty in ptrack. 
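For readability, the essence of the md.c change (as it appears in the attached
REL_16_STABLE/master patches below) is one extra call site at the end of the
mdzeroextend() chunk loop, firing the extension hook once per newly allocated
block:

    /* sketch of the hunk added to mdzeroextend(); see the diff below */
    remblocks -= numblocks;
    curblocknum += numblocks;

    if (mdextend_hook)
    {
        /* report every zero-extended block, just like plain mdextend() does */
        for (; blocknum < curblocknum; blocknum++)
            mdextend_hook(reln->smgr_rlocator, forknum, blocknum);
    }

On the ptrack side the hook is assumed to end up in ptrack_mark_block(), the
same path used for ordinary mdextend()/mdwrite() calls, so zero-extended
pages are tracked like any other dirtied page.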
--- patches/REL_16_STABLE-ptrack-core.diff | 261 +++++++++++++++++++++++++ patches/master-ptrack-core.diff | 49 +++-- 2 files changed, 292 insertions(+), 18 deletions(-) create mode 100644 patches/REL_16_STABLE-ptrack-core.diff diff --git a/patches/REL_16_STABLE-ptrack-core.diff b/patches/REL_16_STABLE-ptrack-core.diff new file mode 100644 index 0000000..04cf8a4 --- /dev/null +++ b/patches/REL_16_STABLE-ptrack-core.diff @@ -0,0 +1,261 @@ +diff --git a/src/backend/backup/basebackup.c b/src/backend/backup/basebackup.c +index 45be21131c5..134e677f9d1 100644 +--- a/src/backend/backup/basebackup.c ++++ b/src/backend/backup/basebackup.c +@@ -199,6 +199,13 @@ static const struct exclude_list_item excludeFiles[] = + {"postmaster.pid", false}, + {"postmaster.opts", false}, + ++ /* ++ * Skip all transient ptrack files, but do copy ptrack.map, since it may ++ * be successfully used immediately after backup. TODO: check, test? ++ */ ++ {"ptrack.map.mmap", false}, ++ {"ptrack.map.tmp", false}, ++ + /* end of list */ + {NULL, false} + }; +@@ -214,6 +221,11 @@ static const struct exclude_list_item noChecksumFiles[] = { + {"pg_filenode.map", false}, + {"pg_internal.init", true}, + {"PG_VERSION", false}, ++ ++ {"ptrack.map.mmap", false}, ++ {"ptrack.map", false}, ++ {"ptrack.map.tmp", false}, ++ + #ifdef EXEC_BACKEND + {"config_exec_params", true}, + #endif +diff --git a/src/backend/storage/file/copydir.c b/src/backend/storage/file/copydir.c +index e04bc3941ae..996b5de6169 100644 +--- a/src/backend/storage/file/copydir.c ++++ b/src/backend/storage/file/copydir.c +@@ -27,6 +27,8 @@ + #include "storage/copydir.h" + #include "storage/fd.h" + ++copydir_hook_type copydir_hook = NULL; ++ + /* + * copydir: copy a directory + * +@@ -75,6 +77,9 @@ copydir(const char *fromdir, const char *todir, bool recurse) + } + FreeDir(xldir); + ++ if (copydir_hook) ++ copydir_hook(todir); ++ + /* + * Be paranoid here and fsync all files to ensure the copy is really done. + * But if fsync is disabled, we're done. +diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c +index fdecbad1709..f849d00161e 100644 +--- a/src/backend/storage/smgr/md.c ++++ b/src/backend/storage/smgr/md.c +@@ -87,6 +87,8 @@ typedef struct _MdfdVec + + static MemoryContext MdCxt; /* context for all MdfdVec objects */ + ++mdextend_hook_type mdextend_hook = NULL; ++mdwrite_hook_type mdwrite_hook = NULL; + + /* Populate a file tag describing an md.c segment file. 
*/ + #define INIT_MD_FILETAG(a,xx_rlocator,xx_forknum,xx_segno) \ +@@ -515,6 +517,9 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, + register_dirty_segment(reln, forknum, v); + + Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE)); ++ ++ if (mdextend_hook) ++ mdextend_hook(reln->smgr_rlocator, forknum, blocknum); + } + + /* +@@ -622,6 +627,12 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum, + + remblocks -= numblocks; + curblocknum += numblocks; ++ ++ if (mdextend_hook) ++ { ++ for (; blocknum < curblocknum; blocknum++) ++ mdextend_hook(reln->smgr_rlocator, forknum, blocknum); ++ } + } + } + +@@ -867,6 +878,9 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, + + if (!skipFsync && !SmgrIsTemp(reln)) + register_dirty_segment(reln, forknum, v); ++ ++ if (mdwrite_hook) ++ mdwrite_hook(reln->smgr_rlocator, forknum, blocknum); + } + + /* +diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c +index 04fcb06056d..22bf179f560 100644 +--- a/src/backend/storage/sync/sync.c ++++ b/src/backend/storage/sync/sync.c +@@ -79,6 +79,8 @@ static MemoryContext pendingOpsCxt; /* context for the above */ + static CycleCtr sync_cycle_ctr = 0; + static CycleCtr checkpoint_cycle_ctr = 0; + ++ProcessSyncRequests_hook_type ProcessSyncRequests_hook = NULL; ++ + /* Intervals for calling AbsorbSyncRequests */ + #define FSYNCS_PER_ABSORB 10 + #define UNLINKS_PER_ABSORB 10 +@@ -475,6 +477,9 @@ ProcessSyncRequests(void) + CheckpointStats.ckpt_longest_sync = longest; + CheckpointStats.ckpt_agg_sync_time = total_elapsed; + ++ if (ProcessSyncRequests_hook) ++ ProcessSyncRequests_hook(); ++ + /* Flag successful completion of ProcessSyncRequests */ + sync_in_progress = false; + } +diff --git a/src/bin/pg_checksums/pg_checksums.c b/src/bin/pg_checksums/pg_checksums.c +index 19eb67e4854..008a7acc9f0 100644 +--- a/src/bin/pg_checksums/pg_checksums.c ++++ b/src/bin/pg_checksums/pg_checksums.c +@@ -118,6 +118,11 @@ static const struct exclude_list_item skip[] = { + {"pg_filenode.map", false}, + {"pg_internal.init", true}, + {"PG_VERSION", false}, ++ ++ {"ptrack.map.mmap", false}, ++ {"ptrack.map", false}, ++ {"ptrack.map.tmp", false}, ++ + #ifdef EXEC_BACKEND + {"config_exec_params", true}, + #endif +diff --git a/src/bin/pg_resetwal/pg_resetwal.c b/src/bin/pg_resetwal/pg_resetwal.c +index e7ef2b8bd0c..ca7f8cdbc2f 100644 +--- a/src/bin/pg_resetwal/pg_resetwal.c ++++ b/src/bin/pg_resetwal/pg_resetwal.c +@@ -85,6 +85,7 @@ static void RewriteControlFile(void); + static void FindEndOfXLOG(void); + static void KillExistingXLOG(void); + static void KillExistingArchiveStatus(void); ++static void KillExistingPtrack(void); + static void WriteEmptyXLOG(void); + static void usage(void); + +@@ -488,6 +489,7 @@ main(int argc, char *argv[]) + RewriteControlFile(); + KillExistingXLOG(); + KillExistingArchiveStatus(); ++ KillExistingPtrack(); + WriteEmptyXLOG(); + + printf(_("Write-ahead log reset\n")); +@@ -1029,6 +1031,41 @@ KillExistingArchiveStatus(void) + pg_fatal("could not close directory \"%s\": %m", ARCHSTATDIR); + } + ++/* ++ * Remove existing ptrack files ++ */ ++static void ++KillExistingPtrack(void) ++{ ++#define PTRACKDIR "global" ++ ++ DIR *xldir; ++ struct dirent *xlde; ++ char path[MAXPGPATH + sizeof(PTRACKDIR)]; ++ ++ xldir = opendir(PTRACKDIR); ++ if (xldir == NULL) ++ pg_fatal("could not open directory \"%s\": %m", PTRACKDIR); ++ ++ while (errno = 0, (xlde = readdir(xldir)) != NULL) ++ { ++ if (strcmp(xlde->d_name, "ptrack.map.mmap") == 
0 || ++ strcmp(xlde->d_name, "ptrack.map") == 0 || ++ strcmp(xlde->d_name, "ptrack.map.tmp") == 0) ++ { ++ snprintf(path, sizeof(path), "%s/%s", PTRACKDIR, xlde->d_name); ++ if (unlink(path) < 0) ++ pg_fatal("could not delete file \"%s\": %m", path); ++ } ++ } ++ ++ if (errno) ++ pg_fatal("could not read directory \"%s\": %m", PTRACKDIR); ++ ++ if (closedir(xldir)) ++ pg_fatal("could not close directory \"%s\": %m", PTRACKDIR); ++} ++ + + /* + * Write an empty XLOG file, containing only the checkpoint record +diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c +index bd5c598e200..a568156c5fb 100644 +--- a/src/bin/pg_rewind/filemap.c ++++ b/src/bin/pg_rewind/filemap.c +@@ -157,6 +157,10 @@ static const struct exclude_list_item excludeFiles[] = + {"postmaster.pid", false}, + {"postmaster.opts", false}, + ++ {"ptrack.map.mmap", false}, ++ {"ptrack.map", false}, ++ {"ptrack.map.tmp", false}, ++ + /* end of list */ + {NULL, false} + }; +diff --git a/src/include/storage/copydir.h b/src/include/storage/copydir.h +index a8be5b21e0b..020874f96cd 100644 +--- a/src/include/storage/copydir.h ++++ b/src/include/storage/copydir.h +@@ -13,6 +13,9 @@ + #ifndef COPYDIR_H + #define COPYDIR_H + ++typedef void (*copydir_hook_type) (const char *path); ++extern PGDLLIMPORT copydir_hook_type copydir_hook; ++ + extern void copydir(const char *fromdir, const char *todir, bool recurse); + extern void copy_file(const char *fromfile, const char *tofile); + +diff --git a/src/include/storage/md.h b/src/include/storage/md.h +index 941879ee6a8..24738aeecd0 100644 +--- a/src/include/storage/md.h ++++ b/src/include/storage/md.h +@@ -19,6 +19,13 @@ + #include "storage/smgr.h" + #include "storage/sync.h" + ++typedef void (*mdextend_hook_type) (RelFileLocatorBackend smgr_rlocator, ++ ForkNumber forknum, BlockNumber blocknum); ++extern PGDLLIMPORT mdextend_hook_type mdextend_hook; ++typedef void (*mdwrite_hook_type) (RelFileLocatorBackend smgr_rlocator, ++ ForkNumber forknum, BlockNumber blocknum); ++extern PGDLLIMPORT mdwrite_hook_type mdwrite_hook; ++ + /* md storage manager functionality */ + extern void mdinit(void); + extern void mdopen(SMgrRelation reln); +diff --git a/src/include/storage/sync.h b/src/include/storage/sync.h +index cfbcfa6797d..2a432440db9 100644 +--- a/src/include/storage/sync.h ++++ b/src/include/storage/sync.h +@@ -55,6 +55,9 @@ typedef struct FileTag + uint32 segno; + } FileTag; + ++typedef void (*ProcessSyncRequests_hook_type) (void); ++extern PGDLLIMPORT ProcessSyncRequests_hook_type ProcessSyncRequests_hook; ++ + extern void InitSync(void); + extern void SyncPreCheckpoint(void); + extern void SyncPostCheckpoint(void); diff --git a/patches/master-ptrack-core.diff b/patches/master-ptrack-core.diff index c24ae4d..04cf8a4 100644 --- a/patches/master-ptrack-core.diff +++ b/patches/master-ptrack-core.diff @@ -1,8 +1,8 @@ diff --git a/src/backend/backup/basebackup.c b/src/backend/backup/basebackup.c -index 715428029b3..81f3218540a 100644 +index 45be21131c5..134e677f9d1 100644 --- a/src/backend/backup/basebackup.c +++ b/src/backend/backup/basebackup.c -@@ -197,6 +197,13 @@ static const struct exclude_list_item excludeFiles[] = +@@ -199,6 +199,13 @@ static const struct exclude_list_item excludeFiles[] = {"postmaster.pid", false}, {"postmaster.opts", false}, @@ -16,7 +16,7 @@ index 715428029b3..81f3218540a 100644 /* end of list */ {NULL, false} }; -@@ -212,6 +219,11 @@ static const struct exclude_list_item noChecksumFiles[] = { +@@ -214,6 +221,11 @@ static const struct exclude_list_item 
noChecksumFiles[] = { {"pg_filenode.map", false}, {"pg_internal.init", true}, {"PG_VERSION", false}, @@ -29,7 +29,7 @@ index 715428029b3..81f3218540a 100644 {"config_exec_params", true}, #endif diff --git a/src/backend/storage/file/copydir.c b/src/backend/storage/file/copydir.c -index 658fd95ba95..eee38eba176 100644 +index e04bc3941ae..996b5de6169 100644 --- a/src/backend/storage/file/copydir.c +++ b/src/backend/storage/file/copydir.c @@ -27,6 +27,8 @@ @@ -41,7 +41,7 @@ index 658fd95ba95..eee38eba176 100644 /* * copydir: copy a directory * -@@ -78,6 +80,9 @@ copydir(char *fromdir, char *todir, bool recurse) +@@ -75,6 +77,9 @@ copydir(const char *fromdir, const char *todir, bool recurse) } FreeDir(xldir); @@ -52,7 +52,7 @@ index 658fd95ba95..eee38eba176 100644 * Be paranoid here and fsync all files to ensure the copy is really done. * But if fsync is disabled, we're done. diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c -index 3deac496eed..07c4ee2ba03 100644 +index fdecbad1709..f849d00161e 100644 --- a/src/backend/storage/smgr/md.c +++ b/src/backend/storage/smgr/md.c @@ -87,6 +87,8 @@ typedef struct _MdfdVec @@ -64,7 +64,7 @@ index 3deac496eed..07c4ee2ba03 100644 /* Populate a file tag describing an md.c segment file. */ #define INIT_MD_FILETAG(a,xx_rlocator,xx_forknum,xx_segno) \ -@@ -484,6 +486,9 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, +@@ -515,6 +517,9 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, register_dirty_segment(reln, forknum, v); Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE)); @@ -74,7 +74,20 @@ index 3deac496eed..07c4ee2ba03 100644 } /* -@@ -773,6 +778,9 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, +@@ -622,6 +627,12 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum, + + remblocks -= numblocks; + curblocknum += numblocks; ++ ++ if (mdextend_hook) ++ { ++ for (; blocknum < curblocknum; blocknum++) ++ mdextend_hook(reln->smgr_rlocator, forknum, blocknum); ++ } + } + } + +@@ -867,6 +878,9 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, if (!skipFsync && !SmgrIsTemp(reln)) register_dirty_segment(reln, forknum, v); @@ -85,10 +98,10 @@ index 3deac496eed..07c4ee2ba03 100644 /* diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c -index 9d6a9e91090..990d0722229 100644 +index 04fcb06056d..22bf179f560 100644 --- a/src/backend/storage/sync/sync.c +++ b/src/backend/storage/sync/sync.c -@@ -81,6 +81,8 @@ static MemoryContext pendingOpsCxt; /* context for the above */ +@@ -79,6 +79,8 @@ static MemoryContext pendingOpsCxt; /* context for the above */ static CycleCtr sync_cycle_ctr = 0; static CycleCtr checkpoint_cycle_ctr = 0; @@ -97,7 +110,7 @@ index 9d6a9e91090..990d0722229 100644 /* Intervals for calling AbsorbSyncRequests */ #define FSYNCS_PER_ABSORB 10 #define UNLINKS_PER_ABSORB 10 -@@ -477,6 +479,9 @@ ProcessSyncRequests(void) +@@ -475,6 +477,9 @@ ProcessSyncRequests(void) CheckpointStats.ckpt_longest_sync = longest; CheckpointStats.ckpt_agg_sync_time = total_elapsed; @@ -108,7 +121,7 @@ index 9d6a9e91090..990d0722229 100644 sync_in_progress = false; } diff --git a/src/bin/pg_checksums/pg_checksums.c b/src/bin/pg_checksums/pg_checksums.c -index 324ccf77834..e82cae5f325 100644 +index 19eb67e4854..008a7acc9f0 100644 --- a/src/bin/pg_checksums/pg_checksums.c +++ b/src/bin/pg_checksums/pg_checksums.c @@ -118,6 +118,11 @@ static const struct exclude_list_item skip[] = { @@ -124,7 +137,7 @@ index 
324ccf77834..e82cae5f325 100644 {"config_exec_params", true}, #endif diff --git a/src/bin/pg_resetwal/pg_resetwal.c b/src/bin/pg_resetwal/pg_resetwal.c -index d4772a29650..3318f64359d 100644 +index e7ef2b8bd0c..ca7f8cdbc2f 100644 --- a/src/bin/pg_resetwal/pg_resetwal.c +++ b/src/bin/pg_resetwal/pg_resetwal.c @@ -85,6 +85,7 @@ static void RewriteControlFile(void); @@ -143,7 +156,7 @@ index d4772a29650..3318f64359d 100644 WriteEmptyXLOG(); printf(_("Write-ahead log reset\n")); -@@ -1036,6 +1038,41 @@ KillExistingArchiveStatus(void) +@@ -1029,6 +1031,41 @@ KillExistingArchiveStatus(void) pg_fatal("could not close directory \"%s\": %m", ARCHSTATDIR); } @@ -186,7 +199,7 @@ index d4772a29650..3318f64359d 100644 /* * Write an empty XLOG file, containing only the checkpoint record diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c -index 269ed6446e6..6318a8c1f55 100644 +index bd5c598e200..a568156c5fb 100644 --- a/src/bin/pg_rewind/filemap.c +++ b/src/bin/pg_rewind/filemap.c @@ -157,6 +157,10 @@ static const struct exclude_list_item excludeFiles[] = @@ -201,7 +214,7 @@ index 269ed6446e6..6318a8c1f55 100644 {NULL, false} }; diff --git a/src/include/storage/copydir.h b/src/include/storage/copydir.h -index 50a26edeb06..af1602f5154 100644 +index a8be5b21e0b..020874f96cd 100644 --- a/src/include/storage/copydir.h +++ b/src/include/storage/copydir.h @@ -13,6 +13,9 @@ @@ -215,7 +228,7 @@ index 50a26edeb06..af1602f5154 100644 extern void copy_file(const char *fromfile, const char *tofile); diff --git a/src/include/storage/md.h b/src/include/storage/md.h -index 10aa1b0109b..1415675824e 100644 +index 941879ee6a8..24738aeecd0 100644 --- a/src/include/storage/md.h +++ b/src/include/storage/md.h @@ -19,6 +19,13 @@ @@ -233,7 +246,7 @@ index 10aa1b0109b..1415675824e 100644 extern void mdinit(void); extern void mdopen(SMgrRelation reln); diff --git a/src/include/storage/sync.h b/src/include/storage/sync.h -index 049af878dec..7689d49a24e 100644 +index cfbcfa6797d..2a432440db9 100644 --- a/src/include/storage/sync.h +++ b/src/include/storage/sync.h @@ -55,6 +55,9 @@ typedef struct FileTag From 224889a7fcb9682366a4dba886f9286df3478bb1 Mon Sep 17 00:00:00 2001 From: "v.shepard" Date: Thu, 3 Aug 2023 21:51:29 +0200 Subject: [PATCH 52/65] add AUTHORS.md --- AUTHORS.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 AUTHORS.md diff --git a/AUTHORS.md b/AUTHORS.md new file mode 100644 index 0000000..ed4d0eb --- /dev/null +++ b/AUTHORS.md @@ -0,0 +1,22 @@ +# Authors + +This list is sorted by the number of commits per contributor in _descending_ order. + +Avatar|Contributor|Contributions +:-:|---|:-: +@ololobus|[@ololobus](https://github.com/ololobus)|62 +@funny-falcon|[@funny-falcon](https://github.com/funny-falcon)|15 +@alubennikova|[@alubennikova](https://github.com/alubennikova)|9 +@kulaginm|[@kulaginm](https://github.com/kulaginm)|5 +@daniel-95|[@daniel-95](https://github.com/daniel-95)|4 +@ziva777|[@ziva777](https://github.com/ziva777)|2 +@vegebird|[@vegebird](https://github.com/vegebird)|2 +@kovdb75|[@kovdb75](https://github.com/kovdb75)|1 +@MarinaPolyakova|[@MarinaPolyakova](https://github.com/MarinaPolyakova)|1 +@rzharkov|[@rzharkov](https://github.com/rzharkov)|1 +@vbwagner|[@vbwagner](https://github.com/vbwagner)|1 +@waaeer|[@waaeer](https://github.com/waaeer)|1 + +--- + +Auto-generated by [gaocegege/maintainer](https://github.com/maintainer-org/maintainer) on 2023-08-03. 
From 22855f09c6d8147c62af86931424e9f786b917ec Mon Sep 17 00:00:00 2001 From: asavchkov <79832668+asavchkov@users.noreply.github.com> Date: Mon, 28 Aug 2023 22:39:08 +0700 Subject: [PATCH 53/65] Switch from Travis to GitHub Actions (#32) --- .github/workflows/test.yml | 94 +++++++++++++++++++++++++++ .travis.yml | 48 -------------- Dockerfile.in | 25 -------- Makefile | 62 +++++++++++++++++- README.md | 116 +++++++++++++++++++++++++-------- codecov.yml | 5 +- docker-compose.yml | 17 ----- make_dockerfile.sh | 33 ---------- run_tests.sh | 127 ------------------------------------- 9 files changed, 247 insertions(+), 280 deletions(-) create mode 100644 .github/workflows/test.yml delete mode 100644 .travis.yml delete mode 100644 Dockerfile.in delete mode 100644 docker-compose.yml delete mode 100755 make_dockerfile.sh delete mode 100755 run_tests.sh diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..f05773a --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,94 @@ +name: Test + +on: + push: + branches: + - "**" + pull_request: + branches: + - main + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +jobs: + Test: + strategy: + matrix: + # pg_version: [15] + pg_version: [11, 12, 13, 14, 15] + os: [ubuntu-22.04] + # tests: [tap] + tests: [tap, python] + # test_mode: [normal, legacy, paranoia] + test_mode: [normal, paranoia] + exclude: + - tests: tap + test_mode: paranoia + - tests: python + test_mode: normal + - tests: python + test_mode: legacy + fail-fast: false + name: ${{ format('Ptrack ({0}, PostgreSQL {1}, {2} tests, {3} mode)', matrix.os, matrix.pg_version, matrix.tests, matrix.test_mode) }} + container: + image: ${{ format('ghcr.io/postgres-dev/{0}:1.0', matrix.os) }} + env: + PG_BRANCH: ${{ format('REL_{0}_STABLE', matrix.pg_version) }} + PGDATA: $HOME/data + TEST_MODE: ${{ matrix.test_mode }} + options: --privileged + steps: + - name: Get Postgres sources + uses: actions/checkout@v3 + with: + repository: postgres/postgres + ref: ${{ format('REL_{0}_STABLE', matrix.pg_version) }} + path: postgres + - name: Get Ptrack sources + uses: actions/checkout@v3 + with: + path: ptrack + - name: Get Pg_probackup sources + uses: actions/checkout@v3 + with: + repository: postgrespro/pg_probackup + path: pg_probackup + - name: Apply ptrack patches + run: make patch top_builddir=../postgres + working-directory: ptrack + - name: Install Postgres + run: | + make install-postgres top_builddir=$GITHUB_WORKSPACE/postgres prefix=$HOME/pgsql && + echo $HOME/pgsql/bin >> $GITHUB_PATH + working-directory: ptrack + - name: Install Ptrack + run: make install USE_PGXS=1 PG_CPPFLAGS=-coverage SHLIB_LINK=-coverage + working-directory: ptrack + - name: Install Pg_probackup + run: make install-pg-probackup USE_PGXS=1 top_srcdir=../postgres + working-directory: ptrack + shell: bash {0} + - name: Install additional packages + run: | + apt update && + apt install -y python3-pip python3-six python3-pytest python3-pytest-xdist curl && + pip3 install --no-input testgres + # All steps have been so far executed by root but ptrack tests run from an + # unprivileged user so change some permissions + - name: Adjust the permissions of ptrack test folders + run: | + mkdir pg_probackup/tests/tmp_dirs + chown -R "dev:" pg_probackup ptrack + - name: Test + run: make test-${{ matrix.tests }} USE_PGXS=1 + working-directory: ptrack + shell: runuser dev {0} + - name: Collect coverage results + run: make coverage + working-directory: ptrack + 
shell: runuser dev {0} + - name: Upload coverage results to Codecov + uses: codecov/codecov-action@v3 + with: + working-directory: ptrack + runs-on: ubuntu-latest diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index b6bd63a..0000000 --- a/.travis.yml +++ /dev/null @@ -1,48 +0,0 @@ -os: linux - -dist: bionic - -language: c - -services: - - docker - -install: - - ./make_dockerfile.sh - - docker-compose build - -script: - - docker-compose run $(bash <(curl -s https://codecov.io/env)) tests - -notifications: - email: - on_success: change - on_failure: always - -# keep in sync with codecov.yml number of builds -env: - - PG_BRANCH=master TEST_CASE=tap - - PG_BRANCH=master TEST_CASE=tap MODE=legacy -# - PG_BRANCH=master TEST_CASE=all - - PG_BRANCH=master TEST_CASE=all MODE=paranoia - - PG_BRANCH=REL_14_STABLE TEST_CASE=tap - - PG_BRANCH=REL_14_STABLE TEST_CASE=tap MODE=legacy -# - PG_BRANCH=REL_14_STABLE TEST_CASE=all - - PG_BRANCH=REL_14_STABLE TEST_CASE=all MODE=paranoia - - PG_BRANCH=REL_13_STABLE TEST_CASE=tap - - PG_BRANCH=REL_13_STABLE TEST_CASE=tap MODE=legacy -# - PG_BRANCH=REL_13_STABLE TEST_CASE=all - - PG_BRANCH=REL_13_STABLE TEST_CASE=all MODE=paranoia - - PG_BRANCH=REL_12_STABLE TEST_CASE=tap - - PG_BRANCH=REL_12_STABLE TEST_CASE=tap MODE=legacy -# - PG_BRANCH=REL_12_STABLE TEST_CASE=all - - PG_BRANCH=REL_12_STABLE TEST_CASE=all MODE=paranoia - - PG_BRANCH=REL_11_STABLE TEST_CASE=tap - - PG_BRANCH=REL_11_STABLE TEST_CASE=tap MODE=legacy -# - PG_BRANCH=REL_11_STABLE TEST_CASE=all - - PG_BRANCH=REL_11_STABLE TEST_CASE=all MODE=paranoia - -jobs: - allow_failures: - - if: env(PG_BRANCH) = master - diff --git a/Dockerfile.in b/Dockerfile.in deleted file mode 100644 index c2b0ffd..0000000 --- a/Dockerfile.in +++ /dev/null @@ -1,25 +0,0 @@ -FROM ololobus/postgres-dev:stretch - -USER root -RUN apt-get update -RUN apt-get -yq install python python-pip python-virtualenv - -# Environment -ENV PG_BRANCH=${PG_BRANCH} -ENV LANG=C.UTF-8 PGHOME=/testdir/pgbin -ENV MODE=${MODE} TEST_CASE=${TEST_CASE} TEST_REPEATS=${TEST_REPEATS} - -# Make directories -RUN mkdir -p /testdir - -COPY run_tests.sh /run.sh -RUN chmod 755 /run.sh - -COPY . /testdir/ptrack -WORKDIR /testdir - -# Grant privileges -RUN chown -R postgres:postgres /testdir - -USER postgres -ENTRYPOINT /run.sh diff --git a/Makefile b/Makefile index e3d25a4..499067a 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,4 @@ + # contrib/ptrack/Makefile MODULE_big = ptrack @@ -15,13 +16,70 @@ TAP_TESTS = 1 # with Mkvcbuild.pm on PGv15+ PG_LIBS_INTERNAL += $(libpq_pgport) -ifdef USE_PGXS PG_CONFIG ?= pg_config + +ifdef USE_PGXS PGXS := $(shell $(PG_CONFIG) --pgxs) include $(PGXS) else -subdir = contrib/ptrack top_builddir = ../.. +# Makefile.global is a build artifact and initially may not be available +ifneq ($(wildcard $(top_builddir)/src/Makefile.global), ) include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif +endif + +# Assuming make is started in the ptrack directory +patch: + @cd $(top_builddir) && \ + echo Applying the ptrack patch... && \ + git apply --3way -v $(CURDIR)/patches/${PG_BRANCH}-ptrack-core.diff +ifeq ($(MODE), paranoia) + @echo Applying turn-off-hint-bits.diff for the paranoia mode... 
&& \ + git apply --3way -v $(CURDIR)/patches/turn-off-hint-bits.diff +endif + +NPROC ?= $(shell nproc) +prefix := $(abspath $(top_builddir)/pgsql) +TEST_MODE ?= normal +# Postgres Makefile skips some targets depending on the MAKELEVEL variable so +# reset it when calling install targets as if they are started directly from the +# command line +install-postgres: + @cd $(top_builddir) && \ + if [ "$(TEST_MODE)" = legacy ]; then \ + ./configure CFLAGS='-DEXEC_BACKEND' --disable-atomics --prefix=$(prefix) --enable-debug --enable-cassert --enable-depend --enable-tap-tests --quiet; \ + else \ + ./configure --prefix=$(prefix) --enable-debug --enable-cassert --enable-depend --enable-tap-tests; \ + fi && \ + $(MAKE) -sj $(NPROC) install MAKELEVEL=0 && \ + $(MAKE) -sj $(NPROC) -C contrib/ install MAKELEVEL=0 + +# Now when Postgres is built call all remainig targets with USE_PGXS=1 + +test-tap: +ifeq ($(TEST_MODE), legacy) + setarch x86_64 --addr-no-randomize $(MAKE) installcheck USE_PGXS=$(USE_PGXS) PG_CONFIG=$(PG_CONFIG) +else + $(MAKE) installcheck USE_PGXS=$(USE_PGXS) PG_CONFIG=$(PG_CONFIG) +endif + +pg_probackup_dir = ../pg_probackup +# Pg_probackup's Makefile uses top_srcdir when building via PGXS so set it when calling this target +# At the moment building pg_probackup with multiple threads may run some jobs too early and end with an error so do not set the -j option +install-pg-probackup: + $(MAKE) -C $(pg_probackup_dir) install USE_PGXS=$(USE_PGXS) PG_CONFIG=$(PG_CONFIG) top_srcdir=$(top_srcdir) + +test-python: + cd $(pg_probackup_dir); \ + env="PG_PROBACKUP_PTRACK=ON PG_CONFIG=$(PG_CONFIG)"; \ + if [ "$(TEST_MODE)" = normal ]; then \ + env="$$env PG_PROBACKUP_TEST_BASIC=ON"; \ + elif [ "$(TEST_MODE)" = paranoia ]; then \ + env="$$env PG_PROBACKUP_PARANOIA=ON"; \ + fi; \ + env $$env python3 -m pytest -svv$(if $(shell python3 -m pytest --help | grep '\-n '), -n $(NPROC))$(if $(TESTS), -k '$(TESTS)') tests/ptrack_test.py + +coverage: + gcov *.c *.h diff --git a/README.md b/README.md index ece43af..8d257ef 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ -[![Build Status](https://travis-ci.com/postgrespro/ptrack.svg?branch=master)](https://travis-ci.com/postgrespro/ptrack) -[![codecov](https://codecov.io/gh/postgrespro/ptrack/branch/master/graph/badge.svg)](https://codecov.io/gh/postgrespro/ptrack) +[![Test](https://github.com/postgrespro/ptrack/actions/workflows/test.yml/badge.svg)](https://github.com/postgrespro/ptrack/actions/workflows/test.yml) +[![Codecov](https://codecov.io/gh/postgrespro/ptrack/branch/master/graph/badge.svg)](https://codecov.io/gh/postgrespro/ptrack) [![GitHub release](https://img.shields.io/github/v/release/postgrespro/ptrack?include_prereleases)](https://github.com/postgrespro/ptrack/releases/latest) # ptrack @@ -12,44 +12,66 @@ It is designed to allow false positives (i.e. block/page is marked in the `ptrac Currently, `ptrack` codebase is split between small PostgreSQL core patch and extension. All public SQL API methods and main engine are placed in the `ptrack` extension, while the core patch contains only certain hooks and modifies binary utilities to ignore `ptrack.map.*` files. 
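For illustration, here is a minimal sketch of how that split works in practice. It is simplified and hypothetical rather than the literal ptrack sources: the callback name `ptrack_sync_request_hook` is invented for this example, while the hook point and the `prev_*` convention are taken from the core-patch hunks that appear later in this patch series. The core patch only declares hook points such as `ProcessSyncRequests_hook`, and the extension installs its callbacks from `_PG_init()`:

```c
#include "postgres.h"
#include "fmgr.h"
#include "storage/sync.h"	/* the core patch adds ProcessSyncRequests_hook here */

PG_MODULE_MAGIC;

/* Remember whatever hook another module may have installed before us. */
static ProcessSyncRequests_hook_type prev_ProcessSyncRequests_hook = NULL;

/* Hypothetical callback name; the real callbacks live in ptrack.c/engine.c. */
static void
ptrack_sync_request_hook(void)
{
	/* ... persist the ptrack map alongside the checkpoint's sync work ... */
	if (prev_ProcessSyncRequests_hook)
		prev_ProcessSyncRequests_hook();
}

void
_PG_init(void)
{
	prev_ProcessSyncRequests_hook = ProcessSyncRequests_hook;
	ProcessSyncRequests_hook = ptrack_sync_request_hook;
}
```

Keeping the tracking logic behind such hooks is what lets the core patch stay small and easy to carry across PostgreSQL stable branches.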
-This extension is compatible with PostgreSQL [11](https://github.com/postgrespro/ptrack/blob/master/patches/REL_11_STABLE-ptrack-core.diff), [12](https://github.com/postgrespro/ptrack/blob/master/patches/REL_12_STABLE-ptrack-core.diff), [13](https://github.com/postgrespro/ptrack/blob/master/patches/REL_13_STABLE-ptrack-core.diff), [14](https://github.com/postgrespro/ptrack/blob/master/patches/REL_14_STABLE-ptrack-core.diff). +This extension is compatible with PostgreSQL [11](patches/REL_11_STABLE-ptrack-core.diff), [12](patches/REL_12_STABLE-ptrack-core.diff), [13](patches/REL_13_STABLE-ptrack-core.diff), [14](patches/REL_14_STABLE-ptrack-core.diff), [15](patches/REL_15_STABLE-ptrack-core.diff). ## Installation -1) Get latest `ptrack` sources: +1) Specify the PostgreSQL branch to work with: ```shell -git clone https://github.com/postgrespro/ptrack.git +export PG_BRANCH=REL_15_STABLE ``` -2) Get latest PostgreSQL sources: +2) Get the latest PostgreSQL sources: ```shell -git clone https://github.com/postgres/postgres.git -b REL_14_STABLE && cd postgres +git clone https://github.com/postgres/postgres.git -b $PG_BRANCH ``` -3) Apply PostgreSQL core patch: +3) Get the latest `ptrack` sources: ```shell -git apply -3 ../ptrack/patches/REL_14_STABLE-ptrack-core.diff +git clone https://github.com/postgrespro/ptrack.git postgres/contrib/ptrack ``` -4) Compile and install PostgreSQL +4) Change to the `ptrack` directory: -5) Set `ptrack.map_size` (in MB) +```shell +cd postgres/contrib/ptrack +``` + +5) Apply the PostgreSQL core patch: + +```shell +make patch +``` + +6) Compile and install PostgreSQL: + +```shell +make install-postgres prefix=$PWD/pgsql # or some other prefix of your choice +``` + +7) Add the newly created binaries to the PATH: + +```shell +export PATH=$PWD/pgsql/bin:$PATH +``` + +8) Compile and install `ptrack`: ```shell -echo "shared_preload_libraries = 'ptrack'" >> postgres_data/postgresql.conf -echo "ptrack.map_size = 64" >> postgres_data/postgresql.conf +make install USE_PGXS=1 ``` -6) Compile and install `ptrack` extension +9) Set `ptrack.map_size` (in MB): ```shell -USE_PGXS=1 make -C /path/to/ptrack/ install +echo "shared_preload_libraries = 'ptrack'" >> /postgresql.conf +echo "ptrack.map_size = 64" >> /postgresql.conf ``` -7) Run PostgreSQL and create `ptrack` extension +10) Run PostgreSQL and create the `ptrack` extension: ```sql postgres=# CREATE EXTENSION ptrack; @@ -158,24 +180,64 @@ To gather the whole changeset of modified blocks in `ptrack_get_pagemapset()` we ## Contribution -Feel free to [send pull requests](https://github.com/postgrespro/ptrack/compare), [fill up issues](https://github.com/postgrespro/ptrack/issues/new), or just reach one of us directly (e.g. <[Alexey Kondratov](mailto:a.kondratov@postgrespro.ru?subject=[GitHub]%20Ptrack), [@ololobus](https://github.com/ololobus)>) if you are interested in `ptrack`. +Feel free to [send a pull request](https://github.com/postgrespro/ptrack/compare), [create an issue](https://github.com/postgrespro/ptrack/issues/new) or [reach us by e-mail](mailto:team-wd40@lists.postgrespro.ru??subject=[GitHub]%20Ptrack) if you are interested in `ptrack`. + +## Tests -### Tests +All changes of the source code in this repository are checked by CI - see commit statuses and the project status badge. You can also run tests locally by executing a few Makefile targets. 
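For example, assuming PostgreSQL, `ptrack` and `pg_probackup` are already installed as described in the [Installation](#installation) and Testing sections, two of the CI combinations can be reproduced locally:

```shell
# TAP tests in the default (normal) mode
make test-tap USE_PGXS=1

# pg_probackup-based Python tests in the paranoia mode
TEST_MODE=paranoia make test-python
```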
-Everything is tested automatically with [travis-ci.com](https://travis-ci.com/postgrespro/ptrack) and [codecov.io](https://codecov.io/gh/postgrespro/ptrack), but you can also run tests locally via `Docker`: +### Prerequisites -```sh -export PG_BRANCH=REL_14_STABLE -export TEST_CASE=all -export MODE=paranoia +To run Python tests install the following packages: -./make_dockerfile.sh +OS packages: + - python3-pip + - python3-six + - python3-pytest + - python3-pytest-xdist -docker-compose build -docker-compose run tests +PIP packages: + - testgres + +For example, for Ubuntu: + +```shell +sudo apt update +sudo apt install python3-pip python3-six python3-pytest python3-pytest-xdist +sudo pip3 install testgres +``` + +### Testing + +Install PostgreSQL and ptrack as described in [Installation](#installation), install the testing prerequisites, then do (assuming the current directory is `ptrack`): +```shell +git clone https://github.com/postgrespro/pg_probackup.git ../pg_probackup # clone the repository into postgres/contrib/pg_probackup +# remember to export PATH=/path/to/pgsql/bin:$PATH +make install-pg-probackup USE_PGXS=1 top_srcdir=../.. +make test-tap USE_PGXS=1 +make test-python ``` -Available test modes (`MODE`) are `basic` (default) and `paranoia` (per-block checksum comparison of `PGDATA` content before and after backup-restore process). Available test cases (`TEST_CASE`) are `tap` (minimalistic PostgreSQL [tap test](https://github.com/postgrespro/ptrack/blob/master/t/001_basic.pl)), `all` or any specific [pg_probackup test](https://github.com/postgrespro/pg_probackup/blob/master/tests/ptrack.py), e.g. `test_ptrack_simple`. +If `pg_probackup` is not located in `postgres/contrib` then additionally specify the path to the `pg_probackup` directory when building `pg_probackup`: +```shell +make install-pg-probackup USE_PGXS=1 top_srcdir=/path/to/postgres pg_probackup_dir=/path/to/pg_probackup +``` + +You can use a public Docker image which already has the necessary build environment (but not the testing prerequisites): + +```shell +docker run -e USER_ID=`id -u` -it -v $PWD:/work --name=ptrack ghcr.io/postgres-dev/ubuntu-22.04:1.0 +dev@a033797d2f73:~$ +``` + +## Environment variables + +| Variable | Possible values | Required | Default value | Description | +| - | - | - | - | - | +| NPROC | An integer greater than 0 | No | Output of `nproc` | The number of threads used for building and running tests | +| PG_CONFIG | File path | No | pg_config (from the PATH) | The path to the `pg_config` binary | +| TESTS | A Pytest filter expression | No | Not set (run all Python tests) | A filter to include only selected tests into the run. See the Pytest `-k` option for more information. This variable is only applicable to `test-python` for the tests located in [tests](https://github.com/postgrespro/pg_probackup/tree/master/tests). | +| TEST_MODE | normal, legacy, paranoia | No | normal | The "legacy" mode runs tests in an environment similar to a 32-bit Windows system. This mode is only applicable to `test-tap`. The "paranoia" mode compares the checksums of each block of the database catalog (PGDATA) contents before making a backup and after the restoration. 
This mode is only applicable to `test-python`.| ### TODO diff --git a/codecov.yml b/codecov.yml index fe3b308..00b744e 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,6 +1,9 @@ codecov: notify: - after_n_builds: 12 # keep in sync with .travis.yml number of builds + # must be equal to the total number of parallel jobs in a CI pipeline + # (Postgres versions x test types x test modes x OSes minus excluded + # combinations) + after_n_builds: 10 # datapagemap.c/.h are copied from Postgres, so let's remove it # from report. Otherwise, we would have to remove some currently diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index fc65455..0000000 --- a/docker-compose.yml +++ /dev/null @@ -1,17 +0,0 @@ -version: "3.7" -services: - tests: - build: - context: . - - cap_add: - - SYS_PTRACE - - security_opt: - - seccomp=unconfined - - # don't work - #sysctls: - # kernel.yama.ptrace_scope: 0 - privileged: true - diff --git a/make_dockerfile.sh b/make_dockerfile.sh deleted file mode 100755 index 409a5b9..0000000 --- a/make_dockerfile.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env sh - -if [ -z ${PG_BRANCH+x} ]; then - echo PG_BRANCH is not set! - exit 1 -fi - -if [ -z ${MODE+x} ]; then - MODE=basic -else - echo MODE=${MODE} -fi - -if [ -z ${TEST_CASE+x} ]; then - TEST_CASE=all -else - echo TEST_CASE=${TEST_CASE} -fi - -if [ -z ${TEST_REPEATS+x} ]; then - TEST_REPEATS=1 -else - echo TEST_REPEATS=${TEST_REPEATS} -fi - -echo PG_BRANCH=${PG_BRANCH} - -sed \ - -e 's/${PG_BRANCH}/'${PG_BRANCH}/g \ - -e 's/${MODE}/'${MODE}/g \ - -e 's/${TEST_CASE}/'${TEST_CASE}/g \ - -e 's/${TEST_REPEATS}/'${TEST_REPEATS}/g \ -Dockerfile.in > Dockerfile diff --git a/run_tests.sh b/run_tests.sh deleted file mode 100755 index 1b4a693..0000000 --- a/run_tests.sh +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env bash - -# -# Copyright (c) 2019-2021, Postgres Professional -# - -PTRACK_SRC=${PWD}/ptrack -PG_SRC=${PWD}/postgres -PBK_SRC=${PWD}/pg_probackup -status=0 - -######################################################### -# Clone Postgres -echo "############### Getting Postgres sources" -git clone https://github.com/postgres/postgres.git --depth=1 --branch=${PG_BRANCH} ${PG_SRC} - -# Clone pg_probackup -echo "############### Getting pg_probackup sources" -git clone https://github.com/postgrespro/pg_probackup.git --depth=1 --branch=master ${PBK_SRC} - -######################################################### -# Compile and install Postgres -cd ${PG_SRC} # Go to postgres dir - -echo "############### Applying ptrack patch" -git apply --verbose --3way ${PTRACK_SRC}/patches/${PG_BRANCH}-ptrack-core.diff - -if [ "${MODE}" = "paranoia" ]; then - echo "############### Paranoia mode: applying turn-off-hint-bits.diff" - git apply --verbose --3way ${PTRACK_SRC}/patches/turn-off-hint-bits.diff -fi - -echo "############### Compiling Postgres" -if [ "${TEST_CASE}" = "tap" ] && [ "${MODE}" = "legacy" ]; then - ./configure CFLAGS='-DEXEC_BACKEND' --disable-atomics --prefix=${PGHOME} --enable-debug --enable-cassert --enable-depend --enable-tap-tests --quiet -else - ./configure --prefix=${PGHOME} --enable-debug --enable-cassert --enable-depend --enable-tap-tests --quiet -fi -make --quiet --jobs=$(nproc) install -make --quiet --jobs=$(nproc) --directory=contrib/ install - -# Override default Postgres instance -export PATH=${PGHOME}/bin:${PATH} -export LD_LIBRARY_PATH=${PGHOME}/lib -export PG_CONFIG=$(which pg_config) - -# Show pg_config path (just in case) -echo "############### pg_config path" -which pg_config - -# 
Show pg_config just in case -echo "############### pg_config" -pg_config - -######################################################### -# Build and install ptrack extension -echo "############### Compiling and installing ptrack extension" -cp --recursive ${PTRACK_SRC} ${PG_SRC}/contrib/ptrack -make USE_PGXS=1 --directory=${PG_SRC}/contrib/ptrack/ clean -make USE_PGXS=1 PG_CPPFLAGS="-coverage" SHLIB_LINK="-coverage" --directory=${PG_SRC}/contrib/ptrack/ install - -if [ "${TEST_CASE}" = "tap" ]; then - - # Run tap tests - echo "############### Running tap tests" - if [ "${MODE}" = "legacy" ]; then - # There is a known issue with attaching shared memory segment using the same - # address each time, when EXEC_BACKEND mechanism is turned on. It happens due - # to the ASLR address space randomization, so we are trying to attach a segment - # to the already occupied location. That way we simply turning off ASLR here. - # - # Postgres comment: https://github.com/postgres/postgres/blob/5cbfce562f7cd2aab0cdc4694ce298ec3567930e/src/backend/postmaster/postmaster.c#L4929 - setarch x86_64 --addr-no-randomize make --directory=${PG_SRC}/contrib/ptrack check || status=$? - else - make --directory=${PG_SRC}/contrib/ptrack check || status=$? - fi - -else - # Set kernel params (used for debugging -- probackup tests) - echo "############### setting kernel params" - sudo sh -c 'echo 0 > /proc/sys/kernel/yama/ptrace_scope' - - # Build and install pg_probackup - echo "############### Compiling and installing pg_probackup" - cd ${PBK_SRC} # Go to pg_probackup dir - make USE_PGXS=1 top_srcdir=${PG_SRC} install - - # Setup python environment - echo "############### Setting up python env" - virtualenv --python=/usr/bin/python3 pyenv - source pyenv/bin/activate - pip install testgres==1.8.2 - - echo "############### Testing" - export PG_PROBACKUP_PTRACK=ON - if [ "${MODE}" = "basic" ]; then - export PG_PROBACKUP_TEST_BASIC=ON - elif [ "${MODE}" = "paranoia" ]; then - export PG_PROBACKUP_PARANOIA=ON - fi - - if [ "${TEST_CASE}" = "all" ]; then - # Run all pg_probackup ptrack tests - PBK_TEST_CASE=tests.ptrack - else - PBK_TEST_CASE=tests.ptrack.PtrackTest.${TEST_CASE} - fi - for i in `seq ${TEST_REPEATS}`; do - python3 -m unittest -v ${PBK_TEST_CASE} || status=$? - done - - # Exit virtualenv - deactivate -fi - -######################################################### -# codecov -echo "############### Codecov" -cd ${PTRACK_SRC} -# Generate *.gcov files -gcov ${PG_SRC}/contrib/ptrack/*.c ${PG_SRC}/contrib/ptrack/*.h - -# Send coverage stats to Codecov -bash <(curl -s https://codecov.io/bash) - -# Something went wrong, exit with code 1 -if [ ${status} -ne 0 ]; then exit 1; fi From 056c5d67473bdcd35aa74ec1cb8290e192d9c632 Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Thu, 19 Oct 2023 11:52:51 +0300 Subject: [PATCH 54/65] Update the update section in Readme --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index ece43af..4a3e4e3 100644 --- a/README.md +++ b/README.md @@ -102,13 +102,13 @@ postgres=# SELECT * FROM ptrack_get_change_stat('0/285C8C8'); ## Upgrading -Usually, you have to only install new version of `ptrack` and do `ALTER EXTENSION 'ptrack' UPDATE;`. However, some specific actions may be required as well: +Usually, you have to only install new version of `ptrack` and do `ALTER EXTENSION ptrack UPDATE;`. 
However, some specific actions may be required as well: #### Upgrading from 2.0.0 to 2.1.*: * Put `shared_preload_libraries = 'ptrack'` into `postgresql.conf`. * Rename `ptrack_map_size` to `ptrack.map_size`. -* Do `ALTER EXTENSION 'ptrack' UPDATE;`. +* Do `ALTER EXTENSION ptrack UPDATE;`. * Restart your server. #### Upgrading from 2.1.* to 2.2.*: @@ -121,14 +121,14 @@ Since version 2.2 we use a different algorithm for tracking changed pages. Thus, * Update ptrack binaries * Remove global/ptrack.map.mmap if it exist in server data directory * Start server -* Do `ALTER EXTENSION 'ptrack' UPDATE;`. +* Do `ALTER EXTENSION ptrack UPDATE;`. #### Upgrading from 2.3.* to 2.4.*: * Stop your server * Update ptrack binaries * Start server -* Do `ALTER EXTENSION 'ptrack' UPDATE;`. +* Do `ALTER EXTENSION ptrack UPDATE;`. ## Limitations From 2c4157e2bf540456067aff9113f4b73ee5be40cb Mon Sep 17 00:00:00 2001 From: Alexey Savchkov Date: Fri, 20 Oct 2023 14:10:47 +0300 Subject: [PATCH 55/65] Update ce from master --- .github/workflows/test.yml | 94 +++++++++++++++++++++++++++ .travis.yml | 48 -------------- AUTHORS.md | 22 +++++++ Dockerfile.in | 25 -------- Makefile | 62 +++++++++++++++++- README.md | 116 +++++++++++++++++++++++++-------- codecov.yml | 5 +- docker-compose.yml | 17 ----- make_dockerfile.sh | 33 ---------- run_tests.sh | 127 ------------------------------------- 10 files changed, 269 insertions(+), 280 deletions(-) create mode 100644 .github/workflows/test.yml delete mode 100644 .travis.yml create mode 100644 AUTHORS.md delete mode 100644 Dockerfile.in delete mode 100644 docker-compose.yml delete mode 100755 make_dockerfile.sh delete mode 100755 run_tests.sh diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..f05773a --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,94 @@ +name: Test + +on: + push: + branches: + - "**" + pull_request: + branches: + - main + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +jobs: + Test: + strategy: + matrix: + # pg_version: [15] + pg_version: [11, 12, 13, 14, 15] + os: [ubuntu-22.04] + # tests: [tap] + tests: [tap, python] + # test_mode: [normal, legacy, paranoia] + test_mode: [normal, paranoia] + exclude: + - tests: tap + test_mode: paranoia + - tests: python + test_mode: normal + - tests: python + test_mode: legacy + fail-fast: false + name: ${{ format('Ptrack ({0}, PostgreSQL {1}, {2} tests, {3} mode)', matrix.os, matrix.pg_version, matrix.tests, matrix.test_mode) }} + container: + image: ${{ format('ghcr.io/postgres-dev/{0}:1.0', matrix.os) }} + env: + PG_BRANCH: ${{ format('REL_{0}_STABLE', matrix.pg_version) }} + PGDATA: $HOME/data + TEST_MODE: ${{ matrix.test_mode }} + options: --privileged + steps: + - name: Get Postgres sources + uses: actions/checkout@v3 + with: + repository: postgres/postgres + ref: ${{ format('REL_{0}_STABLE', matrix.pg_version) }} + path: postgres + - name: Get Ptrack sources + uses: actions/checkout@v3 + with: + path: ptrack + - name: Get Pg_probackup sources + uses: actions/checkout@v3 + with: + repository: postgrespro/pg_probackup + path: pg_probackup + - name: Apply ptrack patches + run: make patch top_builddir=../postgres + working-directory: ptrack + - name: Install Postgres + run: | + make install-postgres top_builddir=$GITHUB_WORKSPACE/postgres prefix=$HOME/pgsql && + echo $HOME/pgsql/bin >> $GITHUB_PATH + working-directory: ptrack + - name: Install Ptrack + run: make install USE_PGXS=1 PG_CPPFLAGS=-coverage 
SHLIB_LINK=-coverage + working-directory: ptrack + - name: Install Pg_probackup + run: make install-pg-probackup USE_PGXS=1 top_srcdir=../postgres + working-directory: ptrack + shell: bash {0} + - name: Install additional packages + run: | + apt update && + apt install -y python3-pip python3-six python3-pytest python3-pytest-xdist curl && + pip3 install --no-input testgres + # All steps have been so far executed by root but ptrack tests run from an + # unprivileged user so change some permissions + - name: Adjust the permissions of ptrack test folders + run: | + mkdir pg_probackup/tests/tmp_dirs + chown -R "dev:" pg_probackup ptrack + - name: Test + run: make test-${{ matrix.tests }} USE_PGXS=1 + working-directory: ptrack + shell: runuser dev {0} + - name: Collect coverage results + run: make coverage + working-directory: ptrack + shell: runuser dev {0} + - name: Upload coverage results to Codecov + uses: codecov/codecov-action@v3 + with: + working-directory: ptrack + runs-on: ubuntu-latest diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index b6bd63a..0000000 --- a/.travis.yml +++ /dev/null @@ -1,48 +0,0 @@ -os: linux - -dist: bionic - -language: c - -services: - - docker - -install: - - ./make_dockerfile.sh - - docker-compose build - -script: - - docker-compose run $(bash <(curl -s https://codecov.io/env)) tests - -notifications: - email: - on_success: change - on_failure: always - -# keep in sync with codecov.yml number of builds -env: - - PG_BRANCH=master TEST_CASE=tap - - PG_BRANCH=master TEST_CASE=tap MODE=legacy -# - PG_BRANCH=master TEST_CASE=all - - PG_BRANCH=master TEST_CASE=all MODE=paranoia - - PG_BRANCH=REL_14_STABLE TEST_CASE=tap - - PG_BRANCH=REL_14_STABLE TEST_CASE=tap MODE=legacy -# - PG_BRANCH=REL_14_STABLE TEST_CASE=all - - PG_BRANCH=REL_14_STABLE TEST_CASE=all MODE=paranoia - - PG_BRANCH=REL_13_STABLE TEST_CASE=tap - - PG_BRANCH=REL_13_STABLE TEST_CASE=tap MODE=legacy -# - PG_BRANCH=REL_13_STABLE TEST_CASE=all - - PG_BRANCH=REL_13_STABLE TEST_CASE=all MODE=paranoia - - PG_BRANCH=REL_12_STABLE TEST_CASE=tap - - PG_BRANCH=REL_12_STABLE TEST_CASE=tap MODE=legacy -# - PG_BRANCH=REL_12_STABLE TEST_CASE=all - - PG_BRANCH=REL_12_STABLE TEST_CASE=all MODE=paranoia - - PG_BRANCH=REL_11_STABLE TEST_CASE=tap - - PG_BRANCH=REL_11_STABLE TEST_CASE=tap MODE=legacy -# - PG_BRANCH=REL_11_STABLE TEST_CASE=all - - PG_BRANCH=REL_11_STABLE TEST_CASE=all MODE=paranoia - -jobs: - allow_failures: - - if: env(PG_BRANCH) = master - diff --git a/AUTHORS.md b/AUTHORS.md new file mode 100644 index 0000000..ed4d0eb --- /dev/null +++ b/AUTHORS.md @@ -0,0 +1,22 @@ +# Authors + +This list is sorted by the number of commits per contributor in _descending_ order. 
+ +Avatar|Contributor|Contributions +:-:|---|:-: +@ololobus|[@ololobus](https://github.com/ololobus)|62 +@funny-falcon|[@funny-falcon](https://github.com/funny-falcon)|15 +@alubennikova|[@alubennikova](https://github.com/alubennikova)|9 +@kulaginm|[@kulaginm](https://github.com/kulaginm)|5 +@daniel-95|[@daniel-95](https://github.com/daniel-95)|4 +@ziva777|[@ziva777](https://github.com/ziva777)|2 +@vegebird|[@vegebird](https://github.com/vegebird)|2 +@kovdb75|[@kovdb75](https://github.com/kovdb75)|1 +@MarinaPolyakova|[@MarinaPolyakova](https://github.com/MarinaPolyakova)|1 +@rzharkov|[@rzharkov](https://github.com/rzharkov)|1 +@vbwagner|[@vbwagner](https://github.com/vbwagner)|1 +@waaeer|[@waaeer](https://github.com/waaeer)|1 + +--- + +Auto-generated by [gaocegege/maintainer](https://github.com/maintainer-org/maintainer) on 2023-08-03. diff --git a/Dockerfile.in b/Dockerfile.in deleted file mode 100644 index c2b0ffd..0000000 --- a/Dockerfile.in +++ /dev/null @@ -1,25 +0,0 @@ -FROM ololobus/postgres-dev:stretch - -USER root -RUN apt-get update -RUN apt-get -yq install python python-pip python-virtualenv - -# Environment -ENV PG_BRANCH=${PG_BRANCH} -ENV LANG=C.UTF-8 PGHOME=/testdir/pgbin -ENV MODE=${MODE} TEST_CASE=${TEST_CASE} TEST_REPEATS=${TEST_REPEATS} - -# Make directories -RUN mkdir -p /testdir - -COPY run_tests.sh /run.sh -RUN chmod 755 /run.sh - -COPY . /testdir/ptrack -WORKDIR /testdir - -# Grant privileges -RUN chown -R postgres:postgres /testdir - -USER postgres -ENTRYPOINT /run.sh diff --git a/Makefile b/Makefile index e3d25a4..499067a 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,4 @@ + # contrib/ptrack/Makefile MODULE_big = ptrack @@ -15,13 +16,70 @@ TAP_TESTS = 1 # with Mkvcbuild.pm on PGv15+ PG_LIBS_INTERNAL += $(libpq_pgport) -ifdef USE_PGXS PG_CONFIG ?= pg_config + +ifdef USE_PGXS PGXS := $(shell $(PG_CONFIG) --pgxs) include $(PGXS) else -subdir = contrib/ptrack top_builddir = ../.. +# Makefile.global is a build artifact and initially may not be available +ifneq ($(wildcard $(top_builddir)/src/Makefile.global), ) include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif +endif + +# Assuming make is started in the ptrack directory +patch: + @cd $(top_builddir) && \ + echo Applying the ptrack patch... && \ + git apply --3way -v $(CURDIR)/patches/${PG_BRANCH}-ptrack-core.diff +ifeq ($(MODE), paranoia) + @echo Applying turn-off-hint-bits.diff for the paranoia mode... 
&& \ + git apply --3way -v $(CURDIR)/patches/turn-off-hint-bits.diff +endif + +NPROC ?= $(shell nproc) +prefix := $(abspath $(top_builddir)/pgsql) +TEST_MODE ?= normal +# Postgres Makefile skips some targets depending on the MAKELEVEL variable so +# reset it when calling install targets as if they are started directly from the +# command line +install-postgres: + @cd $(top_builddir) && \ + if [ "$(TEST_MODE)" = legacy ]; then \ + ./configure CFLAGS='-DEXEC_BACKEND' --disable-atomics --prefix=$(prefix) --enable-debug --enable-cassert --enable-depend --enable-tap-tests --quiet; \ + else \ + ./configure --prefix=$(prefix) --enable-debug --enable-cassert --enable-depend --enable-tap-tests; \ + fi && \ + $(MAKE) -sj $(NPROC) install MAKELEVEL=0 && \ + $(MAKE) -sj $(NPROC) -C contrib/ install MAKELEVEL=0 + +# Now when Postgres is built call all remainig targets with USE_PGXS=1 + +test-tap: +ifeq ($(TEST_MODE), legacy) + setarch x86_64 --addr-no-randomize $(MAKE) installcheck USE_PGXS=$(USE_PGXS) PG_CONFIG=$(PG_CONFIG) +else + $(MAKE) installcheck USE_PGXS=$(USE_PGXS) PG_CONFIG=$(PG_CONFIG) +endif + +pg_probackup_dir = ../pg_probackup +# Pg_probackup's Makefile uses top_srcdir when building via PGXS so set it when calling this target +# At the moment building pg_probackup with multiple threads may run some jobs too early and end with an error so do not set the -j option +install-pg-probackup: + $(MAKE) -C $(pg_probackup_dir) install USE_PGXS=$(USE_PGXS) PG_CONFIG=$(PG_CONFIG) top_srcdir=$(top_srcdir) + +test-python: + cd $(pg_probackup_dir); \ + env="PG_PROBACKUP_PTRACK=ON PG_CONFIG=$(PG_CONFIG)"; \ + if [ "$(TEST_MODE)" = normal ]; then \ + env="$$env PG_PROBACKUP_TEST_BASIC=ON"; \ + elif [ "$(TEST_MODE)" = paranoia ]; then \ + env="$$env PG_PROBACKUP_PARANOIA=ON"; \ + fi; \ + env $$env python3 -m pytest -svv$(if $(shell python3 -m pytest --help | grep '\-n '), -n $(NPROC))$(if $(TESTS), -k '$(TESTS)') tests/ptrack_test.py + +coverage: + gcov *.c *.h diff --git a/README.md b/README.md index 4a3e4e3..1dd4a94 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ -[![Build Status](https://travis-ci.com/postgrespro/ptrack.svg?branch=master)](https://travis-ci.com/postgrespro/ptrack) -[![codecov](https://codecov.io/gh/postgrespro/ptrack/branch/master/graph/badge.svg)](https://codecov.io/gh/postgrespro/ptrack) +[![Test](https://github.com/postgrespro/ptrack/actions/workflows/test.yml/badge.svg)](https://github.com/postgrespro/ptrack/actions/workflows/test.yml) +[![Codecov](https://codecov.io/gh/postgrespro/ptrack/branch/master/graph/badge.svg)](https://codecov.io/gh/postgrespro/ptrack) [![GitHub release](https://img.shields.io/github/v/release/postgrespro/ptrack?include_prereleases)](https://github.com/postgrespro/ptrack/releases/latest) # ptrack @@ -12,44 +12,66 @@ It is designed to allow false positives (i.e. block/page is marked in the `ptrac Currently, `ptrack` codebase is split between small PostgreSQL core patch and extension. All public SQL API methods and main engine are placed in the `ptrack` extension, while the core patch contains only certain hooks and modifies binary utilities to ignore `ptrack.map.*` files. 
-This extension is compatible with PostgreSQL [11](https://github.com/postgrespro/ptrack/blob/master/patches/REL_11_STABLE-ptrack-core.diff), [12](https://github.com/postgrespro/ptrack/blob/master/patches/REL_12_STABLE-ptrack-core.diff), [13](https://github.com/postgrespro/ptrack/blob/master/patches/REL_13_STABLE-ptrack-core.diff), [14](https://github.com/postgrespro/ptrack/blob/master/patches/REL_14_STABLE-ptrack-core.diff). +This extension is compatible with PostgreSQL [11](patches/REL_11_STABLE-ptrack-core.diff), [12](patches/REL_12_STABLE-ptrack-core.diff), [13](patches/REL_13_STABLE-ptrack-core.diff), [14](patches/REL_14_STABLE-ptrack-core.diff), [15](patches/REL_15_STABLE-ptrack-core.diff). ## Installation -1) Get latest `ptrack` sources: +1) Specify the PostgreSQL branch to work with: ```shell -git clone https://github.com/postgrespro/ptrack.git +export PG_BRANCH=REL_15_STABLE ``` -2) Get latest PostgreSQL sources: +2) Get the latest PostgreSQL sources: ```shell -git clone https://github.com/postgres/postgres.git -b REL_14_STABLE && cd postgres +git clone https://github.com/postgres/postgres.git -b $PG_BRANCH ``` -3) Apply PostgreSQL core patch: +3) Get the latest `ptrack` sources: ```shell -git apply -3 ../ptrack/patches/REL_14_STABLE-ptrack-core.diff +git clone https://github.com/postgrespro/ptrack.git postgres/contrib/ptrack ``` -4) Compile and install PostgreSQL +4) Change to the `ptrack` directory: -5) Set `ptrack.map_size` (in MB) +```shell +cd postgres/contrib/ptrack +``` + +5) Apply the PostgreSQL core patch: + +```shell +make patch +``` + +6) Compile and install PostgreSQL: + +```shell +make install-postgres prefix=$PWD/pgsql # or some other prefix of your choice +``` + +7) Add the newly created binaries to the PATH: + +```shell +export PATH=$PWD/pgsql/bin:$PATH +``` + +8) Compile and install `ptrack`: ```shell -echo "shared_preload_libraries = 'ptrack'" >> postgres_data/postgresql.conf -echo "ptrack.map_size = 64" >> postgres_data/postgresql.conf +make install USE_PGXS=1 ``` -6) Compile and install `ptrack` extension +9) Set `ptrack.map_size` (in MB): ```shell -USE_PGXS=1 make -C /path/to/ptrack/ install +echo "shared_preload_libraries = 'ptrack'" >> /postgresql.conf +echo "ptrack.map_size = 64" >> /postgresql.conf ``` -7) Run PostgreSQL and create `ptrack` extension +10) Run PostgreSQL and create the `ptrack` extension: ```sql postgres=# CREATE EXTENSION ptrack; @@ -158,24 +180,64 @@ To gather the whole changeset of modified blocks in `ptrack_get_pagemapset()` we ## Contribution -Feel free to [send pull requests](https://github.com/postgrespro/ptrack/compare), [fill up issues](https://github.com/postgrespro/ptrack/issues/new), or just reach one of us directly (e.g. <[Alexey Kondratov](mailto:a.kondratov@postgrespro.ru?subject=[GitHub]%20Ptrack), [@ololobus](https://github.com/ololobus)>) if you are interested in `ptrack`. +Feel free to [send a pull request](https://github.com/postgrespro/ptrack/compare), [create an issue](https://github.com/postgrespro/ptrack/issues/new) or [reach us by e-mail](mailto:team-wd40@lists.postgrespro.ru??subject=[GitHub]%20Ptrack) if you are interested in `ptrack`. + +## Tests -### Tests +All changes of the source code in this repository are checked by CI - see commit statuses and the project status badge. You can also run tests locally by executing a few Makefile targets. 
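When only a subset of the pg_probackup-based Python tests is needed (`make test-python`, see the Testing section below), the run can be narrowed and parallelized with the `TESTS` and `NPROC` variables documented in the "Environment variables" table below. For example (the test name here is purely illustrative and may differ between `pg_probackup` versions):

```shell
# Run a single pg_probackup ptrack test with four pytest workers
TESTS=test_ptrack_simple NPROC=4 make test-python
```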
-Everything is tested automatically with [travis-ci.com](https://travis-ci.com/postgrespro/ptrack) and [codecov.io](https://codecov.io/gh/postgrespro/ptrack), but you can also run tests locally via `Docker`: +### Prerequisites -```sh -export PG_BRANCH=REL_14_STABLE -export TEST_CASE=all -export MODE=paranoia +To run Python tests install the following packages: -./make_dockerfile.sh +OS packages: + - python3-pip + - python3-six + - python3-pytest + - python3-pytest-xdist -docker-compose build -docker-compose run tests +PIP packages: + - testgres + +For example, for Ubuntu: + +```shell +sudo apt update +sudo apt install python3-pip python3-six python3-pytest python3-pytest-xdist +sudo pip3 install testgres +``` + +### Testing + +Install PostgreSQL and ptrack as described in [Installation](#installation), install the testing prerequisites, then do (assuming the current directory is `ptrack`): +```shell +git clone https://github.com/postgrespro/pg_probackup.git ../pg_probackup # clone the repository into postgres/contrib/pg_probackup +# remember to export PATH=/path/to/pgsql/bin:$PATH +make install-pg-probackup USE_PGXS=1 top_srcdir=../.. +make test-tap USE_PGXS=1 +make test-python ``` -Available test modes (`MODE`) are `basic` (default) and `paranoia` (per-block checksum comparison of `PGDATA` content before and after backup-restore process). Available test cases (`TEST_CASE`) are `tap` (minimalistic PostgreSQL [tap test](https://github.com/postgrespro/ptrack/blob/master/t/001_basic.pl)), `all` or any specific [pg_probackup test](https://github.com/postgrespro/pg_probackup/blob/master/tests/ptrack.py), e.g. `test_ptrack_simple`. +If `pg_probackup` is not located in `postgres/contrib` then additionally specify the path to the `pg_probackup` directory when building `pg_probackup`: +```shell +make install-pg-probackup USE_PGXS=1 top_srcdir=/path/to/postgres pg_probackup_dir=/path/to/pg_probackup +``` + +You can use a public Docker image which already has the necessary build environment (but not the testing prerequisites): + +```shell +docker run -e USER_ID=`id -u` -it -v $PWD:/work --name=ptrack ghcr.io/postgres-dev/ubuntu-22.04:1.0 +dev@a033797d2f73:~$ +``` + +## Environment variables + +| Variable | Possible values | Required | Default value | Description | +| - | - | - | - | - | +| NPROC | An integer greater than 0 | No | Output of `nproc` | The number of threads used for building and running tests | +| PG_CONFIG | File path | No | pg_config (from the PATH) | The path to the `pg_config` binary | +| TESTS | A Pytest filter expression | No | Not set (run all Python tests) | A filter to include only selected tests into the run. See the Pytest `-k` option for more information. This variable is only applicable to `test-python` for the tests located in [tests](https://github.com/postgrespro/pg_probackup/tree/master/tests). | +| TEST_MODE | normal, legacy, paranoia | No | normal | The "legacy" mode runs tests in an environment similar to a 32-bit Windows system. This mode is only applicable to `test-tap`. The "paranoia" mode compares the checksums of each block of the database catalog (PGDATA) contents before making a backup and after the restoration. 
This mode is only applicable to `test-python`.| ### TODO diff --git a/codecov.yml b/codecov.yml index fe3b308..00b744e 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,6 +1,9 @@ codecov: notify: - after_n_builds: 12 # keep in sync with .travis.yml number of builds + # must be equal to the total number of parallel jobs in a CI pipeline + # (Postgres versions x test types x test modes x OSes minus excluded + # combinations) + after_n_builds: 10 # datapagemap.c/.h are copied from Postgres, so let's remove it # from report. Otherwise, we would have to remove some currently diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index fc65455..0000000 --- a/docker-compose.yml +++ /dev/null @@ -1,17 +0,0 @@ -version: "3.7" -services: - tests: - build: - context: . - - cap_add: - - SYS_PTRACE - - security_opt: - - seccomp=unconfined - - # don't work - #sysctls: - # kernel.yama.ptrace_scope: 0 - privileged: true - diff --git a/make_dockerfile.sh b/make_dockerfile.sh deleted file mode 100755 index 409a5b9..0000000 --- a/make_dockerfile.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env sh - -if [ -z ${PG_BRANCH+x} ]; then - echo PG_BRANCH is not set! - exit 1 -fi - -if [ -z ${MODE+x} ]; then - MODE=basic -else - echo MODE=${MODE} -fi - -if [ -z ${TEST_CASE+x} ]; then - TEST_CASE=all -else - echo TEST_CASE=${TEST_CASE} -fi - -if [ -z ${TEST_REPEATS+x} ]; then - TEST_REPEATS=1 -else - echo TEST_REPEATS=${TEST_REPEATS} -fi - -echo PG_BRANCH=${PG_BRANCH} - -sed \ - -e 's/${PG_BRANCH}/'${PG_BRANCH}/g \ - -e 's/${MODE}/'${MODE}/g \ - -e 's/${TEST_CASE}/'${TEST_CASE}/g \ - -e 's/${TEST_REPEATS}/'${TEST_REPEATS}/g \ -Dockerfile.in > Dockerfile diff --git a/run_tests.sh b/run_tests.sh deleted file mode 100755 index 1b4a693..0000000 --- a/run_tests.sh +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env bash - -# -# Copyright (c) 2019-2021, Postgres Professional -# - -PTRACK_SRC=${PWD}/ptrack -PG_SRC=${PWD}/postgres -PBK_SRC=${PWD}/pg_probackup -status=0 - -######################################################### -# Clone Postgres -echo "############### Getting Postgres sources" -git clone https://github.com/postgres/postgres.git --depth=1 --branch=${PG_BRANCH} ${PG_SRC} - -# Clone pg_probackup -echo "############### Getting pg_probackup sources" -git clone https://github.com/postgrespro/pg_probackup.git --depth=1 --branch=master ${PBK_SRC} - -######################################################### -# Compile and install Postgres -cd ${PG_SRC} # Go to postgres dir - -echo "############### Applying ptrack patch" -git apply --verbose --3way ${PTRACK_SRC}/patches/${PG_BRANCH}-ptrack-core.diff - -if [ "${MODE}" = "paranoia" ]; then - echo "############### Paranoia mode: applying turn-off-hint-bits.diff" - git apply --verbose --3way ${PTRACK_SRC}/patches/turn-off-hint-bits.diff -fi - -echo "############### Compiling Postgres" -if [ "${TEST_CASE}" = "tap" ] && [ "${MODE}" = "legacy" ]; then - ./configure CFLAGS='-DEXEC_BACKEND' --disable-atomics --prefix=${PGHOME} --enable-debug --enable-cassert --enable-depend --enable-tap-tests --quiet -else - ./configure --prefix=${PGHOME} --enable-debug --enable-cassert --enable-depend --enable-tap-tests --quiet -fi -make --quiet --jobs=$(nproc) install -make --quiet --jobs=$(nproc) --directory=contrib/ install - -# Override default Postgres instance -export PATH=${PGHOME}/bin:${PATH} -export LD_LIBRARY_PATH=${PGHOME}/lib -export PG_CONFIG=$(which pg_config) - -# Show pg_config path (just in case) -echo "############### pg_config path" -which pg_config - -# 
Show pg_config just in case -echo "############### pg_config" -pg_config - -######################################################### -# Build and install ptrack extension -echo "############### Compiling and installing ptrack extension" -cp --recursive ${PTRACK_SRC} ${PG_SRC}/contrib/ptrack -make USE_PGXS=1 --directory=${PG_SRC}/contrib/ptrack/ clean -make USE_PGXS=1 PG_CPPFLAGS="-coverage" SHLIB_LINK="-coverage" --directory=${PG_SRC}/contrib/ptrack/ install - -if [ "${TEST_CASE}" = "tap" ]; then - - # Run tap tests - echo "############### Running tap tests" - if [ "${MODE}" = "legacy" ]; then - # There is a known issue with attaching shared memory segment using the same - # address each time, when EXEC_BACKEND mechanism is turned on. It happens due - # to the ASLR address space randomization, so we are trying to attach a segment - # to the already occupied location. That way we simply turning off ASLR here. - # - # Postgres comment: https://github.com/postgres/postgres/blob/5cbfce562f7cd2aab0cdc4694ce298ec3567930e/src/backend/postmaster/postmaster.c#L4929 - setarch x86_64 --addr-no-randomize make --directory=${PG_SRC}/contrib/ptrack check || status=$? - else - make --directory=${PG_SRC}/contrib/ptrack check || status=$? - fi - -else - # Set kernel params (used for debugging -- probackup tests) - echo "############### setting kernel params" - sudo sh -c 'echo 0 > /proc/sys/kernel/yama/ptrace_scope' - - # Build and install pg_probackup - echo "############### Compiling and installing pg_probackup" - cd ${PBK_SRC} # Go to pg_probackup dir - make USE_PGXS=1 top_srcdir=${PG_SRC} install - - # Setup python environment - echo "############### Setting up python env" - virtualenv --python=/usr/bin/python3 pyenv - source pyenv/bin/activate - pip install testgres==1.8.2 - - echo "############### Testing" - export PG_PROBACKUP_PTRACK=ON - if [ "${MODE}" = "basic" ]; then - export PG_PROBACKUP_TEST_BASIC=ON - elif [ "${MODE}" = "paranoia" ]; then - export PG_PROBACKUP_PARANOIA=ON - fi - - if [ "${TEST_CASE}" = "all" ]; then - # Run all pg_probackup ptrack tests - PBK_TEST_CASE=tests.ptrack - else - PBK_TEST_CASE=tests.ptrack.PtrackTest.${TEST_CASE} - fi - for i in `seq ${TEST_REPEATS}`; do - python3 -m unittest -v ${PBK_TEST_CASE} || status=$? - done - - # Exit virtualenv - deactivate -fi - -######################################################### -# codecov -echo "############### Codecov" -cd ${PTRACK_SRC} -# Generate *.gcov files -gcov ${PG_SRC}/contrib/ptrack/*.c ${PG_SRC}/contrib/ptrack/*.h - -# Send coverage stats to Codecov -bash <(curl -s https://codecov.io/bash) - -# Something went wrong, exit with code 1 -if [ ${status} -ne 0 ]; then exit 1; fi From 68ca7176a6da82d71dc3a878f51a70aebc1629b9 Mon Sep 17 00:00:00 2001 From: Daria Lepikhova Date: Thu, 2 Nov 2023 16:45:17 +0700 Subject: [PATCH 56/65] PGPRO-8844: Add subdir into Makefile for pgpro CI tests --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 499067a..9d4b451 100644 --- a/Makefile +++ b/Makefile @@ -22,6 +22,7 @@ ifdef USE_PGXS PGXS := $(shell $(PG_CONFIG) --pgxs) include $(PGXS) else +subdir = contrib/ptrack top_builddir = ../.. # Makefile.global is a build artifact and initially may not be available ifneq ($(wildcard $(top_builddir)/src/Makefile.global), ) From 89bbbfcf4f3533b581bd7dcd0e6cf93db242cb17 Mon Sep 17 00:00:00 2001 From: "Anton A. Melnikov" Date: Wed, 20 Dec 2023 10:37:17 +0300 Subject: [PATCH 57/65] Use updated parse_filename_for_nontemp_relation() interface. 
And place changes under \#if PG_VERSION_NUM >= 170000. Caused by: - 5c47c6546c413d5eb51c1626070a807026e6139d (PostgreSQL) Refactor parse_filename_for_nontemp_relation to parse more. - f1352d75b3790d43cd252c7584efec8336179212 (ptrack extension) Ptrack 2.0 initial release Tags: ptrack --- engine.c | 12 ++++++++++++ ptrack.c | 15 ++++++++++++++- 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/engine.c b/engine.c index 1efc627..ebd0b61 100644 --- a/engine.c +++ b/engine.c @@ -506,8 +506,13 @@ ptrack_mark_file(Oid dbOid, Oid tablespaceOid, BlockNumber blkno, nblocks = 0; struct stat stat_buf; +#if PG_VERSION_NUM >= 170000 + RelFileNumber relNumber; + unsigned segno; +#else int oidchars; char oidbuf[OIDCHARS + 1]; +#endif /* Do not track temporary relations */ if (looks_like_temp_rel_name(filename)) @@ -519,12 +524,19 @@ ptrack_mark_file(Oid dbOid, Oid tablespaceOid, nodeDb(nodeOf(rnode)) = dbOid; nodeSpc(nodeOf(rnode)) = tablespaceOid; +#if PG_VERSION_NUM >= 170000 + if (!parse_filename_for_nontemp_relation(filename, &relNumber, &forknum, &segno)) + return; + + nodeRel(nodeOf(rnode)) = relNumber; +#else if (!parse_filename_for_nontemp_relation(filename, &oidchars, &forknum)) return; memcpy(oidbuf, filename, oidchars); oidbuf[oidchars] = '\0'; nodeRel(nodeOf(rnode)) = atooid(oidbuf); +#endif /* Compute number of blocks based on file size */ if (stat(filepath, &stat_buf) == 0) diff --git a/ptrack.c b/ptrack.c index 704e2ee..9a8dcaa 100644 --- a/ptrack.c +++ b/ptrack.c @@ -330,25 +330,38 @@ ptrack_gather_filelist(List **filelist, char *path, Oid spcOid, Oid dbOid) /* Regular file inside database directory, otherwise skip it */ if (dbOid != InvalidOid || spcOid == GLOBALTABLESPACE_OID) { +#if PG_VERSION_NUM >= 170000 + RelFileNumber relNumber; + unsigned segno; +#else int oidchars; char oidbuf[OIDCHARS + 1]; +#endif char *segpath; PtrackFileList_i *pfl = palloc0(sizeof(PtrackFileList_i)); /* * Check that filename seems to be a regular relation file. */ +#if PG_VERSION_NUM >= 170000 + if (!parse_filename_for_nontemp_relation(de->d_name, &relNumber, &pfl->forknum, &segno)) + continue; +#else if (!parse_filename_for_nontemp_relation(de->d_name, &oidchars, &pfl->forknum)) continue; - +#endif /* Parse segno */ segpath = strstr(de->d_name, "."); pfl->segno = segpath != NULL ? atoi(segpath + 1) : 0; /* Fill the pfl in */ +#if PG_VERSION_NUM >= 170000 + nodeRel(pfl->relnode) = relNumber; +#else memcpy(oidbuf, de->d_name, oidchars); oidbuf[oidchars] = '\0'; nodeRel(pfl->relnode) = atooid(oidbuf); +#endif nodeDb(pfl->relnode) = dbOid; nodeSpc(pfl->relnode) = spcOid == InvalidOid ? 
DEFAULTTABLESPACE_OID : spcOid; pfl->path = GetRelationPath(dbOid, nodeSpc(pfl->relnode), From 3e7c7025ee52fca445ea7a47c2fd4b41d3ebb71d Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Thu, 8 Feb 2024 18:38:38 +0300 Subject: [PATCH 58/65] use uint32 instead of uint32_t --- ptrack.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ptrack.c b/ptrack.c index 9a8dcaa..60617f7 100644 --- a/ptrack.c +++ b/ptrack.c @@ -407,7 +407,7 @@ ptrack_filelist_getnext(PtScanCtx * ctx) ListCell *cell; char *fullpath; struct stat fst; - uint32_t rel_st_size = 0; + uint32 rel_st_size = 0; get_next: From 107b2d0dd4a20d75d1cc466d3b1c95374e5ed53d Mon Sep 17 00:00:00 2001 From: Mikhail Litsarev Date: Mon, 18 Mar 2024 18:07:43 +0300 Subject: [PATCH 59/65] [PGPRO-7424] Remove _PG_fini tags: ptrack --- ptrack.c | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/ptrack.c b/ptrack.c index 60617f7..98e5e00 100644 --- a/ptrack.c +++ b/ptrack.c @@ -66,7 +66,6 @@ static mdextend_hook_type prev_mdextend_hook = NULL; static ProcessSyncRequests_hook_type prev_ProcessSyncRequests_hook = NULL; void _PG_init(void); -void _PG_fini(void); static void ptrack_shmem_startup_hook(void); static void ptrack_copydir_hook(const char *path); @@ -155,20 +154,6 @@ ptrack_shmem_request(void) } #endif -/* - * Module unload callback - */ -void -_PG_fini(void) -{ - /* Uninstall hooks */ - shmem_startup_hook = prev_shmem_startup_hook; - copydir_hook = prev_copydir_hook; - mdwrite_hook = prev_mdwrite_hook; - mdextend_hook = prev_mdextend_hook; - ProcessSyncRequests_hook = prev_ProcessSyncRequests_hook; -} - /* * ptrack_shmem_startup hook: allocate or attach to shared memory. */ From 1b517acf33a576e98bcc7872a95551dc538ee7a4 Mon Sep 17 00:00:00 2001 From: Yura Sokolov Date: Fri, 15 Mar 2024 22:59:44 +0300 Subject: [PATCH 60/65] update master's patch due to changes in md.c and basebackup.c - basebaskup now relies on file being relation file to check its checksum. 
- vectored mdwritev were made instead of single-buffer mdwrite --- patches/master-ptrack-core.diff | 80 +++++++++++++++------------------ 1 file changed, 37 insertions(+), 43 deletions(-) diff --git a/patches/master-ptrack-core.diff b/patches/master-ptrack-core.diff index 04cf8a4..7eb118e 100644 --- a/patches/master-ptrack-core.diff +++ b/patches/master-ptrack-core.diff @@ -1,8 +1,8 @@ diff --git a/src/backend/backup/basebackup.c b/src/backend/backup/basebackup.c -index 45be21131c5..134e677f9d1 100644 +index 5fbbe5ffd20..3c9b99f851d 100644 --- a/src/backend/backup/basebackup.c +++ b/src/backend/backup/basebackup.c -@@ -199,6 +199,13 @@ static const struct exclude_list_item excludeFiles[] = +@@ -220,6 +220,13 @@ static const struct exclude_list_item excludeFiles[] = {"postmaster.pid", false}, {"postmaster.opts", false}, @@ -16,20 +16,8 @@ index 45be21131c5..134e677f9d1 100644 /* end of list */ {NULL, false} }; -@@ -214,6 +221,11 @@ static const struct exclude_list_item noChecksumFiles[] = { - {"pg_filenode.map", false}, - {"pg_internal.init", true}, - {"PG_VERSION", false}, -+ -+ {"ptrack.map.mmap", false}, -+ {"ptrack.map", false}, -+ {"ptrack.map.tmp", false}, -+ - #ifdef EXEC_BACKEND - {"config_exec_params", true}, - #endif diff --git a/src/backend/storage/file/copydir.c b/src/backend/storage/file/copydir.c -index e04bc3941ae..996b5de6169 100644 +index d4fbe542077..b108416c708 100644 --- a/src/backend/storage/file/copydir.c +++ b/src/backend/storage/file/copydir.c @@ -27,6 +27,8 @@ @@ -52,10 +40,10 @@ index e04bc3941ae..996b5de6169 100644 * Be paranoid here and fsync all files to ensure the copy is really done. * But if fsync is disabled, we're done. diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c -index fdecbad1709..f849d00161e 100644 +index bf0f3ca76d1..7d9833a3604 100644 --- a/src/backend/storage/smgr/md.c +++ b/src/backend/storage/smgr/md.c -@@ -87,6 +87,8 @@ typedef struct _MdfdVec +@@ -85,6 +85,8 @@ typedef struct _MdfdVec static MemoryContext MdCxt; /* context for all MdfdVec objects */ @@ -64,7 +52,7 @@ index fdecbad1709..f849d00161e 100644 /* Populate a file tag describing an md.c segment file. 
*/ #define INIT_MD_FILETAG(a,xx_rlocator,xx_forknum,xx_segno) \ -@@ -515,6 +517,9 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, +@@ -513,6 +515,9 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, register_dirty_segment(reln, forknum, v); Assert(_mdnblocks(reln, forknum, v) <= ((BlockNumber) RELSEG_SIZE)); @@ -74,7 +62,7 @@ index fdecbad1709..f849d00161e 100644 } /* -@@ -622,6 +627,12 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum, +@@ -620,6 +625,12 @@ mdzeroextend(SMgrRelation reln, ForkNumber forknum, remblocks -= numblocks; curblocknum += numblocks; @@ -87,21 +75,27 @@ index fdecbad1709..f849d00161e 100644 } } -@@ -867,6 +878,9 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, +@@ -1015,7 +1026,14 @@ mdwritev(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, - if (!skipFsync && !SmgrIsTemp(reln)) - register_dirty_segment(reln, forknum, v); + nblocks -= nblocks_this_segment; + buffers += nblocks_this_segment; +- blocknum += nblocks_this_segment; + -+ if (mdwrite_hook) -+ mdwrite_hook(reln->smgr_rlocator, forknum, blocknum); ++ if (mdwrite_hook) ++ { ++ for (; nblocks_this_segment--; blocknum++) ++ mdwrite_hook(reln->smgr_rlocator, forknum, blocknum); ++ } ++ else ++ blocknum += nblocks_this_segment; + } } - /* diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c -index 04fcb06056d..22bf179f560 100644 +index ab7137d0fff..bc40a763c05 100644 --- a/src/backend/storage/sync/sync.c +++ b/src/backend/storage/sync/sync.c -@@ -79,6 +79,8 @@ static MemoryContext pendingOpsCxt; /* context for the above */ +@@ -74,6 +74,8 @@ static MemoryContext pendingOpsCxt; /* context for the above */ static CycleCtr sync_cycle_ctr = 0; static CycleCtr checkpoint_cycle_ctr = 0; @@ -110,7 +104,7 @@ index 04fcb06056d..22bf179f560 100644 /* Intervals for calling AbsorbSyncRequests */ #define FSYNCS_PER_ABSORB 10 #define UNLINKS_PER_ABSORB 10 -@@ -475,6 +477,9 @@ ProcessSyncRequests(void) +@@ -470,6 +472,9 @@ ProcessSyncRequests(void) CheckpointStats.ckpt_longest_sync = longest; CheckpointStats.ckpt_agg_sync_time = total_elapsed; @@ -121,10 +115,10 @@ index 04fcb06056d..22bf179f560 100644 sync_in_progress = false; } diff --git a/src/bin/pg_checksums/pg_checksums.c b/src/bin/pg_checksums/pg_checksums.c -index 19eb67e4854..008a7acc9f0 100644 +index 9e6fd435f60..f2180b9f6de 100644 --- a/src/bin/pg_checksums/pg_checksums.c +++ b/src/bin/pg_checksums/pg_checksums.c -@@ -118,6 +118,11 @@ static const struct exclude_list_item skip[] = { +@@ -110,6 +110,11 @@ static const struct exclude_list_item skip[] = { {"pg_filenode.map", false}, {"pg_internal.init", true}, {"PG_VERSION", false}, @@ -137,27 +131,27 @@ index 19eb67e4854..008a7acc9f0 100644 {"config_exec_params", true}, #endif diff --git a/src/bin/pg_resetwal/pg_resetwal.c b/src/bin/pg_resetwal/pg_resetwal.c -index e7ef2b8bd0c..ca7f8cdbc2f 100644 +index e9dcb5a6d89..844b04d5e12 100644 --- a/src/bin/pg_resetwal/pg_resetwal.c +++ b/src/bin/pg_resetwal/pg_resetwal.c -@@ -85,6 +85,7 @@ static void RewriteControlFile(void); - static void FindEndOfXLOG(void); +@@ -86,6 +86,7 @@ static void FindEndOfXLOG(void); static void KillExistingXLOG(void); static void KillExistingArchiveStatus(void); + static void KillExistingWALSummaries(void); +static void KillExistingPtrack(void); static void WriteEmptyXLOG(void); static void usage(void); -@@ -488,6 +489,7 @@ main(int argc, char *argv[]) - RewriteControlFile(); +@@ -495,6 +496,7 @@ main(int argc, char *argv[]) 
KillExistingXLOG(); KillExistingArchiveStatus(); + KillExistingWALSummaries(); + KillExistingPtrack(); WriteEmptyXLOG(); printf(_("Write-ahead log reset\n")); -@@ -1029,6 +1031,41 @@ KillExistingArchiveStatus(void) - pg_fatal("could not close directory \"%s\": %m", ARCHSTATDIR); +@@ -998,6 +1000,41 @@ KillExistingXLOG(void) + pg_fatal("could not close directory \"%s\": %m", XLOGDIR); } +/* @@ -197,9 +191,9 @@ index e7ef2b8bd0c..ca7f8cdbc2f 100644 + /* - * Write an empty XLOG file, containing only the checkpoint record + * Remove existing archive status files diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c -index bd5c598e200..a568156c5fb 100644 +index 255ddf2ffaf..1142c244926 100644 --- a/src/bin/pg_rewind/filemap.c +++ b/src/bin/pg_rewind/filemap.c @@ -157,6 +157,10 @@ static const struct exclude_list_item excludeFiles[] = @@ -214,7 +208,7 @@ index bd5c598e200..a568156c5fb 100644 {NULL, false} }; diff --git a/src/include/storage/copydir.h b/src/include/storage/copydir.h -index a8be5b21e0b..020874f96cd 100644 +index a25e258f479..b20b9c76e8d 100644 --- a/src/include/storage/copydir.h +++ b/src/include/storage/copydir.h @@ -13,6 +13,9 @@ @@ -228,7 +222,7 @@ index a8be5b21e0b..020874f96cd 100644 extern void copy_file(const char *fromfile, const char *tofile); diff --git a/src/include/storage/md.h b/src/include/storage/md.h -index 941879ee6a8..24738aeecd0 100644 +index 620f10abdeb..b36936871bd 100644 --- a/src/include/storage/md.h +++ b/src/include/storage/md.h @@ -19,6 +19,13 @@ @@ -246,11 +240,11 @@ index 941879ee6a8..24738aeecd0 100644 extern void mdinit(void); extern void mdopen(SMgrRelation reln); diff --git a/src/include/storage/sync.h b/src/include/storage/sync.h -index cfbcfa6797d..2a432440db9 100644 +index 9dee8fa6e5b..348ed53e4e2 100644 --- a/src/include/storage/sync.h +++ b/src/include/storage/sync.h @@ -55,6 +55,9 @@ typedef struct FileTag - uint32 segno; + uint64 segno; } FileTag; +typedef void (*ProcessSyncRequests_hook_type) (void); From 06595124545ac855255641d89752f9caca0c0f22 Mon Sep 17 00:00:00 2001 From: Marina Polyakova Date: Wed, 3 Apr 2024 14:22:17 +0300 Subject: [PATCH 61/65] Replace BackendIds with 0-based ProcNumbers See the commit 024c521117579a6d356050ad3d78fdc95e44eefa (Replace BackendIds with 0-based ProcNumbers) in PostgreSQL 17devel. Thanks to Yuriy Sokolov for the review. --- ptrack.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ptrack.h b/ptrack.h index ecc398b..abeffb3 100644 --- a/ptrack.h +++ b/ptrack.h @@ -47,6 +47,10 @@ #define nodeOf(ndbck) (ndbck).node #endif +#if PG_VERSION_NUM >= 170000 +#define InvalidBackendId INVALID_PROC_NUMBER +#endif + /* * Structure identifying block on the disk. */ From c668dc834b2229e1bb773c080e9cad5eb2093b14 Mon Sep 17 00:00:00 2001 From: Oleg Gurev Date: Fri, 26 Apr 2024 16:49:44 +0300 Subject: [PATCH 62/65] Backport from ee ptrack_atomic_increase() function - refactor of ptrack_mark_block() --- engine.c | 59 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/engine.c b/engine.c index ebd0b61..16f466d 100644 --- a/engine.c +++ b/engine.c @@ -599,6 +599,23 @@ ptrack_walkdir(const char *path, Oid tablespaceOid, Oid dbOid) FreeDir(dir); /* we ignore any error here */ } +static void +ptrack_atomic_increase(XLogRecPtr new_lsn, pg_atomic_uint64 *var) +{ + /* + * We use pg_atomic_uint64 here only for alignment purposes, because + * pg_atomic_uint64 is forcedly aligned on 8 bytes during the MSVC build. 
+ */ + pg_atomic_uint64 old_lsn; + + old_lsn.value = pg_atomic_read_u64(var); +#if USE_ASSERT_CHECKING + elog(DEBUG3, "ptrack_mark_block: " UINT64_FORMAT " <- " UINT64_FORMAT, old_lsn.value, new_lsn); +#endif + while (old_lsn.value < new_lsn && + !pg_atomic_compare_exchange_u64(var, (uint64 *) &old_lsn.value, new_lsn)); +} + /* * Mark modified block in ptrack_map. */ @@ -608,15 +625,9 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode, { PtBlockId bid; uint64 hash; - size_t slot1; - size_t slot2; + size_t slots[2]; XLogRecPtr new_lsn; - /* - * We use pg_atomic_uint64 here only for alignment purposes, because - * pg_atomic_uint64 is forcedly aligned on 8 bytes during the MSVC build. - */ - pg_atomic_uint64 old_lsn; - pg_atomic_uint64 old_init_lsn; + int i; if (ptrack_map_size == 0 || ptrack_map == NULL @@ -629,8 +640,8 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode, bid.blocknum = blocknum; hash = BID_HASH_FUNC(bid); - slot1 = (size_t)(hash % PtrackContentNblocks); - slot2 = (size_t)(((hash << 32) | (hash >> 32)) % PtrackContentNblocks); + slots[0] = (size_t)(hash % PtrackContentNblocks); + slots[1] = (size_t)(((hash << 32) | (hash >> 32)) % PtrackContentNblocks); if (RecoveryInProgress()) new_lsn = GetXLogReplayRecPtr(NULL); @@ -638,30 +649,20 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode, new_lsn = GetXLogInsertRecPtr(); /* Atomically assign new init LSN value */ - old_init_lsn.value = pg_atomic_read_u64(&ptrack_map->init_lsn); - if (old_init_lsn.value == InvalidXLogRecPtr) + if (pg_atomic_read_u64(&ptrack_map->init_lsn) == InvalidXLogRecPtr) { #if USE_ASSERT_CHECKING - elog(DEBUG1, "ptrack_mark_block: init_lsn " UINT64_FORMAT " <- " UINT64_FORMAT, old_init_lsn.value, new_lsn); + elog(DEBUG3, "ptrack_mark_block: init_lsn"); #endif - - while (old_init_lsn.value < new_lsn && - !pg_atomic_compare_exchange_u64(&ptrack_map->init_lsn, (uint64 *) &old_init_lsn.value, new_lsn)); + ptrack_atomic_increase(new_lsn, &ptrack_map->init_lsn); } - /* Atomically assign new LSN value to the first slot */ - old_lsn.value = pg_atomic_read_u64(&ptrack_map->entries[slot1]); -#if USE_ASSERT_CHECKING - elog(DEBUG3, "ptrack_mark_block: map[%zu]=" UINT64_FORMAT " <- " UINT64_FORMAT, slot1, old_lsn.value, new_lsn); -#endif - while (old_lsn.value < new_lsn && - !pg_atomic_compare_exchange_u64(&ptrack_map->entries[slot1], (uint64 *) &old_lsn.value, new_lsn)); - - /* And to the second */ - old_lsn.value = pg_atomic_read_u64(&ptrack_map->entries[slot2]); + /* Atomically assign new LSN value to the slots */ + for (i = 0; i < lengthof(slots); i++) + { #if USE_ASSERT_CHECKING - elog(DEBUG3, "ptrack_mark_block: map[%zu]=" UINT64_FORMAT " <- " UINT64_FORMAT, slot2, old_lsn.value, new_lsn); + elog(DEBUG3, "ptrack_mark_block: map[%zu]", slots[i]); #endif - while (old_lsn.value < new_lsn && - !pg_atomic_compare_exchange_u64(&ptrack_map->entries[slot2], (uint64 *) &old_lsn.value, new_lsn)); + ptrack_atomic_increase(new_lsn, &ptrack_map->entries[slots[i]]); + } } From df10f7c82427d8ea956e7dbd559eb4cc5dc2a7b5 Mon Sep 17 00:00:00 2001 From: Oleg Gurev Date: Thu, 25 Apr 2024 21:33:57 +0300 Subject: [PATCH 63/65] [PGPRO-10166] Set ptrack init_lsn before first transaction to switched xlog segment - Because of vanilla commit: - "During online checkpoints, insert XLOG_CHECKPOINT_REDO at redo point." 
- Ptrack in some cases got init_lsn right after this xlog record - New hook will set init_lsn before insernig checkpoint record --- engine.c | 27 +++++++++------ engine.h | 1 + patches/master-ptrack-core.diff | 61 +++++++++++++++++++++++++++------ ptrack.c | 20 +++++++++++ 4 files changed, 88 insertions(+), 21 deletions(-) diff --git a/engine.c b/engine.c index 16f466d..bb07d51 100644 --- a/engine.c +++ b/engine.c @@ -643,6 +643,21 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode, slots[0] = (size_t)(hash % PtrackContentNblocks); slots[1] = (size_t)(((hash << 32) | (hash >> 32)) % PtrackContentNblocks); + new_lsn = ptrack_set_init_lsn(); + + /* Atomically assign new LSN value to the slots */ + for (i = 0; i < lengthof(slots); i++) + { +#if USE_ASSERT_CHECKING + elog(DEBUG3, "ptrack_mark_block: map[%zu]", slots[i]); +#endif + ptrack_atomic_increase(new_lsn, &ptrack_map->entries[slots[i]]); + } +} + +extern XLogRecPtr ptrack_set_init_lsn(void) +{ + XLogRecPtr new_lsn; if (RecoveryInProgress()) new_lsn = GetXLogReplayRecPtr(NULL); else @@ -652,17 +667,9 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode, if (pg_atomic_read_u64(&ptrack_map->init_lsn) == InvalidXLogRecPtr) { #if USE_ASSERT_CHECKING - elog(DEBUG3, "ptrack_mark_block: init_lsn"); + elog(DEBUG3, "ptrack_set_init_lsn: init_lsn"); #endif ptrack_atomic_increase(new_lsn, &ptrack_map->init_lsn); } - - /* Atomically assign new LSN value to the slots */ - for (i = 0; i < lengthof(slots); i++) - { -#if USE_ASSERT_CHECKING - elog(DEBUG3, "ptrack_mark_block: map[%zu]", slots[i]); -#endif - ptrack_atomic_increase(new_lsn, &ptrack_map->entries[slots[i]]); - } + return new_lsn; } diff --git a/engine.h b/engine.h index 5daf69a..7ecddd2 100644 --- a/engine.h +++ b/engine.h @@ -104,6 +104,7 @@ extern int ptrack_map_size_tmp; extern void ptrackCheckpoint(void); extern void ptrackMapInit(void); extern void ptrackCleanFiles(void); +extern XLogRecPtr ptrack_set_init_lsn(void); extern void assign_ptrack_map_size(int newval, void *extra); diff --git a/patches/master-ptrack-core.diff b/patches/master-ptrack-core.diff index 7eb118e..3357a2b 100644 --- a/patches/master-ptrack-core.diff +++ b/patches/master-ptrack-core.diff @@ -1,5 +1,30 @@ +diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c +index 34a2c71812..2d73d8023e 100644 +--- a/src/backend/access/transam/xlog.c ++++ b/src/backend/access/transam/xlog.c +@@ -135,6 +135,7 @@ int wal_retrieve_retry_interval = 5000; + int max_slot_wal_keep_size_mb = -1; + int wal_decode_buffer_size = 512 * 1024; + bool track_wal_io_timing = false; ++backup_checkpoint_request_hook_type backup_checkpoint_request_hook = NULL; + + #ifdef WAL_DEBUG + bool XLOG_DEBUG = false; +@@ -8801,6 +8802,12 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces, + { + bool checkpointfpw; + ++ /* ++ * Before we call RequestCheckpoint() we need to set ++ * init_lsn for ptrack map ++ */ ++ if (backup_checkpoint_request_hook) ++ backup_checkpoint_request_hook(); + /* + * Force a CHECKPOINT. 
Aside from being necessary to prevent torn + * page problems, this guarantees that two successive backup runs diff --git a/src/backend/backup/basebackup.c b/src/backend/backup/basebackup.c -index 5fbbe5ffd20..3c9b99f851d 100644 +index 9a2bf59e84..ade9115651 100644 --- a/src/backend/backup/basebackup.c +++ b/src/backend/backup/basebackup.c @@ -220,6 +220,13 @@ static const struct exclude_list_item excludeFiles[] = @@ -17,7 +42,7 @@ index 5fbbe5ffd20..3c9b99f851d 100644 {NULL, false} }; diff --git a/src/backend/storage/file/copydir.c b/src/backend/storage/file/copydir.c -index d4fbe542077..b108416c708 100644 +index d4fbe54207..b108416c70 100644 --- a/src/backend/storage/file/copydir.c +++ b/src/backend/storage/file/copydir.c @@ -27,6 +27,8 @@ @@ -40,7 +65,7 @@ index d4fbe542077..b108416c708 100644 * Be paranoid here and fsync all files to ensure the copy is really done. * But if fsync is disabled, we're done. diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c -index bf0f3ca76d1..7d9833a3604 100644 +index bf0f3ca76d..7d9833a360 100644 --- a/src/backend/storage/smgr/md.c +++ b/src/backend/storage/smgr/md.c @@ -85,6 +85,8 @@ typedef struct _MdfdVec @@ -92,7 +117,7 @@ index bf0f3ca76d1..7d9833a3604 100644 } diff --git a/src/backend/storage/sync/sync.c b/src/backend/storage/sync/sync.c -index ab7137d0fff..bc40a763c05 100644 +index ab7137d0ff..bc40a763c0 100644 --- a/src/backend/storage/sync/sync.c +++ b/src/backend/storage/sync/sync.c @@ -74,6 +74,8 @@ static MemoryContext pendingOpsCxt; /* context for the above */ @@ -115,7 +140,7 @@ index ab7137d0fff..bc40a763c05 100644 sync_in_progress = false; } diff --git a/src/bin/pg_checksums/pg_checksums.c b/src/bin/pg_checksums/pg_checksums.c -index 9e6fd435f60..f2180b9f6de 100644 +index 9e6fd435f6..f2180b9f6d 100644 --- a/src/bin/pg_checksums/pg_checksums.c +++ b/src/bin/pg_checksums/pg_checksums.c @@ -110,6 +110,11 @@ static const struct exclude_list_item skip[] = { @@ -131,7 +156,7 @@ index 9e6fd435f60..f2180b9f6de 100644 {"config_exec_params", true}, #endif diff --git a/src/bin/pg_resetwal/pg_resetwal.c b/src/bin/pg_resetwal/pg_resetwal.c -index e9dcb5a6d89..844b04d5e12 100644 +index e9dcb5a6d8..844b04d5e1 100644 --- a/src/bin/pg_resetwal/pg_resetwal.c +++ b/src/bin/pg_resetwal/pg_resetwal.c @@ -86,6 +86,7 @@ static void FindEndOfXLOG(void); @@ -193,10 +218,10 @@ index e9dcb5a6d89..844b04d5e12 100644 /* * Remove existing archive status files diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c -index 255ddf2ffaf..1142c244926 100644 +index 4458324c9d..7d857467f7 100644 --- a/src/bin/pg_rewind/filemap.c +++ b/src/bin/pg_rewind/filemap.c -@@ -157,6 +157,10 @@ static const struct exclude_list_item excludeFiles[] = +@@ -156,6 +156,10 @@ static const struct exclude_list_item excludeFiles[] = {"postmaster.pid", false}, {"postmaster.opts", false}, @@ -207,8 +232,22 @@ index 255ddf2ffaf..1142c244926 100644 /* end of list */ {NULL, false} }; +diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h +index 76787a8267..2c662f4022 100644 +--- a/src/include/access/xlog.h ++++ b/src/include/access/xlog.h +@@ -57,6 +57,9 @@ extern PGDLLIMPORT int wal_decode_buffer_size; + + extern PGDLLIMPORT int CheckPointSegments; + ++typedef void (*backup_checkpoint_request_hook_type) (void); ++extern PGDLLIMPORT backup_checkpoint_request_hook_type backup_checkpoint_request_hook; ++ + /* Archive modes */ + typedef enum ArchiveMode + { diff --git a/src/include/storage/copydir.h b/src/include/storage/copydir.h -index 
a25e258f479..b20b9c76e8d 100644 +index a25e258f47..b20b9c76e8 100644 --- a/src/include/storage/copydir.h +++ b/src/include/storage/copydir.h @@ -13,6 +13,9 @@ @@ -222,7 +261,7 @@ index a25e258f479..b20b9c76e8d 100644 extern void copy_file(const char *fromfile, const char *tofile); diff --git a/src/include/storage/md.h b/src/include/storage/md.h -index 620f10abdeb..b36936871bd 100644 +index 620f10abde..b36936871b 100644 --- a/src/include/storage/md.h +++ b/src/include/storage/md.h @@ -19,6 +19,13 @@ @@ -240,7 +279,7 @@ index 620f10abdeb..b36936871bd 100644 extern void mdinit(void); extern void mdopen(SMgrRelation reln); diff --git a/src/include/storage/sync.h b/src/include/storage/sync.h -index 9dee8fa6e5b..348ed53e4e2 100644 +index 9dee8fa6e5..348ed53e4e 100644 --- a/src/include/storage/sync.h +++ b/src/include/storage/sync.h @@ -55,6 +55,9 @@ typedef struct FileTag diff --git a/ptrack.c b/ptrack.c index 98e5e00..e2f3627 100644 --- a/ptrack.c +++ b/ptrack.c @@ -64,6 +64,9 @@ static copydir_hook_type prev_copydir_hook = NULL; static mdwrite_hook_type prev_mdwrite_hook = NULL; static mdextend_hook_type prev_mdextend_hook = NULL; static ProcessSyncRequests_hook_type prev_ProcessSyncRequests_hook = NULL; +#if PG_VERSION_NUM >= 170000 +static backup_checkpoint_request_hook_type prev_backup_checkpoint_request_hook = NULL; +#endif void _PG_init(void); @@ -74,6 +77,9 @@ static void ptrack_mdwrite_hook(RelFileNodeBackend smgr_rnode, static void ptrack_mdextend_hook(RelFileNodeBackend smgr_rnode, ForkNumber forkno, BlockNumber blkno); static void ptrack_ProcessSyncRequests_hook(void); +#if PG_VERSION_NUM >= 170000 +static void ptrack_backup_checkpoint_request_hook(void); +#endif static void ptrack_gather_filelist(List **filelist, char *path, Oid spcOid, Oid dbOid); static int ptrack_filelist_getnext(PtScanCtx * ctx); @@ -141,6 +147,10 @@ _PG_init(void) mdextend_hook = ptrack_mdextend_hook; prev_ProcessSyncRequests_hook = ProcessSyncRequests_hook; ProcessSyncRequests_hook = ptrack_ProcessSyncRequests_hook; +#if PG_VERSION_NUM >= 170000 + prev_backup_checkpoint_request_hook = backup_checkpoint_request_hook; + backup_checkpoint_request_hook = ptrack_backup_checkpoint_request_hook; +#endif } #if PG_VERSION_NUM >= 150000 @@ -267,6 +277,16 @@ ptrack_ProcessSyncRequests_hook() prev_ProcessSyncRequests_hook(); } +#if PG_VERSION_NUM >= 170000 +static void +ptrack_backup_checkpoint_request_hook(void) +{ + ptrack_set_init_lsn(); + + if (prev_backup_checkpoint_request_hook) + prev_backup_checkpoint_request_hook(); +} +#endif /* * Recursively walk through the path and add all data files to filelist. 
  */

From 08cf80deb0eb15079b69b52e3ffc225d30c13ea5 Mon Sep 17 00:00:00 2001
From: Yura Sokolov
Date: Sat, 27 Apr 2024 13:24:44 +0300
Subject: [PATCH 64/65] fix formatting

---
 engine.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/engine.c b/engine.c
index bb07d51..f0c2f9b 100644
--- a/engine.c
+++ b/engine.c
@@ -655,7 +655,8 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode,
 	}
 }
 
-extern XLogRecPtr ptrack_set_init_lsn(void)
+XLogRecPtr
+ptrack_set_init_lsn(void)
 {
 	XLogRecPtr	new_lsn;
 	if (RecoveryInProgress())

From 9a75c19591bf8c1bb657978ad6c4add639daca23 Mon Sep 17 00:00:00 2001
From: Sofia Kopikova
Date: Fri, 4 Oct 2024 13:48:49 +0300
Subject: [PATCH 65/65] PGPRO-11098 add null-pointer check to
 ptrack_set_init_lsn() function

When the ptrack extension is added to shared_preload_libraries but
ptrack.map size is 0, we may encounter a segmentation fault when calling
the ptrack_backup_checkpoint_request_hook() function. This commit adds
additional null-pointer checks to the ptrack_set_init_lsn() function,
which is called by ptrack_backup_checkpoint_request_hook(), to avoid this
segfault.
---
 engine.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/engine.c b/engine.c
index f0c2f9b..dfd7c84 100644
--- a/engine.c
+++ b/engine.c
@@ -659,6 +659,10 @@ XLogRecPtr
 ptrack_set_init_lsn(void)
 {
 	XLogRecPtr	new_lsn;
+
+	if (ptrack_map_size == 0 || ptrack_map == NULL)
+		return InvalidXLogRecPtr;
+
 	if (RecoveryInProgress())
 		new_lsn = GetXLogReplayRecPtr(NULL);
 	else
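For reference, ptrack_set_init_lsn() in engine.c ends up looking roughly as follows once patches 63-65 above are applied. This is a sketch reconstructed from the hunks in this series (the DEBUG elog calls under USE_ASSERT_CHECKING are omitted for brevity), not a verbatim copy of the final file, so treat it as illustrative rather than authoritative:

XLogRecPtr
ptrack_set_init_lsn(void)
{
	XLogRecPtr	new_lsn;

	/* ptrack is disabled (map size is 0) or the map has not been mapped yet */
	if (ptrack_map_size == 0 || ptrack_map == NULL)
		return InvalidXLogRecPtr;

	/* Use the replay position on a standby, the insert position otherwise */
	if (RecoveryInProgress())
		new_lsn = GetXLogReplayRecPtr(NULL);
	else
		new_lsn = GetXLogInsertRecPtr();

	/* Set init_lsn only once, via the monotonic compare-and-swap helper */
	if (pg_atomic_read_u64(&ptrack_map->init_lsn) == InvalidXLogRecPtr)
		ptrack_atomic_increase(new_lsn, &ptrack_map->init_lsn);

	return new_lsn;
}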