[PBCKP-150] Reading buffer is flushed each time we verify the checksum. #487


Merged · 5 commits · May 25, 2022
1 change: 1 addition & 0 deletions .travis.yml
@@ -46,6 +46,7 @@ env:
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=replica
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=off MODE=retention
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=restore
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=time_consuming

jobs:
allow_failures:
2 changes: 2 additions & 0 deletions src/data.c
@@ -349,6 +349,8 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn,
Assert(false);
}
}
/* discard stdio's buffered data so further read attempts fetch the page from disk rather than the stale buffer, see PBCKP-150 */
fflush(in);
}

/*
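For context on the hunk above: the retry logic in prepare_page() re-reads a page when its checksum does not verify, but an fseek() that lands inside stdio's current buffer may be satisfied from that buffer without a fresh read from the kernel, so a retry could verify the same stale bytes again. POSIX defines fflush() on a seekable input stream to discard its unread buffered data (ISO C leaves that case undefined), which forces the next fread() back to disk. Below is a minimal sketch of the pattern, assuming POSIX stdio; read_page_with_retry() and checksum_ok() are illustrative names, not functions from data.c:

#include <stdio.h>
#include <stdbool.h>

#define BLCKSZ 8192                     /* PostgreSQL page size */

bool checksum_ok(const char *page);     /* assumed verifier, defined elsewhere */

/* Sketch: re-read one page until its checksum verifies, discarding
   stdio's read buffer between attempts so each retry hits the file. */
static bool
read_page_with_retry(FILE *in, long offset, char *page, int attempts)
{
    for (int i = 0; i < attempts; i++)
    {
        if (fseek(in, offset, SEEK_SET) != 0)
            return false;
        if (fread(page, 1, BLCKSZ, in) != BLCKSZ)
            return false;
        if (checksum_ok(page))
            return true;
        /* Without this, the next fseek()/fread() pair may be served
           from the stale stdio buffer instead of disk (PBCKP-150). */
        fflush(in);
    }
    return false;
}
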
2 changes: 2 additions & 0 deletions tests/Readme.md
@@ -41,6 +41,8 @@ Run suite of basic simple tests:
Run ptrack tests:
export PG_PROBACKUP_PTRACK=ON

Run long (time-consuming) tests:
export PG_PROBACKUP_LONG=ON

Usage:
sudo echo 0 > /proc/sys/kernel/yama/ptrace_scope
8 changes: 7 additions & 1 deletion tests/__init__.py
@@ -7,7 +7,7 @@
compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \
cfs_validate_backup, auth_test, time_stamp, logging, \
locking, remote, external, config, checkdb, set_backup, incr_restore, \
catchup, CVE_2018_1058
catchup, CVE_2018_1058, time_consuming


def load_tests(loader, tests, pattern):
@@ -21,6 +21,12 @@ def load_tests(loader, tests, pattern):
if os.environ['PG_PROBACKUP_PTRACK'] == 'ON':
suite.addTests(loader.loadTestsFromModule(ptrack))

# PG_PROBACKUP_LONG section for tests that are long by design,
# e.g. they contain loops, sleeps, and so on
if 'PG_PROBACKUP_LONG' in os.environ:
if os.environ['PG_PROBACKUP_LONG'] == 'ON':
suite.addTests(loader.loadTestsFromModule(time_consuming))

# suite.addTests(loader.loadTestsFromModule(auth_test))
suite.addTests(loader.loadTestsFromModule(archive))
suite.addTests(loader.loadTestsFromModule(backup))
76 changes: 76 additions & 0 deletions tests/time_consuming.py
@@ -0,0 +1,76 @@
import os
import unittest
from .helpers.ptrack_helpers import ProbackupTest
from time import sleep

module_name = 'time_consuming'

class TimeConsumingTests(ProbackupTest, unittest.TestCase):
def test_pbckp150(self):
"""
https://jira.postgrespro.ru/browse/PBCKP-150
create a node and populate it with pgbench
create a FULL backup followed by a PTRACK backup
run pgbench, VACUUM FULL, and PTRACK backups in parallel
"""
# init node
fname = self.id().split('.')[3]
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
node.append_conf('postgresql.conf',
"""
max_connections = 100
wal_keep_size = 16000
ptrack.map_size = 1
shared_preload_libraries='ptrack'
log_statement = 'none'
fsync = off
log_checkpoints = on
autovacuum = off
""")

# init probackup and add an instance
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)

# run the node and init ptrack
node.slow_start()
node.safe_psql("postgres", "CREATE EXTENSION ptrack")
# populate it with pgbench
node.pgbench_init(scale=5)

# FULL backup followed by PTRACK backup
self.backup_node(backup_dir, 'node', node, options=['--stream'])
self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])

# run an ordinary pgbench scenario to simulate activity, plus a second pgbench doing VACUUM FULL in parallel
nBenchDuration = 30
pgbench = node.pgbench(options=['-c', '20', '-j', '8', '-T', str(nBenchDuration)])
with open('/tmp/pbckp150vacuum.sql', 'w') as f:
f.write('VACUUM (FULL) pgbench_accounts, pgbench_tellers, pgbench_history; SELECT pg_sleep(1);\n')
pgbenchval = node.pgbench(options=['-c', '1', '-f', '/tmp/pbckp150vacuum.sql', '-T', str(nBenchDuration)])

# several PTRACK backups
for i in range(nBenchDuration):
print("[{}] backing up PTRACK diff...".format(i+1))
self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream', '--log-level-console', 'VERBOSE'])
sleep(0.1)
# if the activity pgbench has finished, stop backing up
if pgbench.poll() is not None:
break

pgbench.kill()
pgbenchval.kill()
pgbench.wait()
pgbenchval.wait()

backups = self.show_pb(backup_dir, 'node')
for b in backups:
self.assertEqual("OK", b['status'])

# Clean after yourself
self.del_test_dir(module_name, fname)