1
0
mirror of https://github.com/postgrespro/pg_probackup.git synced 2025-02-08 14:28:36 +02:00

Merge branch 'master' into issue_310

This commit is contained in:
Grigory Smolkin 2021-02-28 12:01:24 +03:00
commit 8eff099169
16 changed files with 154 additions and 73 deletions

View File

@ -155,7 +155,7 @@ doc/src/sgml/pgprobackup.sgml
recovery of <productname>PostgreSQL</productname> database clusters.
It is designed to perform periodic backups of the <productname>PostgreSQL</productname>
instance that enable you to restore the server in case of a failure.
<application>pg_probackup</application> supports <productname>PostgreSQL</productname> 9.5 or higher.
<application>pg_probackup</application> supports PostgreSQL 9.5 or higher.
</para>
<itemizedlist spacing="compact">
@ -389,7 +389,7 @@ doc/src/sgml/pgprobackup.sgml
<itemizedlist spacing="compact">
<listitem>
<para>
<application>pg_probackup</application> only supports <productname>PostgreSQL</productname> 9.5 and higher.
<application>pg_probackup</application> only supports PostgreSQL 9.5 and higher.
</para>
</listitem>
<listitem>
@ -410,7 +410,7 @@ doc/src/sgml/pgprobackup.sgml
</listitem>
<listitem>
<para>
For PostgreSQL 9.5, functions
For <productname>PostgreSQL</productname> 9.5, functions
<function>pg_create_restore_point(text)</function> and
<function>pg_switch_xlog()</function> can be executed only if
the backup role is a superuser, so backup of a
@ -599,7 +599,7 @@ pg_probackup add-instance -B <replaceable>backup_dir</replaceable> -D <replaceab
connection</emphasis> to the <productname>PostgreSQL</productname> server:
</para>
<para>
For PostgreSQL 9.5:
For <productname>PostgreSQL</productname> 9.5:
</para>
<programlisting>
BEGIN;
@ -1202,8 +1202,9 @@ CREATE EXTENSION ptrack;
together, which leads to false-positive results when tracking changed
blocks and increases the incremental backup size as unchanged blocks
can also be copied into the incremental backup.
Setting <varname>ptrack.map_size</varname> to a higher value
does not affect PTRACK operation. The maximum allowed value is 1024.
Setting <varname>ptrack.map_size</varname> to a higher value does not
affect PTRACK operation, but it is not recommended to set this parameter
to a value higher than 1024.
</para>
</listitem>
</orderedlist>
@ -1711,7 +1712,7 @@ pg_probackup restore -B <replaceable>backup_dir</replaceable> --instance <replac
<para>
The speed of restore from backup can be significantly improved
by replacing only invalid and changed pages in already
existing PostgreSQL data directory using
existing <productname>PostgreSQL</productname> data directory using
<link linkend="pbk-incremental-restore-options">incremental
restore options</link> with the <xref linkend="pbk-restore"/>
command.
@ -1874,11 +1875,11 @@ pg_probackup restore -B <replaceable>backup_dir</replaceable> --instance <replac
</note>
<note>
<para>
Due to recovery specifics of PostgreSQL versions earlier than 12,
Due to recovery specifics of <productname>PostgreSQL</productname> versions earlier than 12,
it is advisable that you set the
<ulink url="https://postgrespro.com/docs/postgresql/current/runtime-config-replication.html#GUC-HOT-STANDBY">hot_standby</ulink>
parameter to <literal>off</literal> when running partial
restore of a PostgreSQL cluster of version earlier than 12.
restore of a <productname>PostgreSQL</productname> cluster of version earlier than 12.
Otherwise the recovery may fail.
</para>
</note>

View File

@ -227,22 +227,23 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg
{
pgBackup *backup = (pgBackup *) parray_get(backup_list, i);
/* Consider only valid FULL backups for Redundancy */
if (instance_config.retention_redundancy > 0 &&
backup->backup_mode == BACKUP_MODE_FULL &&
(backup->status == BACKUP_STATUS_OK ||
backup->status == BACKUP_STATUS_DONE))
if (backup->backup_mode == BACKUP_MODE_FULL)
{
n_full_backups++;
/* Add every FULL backup that satisfies the Redundancy policy to a separate list */
if (n_full_backups <= instance_config.retention_redundancy)
if (n_full_backups < instance_config.retention_redundancy)
{
if (!redundancy_full_backup_list)
redundancy_full_backup_list = parray_new();
parray_append(redundancy_full_backup_list, backup);
}
/* Consider only valid FULL backups for Redundancy fulfillment */
if (backup->status == BACKUP_STATUS_OK ||
backup->status == BACKUP_STATUS_DONE)
{
n_full_backups++;
}
}
}
/* Sort list of full backups to keep */
@ -413,7 +414,10 @@ do_retention_internal(parray *backup_list, parray *to_keep_list, parray *to_purg
pinning_window ? pinning_window : instance_config.retention_window,
action);
if (backup->backup_mode == BACKUP_MODE_FULL)
/* Only valid full backups count towards retention redundancy */
if (backup->backup_mode == BACKUP_MODE_FULL &&
(backup->status == BACKUP_STATUS_OK ||
backup->status == BACKUP_STATUS_DONE))
cur_full_backup_num++;
}
}
@ -741,7 +745,10 @@ delete_backup_files(pgBackup *backup)
return;
}
time2iso(timestamp, lengthof(timestamp), backup->recovery_time, false);
if (backup->recovery_time)
time2iso(timestamp, lengthof(timestamp), backup->recovery_time, false);
else
time2iso(timestamp, lengthof(timestamp), backup->start_time, false);
elog(INFO, "Delete: %s %s",
base36enc(backup->start_time), timestamp);

View File

@ -1782,9 +1782,9 @@ read_timeline_history(const char *arclog_path, TimeLineID targetTLI, bool strict
elog(ERROR, "Timeline IDs must be less than child timeline's ID.");
/* History file is empty or corrupted */
if (parray_num(result) == 0)
if (parray_num(result) == 0 && targetTLI != 1)
{
elog(WARNING, "History file is corrupted: \"%s\"", path);
elog(WARNING, "History file is corrupted or missing: \"%s\"", path);
pg_free(result);
return NULL;
}

View File

@ -981,7 +981,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
self.backup_node(backup_dir, 'master', replica)
# Clean after yourself
self.del_test_dir(module_name, fname, nodes=[master, replica])
self.del_test_dir(module_name, fname)
# @unittest.expectedFailure
# @unittest.skip("skip")
@ -2016,8 +2016,7 @@ class ArchiveTest(ProbackupTest, unittest.TestCase):
# Clean after yourself
pg_receivexlog.kill()
self.del_test_dir(
module_name, fname, [node, replica, node_restored])
self.del_test_dir(module_name, fname)
@unittest.skip("skip")
def test_multi_timeline_recovery_prefetching(self):

View File

@ -1065,7 +1065,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname, nodes=[node])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_tablespace_handling_1(self):
@ -1599,7 +1599,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
options=['--stream', '--slot=slot_1', '--temp-slot'])
# Clean after yourself
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_backup_concurrent_drop_table(self):
@ -1645,7 +1645,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
self.assertEqual(show_backup['status'], "OK")
# Clean after yourself
self.del_test_dir(module_name, fname, nodes=[node])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_pg_11_adjusted_wal_segment_size(self):
@ -1930,7 +1930,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
os.chmod(full_path, 700)
# Clean after yourself
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_basic_missing_dir_permissions(self):
@ -1973,7 +1973,7 @@ class BackupTest(ProbackupTest, unittest.TestCase):
os.rmdir(full_path)
# Clean after yourself
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_backup_with_least_privileges_role(self):

View File

@ -349,7 +349,7 @@ class CheckdbTest(ProbackupTest, unittest.TestCase):
log_file_content)
# Clean after yourself
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_checkdb_block_validation_sanity(self):

View File

@ -117,7 +117,7 @@ class CompressionTest(ProbackupTest, unittest.TestCase):
self.assertEqual(delta_result, delta_result_new)
# Clean after yourself
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
def test_compression_archive_zlib(self):
"""

View File

@ -80,7 +80,7 @@ class DeltaTest(ProbackupTest, unittest.TestCase):
node_restored.slow_start()
# Clean after yourself
self.del_test_dir(module_name, fname, [node, node_restored])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_delta_vacuum_truncate_1(self):

View File

@ -1,5 +1,6 @@
# you need os for unittest to work
import os
import gc
from sys import exit, argv, version_info
import subprocess
import shutil
@ -402,7 +403,6 @@ class ProbackupTest(object):
if node.major_version >= 13:
self.set_auto_conf(
node, {}, 'postgresql.conf', ['wal_keep_segments'])
return node
def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False):
@ -1514,15 +1514,15 @@ class ProbackupTest(object):
def get_bin_path(self, binary):
return testgres.get_bin_path(binary)
def del_test_dir(self, module_name, fname, nodes=[]):
""" Del testdir and optimistically try to del module dir"""
try:
testgres.clean_all()
except:
pass
def clean_all(self):
for o in gc.get_referrers(testgres.PostgresNode):
if o.__class__ is testgres.PostgresNode:
o.cleanup()
for node in nodes:
node.stop()
def del_test_dir(self, module_name, fname):
""" Del testdir and optimistically try to del module dir"""
self.clean_all()
shutil.rmtree(
os.path.join(
@ -1532,10 +1532,6 @@ class ProbackupTest(object):
),
ignore_errors=True
)
try:
os.rmdir(os.path.join(self.tmp_path, module_name))
except:
pass
def pgdata_content(self, pgdata, ignore_ptrack=True, exclude_dirs=None):
""" return dict with directory content. "

View File

@ -720,7 +720,7 @@ class IncrRestoreTest(ProbackupTest, unittest.TestCase):
repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_incr_checksum_restore(self):
@ -809,7 +809,7 @@ class IncrRestoreTest(ProbackupTest, unittest.TestCase):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname, [node, node_1])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
@ -898,7 +898,7 @@ class IncrRestoreTest(ProbackupTest, unittest.TestCase):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname, [node, node_1])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_incr_lsn_sanity(self):
@ -967,7 +967,7 @@ class IncrRestoreTest(ProbackupTest, unittest.TestCase):
repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname, [node_1])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_incr_checksum_sanity(self):
@ -1026,7 +1026,7 @@ class IncrRestoreTest(ProbackupTest, unittest.TestCase):
self.compare_pgdata(pgdata, pgdata_restored)
# Clean after yourself
self.del_test_dir(module_name, fname, [node_1])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
@ -1561,7 +1561,7 @@ class IncrRestoreTest(ProbackupTest, unittest.TestCase):
pgbench.wait()
# Clean after yourself
self.del_test_dir(module_name, fname, [new_master, old_master])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_make_replica_via_incr_lsn_restore(self):
@ -1634,7 +1634,7 @@ class IncrRestoreTest(ProbackupTest, unittest.TestCase):
pgbench.wait()
# Clean after yourself
self.del_test_dir(module_name, fname, [new_master, old_master])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
@ -1878,7 +1878,7 @@ class IncrRestoreTest(ProbackupTest, unittest.TestCase):
'1')
# Clean after yourself
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
@ -2137,7 +2137,7 @@ class IncrRestoreTest(ProbackupTest, unittest.TestCase):
self.assertNotIn('PANIC', output)
# Clean after yourself
self.del_test_dir(module_name, fname, [node, node2])
self.del_test_dir(module_name, fname)
def test_incremental_partial_restore_exclude_lsn(self):
""""""
@ -2247,7 +2247,7 @@ class IncrRestoreTest(ProbackupTest, unittest.TestCase):
self.assertNotIn('PANIC', output)
# Clean after yourself
self.del_test_dir(module_name, fname, [node2])
self.del_test_dir(module_name, fname)
def test_incremental_partial_restore_exclude_tablespace_checksum(self):
""""""
@ -2391,7 +2391,7 @@ class IncrRestoreTest(ProbackupTest, unittest.TestCase):
self.assertNotIn('PANIC', output)
# Clean after yourself
self.del_test_dir(module_name, fname, [node2])
self.del_test_dir(module_name, fname)
def test_incremental_pg_filenode_map(self):
"""

View File

@ -101,7 +101,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
self.assertEqual(count1, count2)
# Clean after yourself
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
def test_merge_compressed_backups(self):
"""
@ -2245,7 +2245,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
repr(e.message), self.cmd))
# Clean after yourself
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_smart_merge(self):
@ -2305,7 +2305,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
logfile_content = f.read()
# Clean after yourself
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
def test_idempotent_merge(self):
"""
@ -2380,7 +2380,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
self.assertEqual(
page_id_2, self.show_pb(backup_dir, 'node')[0]['id'])
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
def test_merge_correct_inheritance(self):
"""
@ -2435,7 +2435,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
page_meta['expire-time'],
self.show_pb(backup_dir, 'node', page_id)['expire-time'])
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
def test_merge_correct_inheritance_1(self):
"""
@ -2485,7 +2485,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
'expire-time',
self.show_pb(backup_dir, 'node', page_id))
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure
@ -2604,7 +2604,7 @@ class MergeTest(ProbackupTest, unittest.TestCase):
'-d', 'postgres', '-p', str(node_restored.port)])
# Clean after yourself
self.del_test_dir(module_name, fname, [node, node_restored])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure

View File

@ -100,7 +100,7 @@ class PageTest(ProbackupTest, unittest.TestCase):
self.assertEqual(result1, result2)
# Clean after yourself
self.del_test_dir(module_name, fname, [node, node_restored])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_page_vacuum_truncate_1(self):

View File

@ -3148,7 +3148,7 @@ class PtrackTest(ProbackupTest, unittest.TestCase):
'select 1')
# Clean after yourself
self.del_test_dir(module_name, fname, [master, replica, node])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
# @unittest.expectedFailure

View File

@ -418,7 +418,7 @@ class ReplicaTest(ProbackupTest, unittest.TestCase):
options=['--archive-timeout=30s', '--stream'])
# Clean after yourself
self.del_test_dir(module_name, fname, [master, replica])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_take_backup_from_delayed_replica(self):

View File

@ -1008,7 +1008,7 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
'FULL')
# Clean after yourself
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_basic_window_merge_multiple_descendants_1(self):
@ -1275,7 +1275,7 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
'--delete-expired', '--log-level-console=log'])
# Clean after yourself
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_window_chains(self):
@ -2535,4 +2535,82 @@ class RetentionTest(ProbackupTest, unittest.TestCase):
self.validate_pb(backup_dir, 'node')
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
def test_concurrent_running_full_backup(self):
"""
https://github.com/postgrespro/pg_probackup/issues/328
"""
fname = self.id().split('.')[3]
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
node = self.make_simple_node(
base_dir=os.path.join(module_name, fname, 'node'),
set_replication=True,
initdb_params=['--data-checksums'])
self.init_pb(backup_dir)
self.add_instance(backup_dir, 'node', node)
self.set_archiving(backup_dir, 'node', node)
node.slow_start()
# FULL
self.backup_node(backup_dir, 'node', node)
gdb = self.backup_node(backup_dir, 'node', node, gdb=True)
gdb.set_breakpoint('backup_data_file')
gdb.run_until_break()
gdb.kill()
self.assertTrue(
self.show_pb(backup_dir, 'node')[0]['status'],
'RUNNING')
self.backup_node(
backup_dir, 'node', node, backup_type='delta',
options=['--retention-redundancy=2', '--delete-expired'])
self.assertTrue(
self.show_pb(backup_dir, 'node')[1]['status'],
'RUNNING')
self.backup_node(backup_dir, 'node', node)
gdb = self.backup_node(backup_dir, 'node', node, gdb=True)
gdb.set_breakpoint('backup_data_file')
gdb.run_until_break()
gdb.kill()
gdb = self.backup_node(backup_dir, 'node', node, gdb=True)
gdb.set_breakpoint('backup_data_file')
gdb.run_until_break()
gdb.kill()
self.backup_node(backup_dir, 'node', node)
gdb = self.backup_node(backup_dir, 'node', node, gdb=True)
gdb.set_breakpoint('backup_data_file')
gdb.run_until_break()
gdb.kill()
self.backup_node(
backup_dir, 'node', node, backup_type='delta',
options=['--retention-redundancy=2', '--delete-expired'],
return_id=False)
self.assertTrue(
self.show_pb(backup_dir, 'node')[0]['status'],
'OK')
self.assertTrue(
self.show_pb(backup_dir, 'node')[1]['status'],
'RUNNING')
self.assertTrue(
self.show_pb(backup_dir, 'node')[2]['status'],
'OK')
self.assertEqual(
len(self.show_pb(backup_dir, 'node')),
6)
self.del_test_dir(module_name, fname)

View File

@ -298,7 +298,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
'Backup STATUS should be "ORPHAN"')
# Clean after yourself
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
# @unittest.skip("skip")
def test_validate_corrupted_intermediate_backups(self):
@ -3843,7 +3843,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
self.assertIn("WARNING: Some backups are not valid", e.message)
# Clean after yourself
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
# @unittest.expectedFailure
# @unittest.skip("skip")
@ -3906,7 +3906,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
self.assertIn("WARNING: Some backups are not valid", e.message)
# Clean after yourself
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
# @unittest.expectedFailure
# @unittest.skip("skip")
@ -3966,7 +3966,7 @@ class ValidateTest(ProbackupTest, unittest.TestCase):
self.assertIn("WARNING: Some backups are not valid", e.message)
# Clean after yourself
self.del_test_dir(module_name, fname, [node])
self.del_test_dir(module_name, fname)
# validate empty backup list
# page from future during validate