Skip to content

Commit 8e71679

Browse files
committed
tests: minor fixes
1 parent 5b41a10 commit 8e71679

File tree

3 files changed

+464
-10
lines changed

3 files changed

+464
-10
lines changed

Diff for: tests/compatibility.py

+159
Original file line numberDiff line numberDiff line change
@@ -311,3 +311,162 @@ def test_backward_compatibility_ptrack(self):
311311
if self.paranoia:
312312
pgdata_restored = self.pgdata_content(node_restored.data_dir)
313313
self.compare_pgdata(pgdata, pgdata_restored)
314+
315+
# @unittest.expectedFailure
316+
# @unittest.skip("skip")
317+
def test_backward_compatibility_compression(self):
318+
"""Description in jira issue PGPRO-434"""
319+
fname = self.id().split('.')[3]
320+
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
321+
node = self.make_simple_node(
322+
base_dir="{0}/{1}/node".format(module_name, fname),
323+
set_replication=True,
324+
initdb_params=['--data-checksums'],
325+
pg_options={
326+
'max_wal_senders': '2',
327+
'autovacuum': 'off'})
328+
329+
self.init_pb(backup_dir, old_binary=True)
330+
self.add_instance(backup_dir, 'node', node, old_binary=True)
331+
332+
self.set_archiving(backup_dir, 'node', node, old_binary=True)
333+
node.slow_start()
334+
335+
node.pgbench_init(scale=10)
336+
337+
# FULL backup with OLD binary
338+
backup_id = self.backup_node(
339+
backup_dir, 'node', node,
340+
old_binary=True,
341+
options=['--compress'])
342+
343+
if self.paranoia:
344+
pgdata = self.pgdata_content(node.data_dir)
345+
346+
# restore OLD FULL with new binary
347+
node_restored = self.make_simple_node(
348+
base_dir="{0}/{1}/node_restored".format(module_name, fname))
349+
350+
node_restored.cleanup()
351+
352+
self.restore_node(
353+
backup_dir, 'node', node_restored,
354+
options=["-j", "4"])
355+
356+
if self.paranoia:
357+
pgdata_restored = self.pgdata_content(node_restored.data_dir)
358+
self.compare_pgdata(pgdata, pgdata_restored)
359+
360+
# PAGE backup with OLD binary
361+
pgbench = node.pgbench(
362+
stdout=subprocess.PIPE,
363+
stderr=subprocess.STDOUT,
364+
options=["-c", "4", "-T", "10"])
365+
pgbench.wait()
366+
pgbench.stdout.close()
367+
368+
self.backup_node(
369+
backup_dir, 'node', node,
370+
backup_type='page',
371+
old_binary=True,
372+
options=['--compress'])
373+
374+
if self.paranoia:
375+
pgdata = self.pgdata_content(node.data_dir)
376+
377+
node_restored.cleanup()
378+
self.restore_node(
379+
backup_dir, 'node', node_restored,
380+
options=["-j", "4"])
381+
382+
if self.paranoia:
383+
pgdata_restored = self.pgdata_content(node_restored.data_dir)
384+
self.compare_pgdata(pgdata, pgdata_restored)
385+
386+
# PAGE backup with new binary
387+
pgbench = node.pgbench(
388+
stdout=subprocess.PIPE,
389+
stderr=subprocess.STDOUT,
390+
options=["-c", "4", "-T", "10"])
391+
pgbench.wait()
392+
pgbench.stdout.close()
393+
394+
self.backup_node(
395+
backup_dir, 'node', node,
396+
backup_type='page',
397+
options=['--compress'])
398+
399+
if self.paranoia:
400+
pgdata = self.pgdata_content(node.data_dir)
401+
402+
node_restored.cleanup()
403+
404+
self.restore_node(
405+
backup_dir, 'node', node_restored,
406+
options=["-j", "4"])
407+
408+
if self.paranoia:
409+
pgdata_restored = self.pgdata_content(node_restored.data_dir)
410+
self.compare_pgdata(pgdata, pgdata_restored)
411+
412+
# Delta backup with old binary
413+
self.delete_pb(backup_dir, 'node', backup_id)
414+
415+
self.backup_node(
416+
backup_dir, 'node', node,
417+
old_binary=True,
418+
options=['--compress'])
419+
420+
pgbench = node.pgbench(
421+
stdout=subprocess.PIPE,
422+
stderr=subprocess.STDOUT,
423+
options=["-c", "4", "-T", "10"])
424+
425+
pgbench.wait()
426+
pgbench.stdout.close()
427+
428+
self.backup_node(
429+
backup_dir, 'node', node,
430+
backup_type='delta',
431+
options=['--compress'],
432+
old_binary=True)
433+
434+
if self.paranoia:
435+
pgdata = self.pgdata_content(node.data_dir)
436+
437+
node_restored.cleanup()
438+
439+
self.restore_node(
440+
backup_dir, 'node', node_restored,
441+
options=["-j", "4"])
442+
443+
if self.paranoia:
444+
pgdata_restored = self.pgdata_content(node_restored.data_dir)
445+
self.compare_pgdata(pgdata, pgdata_restored)
446+
447+
# Delta backup with new binary
448+
pgbench = node.pgbench(
449+
stdout=subprocess.PIPE,
450+
stderr=subprocess.STDOUT,
451+
options=["-c", "4", "-T", "10"])
452+
453+
pgbench.wait()
454+
pgbench.stdout.close()
455+
456+
self.backup_node(
457+
backup_dir, 'node', node,
458+
backup_type='delta',
459+
options=['--compress'])
460+
461+
if self.paranoia:
462+
pgdata = self.pgdata_content(node.data_dir)
463+
464+
node_restored.cleanup()
465+
466+
self.restore_node(
467+
backup_dir, 'node', node_restored,
468+
options=["-j", "4"])
469+
470+
if self.paranoia:
471+
pgdata_restored = self.pgdata_content(node_restored.data_dir)
472+
self.compare_pgdata(pgdata, pgdata_restored)

Diff for: tests/compression.py

+67
Original file line numberDiff line numberDiff line change
@@ -494,3 +494,70 @@ def test_compression_wrong_algorithm(self):
494494

495495
# Clean after yourself
496496
self.del_test_dir(module_name, fname)
497+
498+
@unittest.skip("skip")
def test_uncompressable_pages(self):
    """
    make archive node, create table with uncompressable toast pages,
    take backup with compression, make sure that page was not compressed,
    restore backup and check data correctness
    """
    fname = self.id().split('.')[3]
    backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
    node = self.make_simple_node(
        base_dir="{0}/{1}/node".format(module_name, fname),
        set_replication=True,
        initdb_params=['--data-checksums'],
        pg_options={
            'wal_level': 'replica',
            'max_wal_senders': '2',
            'checkpoint_timeout': '30s'}
        )

    self.init_pb(backup_dir)
    self.add_instance(backup_dir, 'node', node)
    self.set_archiving(backup_dir, 'node', node)
    node.slow_start()

    # Build a table with very large TOASTed attributes
    # (repeat() of an md5 digest — presumably to exercise the backup
    # compression path on TOAST pages; TODO confirm incompressibility).
    node.safe_psql(
        "postgres",
        "create table t as select i, "
        "repeat(md5(i::text),5006056) as fat_attr "
        "from generate_series(0,10) i;")

    self.backup_node(
        backup_dir, 'node', node,
        backup_type='full',
        options=[
            '--compress',
            '--log-level-file=verbose'])

    node.cleanup()

    # Restore and take a second compressed FULL from the restored
    # cluster to make sure the restored pages back up cleanly too.
    self.restore_node(backup_dir, 'node', node)
    node.slow_start()

    self.backup_node(
        backup_dir, 'node', node,
        backup_type='full',
        options=[
            '--compress',
            '--log-level-file=verbose'])

    # Clean after yourself (was commented out, leaking the per-test
    # working directory whenever the skip is lifted)
    self.del_test_dir(module_name, fname)

0 commit comments

Comments (0)