@@ -599,6 +599,23 @@ ptrack_walkdir(const char *path, Oid tablespaceOid, Oid dbOid)
 	FreeDir(dir);				/* we ignore any error here */
 }
 
+static void
+ptrack_atomic_increase(XLogRecPtr new_lsn, pg_atomic_uint64 *var)
+{
+	/*
+	 * We use pg_atomic_uint64 here only for alignment purposes, because
+	 * pg_atomic_uint64 is forcedly aligned on 8 bytes during the MSVC build.
+	 */
+	pg_atomic_uint64 old_lsn;
+
+	old_lsn.value = pg_atomic_read_u64(var);
+#if USE_ASSERT_CHECKING
+	elog(DEBUG3, "ptrack_mark_block: " UINT64_FORMAT " <- " UINT64_FORMAT, old_lsn.value, new_lsn);
+#endif
+	while (old_lsn.value < new_lsn &&
+		   !pg_atomic_compare_exchange_u64(var, (uint64 *) &old_lsn.value, new_lsn));
+}
+
 /*
  * Mark modified block in ptrack_map.
  */
@@ -608,15 +625,9 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode,
 {
 	PtBlockId	bid;
 	uint64		hash;
-	size_t		slot1;
-	size_t		slot2;
+	size_t		slots[2];
 	XLogRecPtr	new_lsn;
-	/*
-	 * We use pg_atomic_uint64 here only for alignment purposes, because
-	 * pg_atomic_uint64 is forcedly aligned on 8 bytes during the MSVC build.
-	 */
-	pg_atomic_uint64 old_lsn;
-	pg_atomic_uint64 old_init_lsn;
+	int			i;
 
 	if (ptrack_map_size == 0
 		|| ptrack_map == NULL
@@ -629,39 +640,29 @@ ptrack_mark_block(RelFileNodeBackend smgr_rnode,
 	bid.blocknum = blocknum;
 
 	hash = BID_HASH_FUNC(bid);
-	slot1 = (size_t)(hash % PtrackContentNblocks);
-	slot2 = (size_t)(((hash << 32) | (hash >> 32)) % PtrackContentNblocks);
+	slots[0] = (size_t)(hash % PtrackContentNblocks);
+	slots[1] = (size_t)(((hash << 32) | (hash >> 32)) % PtrackContentNblocks);
 
 	if (RecoveryInProgress())
 		new_lsn = GetXLogReplayRecPtr(NULL);
 	else
 		new_lsn = GetXLogInsertRecPtr();
 
 	/* Atomically assign new init LSN value */
-	old_init_lsn.value = pg_atomic_read_u64(&ptrack_map->init_lsn);
-	if (old_init_lsn.value == InvalidXLogRecPtr)
+	if (pg_atomic_read_u64(&ptrack_map->init_lsn) == InvalidXLogRecPtr)
 	{
 #if USE_ASSERT_CHECKING
-		elog(DEBUG1, "ptrack_mark_block: init_lsn " UINT64_FORMAT " <- " UINT64_FORMAT, old_init_lsn.value, new_lsn);
+		elog(DEBUG3, "ptrack_mark_block: init_lsn");
 #endif
-
-		while (old_init_lsn.value < new_lsn &&
-			   !pg_atomic_compare_exchange_u64(&ptrack_map->init_lsn, (uint64 *) &old_init_lsn.value, new_lsn));
+		ptrack_atomic_increase(new_lsn, &ptrack_map->init_lsn);
 	}
 
-	/* Atomically assign new LSN value to the first slot */
-	old_lsn.value = pg_atomic_read_u64(&ptrack_map->entries[slot1]);
-#if USE_ASSERT_CHECKING
-	elog(DEBUG3, "ptrack_mark_block: map[%zu]=" UINT64_FORMAT " <- " UINT64_FORMAT, slot1, old_lsn.value, new_lsn);
-#endif
-	while (old_lsn.value < new_lsn &&
-		   !pg_atomic_compare_exchange_u64(&ptrack_map->entries[slot1], (uint64 *) &old_lsn.value, new_lsn));
-
-	/* And to the second */
-	old_lsn.value = pg_atomic_read_u64(&ptrack_map->entries[slot2]);
+	/* Atomically assign new LSN value to the slots */
+	for (i = 0; i < lengthof(slots); i++)
+	{
 #if USE_ASSERT_CHECKING
-	elog(DEBUG3, "ptrack_mark_block: map[%zu]=" UINT64_FORMAT " <- " UINT64_FORMAT, slot2, old_lsn.value, new_lsn);
+		elog(DEBUG3, "ptrack_mark_block: map[%zu]", slots[i]);
 #endif
-	while (old_lsn.value < new_lsn &&
-		   !pg_atomic_compare_exchange_u64(&ptrack_map->entries[slot2], (uint64 *) &old_lsn.value, new_lsn));
+		ptrack_atomic_increase(new_lsn, &ptrack_map->entries[slots[i]]);
+	}
 }
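
Note: the refactoring above centralizes the "raise an LSN only while it is still smaller" compare-and-swap loop in ptrack_atomic_increase(), which is then applied to init_lsn and to both map slots. For illustration only, here is a minimal standalone sketch of that pattern using C11 atomics rather than PostgreSQL's pg_atomic_* wrappers; atomic_raise_u64 and the sample values are made up for this sketch and are not part of ptrack.

/*
 * Standalone sketch of a monotonic "raise" on a shared 64-bit value.
 * The loop stops once the stored value is already >= new_val; on a CAS
 * failure, atomic_compare_exchange_weak reloads old_val with the value
 * another writer just stored, so the retry always sees fresh data.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

static void
atomic_raise_u64(_Atomic uint64_t *var, uint64_t new_val)
{
	uint64_t	old_val = atomic_load(var);

	while (old_val < new_val &&
		   !atomic_compare_exchange_weak(var, &old_val, new_val))
		;
}

int
main(void)
{
	_Atomic uint64_t lsn = 100;

	atomic_raise_u64(&lsn, 200);	/* raises: 100 -> 200 */
	atomic_raise_u64(&lsn, 150);	/* no-op: 150 is not an increase */
	printf("%" PRIu64 "\n", atomic_load(&lsn));		/* prints 200 */
	return 0;
}

Because the CAS only ever replaces a strictly smaller value, concurrent writers can move the stored LSN forward but never backward, which is the invariant ptrack_mark_block() relies on for init_lsn and for each map entry.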