@@ -44,7 +44,7 @@ extern volatile int64 gb_write_log_time;
 extern volatile int64 gb_n_write_log;
 #endif
 
-#if defined (UNIV_PMEMOBJ_PART_PL) || defined (UNIV_PMEMOBJ_WAL_ELR)
+#if defined (UNIV_PMEMOBJ_PART_PL) || defined (UNIV_PMEMOBJ_WAL_ELR) || defined(UNIV_PMEMOBJ_WAL)
 #include "my_pmemobj.h"
 extern PMEM_WRAPPER* gb_pmw;
 #endif /* UNIV_PMEMOBJ_PART_PL */
@@ -1369,7 +1369,7 @@ mtr_t::Command::execute()
 #endif
 }
 
-#else // old method
+#else // old method for UNIV_SKIPLOG
 
 // In PL-NVM, we keep log records in our data structure
 // This function just release the resource without writing any logs
@@ -1410,8 +1410,9 @@ mtr_t::Command::execute()
 	release_resources();
 }
 #endif // UNIV_PMEMOBJ_PART_PL
-#elif defined (UNIV_PMEMOBJ_WAL) && defined (UNIV_PMEMOBJ_WAL_ELR)
-// Early lock release
+// #elif defined (UNIV_PMEMOBJ_WAL) && defined (UNIV_PMEMOBJ_WAL_ELR)
+#elif defined (UNIV_PMEMOBJ_WAL)
+// Centralized logging methods: regular or Early lock release
 void
 mtr_t::Command::execute()
 {
@@ -1596,7 +1597,10 @@ mtr_t::Command::execute()
 #if defined(UNIV_PMEMOBJ_LOG) || defined(UNIV_PMEMOBJ_WAL)
 	// update the lsn and buf_free
 	gb_pmw->plogbuf->lsn = log->lsn;
+	pmemobj_persist(gb_pmw->pop, &gb_pmw->plogbuf->lsn, sizeof(gb_pmw->plogbuf->lsn));
+
 	gb_pmw->plogbuf->buf_free = log->buf_free;
+	pmemobj_persist(gb_pmw->pop, &gb_pmw->plogbuf->buf_free, sizeof(gb_pmw->plogbuf->buf_free));
 #endif /* UNIV_PMEMOBJ_LOG */
 	srv_stats.log_write_requests.inc();
 
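The hunk above pairs each header-field store with an explicit `pmemobj_persist()`. A minimal runnable sketch of that store-then-persist pattern, assuming a hypothetical `log_header` struct in place of the PR's actual `PMEM_WRAPPER`/`plogbuf` layout:

```cpp
// Sketch only: log_header and update_header are illustrative names, not the
// PR's API. Only the pmemobj_persist() usage mirrors the diff.
#include <libpmemobj.h>
#include <cstdint>

struct log_header {
	uint64_t lsn;       // last log sequence number written to NVM
	uint64_t buf_free;  // first free offset in the persistent log buffer
};

static void
update_header(PMEMobjpool* pop, log_header* h,
	      uint64_t lsn, uint64_t buf_free)
{
	// h must point into the pool managed by pop. Each store is followed
	// by pmemobj_persist(), which flushes that address range from CPU
	// caches to persistent memory before execution continues.
	h->lsn = lsn;
	pmemobj_persist(pop, &h->lsn, sizeof(h->lsn));

	h->buf_free = buf_free;
	pmemobj_persist(pop, &h->buf_free, sizeof(h->buf_free));
}
```

Persisting each field separately keeps every flush to a small range, at the cost that a crash between the two calls can leave one field newer than the other, which the recovery path has to tolerate.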
@@ -1611,21 +1615,26 @@ mtr_t::Command::execute()
 	if (m_impl->m_made_dirty) {
 		log_flush_order_mutex_enter();
 	}
-
+#if defined (UNIV_PMEMOBJ_WAL_ELR)
 	/* It is now safe to release the log mutex because the
 	flush_order mutex will ensure that we are the first one
 	to insert into the flush list. */
 	log_mutex_exit();
+#endif // UNIV_PMEMOBJ_WAL_ELR
 
 	// now we do the memcpy
 	TX_BEGIN(gb_pmw->pop) {
 		TX_MEMCPY(start_cpy, start_log_ptr, len_cpy);
 	} TX_ONABORT {
 	} TX_END
+
 	gb_pmw->plogbuf->need_recv = true;
+	pmemobj_persist(gb_pmw->pop, &gb_pmw->plogbuf->need_recv, sizeof(gb_pmw->plogbuf->need_recv));
 
 	m_impl->m_mtr->m_commit_lsn = m_end_lsn;
-
+#if !defined(UNIV_PMEMOBJ_WAL_ELR)
+	log_mutex_exit();
+#endif
 	release_blocks();
 
 	if (m_impl->m_made_dirty) {
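Taken together, this hunk does three things: it copies the mtr's log records into the NVM log buffer inside a libpmemobj transaction, persists the recovery flag afterwards, and releases the log mutex early only under UNIV_PMEMOBJ_WAL_ELR (the flush-order mutex already serializes flush-list insertion); the regular path now holds it until m_commit_lsn is set. A self-contained sketch of that copy-then-flag step, with hypothetical names (`LogBuf`, `copy_log_records`) standing in for the PR's API:

```cpp
// Sketch only: LogBuf and copy_log_records are assumptions; the libpmemobj
// calls (TX_BEGIN/TX_MEMCPY/TX_ONABORT/TX_END, pmemobj_persist) mirror the diff.
#include <libpmemobj.h>
#include <cstddef>

struct LogBuf {
	bool need_recv;     // recovery must scan this buffer if true
	char buf[1 << 16];  // persistent staging area for redo records
};

static void
copy_log_records(PMEMobjpool* pop, LogBuf* plogbuf,
		 const void* src, size_t len)
{
	// plogbuf must point into the pool managed by pop. TX_MEMCPY
	// undo-logs the destination range inside the transaction, so a
	// crash mid-copy rolls the buffer back to its old contents.
	TX_BEGIN(pop) {
		TX_MEMCPY(plogbuf->buf, src, len);
	} TX_ONABORT {
		// copy rolled back automatically; nothing extra to undo
	} TX_END

	// Flag the buffer for recovery only after the copy is durable,
	// and persist the flag itself before the caller publishes the
	// commit LSN.
	plogbuf->need_recv = true;
	pmemobj_persist(pop, &plogbuf->need_recv, sizeof(plogbuf->need_recv));
}
```

Setting need_recv after the transactional copy, and persisting it explicitly, gives a simple ordering guarantee: if recovery ever observes need_recv == true, the copied records it points at were already durable.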