@@ -1087,47 +1087,27 @@ buf_flush_write_block_low(
break;
}
#if defined(UNIV_PMEMOBJ_BUF)
- // We capture the write from buffer pool flush
- // EXCEPT: space 0
- // if (bpage->id.page_no() != 0) {
- // if(0) {
-
+
+ // printf("\n [begin pm_buf_write space %zu page %zu==>", bpage->id.space(),bpage->id.page_no());
+ // if (!fsp_is_system_temporary(bpage->id.space())){
#if defined (UNIV_PMEMOBJ_BUF_V2)
- int ret = pm_buf_write_no_free_pool(gb_pmw->pop, gb_pmw->pbuf, bpage->id, bpage->size, frame, sync);
+ int ret = pm_buf_write_no_free_pool(gb_pmw->pop, gb_pmw->pbuf, bpage->id, bpage->size, frame, sync);
#elif defined (UNIV_PMEMOBJ_BUF_FLUSHER)
- int ret = pm_buf_write_with_flusher(gb_pmw->pop, gb_pmw->pbuf, bpage->id, bpage->size, frame, sync);
+ int ret = pm_buf_write_with_flusher(gb_pmw->pop, gb_pmw->pbuf, bpage->id, bpage->size, frame, sync);
#elif defined (UNIV_PMEMOBJ_BUF_APPEND)
- int ret = pm_buf_write_with_flusher_append(gb_pmw->pop, gb_pmw->pbuf, bpage->id, bpage->size, frame, sync);
+ int ret = pm_buf_write_with_flusher_append(gb_pmw->pop, gb_pmw->pbuf, bpage->id, bpage->size, frame, sync);
#else
- int ret = pm_buf_write(gb_pmw->pop, gb_pmw->pbuf, bpage->id, bpage->size, frame, sync);
+ int ret = pm_buf_write(gb_pmw->pop, gb_pmw->pbuf, bpage->id, bpage->size, frame, sync);
#endif
- assert(ret == PMEM_SUCCESS);
- // we remove this page from LRU
- // assert(buf_page_io_complete(bpage, true));
- assert(buf_page_io_complete(bpage, sync));
- goto skip_write_and_fsync;
-
- // if (gb_pmw->pbuf->is_async_only) {
- // if(!sync) {
- // int ret = pm_buf_write(gb_pmw->pop, gb_pmw->pbuf, bpage->id, bpage->size, frame, false);
- // assert(ret == PMEM_SUCCESS);
- // //After memcpy, We need this call to sync the buffer pool variables
- // assert(buf_page_io_complete(bpage));
- // //goto skip_write;
- // goto skip_write_and_fsync;
- // }
- // }
- // else {
- // //capture both sync and async write from buffer pool
- // int ret = pm_buf_write(gb_pmw->pop, gb_pmw->pbuf, bpage->id, bpage->size, frame, sync);
- // assert(ret == PMEM_SUCCESS);
- // //After memcpy, We need this call to sync the buffer pool variables
- // if (!sync)
- // assert(buf_page_io_complete(bpage));
- // //goto skip_write;
- // goto skip_write_and_fsync;
- // }
+ // printf("END pm_buf_write space %zu page %zu]\n", bpage->id.space(),bpage->id.page_no());
+ // printf(" END pm_buf_write]");
+ assert(ret == PMEM_SUCCESS);
+ // we remove this page from LRU
+ // assert(buf_page_io_complete(bpage, true));
+ assert(buf_page_io_complete(bpage, sync));
+ goto skip_write_and_fsync;
// }
+ // skip_pm_write:
#endif /* UNIV_PMEMOBJ_BUF*/
/* Disable use of double-write buffer for temporary tablespace.
Given the nature and load of temporary tablespace doublewrite buffer
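
Under UNIV_PMEMOBJ_BUF this hunk settles on a single capture path: the page image is copied into the persistent-memory buffer, the buffer-pool bookkeeping is completed immediately, and the normal doublewrite/fil_io/fsync path is skipped via skip_write_and_fsync. A minimal sketch of that flow, assuming the UNIV_PMEMOBJ_BUF_FLUSHER variant and the InnoDB/PMEM headers of this tree; only the calls visible in the hunk are taken from the commit, the wrapper function around them is illustrative:

	/* Sketch only: capture a buffer-pool page flush into the PMEM buffer
	   and bypass the regular data-file write + fsync. */
	static void
	pm_capture_page_write(buf_page_t* bpage, byte* frame, bool sync)
	{
		int	ret = pm_buf_write_with_flusher(gb_pmw->pop, gb_pmw->pbuf,
							bpage->id, bpage->size,
							frame, sync);
		assert(ret == PMEM_SUCCESS);

		/* The page is durable in PMEM, so finish the io_fix/LRU/flush-list
		   bookkeeping now; the caller then jumps to skip_write_and_fsync. */
		assert(buf_page_io_complete(bpage, sync));
	}
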
@@ -1258,6 +1238,8 @@ buf_flush_page(
&& is_uncompressed
&& !rw_lock_sx_lock_nowait(rw_lock, BUF_IO_WRITE)) {

+ // tdnguyen test
+ // printf("\n [begin handle buf_dblwr ==> ");
if (!fsp_is_system_temporary(bpage->id.space())) {
/* avoiding deadlock possibility involves
doublewrite buffer, should flush it, because
@@ -1268,6 +1250,8 @@ buf_flush_page(
}

rw_lock_sx_lock_gen(rw_lock, BUF_IO_WRITE);
+
+ // printf("end handle buf_dblwr] ");
}

/* If there is an observer that want to know if the asynchronous
@@ -1287,8 +1271,11 @@ buf_flush_page(
point, it is safe to access bpage, because it is io_fixed and
oldest_modification != 0. Thus, it cannot be relocated in the
buffer pool or removed from flush_list or LRU_list. */
-
+
+ // tdnguyen test
+ // printf("\n [begin buf_flush_write_block_low ==> ");
buf_flush_write_block_low(bpage, flush_type, sync);
+ // printf(" END buf_flush_write_block_low ] ");
}

return(flush);
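
The three buf_flush_page() hunks above only add commented-out trace printf() calls; the logic they bracket is unchanged. For orientation, that path behaves roughly as in the simplified sketch below; the two buf_dblwr_* calls sit in the lines elided between the hunks and are stock 5.7 code, not part of this commit:

	/* Could not take the page SX-latch without waiting: the holder may be
	   stuck behind the doublewrite buffer, so drain that first, then block. */
	if (flush_type == BUF_FLUSH_LIST
	    && is_uncompressed
	    && !rw_lock_sx_lock_nowait(rw_lock, BUF_IO_WRITE)) {

		if (!fsp_is_system_temporary(bpage->id.space())) {
			buf_dblwr_flush_buffered_writes();
		} else {
			buf_dblwr_sync_datafiles();
		}

		rw_lock_sx_lock_gen(rw_lock, BUF_IO_WRITE);
	}

	/* io_fixed + SX-latched: bpage cannot be relocated or evicted here. */
	buf_flush_write_block_low(bpage, flush_type, sync);
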
@@ -2273,10 +2260,13 @@ buf_flush_single_page_from_LRU(

Note: There is no guarantee that this page has actually
been freed, only that it has been flushed to disk */
-
+
+ // tdnguyen test
+ // printf("\n [begin buf_flush_page ==>");
freed = buf_flush_page(
buf_pool, bpage, BUF_FLUSH_SINGLE_PAGE, true);

+ // printf("END buf_flush_page ");
if (freed) {
break;
}
@@ -4076,8 +4066,9 @@ DECLARE_THREAD(pm_flusher_worker)(
// worker thread wait until there is is_requested signal
retry:
os_event_wait(flusher->is_req_not_empty);
-
- // printf("wakeup worker...\n");
+ #if defined(UNIV_PMEMOBJ_BUF_RECOVERY_DEBUG)
+ printf("wakeup worker...\n");
+ #endif
// looking for a full list in wait-list and flush it
mutex_enter(&flusher->mutex);
if (flusher->n_requested > 0) {
@@ -4087,6 +4078,9 @@ DECLARE_THREAD(pm_flusher_worker)(
if (plist)
{
// ***this call aio_batch ***
+ #if defined(UNIV_PMEMOBJ_BUF_RECOVERY_DEBUG)
+ printf(" in flusher thread, pointer id=%zu, list_id =%zu\n", i, plist->list_id);
+ #endif
pm_buf_flush_list(gb_pmw->pop, gb_pmw->pbuf, plist);
flusher->n_requested--;
os_event_set(flusher->is_req_full);
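
Taken together, the pm_flusher_worker hunks reduce each worker iteration to the loop below. The events, mutex, counters, and the pm_buf_flush_list() call are the ones visible in the diff; the list-lookup helper and the PMEM_* type names are assumptions for illustration:

	/* Sketch of one flusher-worker iteration (not the committed code). */
	static void
	pm_flusher_worker_iteration(PMEM_FLUSHER* flusher)
	{
		/* Sleep until some PMEM list is full and queued for flushing. */
		os_event_wait(flusher->is_req_not_empty);

		mutex_enter(&flusher->mutex);
		if (flusher->n_requested > 0) {
			/* Hypothetical helper: scan the wait-list for a full list. */
			PMEM_BUF_BLOCK_LIST*	plist = pm_flusher_pick_full_list(flusher);

			if (plist) {
				/* Submit the batched AIO writes for the whole list. */
				pm_buf_flush_list(gb_pmw->pop, gb_pmw->pbuf, plist);
				flusher->n_requested--;
				/* Let producers blocked on a full request queue continue. */
				os_event_set(flusher->is_req_full);
			}
		}
		mutex_exit(&flusher->mutex);
	}
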
@@ -4159,6 +4153,7 @@ pm_handle_finished_block_with_flusher(
pflush_list->n_aio_pending--;

if (pflush_list->n_aio_pending + pflush_list->n_sio_pending == 0) {
+ // printf("\n [begin finish AIO list %zu\n", pflush_list->list_id);
// Now all pages in this list are persistent in disk
// (0) flush spaces
pm_buf_flush_spaces_in_list(pop, buf, pflush_list);
@@ -4214,7 +4209,8 @@ pm_handle_finished_block_with_flusher(
pfree_pool->cur_lists++;
// wakeup who is waitting for free_pool available
os_event_set(buf->free_pool_event);
-
+
+ // printf("end finish AIO List %zu]", pflush_list->list_id);
pmemobj_rwlock_unlock(pop, &pfree_pool->lock);
}
// the list has some unfinished aio
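
The two pm_handle_finished_block_with_flusher() hunks bracket the AIO-completion path: once the last pending write of a list finishes, the list's tablespaces are fsynced and the list is recycled into the free pool. The sketch below fills the lines elided between the hunks with a hypothetical reset/append step and assumes the PMEM_* type names; the fields, events, and calls shown are the ones in the diff:

	/* Sketch of the completion path for one finished block (not the committed code). */
	static void
	pm_handle_finished_block_sketch(PMEMobjpool* pop, PMEM_BUF* buf,
					PMEM_BUF_BLOCK_LIST* pflush_list,
					PMEM_BUF_FREE_POOL* pfree_pool)
	{
		pflush_list->n_aio_pending--;

		if (pflush_list->n_aio_pending + pflush_list->n_sio_pending == 0) {
			/* All pages of this list are durable on disk: fsync the spaces. */
			pm_buf_flush_spaces_in_list(pop, buf, pflush_list);

			/* Recycle the list into the free pool (reset step is hypothetical). */
			pmemobj_rwlock_wrlock(pop, &pfree_pool->lock);
			pm_buf_list_reset_and_append(pfree_pool, pflush_list);
			pfree_pool->cur_lists++;
			/* Wake writers waiting for a free list. */
			os_event_set(buf->free_pool_event);
			pmemobj_rwlock_unlock(pop, &pfree_pool->lock);
		}
	}
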
@@ -4269,12 +4265,14 @@ DECLARE_THREAD(pm_buf_flush_list_cleaner_coordinator)(
if (srv_shutdown_state != SRV_SHUTDOWN_NONE) {
break;
}
- printf(" cur free list = %zu\n", D_RW(gb_pmw->pbuf->free_pool)->cur_lists);
+ printf(" cur free list = %zu, cur spec_list = %zu\n",
+ D_RW(gb_pmw->pbuf->free_pool)->cur_lists,
+ D_RW(gb_pmw->pbuf->spec_list)->cur_pages);

#if defined(UNIV_PMEMOBJ_BUF_FLUSHER)
- mutex_enter(&flusher->mutex);
- printf(" n_requested/size %zu/%zu \n", flusher->n_requested, flusher->size);
- mutex_exit(&flusher->mutex);
+ // mutex_enter(&flusher->mutex);
+ // printf(" n_requested/size %zu/%zu \n", flusher->n_requested, flusher->size);
+ // mutex_exit(&flusher->mutex);
#endif
} // end while thread