#include <string.h>
#include <threads.h>

+ static atomic_uint_fast64_t deletes = 0, inserts = 0;
+
+ /*
+  * Reference:
+  * A more Pragmatic Implementation of the Lock-free, Ordered, Linked List
+  * https://arxiv.org/abs/2010.15755
+  */
+ #ifdef RUNTIME_STAT
+
+ enum {
+     TRACE_nop = 0,
+     TRACE_retry,     /* number of retries in the __list_find function. */
+     TRACE_contains,  /* number of times __list_find observed a marked
+                         (logically deleted) node through curr. */
+     TRACE_traversal, /* number of list elements traversed in __list_find. */
+     TRACE_fail,      /* number of CAS() failures. */
+     TRACE_del,       /* number of times a list_delete operation failed and
+                         restarted. */
+     TRACE_ins,       /* number of times a list_insert operation failed and
+                         restarted. */
+     TRACE_inserts,   /* number of atomic_load operations in list_delete,
+                         list_insert and __list_find. */
+     TRACE_deletes    /* number of atomic_store operations in list_delete,
+                         list_insert and __list_find. */
+ };
+
+ struct runtime_statistics {
+     atomic_uint_fast64_t retry, contains, traversal, fail;
+     atomic_uint_fast64_t del, ins;
+     atomic_uint_fast64_t load, store;
+ };
+ static struct runtime_statistics stats = {0};
+
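+ /* Statistics-collecting wrappers: CAS() counts each failed compare-and-swap
+  * in stats.fail, and ATOMIC_LOAD()/ATOMIC_STORE_EXPLICIT() count every
+  * atomic load/store in stats.load/stats.store before performing it.
+  */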
+ #define CAS(obj, expected, desired)                                          \
+     ({                                                                       \
+         bool __ret = atomic_compare_exchange_strong(obj, expected, desired); \
+         if (!__ret)                                                          \
+             atomic_fetch_add(&stats.fail, 1);                                \
+         __ret;                                                               \
+     })
+ #define ATOMIC_LOAD(obj)                  \
+     ({                                    \
+         atomic_fetch_add(&stats.load, 1); \
+         atomic_load(obj);                 \
+     })
+ #define ATOMIC_STORE_EXPLICIT(obj, desired, order)  \
+     do {                                            \
+         atomic_fetch_add(&stats.store, 1);          \
+         atomic_store_explicit(obj, desired, order); \
+     } while (0)
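+ /* TRACE(x) increments the matching stats.x counter at each instrumented
+  * point in the list operations below.
+  */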
+ #define TRACE(ops)                           \
+     do {                                     \
+         if (TRACE_##ops)                     \
+             atomic_fetch_add(&stats.ops, 1); \
+     } while (0)
+
+ static void do_analysis(void)
+ {
+     __atomic_thread_fence(__ATOMIC_SEQ_CST);
+ #define TRACE_PRINT(ops) printf("%-10s: %ld\n", #ops, stats.ops);
+     TRACE_PRINT(retry);
+     TRACE_PRINT(contains);
+     TRACE_PRINT(traversal);
+     TRACE_PRINT(fail);
+     TRACE_PRINT(del);
+     TRACE_PRINT(ins);
+     TRACE_PRINT(load);
+     TRACE_PRINT(store);
+ #undef TRACE_PRINT
+ #define TRACE_PRINT(val) printf("%-10s: %ld\n", #val, val);
+     TRACE_PRINT(deletes);
+     TRACE_PRINT(inserts);
+ #undef TRACE_PRINT
+ }
+
+ #else
+
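+ /* Without RUNTIME_STAT the wrappers fall back to the plain C11 atomic
+  * operations and TRACE() expands to an empty statement.
+  */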
+ #define CAS(obj, expected, desired) \
+     ({ atomic_compare_exchange_strong(obj, expected, desired); })
+ #define ATOMIC_LOAD(obj) ({ atomic_load(obj); })
+ #define ATOMIC_STORE_EXPLICIT(obj, desired, order)  \
+     do {                                            \
+         atomic_store_explicit(obj, desired, order); \
+     } while (0)
+ #define TRACE(ops) \
+     do {           \
+     } while (0)
+
+ static void do_analysis(void)
+ {
+     __atomic_thread_fence(__ATOMIC_SEQ_CST);
+     fprintf(stderr, "inserts = %zu, deletes = %zu\n", inserts, deletes);
+ }
+
+ #endif /* RUNTIME_STAT */
+
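+ /* Registers do_analysis() with atexit() so the statistics are reported when
+  * the program exits normally.
+  */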
+ #define RUNTIME_STAT_INIT() atexit(do_analysis)
+
#define HP_MAX_THREADS 128
#define HP_MAX_HPS 5 /* This is named 'K' in the HP paper */
#define CLPAD (128 / sizeof(uintptr_t))
@@ -162,8 +261,6 @@ void list_hp_retire(list_hp_t *hp, uintptr_t ptr)
#define N_THREADS (128 / 2)
#define MAX_THREADS 128

- static atomic_uint_fast32_t deletes = 0, inserts = 0;
-
enum { HP_NEXT = 0, HP_CURR = 1, HP_PREV };

#define is_marked(p) (bool) ((uintptr_t)(p) & 0x01)
@@ -225,21 +322,29 @@ static bool __list_find(list_t *list,

try_again:
    prev = &list->head;
-     curr = (list_node_t *) atomic_load(prev);
+     curr = (list_node_t *) ATOMIC_LOAD(prev);
    (void) list_hp_protect_ptr(list->hp, HP_CURR, (uintptr_t) curr);
-     if (atomic_load(prev) != get_unmarked(curr))
+     if (ATOMIC_LOAD(prev) != get_unmarked(curr)) {
+         TRACE(retry);
        goto try_again;
+     }
    while (true) {
-         next = (list_node_t *) atomic_load(&get_unmarked_node(curr)->next);
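+         /* A marked curr means the node was logically deleted by a concurrent
+          * thread while this search was traversing; record it in the contains
+          * counter before reloading the next pointer. */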
+         if (is_marked(curr))
+             TRACE(contains);
+         next = (list_node_t *) ATOMIC_LOAD(&get_unmarked_node(curr)->next);
        (void) list_hp_protect_ptr(list->hp, HP_NEXT, get_unmarked(next));
        /* On a CAS failure, the search function, "__list_find," will simply
         * have to go backwards in the list until an unmarked element is found
         * from which the search in increasing key order can be started.
         */
-         if (atomic_load(&get_unmarked_node(curr)->next) != (uintptr_t) next)
+         if (ATOMIC_LOAD(&get_unmarked_node(curr)->next) != (uintptr_t) next) {
+             TRACE(retry);
            goto try_again;
-         if (atomic_load(prev) != get_unmarked(curr))
+         }
+         if (ATOMIC_LOAD(prev) != get_unmarked(curr)) {
+             TRACE(retry);
            goto try_again;
+         }
        if (get_unmarked_node(next) == next) {
            if (!(get_unmarked_node(curr)->key < *key)) {
                *par_curr = curr;
@@ -252,12 +357,15 @@ static bool __list_find(list_t *list,
                                           get_unmarked(curr));
        } else {
            uintptr_t tmp = get_unmarked(curr);
-             if (!atomic_compare_exchange_strong(prev, &tmp, get_unmarked(next)))
+             if (!CAS(prev, &tmp, get_unmarked(next))) {
+                 TRACE(retry);
                goto try_again;
+             }
            list_hp_retire(list->hp, get_unmarked(curr));
        }
        curr = next;
        (void) list_hp_protect_release(list->hp, HP_CURR, get_unmarked(next));
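+         /* Each pass through this loop advances curr by one node. */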
+         TRACE(traversal);
    }
}

@@ -274,13 +382,14 @@ bool list_insert(list_t *list, list_key_t key)
            list_hp_clear(list->hp);
            return false;
        }
-         atomic_store_explicit(&node->next, (uintptr_t) curr,
+         ATOMIC_STORE_EXPLICIT(&node->next, (uintptr_t) curr,
                              memory_order_relaxed);
        uintptr_t tmp = get_unmarked(curr);
-         if (atomic_compare_exchange_strong(prev, &tmp, (uintptr_t) node)) {
+         if (CAS(prev, &tmp, (uintptr_t) node)) {
            list_hp_clear(list->hp);
            return true;
        }
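+         /* The CAS failed because *prev changed underneath us; count the
+          * failed insert attempt and retry from __list_find. */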
+         TRACE(ins);
    }
}

@@ -296,12 +405,13 @@ bool list_delete(list_t *list, list_key_t key)

        uintptr_t tmp = get_unmarked(next);

-         if (!atomic_compare_exchange_strong(&curr->next, &tmp,
-                                             get_marked(next)))
+         if (!CAS(&curr->next, &tmp, get_marked(next))) {
+             TRACE(del);
            continue;
+         }

        tmp = get_unmarked(curr);
-         if (atomic_compare_exchange_strong(prev, &tmp, get_unmarked(next))) {
+         if (CAS(prev, &tmp, get_unmarked(next))) {
            list_hp_clear(list->hp);
            list_hp_retire(list->hp, get_unmarked(curr));
        } else {
@@ -364,6 +474,7 @@ static void *delete_thread(void *arg)

int main(void)
{
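+     /* Register the at-exit report before any worker threads are spawned. */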
+     RUNTIME_STAT_INIT();
    list_t *list = list_new();

    pthread_t thr[N_THREADS];
@@ -382,8 +493,5 @@ int main(void)

    list_destroy(list);

-     fprintf(stderr, "inserts = %zu, deletes = %zu\n", atomic_load(&inserts),
-             atomic_load(&deletes));
-
    return 0;
}