Commit 58c3197

hp_list: Introduce runtime statistics (#9)

This patch introduces runtime statistics. When the macro "RUNTIME_STAT" is defined, the following entries are collected:

- number of retries in the __list_find function
- number of wait-free contains in the __list_find function
- number of list elements traversed in the __list_find function
- number of CAS() failures
- number of list_delete operations that failed and restarted
- number of list_insert operations that failed and restarted
- number of linked list elements deleted
- number of linked list elements created
- number of atomic_load operations in list_delete, list_insert, and __list_find
- number of atomic_store operations in list_delete, list_insert, and __list_find
1 parent ce237c8 commit 58c3197

File tree

3 files changed (+145, −16 lines)

hp_list/Makefile (+5)
@@ -12,6 +12,11 @@ $(BIN): main.c
 all: CFLAGS += -O2
 all: $(BIN)
 
+# Once RUNTIME_STAT is defined, the program will show runtime statistics
+# at exit.
+analyze: CFLAGS += -D RUNTIME_STAT
+analyze: $(BIN)
+
 indent:
 	clang-format -i *.[ch]
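With the new target, `make analyze` builds the same binary with `-D RUNTIME_STAT`, so the counters described in hp_list/README.md below are collected and printed when the program exits (via the `atexit` hook that `RUNTIME_STAT_INIT()` registers in main.c).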

hp_list/README.md (+16)
@@ -0,0 +1,16 @@
+# Concurrent Linked List with Hazard Pointers
+
+## Runtime Statistics
+
+The following variables are collected when `RUNTIME_STAT` is defined:
+
+* **retry** is the number of retries in the `__list_find` function.
+* **contains** is the number of marked nodes that the curr pointer passed over wait-free in the `__list_find` function.
+* **traversal** is the number of list elements traversed in the `__list_find` function.
+* **fail** is the number of `CAS()` failures.
+* **del** is the number of `list_delete` operations that failed and restarted.
+* **ins** is the number of `list_insert` operations that failed and restarted.
+* **deletes** is the number of linked list elements deleted.
+* **inserts** is the number of linked list elements created.
+* **load** is the number of `atomic_load` operations in `list_delete`, `list_insert`, and `__list_find`.
+* **store** is the number of `atomic_store` operations in `list_delete`, `list_insert`, and `__list_find`.
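The counters above are updated in main.c through a token-pasting `TRACE()` macro (see the diff below). The following is a minimal, self-contained sketch of that pattern; the two-counter `stats` struct is an illustrative subset, not the full hp_list code:

```c
#include <stdatomic.h>
#include <stdio.h>

/* Illustrative subset: each traced event name appears both as an enum
 * constant and as a field of `stats`. */
enum { TRACE_nop = 0, TRACE_retry, TRACE_fail };

static struct {
    atomic_uint_fast64_t retry, fail;
} stats;

/* TRACE(retry) pastes the name twice: the guard tests TRACE_retry
 * (nonzero for every real event) and the increment targets stats.retry. */
#define TRACE(ops)                           \
    do {                                     \
        if (TRACE_##ops)                     \
            atomic_fetch_add(&stats.ops, 1); \
    } while (0)

int main(void)
{
    TRACE(retry);
    TRACE(retry);
    TRACE(fail);
    printf("retry: %lu, fail: %lu\n",
           (unsigned long) atomic_load(&stats.retry),
           (unsigned long) atomic_load(&stats.fail));
    return 0;
}
```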

hp_list/main.c (+124, −16)
@@ -14,6 +14,105 @@
 #include <string.h>
 #include <threads.h>
 
+static atomic_uint_fast64_t deletes = 0, inserts = 0;
+
+/*
+ * Reference:
+ * A more Pragmatic Implementation of the Lock-free, Ordered, Linked List
+ * https://arxiv.org/abs/2010.15755
+ */
+#ifdef RUNTIME_STAT
+
+enum {
+    TRACE_nop = 0,
+    TRACE_retry,     /* the number of retries in the __list_find function */
+    TRACE_contains,  /* the number of marked nodes that the curr pointer
+                        passed over wait-free in the __list_find function */
+    TRACE_traversal, /* the number of list elements traversed in the
+                        __list_find function */
+    TRACE_fail,      /* the number of CAS() failures */
+    TRACE_del,       /* the number of list_delete operations that failed and
+                        restarted */
+    TRACE_ins,       /* the number of list_insert operations that failed and
+                        restarted */
+    TRACE_inserts,   /* the number of linked list elements created */
+    TRACE_deletes    /* the number of linked list elements deleted */
+};
+
+struct runtime_statistics {
+    atomic_uint_fast64_t retry, contains, traversal, fail;
+    atomic_uint_fast64_t del, ins;
+    atomic_uint_fast64_t load, store;
+};
+static struct runtime_statistics stats = {0};
+
+#define CAS(obj, expected, desired)                                          \
+    ({                                                                       \
+        bool __ret = atomic_compare_exchange_strong(obj, expected, desired); \
+        if (!__ret)                                                          \
+            atomic_fetch_add(&stats.fail, 1);                                \
+        __ret;                                                               \
+    })
+#define ATOMIC_LOAD(obj)                  \
+    ({                                    \
+        atomic_fetch_add(&stats.load, 1); \
+        atomic_load(obj);                 \
+    })
+#define ATOMIC_STORE_EXPLICIT(obj, desired, order)  \
+    do {                                            \
+        atomic_fetch_add(&stats.store, 1);          \
+        atomic_store_explicit(obj, desired, order); \
+    } while (0)
+#define TRACE(ops)                           \
+    do {                                     \
+        if (TRACE_##ops)                     \
+            atomic_fetch_add(&stats.ops, 1); \
+    } while (0)
+
+static void do_analysis(void)
+{
+    __atomic_thread_fence(__ATOMIC_SEQ_CST);
+#define TRACE_PRINT(ops) \
+    printf("%-10s: %ld\n", #ops, (long) atomic_load(&stats.ops));
+    TRACE_PRINT(retry);
+    TRACE_PRINT(contains);
+    TRACE_PRINT(traversal);
+    TRACE_PRINT(fail);
+    TRACE_PRINT(del);
+    TRACE_PRINT(ins);
+    TRACE_PRINT(load);
+    TRACE_PRINT(store);
+#undef TRACE_PRINT
+#define TRACE_PRINT(val) printf("%-10s: %ld\n", #val, (long) atomic_load(&val));
+    TRACE_PRINT(deletes);
+    TRACE_PRINT(inserts);
+#undef TRACE_PRINT
+}
+
+#else
+
+#define CAS(obj, expected, desired) \
+    ({ atomic_compare_exchange_strong(obj, expected, desired); })
+#define ATOMIC_LOAD(obj) ({ atomic_load(obj); })
+#define ATOMIC_STORE_EXPLICIT(obj, desired, order)  \
+    do {                                            \
+        atomic_store_explicit(obj, desired, order); \
+    } while (0)
+#define TRACE(ops) \
+    do {           \
+    } while (0)
+
+static void do_analysis(void)
+{
+    __atomic_thread_fence(__ATOMIC_SEQ_CST);
+    fprintf(stderr, "inserts = %zu, deletes = %zu\n",
+            (size_t) atomic_load(&inserts), (size_t) atomic_load(&deletes));
+}
+
+#endif /* RUNTIME_STAT */
+
+#define RUNTIME_STAT_INIT() atexit(do_analysis)
+
 #define HP_MAX_THREADS 128
 #define HP_MAX_HPS 5 /* This is named 'K' in the HP paper */
 #define CLPAD (128 / sizeof(uintptr_t))
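The `CAS()` and `ATOMIC_LOAD()` wrappers in this hunk return a value from a multi-statement macro via the GNU statement-expression extension (`({ ... })`, supported by GCC and Clang). Below is a stripped-down sketch of the instrumented-CAS idea under that assumption; the `cas_failures` counter and `slot` variable are illustrative names, not part of the commit:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint_fast64_t cas_failures;

/* The last expression inside ({ ... }) becomes the value of the whole
 * macro, so the caller can test the CAS result while the failure counter
 * is bumped as a side effect. */
#define CAS(obj, expected, desired)                                          \
    ({                                                                       \
        bool __ret = atomic_compare_exchange_strong(obj, expected, desired); \
        if (!__ret)                                                          \
            atomic_fetch_add(&cas_failures, 1);                              \
        __ret;                                                               \
    })

int main(void)
{
    atomic_uintptr_t slot = 0;
    uintptr_t expected = 1; /* deliberately wrong, so this CAS fails */
    if (!CAS(&slot, &expected, (uintptr_t) 2))
        printf("fail: %lu\n", (unsigned long) atomic_load(&cas_failures));
    return 0;
}
```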
@@ -162,8 +261,6 @@ void list_hp_retire(list_hp_t *hp, uintptr_t ptr)
 #define N_THREADS (128 / 2)
 #define MAX_THREADS 128
 
-static atomic_uint_fast32_t deletes = 0, inserts = 0;
-
 enum { HP_NEXT = 0, HP_CURR = 1, HP_PREV };
 
 #define is_marked(p) (bool) ((uintptr_t)(p) &0x01)
@@ -225,21 +322,29 @@ static bool __list_find(list_t *list,
 
 try_again:
     prev = &list->head;
-    curr = (list_node_t *) atomic_load(prev);
+    curr = (list_node_t *) ATOMIC_LOAD(prev);
     (void) list_hp_protect_ptr(list->hp, HP_CURR, (uintptr_t) curr);
-    if (atomic_load(prev) != get_unmarked(curr))
+    if (ATOMIC_LOAD(prev) != get_unmarked(curr)) {
+        TRACE(retry);
         goto try_again;
+    }
     while (true) {
-        next = (list_node_t *) atomic_load(&get_unmarked_node(curr)->next);
+        if (is_marked(curr))
+            TRACE(contains);
+        next = (list_node_t *) ATOMIC_LOAD(&get_unmarked_node(curr)->next);
         (void) list_hp_protect_ptr(list->hp, HP_NEXT, get_unmarked(next));
         /* On a CAS failure, the search function, "__list_find," will simply
          * have to go backwards in the list until an unmarked element is found
          * from which the search in increasing key order can be started.
          */
-        if (atomic_load(&get_unmarked_node(curr)->next) != (uintptr_t) next)
+        if (ATOMIC_LOAD(&get_unmarked_node(curr)->next) != (uintptr_t) next) {
+            TRACE(retry);
             goto try_again;
-        if (atomic_load(prev) != get_unmarked(curr))
+        }
+        if (ATOMIC_LOAD(prev) != get_unmarked(curr)) {
+            TRACE(retry);
             goto try_again;
+        }
         if (get_unmarked_node(next) == next) {
             if (!(get_unmarked_node(curr)->key < *key)) {
                 *par_curr = curr;
@@ -252,12 +357,15 @@ static bool __list_find(list_t *list,
                         get_unmarked(curr));
         } else {
             uintptr_t tmp = get_unmarked(curr);
-            if (!atomic_compare_exchange_strong(prev, &tmp, get_unmarked(next)))
+            if (!CAS(prev, &tmp, get_unmarked(next))) {
+                TRACE(retry);
                 goto try_again;
+            }
             list_hp_retire(list->hp, get_unmarked(curr));
         }
         curr = next;
         (void) list_hp_protect_release(list->hp, HP_CURR, get_unmarked(next));
+        TRACE(traversal);
     }
 }

@@ -274,13 +382,14 @@ bool list_insert(list_t *list, list_key_t key)
             list_hp_clear(list->hp);
             return false;
         }
-        atomic_store_explicit(&node->next, (uintptr_t) curr,
+        ATOMIC_STORE_EXPLICIT(&node->next, (uintptr_t) curr,
                               memory_order_relaxed);
         uintptr_t tmp = get_unmarked(curr);
-        if (atomic_compare_exchange_strong(prev, &tmp, (uintptr_t) node)) {
+        if (CAS(prev, &tmp, (uintptr_t) node)) {
             list_hp_clear(list->hp);
             return true;
         }
+        TRACE(ins);
     }
 }

@@ -296,12 +405,13 @@ bool list_delete(list_t *list, list_key_t key)
 
         uintptr_t tmp = get_unmarked(next);
 
-        if (!atomic_compare_exchange_strong(&curr->next, &tmp,
-                                            get_marked(next)))
+        if (!CAS(&curr->next, &tmp, get_marked(next))) {
+            TRACE(del);
             continue;
+        }
 
         tmp = get_unmarked(curr);
-        if (atomic_compare_exchange_strong(prev, &tmp, get_unmarked(next))) {
+        if (CAS(prev, &tmp, get_unmarked(next))) {
             list_hp_clear(list->hp);
             list_hp_retire(list->hp, get_unmarked(curr));
         } else {
@@ -364,6 +474,7 @@ static void *delete_thread(void *arg)
 
 int main(void)
 {
+    RUNTIME_STAT_INIT();
     list_t *list = list_new();
 
     pthread_t thr[N_THREADS];
@@ -382,8 +493,5 @@ int main(void)
 
     list_destroy(list);
 
-    fprintf(stderr, "inserts = %zu, deletes = %zu\n", atomic_load(&inserts),
-            atomic_load(&deletes));
-
     return 0;
 }
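`RUNTIME_STAT_INIT()` expands to `atexit(do_analysis)`, so the report runs once, after `main()` returns and the worker threads have been joined. A minimal sketch of this exit-time reporting pattern, with illustrative names:

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_uint_fast64_t inserts;

static void report(void) /* plays the role of do_analysis() */
{
    printf("inserts   : %lu\n", (unsigned long) atomic_load(&inserts));
}

int main(void)
{
    atexit(report); /* what RUNTIME_STAT_INIT() expands to */
    atomic_fetch_add(&inserts, 1);
    return 0; /* report() runs during normal process exit */
}
```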
