@@ -1,8 +1,10 @@
 #include <assert.h>
 #include <errno.h>
+#include <stdatomic.h>
 #include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
 
-#include "atomics.h"
 #include "lfq.h"
 
 #define MAX_FREE 150
@@ -20,14 +22,14 @@ static bool in_hp(struct lfq_ctx *ctx, struct lfq_node *node)
 static void insert_pool(struct lfq_ctx *ctx, struct lfq_node *node)
 {
     atomic_store(&node->free_next, NULL);
-    struct lfq_node *old_tail = XCHG(&ctx->fpt, node); /* seq_cst */
+    struct lfq_node *old_tail = atomic_exchange(&ctx->fpt, node); /* seq_cst */
     atomic_store(&old_tail->free_next, node);
 }
 
 static void free_pool(struct lfq_ctx *ctx, bool freeall)
 {
     bool old = 0;
-    if (!CAS(&ctx->is_freeing, &old, 1))
+    if (!atomic_compare_exchange_strong(&ctx->is_freeing, &old, 1))
         return;
 
     for (int i = 0; i < MAX_FREE || freeall; i++) {
@@ -39,20 +41,20 @@ static void free_pool(struct lfq_ctx *ctx, bool freeall)
         free(p);
     }
     atomic_store(&ctx->is_freeing, false);
-    smp_mb();
+    atomic_thread_fence(memory_order_seq_cst);
 }
 
 static void safe_free(struct lfq_ctx *ctx, struct lfq_node *node)
 {
     if (atomic_load(&node->can_free) && !in_hp(ctx, node)) {
         /* free is not thread-safe */
         bool old = 0;
-        if (CAS(&ctx->is_freeing, &old, 1)) {
+        if (atomic_compare_exchange_strong(&ctx->is_freeing, &old, 1)) {
             /* poison the pointer to detect use-after-free */
             node->next = (void *) -1;
             free(node); /* we got the lock; actually free */
             atomic_store(&ctx->is_freeing, false);
-            smp_mb();
+            atomic_thread_fence(memory_order_seq_cst);
         } else /* we did not get the lock; only add to a freelist */
             insert_pool(ctx, node);
     } else
@@ -65,7 +67,7 @@ static int alloc_tid(struct lfq_ctx *ctx)
     for (int i = 0; i < ctx->MAX_HP_SIZE; i++) {
         if (ctx->tid_map[i] == 0) {
             int old = 0;
-            if (CAS(&ctx->tid_map[i], &old, 1))
+            if (atomic_compare_exchange_strong(&ctx->tid_map[i], &old, 1))
                 return i;
         }
     }
@@ -141,7 +143,7 @@ int lfq_enqueue(struct lfq_ctx *ctx, void *data)
         return -errno;
 
     insert_node->data = data;
-    struct lfq_node *old_tail = XCHG(&ctx->tail, insert_node);
+    struct lfq_node *old_tail = atomic_exchange(&ctx->tail, insert_node);
     /* We have claimed our spot in the insertion order by modifying tail.
      * we are the only inserting thread with a pointer to the old tail.
      *
@@ -162,13 +164,13 @@ void *lfq_dequeue_tid(struct lfq_ctx *ctx, int tid)
     /* HP[tid] is necessary for deallocation. */
     do {
     retry:
-        /* continue jumps to the bottom of the loop, and would attempt a CAS
-         * with uninitialized new_head.
+        /* continue jumps to the bottom of the loop, and would attempt an
+         * atomic_compare_exchange_strong with uninitialized new_head.
          */
         old_head = atomic_load(&ctx->head);
 
         atomic_store(&ctx->HP[tid], old_head);
-        mb();
+        atomic_thread_fence(memory_order_seq_cst);
 
         /* another thread freed it before seeing our HP[tid] store */
        if (old_head != atomic_load(&ctx->head))
@@ -179,7 +181,7 @@ void *lfq_dequeue_tid(struct lfq_ctx *ctx, int tid)
             atomic_store(&ctx->HP[tid], 0);
             return NULL; /* never remove the last node */
         }
-    } while (!CAS(&ctx->head, &old_head, new_head));
+    } while (!atomic_compare_exchange_strong(&ctx->head, &old_head, new_head));
 
     /* We have atomically advanced head, and we are the thread that won the race
      * to claim a node. We return the data from the *new* head. The list starts
@@ -191,7 +193,7 @@ void *lfq_dequeue_tid(struct lfq_ctx *ctx, int tid)
     atomic_store(&new_head->can_free, true);
 
     /* we need to avoid freeing until other readers are definitely not going to
-     * load its ->next in the CAS loop
+     * load its ->next in the atomic_compare_exchange_strong loop
      */
     safe_free(ctx, (struct lfq_node *) old_head);
 
@@ -208,4 +210,4 @@ void *lfq_dequeue(struct lfq_ctx *ctx)
     void *ret = lfq_dequeue_tid(ctx, tid);
     free_tid(ctx, tid);
     return ret;
-}
+}
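
The substitution throughout this change is mechanical: every helper from the removed "atomics.h" is replaced by the sequentially consistent C11 call it presumably wrapped. A minimal sketch of that assumed mapping (the removed header is not shown in this diff, so the exact definitions are an assumption):

#include <stdatomic.h>

/* Assumed shape of the removed atomics.h helpers (not part of this diff):
 * thin wrappers around seq_cst C11 operations, which is why each call site
 * can switch to <stdatomic.h> one-for-one.
 */
#define XCHG(ptr, val) atomic_exchange((ptr), (val))
#define CAS(ptr, expected, desired) \
    atomic_compare_exchange_strong((ptr), (expected), (desired))
#define smp_mb() atomic_thread_fence(memory_order_seq_cst)
#define mb() atomic_thread_fence(memory_order_seq_cst)

With the wrappers gone, the file depends only on the standard header added at the top of the diff.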
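The fence that replaces mb() in lfq_dequeue_tid is the heart of the hazard-pointer handshake: publish HP[tid], fence, then re-read head, so a reclaimer that scans the HP array after swinging head cannot miss the announcement. A standalone sketch of that pattern, using simplified, hypothetical names (protect, can_reclaim, a fixed-size hp array) rather than the lfq.h API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

#define READERS 8
static _Atomic(void *) hp[READERS]; /* one hazard-pointer slot per reader */

/* Reader: publish the candidate pointer, fence, then confirm it is still
 * reachable from *src before dereferencing it; retry if it was unlinked.
 */
static void *protect(_Atomic(void *) *src, int tid)
{
    void *p;
    do {
        p = atomic_load(src);
        atomic_store(&hp[tid], p);
        atomic_thread_fence(memory_order_seq_cst);
    } while (p != atomic_load(src));
    return p;
}

/* Reclaimer: a retired node may be freed only when no slot still holds it. */
static bool can_reclaim(const void *p)
{
    for (int i = 0; i < READERS; i++)
        if (atomic_load(&hp[i]) == p)
            return false;
    return true;
}

lfq_dequeue_tid folds this protect step into its retry loop: the re-check of ctx->head doubles as both the hazard validation and the dequeue retry condition.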