
Commit 0683100

atomics: eliminate mb_read/mb_set
qatomic_mb_read and qatomic_mb_set were the very first atomic primitives introduced for QEMU; their semantics are unclear and they provide a false sense of safety.

The last use of qatomic_mb_read() has been removed, so delete it. qatomic_mb_set() instead can survive as an optimized qatomic_set()+smp_mb(), similar to Linux's smp_store_mb(), but rename it to qatomic_set_mb() to match the order of the two operations.

Reviewed-by: Richard Henderson <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 09a49af commit 0683100
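
As background, here is a minimal sketch of what the renamed primitive means. It is illustrative only and not part of this commit; the helper name example_set_mb is made up, while qatomic_set(), smp_mb() and qatomic_set_mb() are the real qemu/atomic.h primitives.

#include "qemu/atomic.h"

/*
 * Illustrative only: qatomic_set_mb(ptr, val) is meant to behave like a
 * relaxed atomic store followed by a full memory barrier, the same shape
 * as Linux's smp_store_mb().
 */
static inline void example_set_mb(int *ptr, int val)
{
    qatomic_set(ptr, val);   /* relaxed atomic store */
    smp_mb();                /* full barrier: later loads cannot be reordered
                              * before the store above */
}

The header can then pick a cheaper per-architecture expansion (see the include/qemu/atomic.h hunk below) without changing this meaning.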

11 files changed (+20 -46 lines)


accel/tcg/cpu-exec.c

Lines changed: 1 addition & 1 deletion
@@ -774,7 +774,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
      * Ensure zeroing happens before reading cpu->exit_request or
      * cpu->interrupt_request (see also smp_wmb in cpu_exit())
      */
-    qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);
+    qatomic_set_mb(&cpu_neg(cpu)->icount_decr.u16.high, 0);
 
     if (unlikely(qatomic_read(&cpu->interrupt_request))) {
         int interrupt_request;

accel/tcg/tcg-accel-ops-mttcg.c

Lines changed: 1 addition & 1 deletion
@@ -119,7 +119,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
             }
         }
 
-        qatomic_mb_set(&cpu->exit_request, 0);
+        qatomic_set_mb(&cpu->exit_request, 0);
         qemu_wait_io_event(cpu);
     } while (!cpu->unplug || cpu_can_run(cpu));
 

accel/tcg/tcg-accel-ops-rr.c

Lines changed: 2 additions & 2 deletions
@@ -244,7 +244,7 @@ static void *rr_cpu_thread_fn(void *arg)
 
         while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
             /* Store rr_current_cpu before evaluating cpu_can_run(). */
-            qatomic_mb_set(&rr_current_cpu, cpu);
+            qatomic_set_mb(&rr_current_cpu, cpu);
 
             current_cpu = cpu;
 
@@ -287,7 +287,7 @@ static void *rr_cpu_thread_fn(void *arg)
         qatomic_set(&rr_current_cpu, NULL);
 
         if (cpu && cpu->exit_request) {
-            qatomic_mb_set(&cpu->exit_request, 0);
+            qatomic_set_mb(&cpu->exit_request, 0);
         }
 
         if (icount_enabled() && all_cpu_threads_idle()) {

docs/devel/atomics.rst

Lines changed: 4 additions & 23 deletions
@@ -102,28 +102,10 @@ Similar operations return the new value of ``*ptr``::
   typeof(*ptr) qatomic_or_fetch(ptr, val)
   typeof(*ptr) qatomic_xor_fetch(ptr, val)
 
-``qemu/atomic.h`` also provides loads and stores that cannot be reordered
-with each other::
+``qemu/atomic.h`` also provides an optimized shortcut for
+``qatomic_set`` followed by ``smp_mb``::
 
-  typeof(*ptr) qatomic_mb_read(ptr)
-  void qatomic_mb_set(ptr, val)
-
-However these do not provide sequential consistency and, in particular,
-they do not participate in the total ordering enforced by
-sequentially-consistent operations. For this reason they are deprecated.
-They should instead be replaced with any of the following (ordered from
-easiest to hardest):
-
-- accesses inside a mutex or spinlock
-
-- lightweight synchronization primitives such as ``QemuEvent``
-
-- RCU operations (``qatomic_rcu_read``, ``qatomic_rcu_set``) when publishing
-  or accessing a new version of a data structure
-
-- other atomic accesses: ``qatomic_read`` and ``qatomic_load_acquire`` for
-  loads, ``qatomic_set`` and ``qatomic_store_release`` for stores, ``smp_mb``
-  to forbid reordering subsequent loads before a store.
+  void qatomic_set_mb(ptr, val)
 
 
 Weak atomic access and manual memory barriers
@@ -523,8 +505,7 @@ and memory barriers, and the equivalents in QEMU:
 | ::                             |
 |                                |
 |   a = qatomic_read(&x);        |
-|   qatomic_set(&x, a + 2);      |
-|   smp_mb();                    |
+|   qatomic_set_mb(&x, a + 2);   |
 |   b = qatomic_read(&y);        |
 +--------------------------------+
 
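
The table row changed above is the classic store-buffering pattern: each thread stores its own flag and then reads the other thread's flag, relying on the full barrier inside qatomic_set_mb() to keep the load after the store. A minimal sketch of that pairing, with made-up flags x and y and made-up thread functions (not code from this commit):

#include "qemu/atomic.h"

static int x, y;   /* hypothetical flags, both initially 0 */

static void thread_a(void)
{
    qatomic_set_mb(&x, 1);        /* store x, then full barrier */
    if (qatomic_read(&y) == 0) {
        /* Paired full barriers forbid the store-buffering outcome, so if
         * this branch is taken, thread_b's read of x must observe 1. */
    }
}

static void thread_b(void)
{
    qatomic_set_mb(&y, 1);        /* store y, then full barrier */
    if (qatomic_read(&x) == 0) {
        /* Symmetric: thread_a's read of y must then observe 1. */
    }
}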

include/qemu/atomic.h

Lines changed: 5 additions & 12 deletions
@@ -259,24 +259,17 @@
 # define smp_mb__after_rmw() smp_mb()
 #endif
 
-/* qatomic_mb_read/set semantics map Java volatile variables. They are
- * less expensive on some platforms (notably POWER) than fully
- * sequentially consistent operations.
- *
- * As long as they are used as paired operations they are safe to
- * use. See docs/devel/atomics.rst for more discussion.
+/*
+ * On some architectures, qatomic_set_mb is more efficient than a store
+ * plus a fence.
  */
 
-#define qatomic_mb_read(ptr) \
-    qatomic_load_acquire(ptr)
-
 #if !defined(QEMU_SANITIZE_THREAD) && \
     (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
-/* This is more efficient than a store plus a fence. */
-# define qatomic_mb_set(ptr, i) \
+# define qatomic_set_mb(ptr, i) \
     ({ (void)qatomic_xchg(ptr, i); smp_mb__after_rmw(); })
 #else
-# define qatomic_mb_set(ptr, i) \
+# define qatomic_set_mb(ptr, i) \
     ({ qatomic_store_release(ptr, i); smp_mb(); })
 #endif
 

monitor/qmp.c

Lines changed: 1 addition & 1 deletion
@@ -246,7 +246,7 @@ static QMPRequest *monitor_qmp_dispatcher_pop_any(void)
      *
      * Clear qmp_dispatcher_co_busy before reading request.
      */
-    qatomic_mb_set(&qmp_dispatcher_co_busy, false);
+    qatomic_set_mb(&qmp_dispatcher_co_busy, false);
 
     WITH_QEMU_LOCK_GUARD(&monitor_lock) {
         QMPRequest *req_obj;

softmmu/cpus.c

Lines changed: 1 addition & 1 deletion
@@ -405,7 +405,7 @@ static void qemu_cpu_stop(CPUState *cpu, bool exit)
 
 void qemu_wait_io_event_common(CPUState *cpu)
 {
-    qatomic_mb_set(&cpu->thread_kicked, false);
+    qatomic_set_mb(&cpu->thread_kicked, false);
     if (cpu->stop) {
         qemu_cpu_stop(cpu, false);
     }

softmmu/physmem.c

Lines changed: 1 addition & 1 deletion
@@ -3132,7 +3132,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
     bounce.buffer = NULL;
     memory_region_unref(bounce.mr);
     /* Clear in_use before reading map_client_list. */
-    qatomic_mb_set(&bounce.in_use, false);
+    qatomic_set_mb(&bounce.in_use, false);
     cpu_notify_map_clients();
 }
 

target/arm/hvf/hvf.c

Lines changed: 1 addition & 1 deletion
@@ -1229,7 +1229,7 @@ static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
      * Use pselect to sleep so that other threads can IPI us while we're
      * sleeping.
      */
-    qatomic_mb_set(&cpu->thread_kicked, false);
+    qatomic_set_mb(&cpu->thread_kicked, false);
     qemu_mutex_unlock_iothread();
     pselect(0, 0, 0, 0, ts, &cpu->hvf->unblock_ipi_mask);
     qemu_mutex_lock_iothread();

tests/unit/test-aio-multithread.c

Lines changed: 1 addition & 1 deletion
@@ -154,7 +154,7 @@ static coroutine_fn void test_multi_co_schedule_entry(void *opaque)
         n = g_test_rand_int_range(0, NUM_CONTEXTS);
         schedule_next(n);
 
-        qatomic_mb_set(&to_schedule[id], qemu_coroutine_self());
+        qatomic_set_mb(&to_schedule[id], qemu_coroutine_self());
         /* finish_cb can run here. */
         qemu_coroutine_yield();
         g_assert(to_schedule[id] == NULL);
