diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
index 89593b2502..7638d67263 100644
--- a/accel/tcg/atomic_template.h
+++ b/accel/tcg/atomic_template.h
@@ -81,7 +81,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
                               ABI_TYPE cmpv, ABI_TYPE newv,
                               MemOpIdx oi, uintptr_t retaddr)
 {
-    DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+    _Atomic(DATA_TYPE) *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
                                          DATA_SIZE, retaddr);
     DATA_TYPE ret;
@@ -104,7 +104,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
 ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
                            MemOpIdx oi, uintptr_t retaddr)
 {
-    DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+    _Atomic(DATA_TYPE) *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
                                          DATA_SIZE, retaddr);
     DATA_TYPE ret;
@@ -123,7 +123,8 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr,               \
                         ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr)  \
 {                                                                      \
-    DATA_TYPE *haddr, ret;                                             \
+    DATA_TYPE ret;                                                     \
+    _Atomic(DATA_TYPE) *haddr;                                         \
     haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
     ret = qatomic_##X(haddr, val);                                     \
     ATOMIC_MMU_CLEANUP;                                                \
@@ -159,7 +160,8 @@ GEN_ATOMIC_HELPER(xor_fetch)
 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr,               \
                         ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
 {                                                                      \
-    XDATA_TYPE *haddr, cmp, old, new, val = xval;                      \
+    _Atomic(XDATA_TYPE) *haddr;                                        \
+    XDATA_TYPE cmp, old, new, val = xval;                              \
     haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
     smp_mb();                                                          \
     cmp = qatomic_read__nocheck(haddr);                                \
@@ -206,7 +208,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
                               ABI_TYPE cmpv, ABI_TYPE newv,
                               MemOpIdx oi, uintptr_t retaddr)
 {
-    DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+    _Atomic(DATA_TYPE) *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
                                          DATA_SIZE, retaddr);
     DATA_TYPE ret;
@@ -229,7 +231,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
 ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
                            MemOpIdx oi, uintptr_t retaddr)
 {
-    DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+    _Atomic(DATA_TYPE) *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
                                          DATA_SIZE, retaddr);
     ABI_TYPE ret;
@@ -248,7 +250,8 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr,               \
                         ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr)  \
 {                                                                      \
-    DATA_TYPE *haddr, ret;                                             \
+    _Atomic(DATA_TYPE) *haddr;                                         \
+    DATA_TYPE ret;                                                     \
     haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
     ret = qatomic_##X(haddr, BSWAP(val));                              \
     ATOMIC_MMU_CLEANUP;                                                \
@@ -281,7 +284,8 @@ GEN_ATOMIC_HELPER(xor_fetch)
 ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr,               \
                         ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
 {                                                                      \
-    XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval;                 \
+    _Atomic(XDATA_TYPE) *haddr;                                        \
+    XDATA_TYPE ldo, ldn, old, new, val = xval;                         \
     haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
     smp_mb();                                                          \
     ldn = qatomic_read__nocheck(haddr);                                \
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index b76a4eac4e..eba6cc0dc6 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1794,7 +1794,7 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
  * Probe for an atomic operation.  Do not allow unaligned operations,
  * or io operations to proceed.  Return the host address.
  */
-static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
+static _Atomic(DATA_TYPE) *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
                                int size, uintptr_t retaddr)
 {
     uintptr_t mmu_idx = get_mmuidx(oi);
diff --git a/accel/tcg/internal-common.h b/accel/tcg/internal-common.h
index a8fc3db774..07a0f51795 100644
--- a/accel/tcg/internal-common.h
+++ b/accel/tcg/internal-common.h
@@ -15,7 +15,7 @@
 extern int64_t max_delay;
 extern int64_t max_advance;
 
-extern bool one_insn_per_tb;
+extern _Atomic(bool) one_insn_per_tb;
 
 /*
  * Return true if CS is not running in parallel with other cpus, either
diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
index c735add261..bf96168e9e 100644
--- a/accel/tcg/ldst_atomicity.c.inc
+++ b/accel/tcg/ldst_atomicity.c.inc
@@ -108,7 +108,7 @@ static int required_atomicity(CPUState *cpu, uintptr_t p, MemOp memop)
 static inline uint16_t load_atomic2(void *pv)
 {
     uint16_t *p = __builtin_assume_aligned(pv, 2);
-    return qatomic_read(p);
+    return qatomic_read(_MK_ATOMIC(p));
 }
 
 /**
@@ -120,7 +120,7 @@ static inline uint16_t load_atomic2(void *pv)
 static inline uint32_t load_atomic4(void *pv)
 {
     uint32_t *p = __builtin_assume_aligned(pv, 4);
-    return qatomic_read(p);
+    return qatomic_read(_MK_ATOMIC(p));
 }
 
 /**
@@ -134,7 +134,7 @@ static inline uint64_t load_atomic8(void *pv)
     uint64_t *p = __builtin_assume_aligned(pv, 8);
 
     qemu_build_assert(HAVE_al8);
-    return qatomic_read__nocheck(p);
+    return qatomic_read__nocheck(_MK_ATOMIC(p));
 }
 
 /**
@@ -598,7 +598,7 @@ static Int128 load_atom_16(CPUState *cpu, uintptr_t ra,
 static inline void store_atomic2(void *pv, uint16_t val)
 {
     uint16_t *p = __builtin_assume_aligned(pv, 2);
-    qatomic_set(p, val);
+    qatomic_set(_MK_ATOMIC(p), val);
 }
 
 /**
@@ -611,7 +611,7 @@ static inline void store_atomic2(void *pv, uint16_t val)
 static inline void store_atomic4(void *pv, uint32_t val)
 {
     uint32_t *p = __builtin_assume_aligned(pv, 4);
-    qatomic_set(p, val);
+    qatomic_set(_MK_ATOMIC(p), val);
 }
 
 /**
@@ -626,7 +626,7 @@ static inline void store_atomic8(void *pv, uint64_t val)
     uint64_t *p = __builtin_assume_aligned(pv, 8);
 
     qemu_build_assert(HAVE_al8);
-    qatomic_set__nocheck(p, val);
+    qatomic_set__nocheck(_MK_ATOMIC(p), val);
 }
 
 /**
@@ -669,11 +669,11 @@ static void store_atom_insert_al4(uint32_t *p, uint32_t val, uint32_t msk)
     uint32_t old, new;
 
     p = __builtin_assume_aligned(p, 4);
-    old = qatomic_read(p);
+    old = qatomic_read(_MK_ATOMIC(p));
     do {
         new = (old & ~msk) | val;
-    } while (!__atomic_compare_exchange_n(p, &old, new, true,
-                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
+    } while (!atomic_compare_exchange_weak_explicit(_MK_ATOMIC(p), &old, new,
+                                                    memory_order_relaxed, memory_order_relaxed));
 }
 
 /**
@@ -690,11 +690,11 @@ static void store_atom_insert_al8(uint64_t *p, uint64_t val, uint64_t msk)
     qemu_build_assert(HAVE_al8);
     p = __builtin_assume_aligned(p, 8);
-    old = qatomic_read__nocheck(p);
+    old = qatomic_read__nocheck(_MK_ATOMIC(p));
     do {
         new = (old & ~msk) | val;
-    } while (!__atomic_compare_exchange_n(p, &old, new, true,
-                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
+    } while (!atomic_compare_exchange_weak_explicit(_MK_ATOMIC(p), &old, new,
+                                                    memory_order_relaxed, memory_order_relaxed));
 }
 
 /**
diff --git a/accel/tcg/tb-context.h b/accel/tcg/tb-context.h
index cac62d9749..d19e486d49 100644
--- a/accel/tcg/tb-context.h
+++ b/accel/tcg/tb-context.h
@@ -33,8 +33,8 @@ struct TBContext {
     struct qht htable;
 
     /* statistics */
-    unsigned tb_flush_count;
-    unsigned tb_phys_invalidate_count;
+    _Atomic(unsigned) tb_flush_count;
+    _Atomic(unsigned) tb_phys_invalidate_count;
 };
 
 extern TBContext tb_ctx;
diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h
index c3a505e394..432684ed81 100644
--- a/accel/tcg/tb-jmp-cache.h
+++ b/accel/tcg/tb-jmp-cache.h
@@ -25,7 +25,7 @@
 typedef struct CPUJumpCache {
     struct rcu_head rcu;
     struct {
-        TranslationBlock *tb;
+        _Atomic(TranslationBlock *) tb;
         vaddr pc;
     } array[TB_JMP_CACHE_SIZE];
 } CPUJumpCache;
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
index 2090907dba..07f578fd6b 100644
--- a/accel/tcg/tcg-all.c
+++ b/accel/tcg/tcg-all.c
@@ -98,7 +98,7 @@ static void tcg_accel_instance_init(Object *obj)
 }
 
 bool mttcg_enabled;
-bool one_insn_per_tb;
+_Atomic(bool) one_insn_per_tb;
 
 static int tcg_init_machine(MachineState *ms)
 {
diff --git a/bsd-user/qemu.h b/bsd-user/qemu.h
index 808ba4abeb..3063cdb8da 100644
--- a/bsd-user/qemu.h
+++ b/bsd-user/qemu.h
@@ -125,7 +125,7 @@ struct TaskState {
      * the qatomic_read() and qatomic_set() functions. (It is not accessed
      * from multiple threads.)
      */
-    int signal_pending;
+    _Atomic(int) signal_pending;
     /* True if we're leaving a sigsuspend and sigsuspend_mask is valid. */
     bool in_sigsuspend;
     /*
diff --git a/cpu-common.c b/cpu-common.c
index 6b262233a3..a32a0e5629 100644
--- a/cpu-common.c
+++ b/cpu-common.c
@@ -33,7 +33,7 @@ static QemuCond qemu_work_cond;
 /* >= 1 if a thread is inside start_exclusive/end_exclusive.  Written
  * under qemu_cpu_list_lock, read with atomic operations.
  */
-static int pending_cpus;
+static _Atomic(int) pending_cpus;
 
 void qemu_init_cpu_list(void)
 {
@@ -128,7 +128,8 @@ struct qemu_work_item {
     QSIMPLEQ_ENTRY(qemu_work_item) node;
     run_on_cpu_func func;
     run_on_cpu_data data;
-    bool free, exclusive, done;
+    bool free, exclusive;
+    _Atomic(bool) done;
 };
 
 static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
diff --git a/host/include/generic/host/atomic128-cas.h b/host/include/generic/host/atomic128-cas.h
index 6b40cc2271..fca222c026 100644
--- a/host/include/generic/host/atomic128-cas.h
+++ b/host/include/generic/host/atomic128-cas.h
@@ -13,9 +13,9 @@
 #if defined(CONFIG_ATOMIC128)
 static inline Int128 ATTRIBUTE_ATOMIC128_OPT
-atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
+atomic16_cmpxchg(_Atomic(Int128) *ptr, Int128 cmp, Int128 new)
 {
-    __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
+    _Atomic(__int128_t) *ptr_align = __builtin_assume_aligned(ptr, 16);
     Int128Alias r, c, n;
 
     c.s = cmp;
diff --git a/host/include/generic/host/load-extract-al16-al8.h.inc b/host/include/generic/host/load-extract-al16-al8.h.inc
index d95556130f..473e97e70d 100644
--- a/host/include/generic/host/load-extract-al16-al8.h.inc
+++ b/host/include/generic/host/load-extract-al16-al8.h.inc
@@ -28,8 +28,8 @@ load_atom_extract_al16_or_al8(void *pv, int s)
     pv = (void *)(pi & ~7);
     if (pi & 8) {
         uint64_t *p8 = __builtin_assume_aligned(pv, 16, 8);
-        uint64_t a = qatomic_read__nocheck(p8);
-        uint64_t b = qatomic_read__nocheck(p8 + 1);
+        uint64_t a = qatomic_read__nocheck(_MK_ATOMIC(p8));
+        uint64_t b = qatomic_read__nocheck(_MK_ATOMIC(p8 + 1));
 
         if (HOST_BIG_ENDIAN) {
             r = int128_make128(b, a);
diff --git a/include/block/aio.h b/include/block/aio.h
index 43883a8a33..b86c1a30e8 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -49,7 +49,7 @@ void qemu_aio_unref(void *p);
 void qemu_aio_ref(void *p);
 
 typedef struct AioHandler AioHandler;
-typedef QLIST_HEAD(, AioHandler) AioHandlerList;
+typedef QLIST_HEAD_ATOMIC(, AioHandler) AioHandlerList;
 
 typedef void QEMUBHFunc(void *opaque);
 typedef bool AioPollFn(void *opaque);
 typedef void IOHandler(void *opaque);
@@ -170,7 +170,7 @@ struct AioContext {
      * Instead, the aio_poll calls include both the prepare and the
      * dispatch phase, hence a simple counter is enough for them.
      */
-    uint32_t notify_me;
+    _Atomic(uint32_t) notify_me;
 
     /* A lock to protect between QEMUBH and AioHandler adders and deleter,
      * and to ensure that no callbacks are removed while we're walking and
@@ -196,7 +196,7 @@
      * more information on the problem that would result, see "#ifdef BUG2"
      * in the docs/aio_notify_accept.promela formal model.
      */
-    bool notified;
+    _Atomic(bool) notified;
     EventNotifier notifier;
 
     QSLIST_HEAD(, Coroutine) scheduled_coroutines;
@@ -226,7 +226,7 @@
     QEMUTimerListGroup tlg;
 
     /* Number of AioHandlers without .io_poll() */
-    int poll_disable_cnt;
+    _Atomic(int) poll_disable_cnt;
 
     /* Polling mode parameters */
     int64_t poll_ns;        /* current polling time in nanoseconds */
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 9458e2801d..31ab51a90e 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -1096,7 +1096,7 @@ struct AddressSpace {
     MemoryRegion *root;
 
     /* Accessed via RCU.  */
-    struct FlatView *current_map;
+    _Atomic(struct FlatView *) current_map;
 
     int ioeventfd_nb;
     int ioeventfd_notifiers;
diff --git a/include/exec/translation-block.h b/include/exec/translation-block.h
index a6d1af6e9b..a81c2d15cd 100644
--- a/include/exec/translation-block.h
+++ b/include/exec/translation-block.h
@@ -64,7 +64,7 @@ struct TranslationBlock {
     uint64_t cs_base;
 
     uint32_t flags; /* flags defining in which context the code was generated */
-    uint32_t cflags;    /* compile flags */
+    _Atomic(uint32_t) cflags;    /* compile flags */
 
 /* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
 #define CF_COUNT_MASK    0x000001ff
@@ -140,7 +140,7 @@
      */
     uintptr_t jmp_list_head;
     uintptr_t jmp_list_next[2];
-    uintptr_t jmp_dest[2];
+    _Atomic(uintptr_t) jmp_dest[2];
 };
 
 /* The alignment given to TranslationBlock during allocation. */
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index db8a6fbc6e..2fdf128ac7 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -336,14 +336,14 @@ typedef struct CPUTLB {
  * for both decrementer underflow and exceptions.
  */
 typedef union IcountDecr {
-    uint32_t u32;
+    _Atomic(uint32_t) u32;
     struct {
 #if HOST_BIG_ENDIAN
-        uint16_t high;
+        _Atomic(uint16_t) high;
         uint16_t low;
 #else
         uint16_t low;
-        uint16_t high;
+        _Atomic(uint16_t) high;
 #endif
     } u16;
 } IcountDecr;
@@ -476,7 +476,8 @@ struct CPUState {
     QemuSemaphore sem;
 #endif
     int thread_id;
-    bool running, has_waiter;
+    _Atomic(bool) running;
+    bool has_waiter;
     struct QemuCond *halt_cond;
     bool thread_kicked;
     bool created;
@@ -488,11 +489,11 @@
     bool unplug;
     bool crash_occurred;
 
-    bool exit_request;
+    _Atomic(bool) exit_request;
     int exclusive_context_count;
     uint32_t cflags_next_tb;
     /* updates protected by BQL */
-    uint32_t interrupt_request;
+    _Atomic(uint32_t) interrupt_request;
     int singlestep_enabled;
     int64_t icount_budget;
     int64_t icount_extra;
@@ -513,7 +514,7 @@
     GArray *gdb_regs;
     int gdb_num_regs;
     int gdb_num_g_regs;
-    QTAILQ_ENTRY(CPUState) node;
+    QTAILQ_ENTRY_ATOMIC(CPUState) node;
 
     /* ice debug support */
     QTAILQ_HEAD(, CPUBreakpoint) breakpoints;
@@ -590,7 +591,7 @@ static inline CPUArchState *cpu_env(CPUState *cpu)
     return (CPUArchState *)(cpu + 1);
 }
 
-typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
+typedef QTAILQ_HEAD_ATOMIC(CPUTailQ, CPUState) CPUTailQ;
 extern CPUTailQ cpus_queue;
 
 #define first_cpu QTAILQ_FIRST_RCU(&cpus_queue)
diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h
index 94914858d8..9f59ce234a 100644
--- a/include/hw/qdev-core.h
+++ b/include/hw/qdev-core.h
@@ -209,7 +209,7 @@ typedef struct {
 typedef QLIST_HEAD(, NamedGPIOList) NamedGPIOListHead;
 typedef QLIST_HEAD(, NamedClockList) NamedClockListHead;
-typedef QLIST_HEAD(, BusState) BusStateHead;
+typedef QLIST_HEAD_ATOMIC(, BusState) BusStateHead;
 
 /**
  * struct DeviceState - common device state, accessed with qdev helpers
@@ -233,7 +233,7 @@ struct DeviceState {
     /**
     * @realized: has device been realized?
      */
-    bool realized;
+    _Atomic(bool) realized;
     /**
      * @pending_deleted_event: track pending deletion events during unplug
     */
@@ -356,13 +356,13 @@ typedef struct BusChild {
     struct rcu_head rcu;
     DeviceState *child;
     int index;
-    QTAILQ_ENTRY(BusChild) sibling;
+    QTAILQ_ENTRY_ATOMIC(BusChild) sibling;
 } BusChild;
 
 #define QDEV_HOTPLUG_HANDLER_PROPERTY "hotplug-handler"
 
-typedef QTAILQ_HEAD(, BusChild) BusChildHead;
-typedef QLIST_ENTRY(BusState) BusStateEntry;
+typedef QTAILQ_HEAD_ATOMIC(, BusChild) BusChildHead;
+typedef QLIST_ENTRY_ATOMIC(BusState) BusStateEntry;
 
 /**
  * struct BusState:
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index 7a3f2e6576..d24eeaedf3 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -15,6 +15,7 @@
 #ifndef QEMU_ATOMIC_H
 #define QEMU_ATOMIC_H
 
+#include <stdatomic.h>
 #include "compiler.h"
 
 /* Compiler barrier */
@@ -87,7 +88,7 @@
  * will get flagged by sanitizers as a violation.
  */
 #define qatomic_read__nocheck(ptr) \
-    __atomic_load_n(ptr, __ATOMIC_RELAXED)
+    atomic_load_explicit(ptr, memory_order_relaxed)
 
 #define qatomic_read(ptr)                              \
     ({                                                 \
@@ -96,7 +97,7 @@
     })
 
 #define qatomic_set__nocheck(ptr, i) \
-    __atomic_store_n(ptr, i, __ATOMIC_RELAXED)
+    atomic_store_explicit(ptr, i, memory_order_relaxed)
 
 #define qatomic_set(ptr, i) do {                       \
     qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
@@ -111,7 +112,7 @@
     __atomic_load(ptr, valptr, __ATOMIC_CONSUME);
 #else
 #define qatomic_rcu_read__nocheck(ptr, valptr)         \
-    __atomic_load(ptr, valptr, __ATOMIC_RELAXED);      \
+    *(valptr) = atomic_load_explicit(ptr, memory_order_relaxed); \
     smp_read_barrier_depends();
 #endif
@@ -132,27 +133,25 @@
 
 #define qatomic_rcu_set(ptr, i) do {                   \
     qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
-    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);        \
+    atomic_store_explicit(ptr, i, memory_order_release); \
 } while(0)
 
 #define qatomic_load_acquire(ptr)                      \
     ({                                                 \
     qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
-    typeof_strip_qual(*ptr) _val;                      \
-    __atomic_load(ptr, &_val, __ATOMIC_ACQUIRE);       \
-    _val;                                              \
+    atomic_load_explicit(ptr, memory_order_acquire);   \
     })
 
 #define qatomic_store_release(ptr, i) do {             \
     qemu_build_assert(sizeof(*ptr) <= ATOMIC_REG_SIZE); \
-    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);        \
+    atomic_store_explicit(ptr, i, memory_order_release); \
 } while(0)
 
 /* All the remaining operations are fully sequentially consistent */
 
 #define qatomic_xchg__nocheck(ptr, i)    ({            \
-    __atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST);   \
+    atomic_exchange_explicit(ptr, (i), memory_order_seq_cst); \
 })
 
 #define qatomic_xchg(ptr, i)    ({                     \
@@ -162,9 +161,9 @@
 
 /* Returns the old value of '*ptr' (whether the cmpxchg failed or not) */
 #define qatomic_cmpxchg__nocheck(ptr, old, new)    ({  \
-    typeof_strip_qual(*ptr) _old = (old);              \
-    (void)__atomic_compare_exchange_n(ptr, &_old, new, false, \
-                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
+    typeof(atomic_load_explicit(ptr, memory_order_relaxed)) _old = (old); \
+    (void)atomic_compare_exchange_strong_explicit(ptr, &_old, new, \
+                                                  memory_order_seq_cst, memory_order_seq_cst); \
     _old;                                              \
 })
@@ -174,38 +173,38 @@
 })
 
 /* Provide shorter names for GCC atomic builtins, return old value */
-#define qatomic_fetch_inc(ptr)  __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
-#define qatomic_fetch_dec(ptr)  __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
-
-#define qatomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
-#define qatomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
-#define qatomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
-#define qatomic_fetch_or(ptr, n)  __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
-#define qatomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)
-
-#define qatomic_inc_fetch(ptr)  __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
-#define qatomic_dec_fetch(ptr)  __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
-#define qatomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define qatomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define qatomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define qatomic_or_fetch(ptr, n)  __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
-#define qatomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)
+#define qatomic_fetch_inc(ptr)  atomic_fetch_add_explicit(ptr, 1, memory_order_seq_cst)
+#define qatomic_fetch_dec(ptr)  atomic_fetch_sub_explicit(ptr, 1, memory_order_seq_cst)
+
+#define qatomic_fetch_add(ptr, n) atomic_fetch_add_explicit(ptr, n, memory_order_seq_cst)
+#define qatomic_fetch_sub(ptr, n) atomic_fetch_sub_explicit(ptr, n, memory_order_seq_cst)
+#define qatomic_fetch_and(ptr, n) atomic_fetch_and_explicit(ptr, n, memory_order_seq_cst)
+#define qatomic_fetch_or(ptr, n)  atomic_fetch_or_explicit(ptr, n, memory_order_seq_cst)
+#define qatomic_fetch_xor(ptr, n) atomic_fetch_xor_explicit(ptr, n, memory_order_seq_cst)
+
+#define qatomic_inc_fetch(ptr)    (atomic_fetch_add_explicit(ptr, 1, memory_order_seq_cst) + 1)
+#define qatomic_dec_fetch(ptr)    (atomic_fetch_sub_explicit(ptr, 1, memory_order_seq_cst) - 1)
+#define qatomic_add_fetch(ptr, n) (atomic_fetch_add_explicit(ptr, n, memory_order_seq_cst) + (n))
+#define qatomic_sub_fetch(ptr, n) (atomic_fetch_sub_explicit(ptr, n, memory_order_seq_cst) - (n))
+#define qatomic_and_fetch(ptr, n) (atomic_fetch_and_explicit(ptr, n, memory_order_seq_cst) & (n))
+#define qatomic_or_fetch(ptr, n)  (atomic_fetch_or_explicit(ptr, n, memory_order_seq_cst) | (n))
+#define qatomic_xor_fetch(ptr, n) (atomic_fetch_xor_explicit(ptr, n, memory_order_seq_cst) ^ (n))
 
 /* And even shorter names that return void.  */
 #define qatomic_inc(ptr) \
-    ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
+    ((void) atomic_fetch_add_explicit(ptr, 1, memory_order_seq_cst))
 #define qatomic_dec(ptr) \
-    ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
+    ((void) atomic_fetch_sub_explicit(ptr, 1, memory_order_seq_cst))
 #define qatomic_add(ptr, n) \
-    ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
+    ((void) atomic_fetch_add_explicit(ptr, n, memory_order_seq_cst))
 #define qatomic_sub(ptr, n) \
-    ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
+    ((void) atomic_fetch_sub_explicit(ptr, n, memory_order_seq_cst))
 #define qatomic_and(ptr, n) \
-    ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
+    ((void) atomic_fetch_and_explicit(ptr, n, memory_order_seq_cst))
 #define qatomic_or(ptr, n) \
-    ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
+    ((void) atomic_fetch_or_explicit(ptr, n, memory_order_seq_cst))
 #define qatomic_xor(ptr, n) \
-    ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
+    ((void) atomic_fetch_xor_explicit(ptr, n, memory_order_seq_cst))
 
 #define smp_wmb()   smp_mb_release()
 #define smp_rmb()   smp_mb_acquire()
@@ -259,6 +258,8 @@
 typedef int64_t  aligned_int64_t __attribute__((aligned(8)));
 typedef uint64_t aligned_uint64_t __attribute__((aligned(8)));
 
+#define _MK_ATOMIC(p) ((_Atomic(typeof(*(p))) *)(p))
+
 #ifdef CONFIG_ATOMIC64
 /* Use __nocheck because sizeof(void *) might be < sizeof(u64) */
 #define qatomic_read_i64(P) \
diff --git a/include/qemu/bitmap.h b/include/qemu/bitmap.h
index 1cf288445f..71a4051a70 100644
--- a/include/qemu/bitmap.h
+++ b/include/qemu/bitmap.h
@@ -251,7 +251,7 @@ void bitmap_set_atomic(unsigned long *map, long i, long len);
 void bitmap_clear(unsigned long *map, long start, long nr);
 bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr);
 bool bitmap_test_and_clear(unsigned long *map, long start, long nr);
-void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src,
+void bitmap_copy_and_clear_atomic(unsigned long *dst, _Atomic(unsigned long) *src,
                                   long nr);
 unsigned long bitmap_find_next_zero_area(unsigned long *map,
                                          unsigned long size,
diff --git a/include/qemu/bitops.h b/include/qemu/bitops.h
index 2c0a2fe751..7b0dfc52be 100644
--- a/include/qemu/bitops.h
+++ b/include/qemu/bitops.h
@@ -51,7 +51,7 @@ static inline void set_bit_atomic(long nr, unsigned long *addr)
     unsigned long mask = BIT_MASK(nr);
     unsigned long *p = addr + BIT_WORD(nr);
 
-    qatomic_or(p, mask);
+    qatomic_or(_MK_ATOMIC(p), mask);
 }
 
 /**
@@ -77,7 +77,7 @@ static inline void clear_bit_atomic(long nr, unsigned long *addr)
     unsigned long mask = BIT_MASK(nr);
     unsigned long *p = addr + BIT_WORD(nr);
 
-    return qatomic_and(p, ~mask);
+    return qatomic_and(_MK_ATOMIC(p), ~mask);
 }
 
 /**
diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h
index ff3084538b..5624b64a5c 100644
--- a/include/qemu/coroutine.h
+++ b/include/qemu/coroutine.h
@@ -49,7 +49,7 @@ struct CoMutex {
     /* Count of pending lockers; 0 for a free mutex, 1 for an
      * uncontended mutex.
      */
-    unsigned locked;
+    _Atomic(unsigned) locked;
 
     /* Context that is holding the lock.  Useful to avoid spinning
      * when two coroutines on the same AioContext try to get the lock. :)
diff --git a/include/qemu/interval-tree.h b/include/qemu/interval-tree.h
index 25006debe8..d0529aa397 100644
--- a/include/qemu/interval-tree.h
+++ b/include/qemu/interval-tree.h
@@ -16,14 +16,14 @@
 typedef struct RBNode {
     /* Encodes parent with color in the lsb. */
-    uintptr_t rb_parent_color;
-    struct RBNode *rb_right;
-    struct RBNode *rb_left;
+    _Atomic(uintptr_t) rb_parent_color;
+    _Atomic(struct RBNode *) rb_right;
+    _Atomic(struct RBNode *) rb_left;
 } RBNode;
 
 typedef struct RBRoot {
-    RBNode *rb_node;
+    _Atomic(RBNode *) rb_node;
 } RBRoot;
 
 typedef struct RBRootLeftCached {
diff --git a/include/qemu/lockcnt.h b/include/qemu/lockcnt.h
index f4b62a3f70..b5fd259cc5 100644
--- a/include/qemu/lockcnt.h
+++ b/include/qemu/lockcnt.h
@@ -20,7 +20,7 @@ struct QemuLockCnt {
 #ifndef CONFIG_LINUX
     QemuMutex mutex;
 #endif
-    unsigned count;
+    _Atomic(unsigned) count;
 };
 
 /**
diff --git a/include/qemu/qht.h b/include/qemu/qht.h
index 758c7ac6c8..f5fb71e1a3 100644
--- a/include/qemu/qht.h
+++ b/include/qemu/qht.h
@@ -14,7 +14,7 @@ typedef bool (*qht_cmp_func_t)(const void *a, const void *b);
 
 struct qht {
-    struct qht_map *map;
+    _Atomic(struct qht_map *) map;
     qht_cmp_func_t cmp;
     QemuMutex lock; /* serializes setters of ht->map */
     unsigned int mode;
diff --git a/include/qemu/queue.h b/include/qemu/queue.h
index e029e7bf66..669a00623d 100644
--- a/include/qemu/queue.h
+++ b/include/qemu/queue.h
@@ -86,6 +86,11 @@
 struct name {                                                           \
         struct type *lh_first;  /* first element */                     \
 }
 
+#define QLIST_HEAD_ATOMIC(name, type)                                   \
+struct name {                                                           \
+        _Atomic(struct type *) lh_first;  /* first element */           \
+}
+
 #define QLIST_HEAD_INITIALIZER(head)                                    \
         { NULL }
 
@@ -95,6 +100,12 @@
 struct {                                                                \
         struct type *le_next;   /* next element */                      \
         struct type **le_prev;  /* address of previous next element */  \
 }
 
+#define QLIST_ENTRY_ATOMIC(type)                                        \
+struct {                                                                \
+        _Atomic(struct type *) le_next;   /* next element */            \
+        _Atomic(struct type *) *le_prev;  /* address of previous next element */ \
+}
+
 /*
  * List functions.
 */
@@ -189,6 +200,11 @@
 struct name {                                                           \
     struct type *slh_first;  /* first element */                        \
 }
 
+#define QSLIST_HEAD_ATOMIC(name, type)                                  \
+struct name {                                                           \
+    _Atomic(struct type *) slh_first;  /* first element */              \
+}
+
 #define QSLIST_HEAD_INITIALIZER(head)                                   \
     { NULL }
 
@@ -197,6 +213,11 @@
 struct {                                                                \
     struct type *sle_next;  /* next element */                          \
 }
 
+#define QSLIST_ENTRY_ATOMIC(type)                                       \
+struct {                                                                \
+    _Atomic(struct type *) sle_next;  /* next element */                \
+}
+
 /*
  * Singly-linked List functions.
 */
@@ -275,6 +296,12 @@
 struct name {                                                           \
     struct type *sqh_first;  /* first element */                        \
     struct type **sqh_last;  /* addr of last next element */            \
 }
 
+#define QSIMPLEQ_HEAD_ATOMIC(name, type)                                \
+struct name {                                                           \
+    _Atomic(struct type *) sqh_first;  /* first element */              \
+    _Atomic(struct type *) *sqh_last;  /* addr of last next element */  \
+}
+
 #define QSIMPLEQ_HEAD_INITIALIZER(head)                                 \
     { NULL, &(head).sqh_first }
 
@@ -283,6 +310,11 @@
 struct {                                                                \
     struct type *sqe_next;  /* next element */                          \
 }
 
+#define QSIMPLEQ_ENTRY_ATOMIC(type)                                     \
+struct {                                                                \
+    _Atomic(struct type *) sqe_next;  /* next element */                \
+}
+
 /*
  * Simple queue functions.
 */
@@ -387,6 +419,11 @@ typedef struct QTailQLink {
     struct QTailQLink *tql_prev;
 } QTailQLink;
 
+typedef struct QTailQLink_atomic {
+    _Atomic(void *) tql_next;
+    struct QTailQLink_atomic *tql_prev;
+} QTailQLink_atomic;
+
 /*
  * Tail queue definitions.  The union acts as a poor man template, as if
  * it were QTailQLink.
@@ -397,6 +434,12 @@
 union name {                                                            \
         struct type *tqh_first;       /* first element */               \
         QTailQLink tqh_circ;          /* link for circular backwards list */ \
 }
 
+#define QTAILQ_HEAD_ATOMIC(name, type)                                  \
+union name {                                                            \
+        _Atomic(struct type *) tqh_first; /* first element */           \
+        QTailQLink_atomic tqh_circ;   /* link for circular backwards list */ \
+}
+
 #define QTAILQ_HEAD_INITIALIZER(head)                                   \
         { .tqh_circ = { NULL, &(head).tqh_circ } }
 
@@ -406,6 +449,12 @@
 union {                                                                 \
         struct type *tqe_next;        /* next element */                \
         QTailQLink tqe_circ;          /* link for circular backwards list */ \
 }
 
+#define QTAILQ_ENTRY_ATOMIC(type)                                       \
+union {                                                                 \
+        _Atomic(struct type *) tqe_next; /* next element */             \
+        QTailQLink_atomic tqe_circ;   /* link for circular backwards list */ \
+}
+
 /*
  * Tail queue functions.
 */
diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h
index fea058aa9f..96341abf3c 100644
--- a/include/qemu/rcu.h
+++ b/include/qemu/rcu.h
@@ -50,14 +50,14 @@
  * Using a int rather than a char to eliminate false register dependencies
  * causing stalls on some architectures.
  */
-extern unsigned long rcu_gp_ctr;
+extern _Atomic(unsigned long) rcu_gp_ctr;
 
 extern QemuEvent rcu_gp_event;
 
 struct rcu_reader_data {
     /* Data used by both reader and synchronize_rcu() */
-    unsigned long ctr;
-    bool waiting;
+    _Atomic(unsigned long) ctr;
+    _Atomic(bool) waiting;
 
     /* Data used by reader only */
     unsigned depth;
@@ -136,7 +136,7 @@
 struct rcu_head;
 typedef void RCUCBFunc(struct rcu_head *head);
 
 struct rcu_head {
-    struct rcu_head *next;
+    _Atomic(struct rcu_head *) next;
     RCUCBFunc *func;
 };
diff --git a/include/qemu/seqlock.h b/include/qemu/seqlock.h
index ecb7d2c864..2adbe2075b 100644
--- a/include/qemu/seqlock.h
+++ b/include/qemu/seqlock.h
@@ -21,7 +21,7 @@
 typedef struct QemuSeqLock QemuSeqLock;
 
 struct QemuSeqLock {
-    unsigned sequence;
+    _Atomic(unsigned) sequence;
 };
 
 static inline void seqlock_init(QemuSeqLock *sl)
diff --git a/include/qemu/stats64.h b/include/qemu/stats64.h
index 99b5cb724a..5af885960a 100644
--- a/include/qemu/stats64.h
+++ b/include/qemu/stats64.h
@@ -21,7 +21,7 @@
 typedef struct Stat64 {
 #ifdef CONFIG_ATOMIC64
-    aligned_uint64_t value;
+    _Atomic(aligned_uint64_t) value;
 #else
     uint32_t low, high;
     uint32_t lock;
diff --git a/include/qemu/thread-posix.h b/include/qemu/thread-posix.h
index 5f2f3d1386..f2a6fdabbf 100644
--- a/include/qemu/thread-posix.h
+++ b/include/qemu/thread-posix.h
@@ -37,7 +37,7 @@ struct QemuEvent {
     pthread_mutex_t lock;
     pthread_cond_t cond;
 #endif
-    unsigned value;
+    _Atomic(unsigned) value;
     bool initialized;
 };
diff --git a/include/qemu/thread.h b/include/qemu/thread.h
index 7eba27a704..072ab6fc19 100644
--- a/include/qemu/thread.h
+++ b/include/qemu/thread.h
@@ -47,13 +47,13 @@ typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
 typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms,
                                       const char *f, int l);
 
-extern QemuMutexLockFunc bql_mutex_lock_func;
-extern QemuMutexLockFunc qemu_mutex_lock_func;
-extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
-extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
-extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func;
-extern QemuCondWaitFunc qemu_cond_wait_func;
-extern QemuCondTimedWaitFunc qemu_cond_timedwait_func;
+extern _Atomic(QemuMutexLockFunc) bql_mutex_lock_func;
+extern _Atomic(QemuMutexLockFunc) qemu_mutex_lock_func;
+extern _Atomic(QemuMutexTrylockFunc) qemu_mutex_trylock_func;
+extern _Atomic(QemuRecMutexLockFunc) qemu_rec_mutex_lock_func;
+extern _Atomic(QemuRecMutexTrylockFunc) qemu_rec_mutex_trylock_func;
+extern _Atomic(QemuCondWaitFunc) qemu_cond_wait_func;
+extern _Atomic(QemuCondTimedWaitFunc) qemu_cond_timedwait_func;
 
 /* convenience macros to bypass the profiler */
 #define qemu_mutex_lock__raw(m)                         \
@@ -230,7 +230,7 @@ void qemu_thread_atexit_remove(struct Notifier *notifier);
 #endif
 
 struct QemuSpin {
-    int value;
+    _Atomic(int) value;
 };
 
 static inline void qemu_spin_init(QemuSpin *spin)
diff --git a/include/qom/object.h b/include/qom/object.h
index 43c135984a..6038b85ee5 100644
--- a/include/qom/object.h
+++ b/include/qom/object.h
@@ -130,8 +130,8 @@ struct ObjectClass
     Type type;
     GSList *interfaces;
 
-    const char *object_cast_cache[OBJECT_CLASS_CAST_CACHE];
-    const char *class_cast_cache[OBJECT_CLASS_CAST_CACHE];
+    _Atomic(const char *) object_cast_cache[OBJECT_CLASS_CAST_CACHE];
+    _Atomic(const char *) class_cast_cache[OBJECT_CLASS_CAST_CACHE];
 
     ObjectUnparent *unparent;
 
@@ -156,7 +156,7 @@ struct Object
     ObjectClass *class;
     ObjectFree *free;
     GHashTable *properties;
-    uint32_t ref;
+    _Atomic(uint32_t) ref;
     Object *parent;
 };
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index a77ed12b9d..313fe53002 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -487,7 +487,7 @@ struct TCGContext {
        extension that allows arithmetic on void*.  */
     void *code_gen_buffer;
     size_t code_gen_buffer_size;
-    void *code_gen_ptr;
+    _Atomic(void *) code_gen_ptr;
     void *data_gen_ptr;
 
     /* Threshold to flush the translated code buffer.  */
@@ -562,7 +562,7 @@ extern bool tcg_use_softmmu;
 #define tcg_use_softmmu  true
 #endif
 
-extern __thread TCGContext *tcg_ctx;
+extern __thread _Atomic(TCGContext *) tcg_ctx;
 extern const void *tcg_code_gen_epilogue;
 extern uintptr_t tcg_splitwx_diff;
 extern TCGv_env tcg_env;
diff --git a/include/user/safe-syscall.h b/include/user/safe-syscall.h
index aa075f4d5c..5b3a013af5 100644
--- a/include/user/safe-syscall.h
+++ b/include/user/safe-syscall.h
@@ -126,7 +126,7 @@
  */
 
 /* The core part of this function is implemented in assembly */
-long safe_syscall_base(int *pending, long number, ...);
+long safe_syscall_base(_Atomic(int) *pending, long number, ...);
 long safe_syscall_set_errno_tail(int value);
 
 /* These are defined by the safe-syscall.inc.S file */
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
index 895bdd722a..654dd72468 100644
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -156,7 +156,7 @@ struct TaskState {
      * the qatomic_read() and qatomic_set() functions. (It is not accessed
      * from multiple threads.)
      */
-    int signal_pending;
+    _Atomic(int) signal_pending;
 
     /* This thread's sigaltstack, if it has one */
     struct target_sigaltstack sigaltstack_used;
diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c
index 54ee9a79f7..88788912b8 100644
--- a/subprojects/libvhost-user/libvhost-user.c
+++ b/subprojects/libvhost-user/libvhost-user.c
@@ -719,7 +719,7 @@ vu_log_kick(VuDev *dev)
 }
 
 static void
-vu_log_page(uint8_t *log_table, uint64_t page)
+vu_log_page(_Atomic(uint8_t) *log_table, uint64_t page)
 {
     DPRINT("Logged dirty guest page: %"PRId64"\n", page);
 #pragma GCC diagnostic push
diff --git a/subprojects/libvhost-user/libvhost-user.h b/subprojects/libvhost-user/libvhost-user.h
index 1eb0ba971f..4842bdf928 100644
--- a/subprojects/libvhost-user/libvhost-user.h
+++ b/subprojects/libvhost-user/libvhost-user.h
@@ -413,7 +413,7 @@ struct VuDev {
     pthread_mutex_t backend_mutex;
     int backend_fd;
     uint64_t log_size;
-    uint8_t *log_table;
+    _Atomic(uint8_t) *log_table;
     uint64_t features;
     uint64_t protocol_features;
     bool broken;
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index ffa8a3e519..d43117745f 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -2100,7 +2100,7 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
         /* Note that we asserted this in range in tcg_out_goto_tb. */
         insn = deposit32(I3305_LDR | TCG_REG_TMP0, 5, 19, i_offset >> 2);
     }
-    qatomic_set((uint32_t *)jmp_rw, insn);
+    qatomic_set(_MK_ATOMIC((typeof(&insn))jmp_rw), insn);
     flush_idcache_range(jmp_rx, jmp_rw, 4);
 }
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index 56072d89a2..a4fc8a8b37 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -1795,7 +1795,7 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
         insn = INSN_NOP;
     }
 
-    qatomic_set((uint32_t *)jmp_rw, insn);
+    qatomic_set(_MK_ATOMIC((typeof(&insn))jmp_rw), insn);
     flush_idcache_range(jmp_rx, jmp_rw, 4);
 }
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 1bf50f1f62..35d1bf3754 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -2591,7 +2591,7 @@ void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
 {
     /* patch the branch destination */
     uintptr_t addr = tb->jmp_target_addr[n];
-    qatomic_set((int32_t *)jmp_rw, addr - (jmp_rx + 4));
+    qatomic_set(_MK_ATOMIC((int32_t *)jmp_rw), addr - (jmp_rx + 4));
     /* no need to flush icache explicitly */
 }
diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
index 8099248076..022c1fc5b4 100644
--- a/tcg/tcg-internal.h
+++ b/tcg/tcg-internal.h
@@ -30,8 +30,8 @@
 #define TCG_HIGHWATER 1024
 
 extern TCGContext tcg_init_ctx;
-extern TCGContext **tcg_ctxs;
-extern unsigned int tcg_cur_ctxs;
+extern _Atomic(TCGContext *) *tcg_ctxs;
+extern _Atomic(unsigned int) tcg_cur_ctxs;
 extern unsigned int tcg_max_ctxs;
 
 void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus);
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 0babae1b88..afa0b99c5b 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -236,10 +236,10 @@ bool tcg_use_softmmu;
 #endif
 
 TCGContext tcg_init_ctx;
-__thread TCGContext *tcg_ctx;
+__thread _Atomic(TCGContext *) tcg_ctx;
 
-TCGContext **tcg_ctxs;
-unsigned int tcg_cur_ctxs;
+_Atomic(TCGContext *) *tcg_ctxs;
+_Atomic(unsigned int) tcg_cur_ctxs;
 unsigned int tcg_max_ctxs;
 TCGv_env tcg_env;
 const void *tcg_code_gen_epilogue;
diff --git a/tests/bench/qht-bench.c b/tests/bench/qht-bench.c
index 8afe161d10..cf4beed546 100644
--- a/tests/bench/qht-bench.c
+++ b/tests/bench/qht-bench.c
@@ -49,7 +49,7 @@ static unsigned long lookup_range = DEFAULT_RANGE;
 static unsigned long update_range = DEFAULT_RANGE;
 static size_t init_range = DEFAULT_RANGE;
 static size_t init_size = DEFAULT_RANGE;
-static size_t n_ready_threads;
+static _Atomic(size_t) n_ready_threads;
 static long populate_offset;
 static long *keys;
 
@@ -69,8 +69,8 @@
 static uint64_t resize_threshold;
 static size_t qht_n_elems = DEFAULT_QHT_N_ELEMS;
 static int qht_mode;
 
-static bool test_start;
-static bool test_stop;
+static _Atomic(bool) test_start;
+static _Atomic(bool) test_stop;
 
 static struct thread_info *rw_info;
diff --git a/tests/unit/rcutorture.c b/tests/unit/rcutorture.c
index 7662081683..e6755bffe6 100644
--- a/tests/unit/rcutorture.c
+++ b/tests/unit/rcutorture.c
@@ -64,7 +64,7 @@
 #include "qemu/rcu.h"
 #include "qemu/thread.h"
 
-int nthreadsrunning;
+_Atomic(int) nthreadsrunning;
 
 #define GOFLAG_INIT 0
 #define GOFLAG_RUN  1
@@ -229,12 +229,12 @@ static void uperftest(int nupdaters, int duration)
 #define RCU_STRESS_PIPE_LEN 10
 
 struct rcu_stress {
-    int age;  /* how many update cycles while not rcu_stress_current */
-    int mbtest;
+    _Atomic(int) age;  /* how many update cycles while not rcu_stress_current */
+    _Atomic(int) mbtest;
 };
 
 struct rcu_stress rcu_stress_array[RCU_STRESS_PIPE_LEN] = { { 0 } };
-struct rcu_stress *rcu_stress_current;
+_Atomic(struct rcu_stress *) rcu_stress_current;
 int n_mberror;
 
 /* Updates protected by counts_mutex */
diff --git a/tests/unit/test-rcu-list.c b/tests/unit/test-rcu-list.c
index 8f0adb8b00..0b368b540b 100644
--- a/tests/unit/test-rcu-list.c
+++ b/tests/unit/test-rcu-list.c
@@ -32,18 +32,18 @@ static QemuMutex counts_mutex;
 static long long n_reads = 0LL;
 static long long n_updates = 0LL;
-static int64_t n_reclaims;
-static int64_t n_nodes_removed;
+static _Atomic(int64_t) n_reclaims;
+static _Atomic(int64_t) n_nodes_removed;
 static long long n_nodes = 0LL;
 static int g_test_in_charge = 0;
 
-static int nthreadsrunning;
+static _Atomic(int) nthreadsrunning;
 
 #define GOFLAG_INIT 0
 #define GOFLAG_RUN  1
 #define GOFLAG_STOP 2
 
-static int goflag = GOFLAG_INIT;
+static _Atomic(int) goflag = GOFLAG_INIT;
 
 #define RCU_READ_RUN 1000
 #define RCU_UPDATE_RUN 10
@@ -87,13 +87,13 @@ static void wait_all_threads(void)
 
 struct list_element {
 #if TEST_LIST_TYPE == 1
-    QLIST_ENTRY(list_element) entry;
+    QLIST_ENTRY_ATOMIC(list_element) entry;
 #elif TEST_LIST_TYPE == 2
-    QSIMPLEQ_ENTRY(list_element) entry;
+    QSIMPLEQ_ENTRY_ATOMIC(list_element) entry;
 #elif TEST_LIST_TYPE == 3
-    QTAILQ_ENTRY(list_element) entry;
+    QTAILQ_ENTRY_ATOMIC(list_element) entry;
 #elif TEST_LIST_TYPE == 4
-    QSLIST_ENTRY(list_element) entry;
+    QSLIST_ENTRY_ATOMIC(list_element) entry;
 #else
 #error Invalid TEST_LIST_TYPE
 #endif
@@ -109,7 +109,7 @@ static void reclaim_list_el(struct rcu_head *prcu)
 }
 
 #if TEST_LIST_TYPE == 1
-static QLIST_HEAD(, list_element) Q_list_head;
+static QLIST_HEAD_ATOMIC(, list_element) Q_list_head;
 
 #define TEST_NAME "qlist"
 #define TEST_LIST_REMOVE_RCU        QLIST_REMOVE_RCU
@@ -119,7 +119,7 @@
 #define TEST_LIST_FOREACH_SAFE_RCU  QLIST_FOREACH_SAFE_RCU
 
 #elif TEST_LIST_TYPE == 2
-static QSIMPLEQ_HEAD(, list_element) Q_list_head =
+static QSIMPLEQ_HEAD_ATOMIC(, list_element) Q_list_head =
     QSIMPLEQ_HEAD_INITIALIZER(Q_list_head);
 
 #define TEST_NAME "qsimpleq"
@@ -134,7 +134,7 @@
 #define TEST_LIST_FOREACH_SAFE_RCU  QSIMPLEQ_FOREACH_SAFE_RCU
 
 #elif TEST_LIST_TYPE == 3
-static QTAILQ_HEAD(, list_element) Q_list_head;
+static QTAILQ_HEAD_ATOMIC(, list_element) Q_list_head;
 
 #define TEST_NAME "qtailq"
 #define TEST_LIST_REMOVE_RCU(el, f) QTAILQ_REMOVE_RCU(&Q_list_head, el, f)
@@ -147,7 +147,7 @@
 #define TEST_LIST_FOREACH_SAFE_RCU  QTAILQ_FOREACH_SAFE_RCU
 
 #elif TEST_LIST_TYPE == 4
-static QSLIST_HEAD(, list_element) Q_list_head;
+static QSLIST_HEAD_ATOMIC(, list_element) Q_list_head;
 
 #define TEST_NAME "qslist"
 #define TEST_LIST_REMOVE_RCU(el, f)                       \
diff --git a/util/aio-posix.h b/util/aio-posix.h
index 4264c518be..d009b80fb4 100644
--- a/util/aio-posix.h
+++ b/util/aio-posix.h
@@ -28,12 +28,12 @@ struct AioHandler {
     IOHandler *io_poll_begin;
     IOHandler *io_poll_end;
     void *opaque;
-    QLIST_ENTRY(AioHandler) node;
-    QLIST_ENTRY(AioHandler) node_ready; /* only used during aio_poll() */
-    QLIST_ENTRY(AioHandler) node_deleted;
-    QLIST_ENTRY(AioHandler) node_poll;
+    QLIST_ENTRY_ATOMIC(AioHandler) node;
+    QLIST_ENTRY_ATOMIC(AioHandler) node_ready; /* only used during aio_poll() */
+    QLIST_ENTRY_ATOMIC(AioHandler) node_deleted;
+    QLIST_ENTRY_ATOMIC(AioHandler) node_poll;
 #ifdef CONFIG_LINUX_IO_URING
-    QSLIST_ENTRY(AioHandler) node_submitted;
+    QSLIST_ENTRY_ATOMIC(AioHandler) node_submitted;
     unsigned flags; /* see fdmon-io_uring.c */
 #endif
     int64_t poll_idle_timeout; /* when to stop userspace polling */
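The util/bitmap.c hunks that follow all wrap a plain word pointer with _MK_ATOMIC() before handing it to a qatomic helper. A minimal standalone sketch of that cast pattern (MK_ATOMIC below mirrors the shape of the _MK_ATOMIC added to include/qemu/atomic.h; everything else is illustrative, and it leans on the same assumption the patch itself makes, namely that _Atomic(T) and T share a representation):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Same shape as the patch's _MK_ATOMIC: reinterpret T * as _Atomic(T) *. */
    #define MK_ATOMIC(p) ((_Atomic(__typeof__(*(p))) *)(p))

    int main(void)
    {
        unsigned long map[2] = { 0, 0 };
        unsigned long *p = &map[0];

        /* Roughly what qatomic_or(_MK_ATOMIC(p), mask) does: a seq_cst RMW. */
        atomic_fetch_or_explicit(MK_ATOMIC(p), 1UL << 3, memory_order_seq_cst);

        printf("map[0] = %#lx\n", map[0]);   /* prints 0x8 */
        return 0;
    }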
diff --git a/util/bitmap.c b/util/bitmap.c
index 8d12e90a5a..fb17116212 100644
--- a/util/bitmap.c
+++ b/util/bitmap.c
@@ -190,7 +190,7 @@ void bitmap_set_atomic(unsigned long *map, long start, long nr)
 
     /* First word */
     if (nr - bits_to_set > 0) {
-        qatomic_or(p, mask_to_set);
+        qatomic_or(_MK_ATOMIC(p), mask_to_set);
         nr -= bits_to_set;
         bits_to_set = BITS_PER_LONG;
         mask_to_set = ~0UL;
@@ -209,7 +209,7 @@
     /* Last word */
     if (nr) {
         mask_to_set &= BITMAP_LAST_WORD_MASK(size);
-        qatomic_or(p, mask_to_set);
+        qatomic_or(_MK_ATOMIC(p), mask_to_set);
     } else {
         /* If we avoided the full barrier in qatomic_or(), issue a
          * barrier to account for the assignments in the while loop.
@@ -298,7 +298,7 @@ bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr)
 
     /* First word */
     if (nr - bits_to_clear > 0) {
-        old_bits = qatomic_fetch_and(p, ~mask_to_clear);
+        old_bits = qatomic_fetch_and(_MK_ATOMIC(p), ~mask_to_clear);
         dirty |= old_bits & mask_to_clear;
         nr -= bits_to_clear;
         bits_to_clear = BITS_PER_LONG;
@@ -310,7 +310,7 @@
     if (bits_to_clear == BITS_PER_LONG) {
         while (nr >= BITS_PER_LONG) {
             if (*p) {
-                old_bits = qatomic_xchg(p, 0);
+                old_bits = qatomic_xchg(_MK_ATOMIC(p), 0);
                 dirty |= old_bits;
             }
             nr -= BITS_PER_LONG;
@@ -321,7 +321,7 @@
     /* Last word */
     if (nr) {
         mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
-        old_bits = qatomic_fetch_and(p, ~mask_to_clear);
+        old_bits = qatomic_fetch_and(_MK_ATOMIC(p), ~mask_to_clear);
         dirty |= old_bits & mask_to_clear;
     } else {
         if (!dirty) {
@@ -332,7 +332,7 @@
     return dirty != 0;
 }
 
-void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src,
+void bitmap_copy_and_clear_atomic(unsigned long *dst, _Atomic(unsigned long) *src,
                                   long nr)
 {
     while (nr > 0) {
diff --git a/util/interval-tree.c b/util/interval-tree.c
index 53465182e6..d46c8c1c54 100644
--- a/util/interval-tree.c
+++ b/util/interval-tree.c
@@ -132,7 +132,7 @@ static inline void rb_set_parent(RBNode *n, RBNode *p)
     rb_set_parent_color(n, p, rb_color(n));
 }
 
-static inline void rb_link_node(RBNode *node, RBNode *parent, RBNode **rb_link)
+static inline void rb_link_node(RBNode *node, RBNode *parent, _Atomic(RBNode *) *rb_link)
 {
     node->rb_parent_color = (uintptr_t)parent;
     node->rb_left = node->rb_right = NULL;
@@ -710,7 +710,8 @@ static const RBAugmentCallbacks interval_tree_augment = {
 /* Insert / remove interval nodes from the tree */
 
 void interval_tree_insert(IntervalTreeNode *node, IntervalTreeRoot *root)
 {
-    RBNode **link = &root->rb_root.rb_node, *rb_parent = NULL;
+    _Atomic(RBNode *) *link = &root->rb_root.rb_node;
+    RBNode *rb_parent = NULL;
     uint64_t start = node->start, last = node->last;
     IntervalTreeNode *parent;
     bool leftmost = true;
diff --git a/util/log.c b/util/log.c
index 6219819855..fd4612ef2a 100644
--- a/util/log.c
+++ b/util/log.c
@@ -40,7 +40,7 @@ typedef struct RCUCloseFILE {
 /* Mutex covering the other global_* variables. */
 static QemuMutex global_mutex;
 static char *global_filename;
-static FILE *global_file;
+static _Atomic(FILE *) global_file;
 static __thread FILE *thread_file;
 static __thread Notifier qemu_log_thread_cleanup_notifier;
@@ -115,7 +115,7 @@ static FILE *qemu_log_trylock_with_err(Error **errp)
      * Since all we want is a read of a pointer, cast to void**,
      * which does work with typeof_strip_qual.
      */
-    logfile = qatomic_rcu_read((void **)&global_file);
+    logfile = qatomic_rcu_read(&global_file);
     if (!logfile) {
         rcu_read_unlock();
         return NULL;
diff --git a/util/qht.c b/util/qht.c
index 92c6b78759..c7858ebabd 100644
--- a/util/qht.c
+++ b/util/qht.c
@@ -144,9 +144,9 @@ static inline void qht_unlock(struct qht *ht)
 struct qht_bucket {
     QemuSpin lock;
     QemuSeqLock sequence;
-    uint32_t hashes[QHT_BUCKET_ENTRIES];
-    void *pointers[QHT_BUCKET_ENTRIES];
-    struct qht_bucket *next;
+    _Atomic(uint32_t) hashes[QHT_BUCKET_ENTRIES];
+    _Atomic(void *) pointers[QHT_BUCKET_ENTRIES];
+    _Atomic(struct qht_bucket *) next;
 } QEMU_ALIGNED(QHT_BUCKET_ALIGN);
 
 QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);
 
@@ -184,7 +184,7 @@ struct qht_map {
     struct rcu_head rcu;
     struct qht_bucket *buckets;
     size_t n_buckets;
-    size_t n_added_buckets;
+    _Atomic(size_t) n_added_buckets;
     size_t n_added_buckets_threshold;
 #ifdef CONFIG_TSAN
     struct qht_tsan_lock tsan_bucket_locks[QHT_TSAN_BUCKET_LOCKS];
diff --git a/util/qsp.c b/util/qsp.c
index 6b783e2e7f..500b5f96ad 100644
--- a/util/qsp.c
+++ b/util/qsp.c
@@ -83,8 +83,8 @@ typedef struct QSPCallSite QSPCallSite;
 struct QSPEntry {
     void *thread_ptr;
     const QSPCallSite *callsite;
-    aligned_uint64_t n_acqs;
-    aligned_uint64_t ns;
+    _Atomic(aligned_uint64_t) n_acqs;
+    _Atomic(aligned_uint64_t) ns;
     unsigned int n_objs; /* count of coalesced objs; only used for reporting */
 };
 typedef struct QSPEntry QSPEntry;
 
@@ -114,8 +114,8 @@ static __thread int qsp_thread;
 static struct qht qsp_callsite_ht;
 static struct qht qsp_ht;
 
-static QSPSnapshot *qsp_snapshot;
-static bool qsp_initialized, qsp_initializing;
+static _Atomic(QSPSnapshot *) qsp_snapshot;
+static _Atomic(bool) qsp_initialized, qsp_initializing;
 
 static const char * const qsp_typenames[] = {
     [QSP_MUTEX]   = "mutex",
@@ -124,14 +124,14 @@
     [QSP_CONDVAR] = "condvar",
 };
 
-QemuMutexLockFunc bql_mutex_lock_func = qemu_mutex_lock_impl;
-QemuMutexLockFunc qemu_mutex_lock_func = qemu_mutex_lock_impl;
-QemuMutexTrylockFunc qemu_mutex_trylock_func = qemu_mutex_trylock_impl;
-QemuRecMutexLockFunc qemu_rec_mutex_lock_func = qemu_rec_mutex_lock_impl;
-QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func =
+_Atomic(QemuMutexLockFunc) bql_mutex_lock_func = qemu_mutex_lock_impl;
+_Atomic(QemuMutexLockFunc) qemu_mutex_lock_func = qemu_mutex_lock_impl;
+_Atomic(QemuMutexTrylockFunc) qemu_mutex_trylock_func = qemu_mutex_trylock_impl;
+_Atomic(QemuRecMutexLockFunc) qemu_rec_mutex_lock_func = qemu_rec_mutex_lock_impl;
+_Atomic(QemuRecMutexTrylockFunc) qemu_rec_mutex_trylock_func =
     qemu_rec_mutex_trylock_impl;
-QemuCondWaitFunc qemu_cond_wait_func = qemu_cond_wait_impl;
-QemuCondTimedWaitFunc qemu_cond_timedwait_func = qemu_cond_timedwait_impl;
+_Atomic(QemuCondWaitFunc) qemu_cond_wait_func = qemu_cond_wait_impl;
+_Atomic(QemuCondTimedWaitFunc) qemu_cond_timedwait_func = qemu_cond_timedwait_impl;
 
 /*
  * It pays off to _not_ hash callsite->file; hashing a string is slow, and
diff --git a/util/rcu.c b/util/rcu.c
index fa32c942e4..c1715aed48 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -43,10 +43,10 @@
 #define RCU_GP_LOCKED           (1UL << 0)
 #define RCU_GP_CTR              (1UL << 1)
 
-unsigned long rcu_gp_ctr = RCU_GP_LOCKED;
+_Atomic(unsigned long) rcu_gp_ctr = RCU_GP_LOCKED;
 
 QemuEvent rcu_gp_event;
-static int in_drain_call_rcu;
+static _Atomic(int) in_drain_call_rcu;
 static QemuMutex rcu_registry_lock;
 static QemuMutex rcu_sync_lock;
 
@@ -54,7 +54,7 @@
  * Check whether a quiescent state was crossed between the beginning of
  * update_counter_and_wait and now.
  */
-static inline int rcu_gp_ongoing(unsigned long *ctr)
+static inline int rcu_gp_ongoing(_Atomic(unsigned long) *ctr)
 {
     unsigned long v;
 
@@ -180,13 +180,14 @@ void synchronize_rcu(void)
  * from liburcu.  Note that head is only used by the consumer.
 */
 static struct rcu_head dummy;
-static struct rcu_head *head = &dummy, **tail = &dummy.next;
-static int rcu_call_count;
+static struct rcu_head *head = &dummy;
+static _Atomic(_Atomic(struct rcu_head *) *) tail = &dummy.next;
+static _Atomic(int) rcu_call_count;
 static QemuEvent rcu_call_ready_event;
 
 static void enqueue(struct rcu_head *node)
 {
-    struct rcu_head **old_tail;
+    _Atomic(struct rcu_head *) *old_tail;
 
     node->next = NULL;
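Taken together, the conversion is mechanical: each __atomic_*() builtin with an __ATOMIC_* ordering argument becomes the corresponding <stdatomic.h> *_explicit() call on an _Atomic-typed object. A compilable sketch of that mapping (standalone and illustrative only; the qatomic wrappers themselves live in include/qemu/atomic.h):

    #include <assert.h>
    #include <stdatomic.h>

    int main(void)
    {
        _Atomic(int) v = 40;

        /* __atomic_fetch_add(&v, 2, __ATOMIC_SEQ_CST) becomes: */
        int old = atomic_fetch_add_explicit(&v, 2, memory_order_seq_cst);
        assert(old == 40);

        /*
         * __atomic_compare_exchange_n(&v, &exp, 43, false, SEQ_CST, SEQ_CST)
         * becomes the strong C11 form.  As in the reworked
         * qatomic_cmpxchg__nocheck(), the expected value sits in a plain
         * (non-_Atomic) object whose type can be recovered with
         * typeof(atomic_load_explicit(ptr, memory_order_relaxed)).
         */
        int expected = 42;
        atomic_compare_exchange_strong_explicit(&v, &expected, 43,
                                                memory_order_seq_cst,
                                                memory_order_seq_cst);
        assert(atomic_load_explicit(&v, memory_order_relaxed) == 43);
        return 0;
    }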