Skip to content

Commit

Permalink
Switch to stdatomics.
Browse files Browse the repository at this point in the history
  • Loading branch information
sobomax committed Dec 12, 2024
1 parent 05a2dac commit 7b11725
Show file tree
Hide file tree
Showing 50 changed files with 243 additions and 185 deletions.
20 changes: 12 additions & 8 deletions accel/tcg/atomic_template.h
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
_Atomic(DATA_TYPE) *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
DATA_SIZE, retaddr);
DATA_TYPE ret;

Expand All @@ -104,7 +104,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
_Atomic(DATA_TYPE) *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
DATA_SIZE, retaddr);
DATA_TYPE ret;

Expand All @@ -123,7 +123,8 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr, ret; \
DATA_TYPE ret; \
_Atomic(DATA_TYPE) *haddr; \
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
ret = qatomic_##X(haddr, val); \
ATOMIC_MMU_CLEANUP; \
Expand Down Expand Up @@ -159,7 +160,8 @@ GEN_ATOMIC_HELPER(xor_fetch)
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr, cmp, old, new, val = xval; \
_Atomic(XDATA_TYPE) *haddr; \
XDATA_TYPE cmp, old, new, val = xval; \
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
smp_mb(); \
cmp = qatomic_read__nocheck(haddr); \
Expand Down Expand Up @@ -206,7 +208,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
_Atomic(DATA_TYPE) *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
DATA_SIZE, retaddr);
DATA_TYPE ret;

Expand All @@ -229,7 +231,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
_Atomic(DATA_TYPE) *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
DATA_SIZE, retaddr);
ABI_TYPE ret;

Expand All @@ -248,7 +250,8 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr, ret; \
_Atomic(DATA_TYPE) *haddr; \
DATA_TYPE ret; \
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
ret = qatomic_##X(haddr, BSWAP(val)); \
ATOMIC_MMU_CLEANUP; \
Expand Down Expand Up @@ -281,7 +284,8 @@ GEN_ATOMIC_HELPER(xor_fetch)
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \
_Atomic(XDATA_TYPE) *haddr; \
XDATA_TYPE ldo, ldn, old, new, val = xval; \
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
smp_mb(); \
ldn = qatomic_read__nocheck(haddr); \
Expand Down
2 changes: 1 addition & 1 deletion accel/tcg/cputlb.c
Original file line number Diff line number Diff line change
Expand Up @@ -1794,7 +1794,7 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
* Probe for an atomic operation. Do not allow unaligned operations,
* or io operations to proceed. Return the host address.
*/
static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
static _Atomic(DATA_TYPE) *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
int size, uintptr_t retaddr)
{
uintptr_t mmu_idx = get_mmuidx(oi);
Expand Down
2 changes: 1 addition & 1 deletion accel/tcg/internal-common.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
extern int64_t max_delay;
extern int64_t max_advance;

extern bool one_insn_per_tb;
extern _Atomic(bool) one_insn_per_tb;

/*
* Return true if CS is not running in parallel with other cpus, either
Expand Down
24 changes: 12 additions & 12 deletions accel/tcg/ldst_atomicity.c.inc
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,7 @@ static int required_atomicity(CPUState *cpu, uintptr_t p, MemOp memop)
/**
 * load_atomic2:
 * @pv: host address, assumed 2-byte aligned
 *
 * Atomically load 2 aligned bytes from @pv via QEMU's qatomic_read.
 * (Diff residue had left both the old plain-pointer read and the new
 * _Atomic read in place; the first, non-atomic return made the atomic
 * one unreachable. Only the _Atomic access is kept.)
 */
static inline uint16_t load_atomic2(void *pv)
{
    uint16_t *p = __builtin_assume_aligned(pv, 2);
    return qatomic_read(_MK_ATOMIC(p));
}

/**
 * load_atomic4:
 * @pv: host address, assumed 4-byte aligned
 *
 * Atomically load 4 aligned bytes from @pv via QEMU's qatomic_read.
 * (The stale non-atomic return left over from the diff is dropped; it
 * made the _Atomic access unreachable.)
 */
static inline uint32_t load_atomic4(void *pv)
{
    uint32_t *p = __builtin_assume_aligned(pv, 4);
    return qatomic_read(_MK_ATOMIC(p));
}

/**
Expand All @@ -134,7 +134,7 @@ static inline uint64_t load_atomic8(void *pv)
uint64_t *p = __builtin_assume_aligned(pv, 8);

qemu_build_assert(HAVE_al8);
return qatomic_read__nocheck(p);
return qatomic_read__nocheck(_MK_ATOMIC(p));
}

/**
 * store_atomic2:
 * @pv: host address, assumed 2-byte aligned
 * @val: value to store
 *
 * Atomically store 2 aligned bytes to @pv via QEMU's qatomic_set.
 * (Diff residue had left both the old and new store lines in place,
 * writing @pv twice — the first time non-atomically.  Only the
 * _Atomic store is kept.)
 */
static inline void store_atomic2(void *pv, uint16_t val)
{
    uint16_t *p = __builtin_assume_aligned(pv, 2);
    qatomic_set(_MK_ATOMIC(p), val);
}

/**
 * store_atomic4:
 * @pv: host address, assumed 4-byte aligned
 * @val: value to store
 *
 * Atomically store 4 aligned bytes to @pv via QEMU's qatomic_set.
 * (The duplicated non-atomic store left over from the diff is removed.)
 */
static inline void store_atomic4(void *pv, uint32_t val)
{
    uint32_t *p = __builtin_assume_aligned(pv, 4);
    qatomic_set(_MK_ATOMIC(p), val);
}

/**
Expand All @@ -626,7 +626,7 @@ static inline void store_atomic8(void *pv, uint64_t val)
uint64_t *p = __builtin_assume_aligned(pv, 8);

qemu_build_assert(HAVE_al8);
qatomic_set__nocheck(p, val);
qatomic_set__nocheck(_MK_ATOMIC(p), val);
}

/**
Expand Down Expand Up @@ -669,11 +669,11 @@ static void store_atom_insert_al4(uint32_t *p, uint32_t val, uint32_t msk)
uint32_t old, new;

p = __builtin_assume_aligned(p, 4);
old = qatomic_read(p);
old = qatomic_read(_MK_ATOMIC(p));
do {
new = (old & ~msk) | val;
} while (!__atomic_compare_exchange_n(p, &old, new, true,
__ATOMIC_RELAXED, __ATOMIC_RELAXED));
} while (!atomic_compare_exchange_weak_explicit(_MK_ATOMIC(p), &old, new,
memory_order_relaxed, memory_order_relaxed));
}

/**
Expand All @@ -690,11 +690,11 @@ static void store_atom_insert_al8(uint64_t *p, uint64_t val, uint64_t msk)

qemu_build_assert(HAVE_al8);
p = __builtin_assume_aligned(p, 8);
old = qatomic_read__nocheck(p);
old = qatomic_read__nocheck(_MK_ATOMIC(p));
do {
new = (old & ~msk) | val;
} while (!__atomic_compare_exchange_n(p, &old, new, true,
__ATOMIC_RELAXED, __ATOMIC_RELAXED));
} while (!atomic_compare_exchange_weak_explicit(_MK_ATOMIC(p), &old, new,
memory_order_relaxed, memory_order_relaxed));
}

/**
Expand Down
4 changes: 2 additions & 2 deletions accel/tcg/tb-context.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,8 @@ struct TBContext {
struct qht htable;

/* statistics */
unsigned tb_flush_count;
unsigned tb_phys_invalidate_count;
_Atomic(unsigned) tb_flush_count;
_Atomic(unsigned) tb_phys_invalidate_count;
};

extern TBContext tb_ctx;
Expand Down
2 changes: 1 addition & 1 deletion accel/tcg/tb-jmp-cache.h
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
/*
 * Per-CPU cache of (TranslationBlock, pc) pairs, reclaimed via RCU.
 * The 'tb' member is _Atomic because it is accessed concurrently.
 * (Diff residue had left both the old plain-pointer 'tb' and the new
 * _Atomic 'tb' in the struct — a duplicate-member compile error; only
 * the _Atomic member is kept.)
 */
typedef struct CPUJumpCache {
    struct rcu_head rcu;
    struct {
        _Atomic(TranslationBlock *) tb;
        vaddr pc;
    } array[TB_JMP_CACHE_SIZE];
} CPUJumpCache;
Expand Down
2 changes: 1 addition & 1 deletion accel/tcg/tcg-all.c
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ static void tcg_accel_instance_init(Object *obj)
}

bool mttcg_enabled;
bool one_insn_per_tb;
_Atomic(bool) one_insn_per_tb;

static int tcg_init_machine(MachineState *ms)
{
Expand Down
2 changes: 1 addition & 1 deletion bsd-user/qemu.h
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,7 @@ struct TaskState {
* the qatomic_read() and qatomic_set() functions. (It is not accessed
* from multiple threads.)
*/
int signal_pending;
_Atomic(int) signal_pending;
/* True if we're leaving a sigsuspend and sigsuspend_mask is valid. */
bool in_sigsuspend;
/*
Expand Down
5 changes: 3 additions & 2 deletions cpu-common.c
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ static QemuCond qemu_work_cond;
/* >= 1 if a thread is inside start_exclusive/end_exclusive. Written
* under qemu_cpu_list_lock, read with atomic operations.
*/
static int pending_cpus;
static _Atomic(int) pending_cpus;

void qemu_init_cpu_list(void)
{
Expand Down Expand Up @@ -128,7 +128,8 @@ struct qemu_work_item {
QSIMPLEQ_ENTRY(qemu_work_item) node;
run_on_cpu_func func;
run_on_cpu_data data;
bool free, exclusive, done;
bool free, exclusive;
_Atomic(bool) done;
};

static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
Expand Down
4 changes: 2 additions & 2 deletions host/include/generic/host/atomic128-cas.h
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,9 @@

#if defined(CONFIG_ATOMIC128)
static inline Int128 ATTRIBUTE_ATOMIC128_OPT
atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new)
atomic16_cmpxchg(_Atomic(Int128) *ptr, Int128 cmp, Int128 new)
{
__int128_t *ptr_align = __builtin_assume_aligned(ptr, 16);
_Atomic(__int128_t) *ptr_align = __builtin_assume_aligned(ptr, 16);
Int128Alias r, c, n;

c.s = cmp;
Expand Down
4 changes: 2 additions & 2 deletions host/include/generic/host/load-extract-al16-al8.h.inc
Original file line number Diff line number Diff line change
Expand Up @@ -28,8 +28,8 @@ load_atom_extract_al16_or_al8(void *pv, int s)
pv = (void *)(pi & ~7);
if (pi & 8) {
uint64_t *p8 = __builtin_assume_aligned(pv, 16, 8);
uint64_t a = qatomic_read__nocheck(p8);
uint64_t b = qatomic_read__nocheck(p8 + 1);
uint64_t a = qatomic_read__nocheck(_MK_ATOMIC(p8));
uint64_t b = qatomic_read__nocheck(_MK_ATOMIC(p8 + 1));

if (HOST_BIG_ENDIAN) {
r = int128_make128(b, a);
Expand Down
8 changes: 4 additions & 4 deletions include/block/aio.h
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef QLIST_HEAD_ATOMIC(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);
Expand Down Expand Up @@ -170,7 +170,7 @@ struct AioContext {
* Instead, the aio_poll calls include both the prepare and the
* dispatch phase, hence a simple counter is enough for them.
*/
uint32_t notify_me;
_Atomic(uint32_t) notify_me;

/* A lock to protect between QEMUBH and AioHandler adders and deleter,
* and to ensure that no callbacks are removed while we're walking and
Expand All @@ -196,7 +196,7 @@ struct AioContext {
* more information on the problem that would result, see "#ifdef BUG2"
* in the docs/aio_notify_accept.promela formal model.
*/
bool notified;
_Atomic(bool) notified;
EventNotifier notifier;

QSLIST_HEAD(, Coroutine) scheduled_coroutines;
Expand Down Expand Up @@ -226,7 +226,7 @@ struct AioContext {
QEMUTimerListGroup tlg;

/* Number of AioHandlers without .io_poll() */
int poll_disable_cnt;
_Atomic(int) poll_disable_cnt;

/* Polling mode parameters */
int64_t poll_ns; /* current polling time in nanoseconds */
Expand Down
2 changes: 1 addition & 1 deletion include/exec/memory.h
Original file line number Diff line number Diff line change
Expand Up @@ -1096,7 +1096,7 @@ struct AddressSpace {
MemoryRegion *root;

/* Accessed via RCU. */
struct FlatView *current_map;
_Atomic(struct FlatView *) current_map;

int ioeventfd_nb;
int ioeventfd_notifiers;
Expand Down
4 changes: 2 additions & 2 deletions include/exec/translation-block.h
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ struct TranslationBlock {
uint64_t cs_base;

uint32_t flags; /* flags defining in which context the code was generated */
uint32_t cflags; /* compile flags */
_Atomic(uint32_t) cflags; /* compile flags */

/* Note that TCG_MAX_INSNS is 512; we validate this match elsewhere. */
#define CF_COUNT_MASK 0x000001ff
Expand Down Expand Up @@ -140,7 +140,7 @@ struct TranslationBlock {
*/
uintptr_t jmp_list_head;
uintptr_t jmp_list_next[2];
uintptr_t jmp_dest[2];
_Atomic(uintptr_t) jmp_dest[2];
};

/* The alignment given to TranslationBlock during allocation. */
Expand Down
17 changes: 9 additions & 8 deletions include/hw/core/cpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -336,14 +336,14 @@ typedef struct CPUTLB {
* for both decrementer underflow and exceptions.
*/
typedef union IcountDecr {
    _Atomic(uint32_t) u32;      /* whole-word view */
    struct {
#if HOST_BIG_ENDIAN
        /* 'high' is accessed concurrently, hence _Atomic; 'low' is not. */
        _Atomic(uint16_t) high;
        uint16_t low;
#else
        uint16_t low;
        /* 'high' is accessed concurrently, hence _Atomic; 'low' is not. */
        _Atomic(uint16_t) high;
#endif
    } u16;
} IcountDecr;
Expand Down Expand Up @@ -476,7 +476,8 @@ struct CPUState {
QemuSemaphore sem;
#endif
int thread_id;
bool running, has_waiter;
_Atomic(bool) running;
bool has_waiter;
struct QemuCond *halt_cond;
bool thread_kicked;
bool created;
Expand All @@ -488,11 +489,11 @@ struct CPUState {

bool unplug;
bool crash_occurred;
bool exit_request;
_Atomic(bool) exit_request;
int exclusive_context_count;
uint32_t cflags_next_tb;
/* updates protected by BQL */
uint32_t interrupt_request;
_Atomic(uint32_t) interrupt_request;
int singlestep_enabled;
int64_t icount_budget;
int64_t icount_extra;
Expand All @@ -513,7 +514,7 @@ struct CPUState {
GArray *gdb_regs;
int gdb_num_regs;
int gdb_num_g_regs;
QTAILQ_ENTRY(CPUState) node;
QTAILQ_ENTRY_ATOMIC(CPUState) node;

/* ice debug support */
QTAILQ_HEAD(, CPUBreakpoint) breakpoints;
Expand Down Expand Up @@ -590,7 +591,7 @@ static inline CPUArchState *cpu_env(CPUState *cpu)
return (CPUArchState *)(cpu + 1);
}

typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
typedef QTAILQ_HEAD_ATOMIC(CPUTailQ, CPUState) CPUTailQ;
extern CPUTailQ cpus_queue;

#define first_cpu QTAILQ_FIRST_RCU(&cpus_queue)
Expand Down
Loading

0 comments on commit 7b11725

Please sign in to comment.