tcg: Synchronize with qemu

Lioncash 2019-04-26 09:06:21 -04:00
parent 3996153514
commit 4a64ebf95e
8 changed files with 164 additions and 199 deletions

View File

@@ -156,10 +156,11 @@ void *HELPER(lookup_tb_ptr)(CPUArchState *env)
return tcg_ctx->code_gen_epilogue;
}
// Unicorn: commented out
// Unicorn: Commented out
//qemu_log_mask_and_addr(CPU_LOG_EXEC, pc,
// "Chain %p [%d: " TARGET_FMT_lx "] %s\n",
// tb->tc.ptr, cpu->cpu_index, pc,
// "Chain %d: %p ["
// TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
// cpu->cpu_index, tb->tc.ptr, cs_base, pc, flags,
// lookup_symbol(pc));
return tb->tc.ptr;
}
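
The hunk above has HELPER(lookup_tb_ptr) fall back to code_gen_epilogue whenever no translated block exists for the current pc, so the execution loop regains control instead of chaining. A minimal standalone sketch of that lookup-or-epilogue shape, under assumed names (fake_tb_lookup, EPILOGUE and TB_CODE are stand-ins, not qemu API):

#include <stdio.h>
#include <stdint.h>

static char EPILOGUE;   /* stand-in for tcg_ctx->code_gen_epilogue */
static char TB_CODE;    /* stand-in for tb->tc.ptr */

static void *fake_tb_lookup(uint64_t pc)
{
    /* Pretend only pc == 0x1000 has a cached translation block. */
    return pc == 0x1000 ? &TB_CODE : NULL;
}

static void *lookup_tb_ptr(uint64_t pc)
{
    void *tb = fake_tb_lookup(pc);
    /* Miss: return the epilogue so the main loop takes over. */
    return tb ? tb : &EPILOGUE;
}

int main(void)
{
    printf("hit:  %p\n", lookup_tb_ptr(0x1000));
    printf("miss: %p\n", lookup_tb_ptr(0x2000));
    return 0;
}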
@@ -168,58 +169,3 @@ void HELPER(exit_atomic)(CPUArchState *env)
{
cpu_loop_exit_atomic(ENV_GET_CPU(env), GETPC());
}
#ifndef CONFIG_SOFTMMU
/* The softmmu versions of these helpers are in cputlb.c. */
/* Do not allow unaligned operations to proceed. Return the host address. */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
int size, uintptr_t retaddr)
{
/* Enforce qemu required alignment. */
if (unlikely(addr & (size - 1))) {
cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}
return g2h(addr);
}
/* Macro to call the above, with local variables from the use context. */
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
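
The alignment check in atomic_mmu_lookup relies on size being a power of two: addr & (size - 1) is non-zero exactly when addr is not size-aligned, which is what routes misaligned atomics to cpu_loop_exit_atomic. A quick self-contained check of that predicate (illustrative, not qemu code):

#include <stdio.h>
#include <stdint.h>

static int is_misaligned(uint64_t addr, int size)
{
    /* Valid only for power-of-two sizes: the low log2(size) bits must be 0. */
    return (addr & (size - 1)) != 0;
}

int main(void)
{
    printf("%d %d %d\n",
           is_misaligned(0x1000, 8),    /* 0: 8-byte aligned */
           is_misaligned(0x1004, 8),    /* 1: only 4-byte aligned */
           is_misaligned(0x1004, 4));   /* 0: 4-byte aligned */
    return 0;
}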
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define EXTRA_ARGS
#define DATA_SIZE 1
#include "atomic_template.h"
#define DATA_SIZE 2
#include "atomic_template.h"
#define DATA_SIZE 4
#include "atomic_template.h"
#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif
/* The following is only callable from other helpers, and matches up
with the softmmu version. */
#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
#endif /* !CONFIG_SOFTMMU */
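
The block above (dropped from this file in the sync) uses qemu's multiple-inclusion template: atomic_template.h is included once per DATA_SIZE, and the ATOMIC_NAME/EXTRA_ARGS macros rename each instantiation. A condensed, runnable sketch of the same trick collapsed into one file (GEN, GLUE and the loadN helpers are made-up names standing in for the template header):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define GLUE_(a, b) a##b
#define GLUE(a, b)  GLUE_(a, b)

/* One "template instantiation" per size, like #include "atomic_template.h"
 * with a different DATA_SIZE each time. */
#define GEN(BITS, TYPE)                          \
    static TYPE GLUE(load, BITS)(const void *p)  \
    {                                            \
        TYPE v;                                  \
        memcpy(&v, p, sizeof(v));                \
        return v;                                \
    }

GEN(8,  uint8_t)
GEN(16, uint16_t)
GEN(32, uint32_t)
GEN(64, uint64_t)

int main(void)
{
    uint64_t x = 0x1122334455667788ull;
    printf("%u %llu\n", (unsigned)load8(&x), (unsigned long long)load64(&x));
    return 0;
}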

View File

@@ -40,7 +40,6 @@ DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG,
i32, env, tl, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG,
i32, env, tl, i32, i32, i32)
#ifdef CONFIG_ATOMIC64
DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG,
i64, env, tl, i64, i64, i32)
@@ -181,11 +180,6 @@ DEF_HELPER_FLAGS_4(gvec_muls16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_muls32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(gvec_muls64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_3(gvec_neg8, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_neg16, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_neg32, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_neg64, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ssadd8, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ssadd16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_ssadd32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
@@ -226,6 +220,11 @@ DEF_HELPER_FLAGS_4(gvec_umax16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_umax32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_umax64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_neg8, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_neg16, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_neg32, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_neg64, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(gvec_not, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_and, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_or, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

View File

@@ -367,6 +367,7 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit)
r = true;
}
}
return r;
}

View File

@@ -6,7 +6,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of

View File

@@ -268,6 +268,7 @@ static inline void tcg_gen_op6ii_i64(TCGContext *s, TCGOpcode opc, TCGv_i64 a1,
tcgv_i64_arg(s, a3), tcgv_i64_arg(s, a4), a5, a6);
}
/* Generic ops. */
static inline void gen_set_label(TCGContext *s, TCGLabel *l)
@@ -759,8 +760,7 @@ static inline void tcg_gen_insn_start(TCGContext *tcg_ctx, target_ulong pc)
# else
static inline void tcg_gen_insn_start(TCGContext *tcg_ctx, target_ulong pc)
{
tcg_gen_op2(tcg_ctx, INDEX_op_insn_start,
(uint32_t)pc, (uint32_t)(pc >> 32));
tcg_gen_op2(tcg_ctx, INDEX_op_insn_start, (uint32_t)pc, (uint32_t)(pc >> 32));
}
# endif
#elif TARGET_INSN_START_WORDS == 2
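
In the hunk above, a 64-bit guest pc does not fit in one 32-bit insn_start argument, so tcg_gen_insn_start stores it as a low/high pair. A standalone sketch of the split and its reassembly (emit_insn_start is a hypothetical stand-in for the op emitter):

#include <stdio.h>
#include <stdint.h>

static void emit_insn_start(uint32_t lo, uint32_t hi)
{
    printf("insn_start lo=0x%08x hi=0x%08x\n", lo, hi);
}

int main(void)
{
    uint64_t pc = 0xdeadbeefcafebabeull;
    emit_insn_start((uint32_t)pc, (uint32_t)(pc >> 32)); /* low word, high word */

    /* Reassemble to confirm the split is lossless. */
    uint64_t back = (uint64_t)(uint32_t)pc
                  | ((uint64_t)(uint32_t)(pc >> 32) << 32);
    printf("roundtrip ok: %d\n", back == pc);
    return 0;
}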
@@ -828,7 +828,7 @@ void tcg_gen_exit_tb(TCGContext *s, TranslationBlock *tb, unsigned idx);
void tcg_gen_goto_tb(TCGContext *s, unsigned idx);
/**
* tcg_gen_lookup_and_goto_ptr() - look up a TB and jump to it if valid
* tcg_gen_lookup_and_goto_ptr() - look up the current TB, jump to it if valid
* @addr: Guest address of the target TB
*
* If the TB is not valid, jump to the epilogue.

View File

@@ -162,7 +162,6 @@ DEF(extrh_i64_i32, 1, 1, 0,
IMPL(TCG_TARGET_HAS_extrh_i64_i32)
| (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | IMPL64)
DEF(ext8s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8s_i64))
DEF(ext16s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16s_i64))
@@ -211,6 +210,8 @@ DEF(qemu_ld_i64, DATA64_ARGS, TLADDR_ARGS, 1,
DEF(qemu_st_i64, 0, TLADDR_ARGS + DATA64_ARGS, 1,
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
/* Host vector support. */
#define IMPLVEC TCG_OPF_VECTOR | IMPL(TCG_TARGET_MAYBE_vec)
DEF(mov_vec, 1, 1, 0, TCG_OPF_VECTOR | TCG_OPF_NOT_PRESENT)

View File

@@ -68,8 +68,7 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
/* The CIE and FDE header definitions will be common to all hosts. */
typedef struct {
//uint32_t QEMU_ALIGNED(sizeof(void *), len);
uint32_t QEMU_ALIGNED(8, len);
uint32_t QEMU_ALIGNED(sizeof(void *), len);
uint32_t id;
uint8_t version;
char augmentation[1];
@@ -79,8 +78,7 @@
} DebugFrameCIE;
QEMU_PACK( typedef struct {
// uint32_t QEMU_ALIGNED(sizeof(void *), len);
uint32_t QEMU_ALIGNED(8, len);
uint32_t QEMU_ALIGNED(sizeof(void *), len);
uint32_t cie_offset;
uintptr_t func_start;
uintptr_t func_len;
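
The two reverts above put sizeof(void *) back as the alignment of the .debug_frame length fields. A minimal sketch of what a QEMU_ALIGNED(align, field)-style annotation typically expands to with GCC/Clang (Header is hypothetical; assumes the attribute-based form of the macro):

#include <stdio.h>
#include <stdint.h>

typedef struct {
    uint32_t len __attribute__((aligned(sizeof(void *))));
    uint32_t id;
} Header;

int main(void)
{
    /* On an LP64 host, aligned(8) on 'len' makes the whole struct 8-aligned. */
    printf("sizeof=%zu alignof=%zu\n", sizeof(Header), _Alignof(Header));
    return 0;
}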
@@ -267,7 +265,7 @@ void *tcg_malloc_internal(TCGContext *s, int size)
{
TCGPool *p;
int pool_size;
if (size > TCG_POOL_CHUNK_SIZE) {
/* big malloc: insert a new pool (XXX: could optimize) */
p = g_malloc(sizeof(TCGPool) + size);
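
tcg_malloc_internal above is the slow path of a pool allocator: small requests are normally bump-allocated from the current chunk, while oversized ones (the "big malloc" branch shown) get a dedicated pool node. A toy version of that strategy, with assumed names (Pool, pool_alloc, CHUNK_SIZE) and no freeing, since real pools are released in bulk:

#include <stdio.h>
#include <stdlib.h>

#define CHUNK_SIZE 1024

typedef struct Pool {
    struct Pool *next;
    size_t size;
    size_t used;
    char data[];
} Pool;

static Pool *pool_new(size_t size)
{
    Pool *p = calloc(1, sizeof(Pool) + size);
    p->size = size;
    return p;
}

static void *pool_alloc(Pool **head, size_t size)
{
    if (size > CHUNK_SIZE) {
        /* Big request: dedicated node, like the "big malloc" branch above. */
        Pool *p = pool_new(size);
        p->used = size;
        p->next = *head;
        *head = p;
        return p->data;
    }
    /* Bump-allocate from the current chunk; open a new one when full.
     * (Real code also aligns 'used'; omitted here.) */
    if (!*head || (*head)->used + size > (*head)->size) {
        Pool *p = pool_new(CHUNK_SIZE);
        p->next = *head;
        *head = p;
    }
    void *ret = (*head)->data + (*head)->used;
    (*head)->used += size;
    return ret;
}

int main(void)
{
    Pool *head = NULL;
    char *a = pool_alloc(&head, 64);
    char *b = pool_alloc(&head, 64);     /* same chunk, 64 bytes later */
    void *c = pool_alloc(&head, 4096);   /* big-malloc path */
    printf("delta=%td big=%d\n", b - a, c != NULL);
    return 0;
}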
@@ -3181,7 +3179,7 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
#ifdef CONFIG_PROFILER
static void dump_op_count(void)
void tcg_dump_op_count(void)
{
int i;
@@ -3194,6 +3192,9 @@ static void dump_op_count(void)
int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
{
#ifdef CONFIG_PROFILER
TCGProfile *prof = &s->prof;
#endif
int i, num_insns;
TCGOp *op;
@@ -3204,15 +3205,15 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
QTAILQ_FOREACH(op, &s->ops, link) {
n++;
}
atomic_set(&s->op_count, s->op_count + n);
if (n > s->op_count_max) {
atomic_set(&s->op_count_max, n);
atomic_set(&prof->op_count, prof->op_count + n);
if (n > prof->op_count_max) {
atomic_set(&prof->op_count_max, n);
}
n = s->nb_temps;
atomic_set(&s->temp_count, s->temp_count + n);
if (n > s->temp_count_max) {
atomic_set(&s->temp_count_max, n);
atomic_set(&prof->temp_count, prof->temp_count + n);
if (n > prof->temp_count_max) {
atomic_set(&prof->temp_count_max, n);
}
}
#endif
@@ -3244,7 +3245,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
#endif
#ifdef CONFIG_PROFILER
s->opt_time -= profile_getclock();
atomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
#endif
#ifdef USE_TCG_OPTIMIZATIONS
@@ -3252,8 +3253,8 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
#endif
#ifdef CONFIG_PROFILER
s->opt_time += profile_getclock();
s->la_time -= profile_getclock();
atomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
atomic_set(&prof->la_time, prof->la_time - profile_getclock());
#endif
reachable_code_pass(s);
@@ -3276,14 +3277,14 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
}
#ifdef CONFIG_PROFILER
s->la_time += profile_getclock();
atomic_set(&prof->la_time, prof->la_time + profile_getclock());
#endif
#ifdef DEBUG_DISAS
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT)
&& qemu_log_in_addr_range(tb->pc))) {
qemu_log("OP after optimization and liveness analysis:\n");
tcg_dump_ops(s, false);
tcg_dump_ops(s, true);
qemu_log("\n");
}
#endif
@@ -3305,8 +3306,9 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
TCGOpcode opc = op->opc;
#ifdef CONFIG_PROFILER
tcg_table_op_count[opc]++;
atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
#endif
switch (opc) {
case INDEX_op_mov_i32:
case INDEX_op_mov_i64:
@@ -3385,63 +3387,68 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
}
#ifdef CONFIG_PROFILER
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
void tcg_dump_info(void)
{
#if 0
TCGContext *s = &tcg_ctx;
int64_t tb_count = s->tb_count;
int64_t tb_div_count = tb_count ? tb_count : 1;
int64_t tot = s->interm_time + s->code_time;
TCGProfile prof = {};
const TCGProfile *s;
int64_t tb_count;
int64_t tb_div_count;
int64_t tot;
cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
tcg_profile_snapshot_counters(&prof);
s = &prof;
tb_count = s->tb_count;
tb_div_count = tb_count ? tb_count : 1;
tot = s->interm_time + s->code_time;
qemu_printf("JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
tot, tot / 2.4e9);
cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
qemu_printf("translated TBs %" PRId64 " (aborted=%" PRId64
" %0.1f%%)\n",
tb_count, s->tb_count1 - tb_count,
(double)(s->tb_count1 - s->tb_count)
/ (s->tb_count1 ? s->tb_count1 : 1) * 100.0);
cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
qemu_printf("avg ops/TB %0.1f max=%d\n",
(double)s->op_count / tb_div_count, s->op_count_max);
cpu_fprintf(f, "deleted ops/TB %0.2f\n",
qemu_printf("deleted ops/TB %0.2f\n",
(double)s->del_op_count / tb_div_count);
cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
qemu_printf("avg temps/TB %0.2f max=%d\n",
(double)s->temp_count / tb_div_count, s->temp_count_max);
cpu_fprintf(f, "avg host code/TB %0.1f\n",
qemu_printf("avg host code/TB %0.1f\n",
(double)s->code_out_len / tb_div_count);
cpu_fprintf(f, "avg search data/TB %0.1f\n",
qemu_printf("avg search data/TB %0.1f\n",
(double)s->search_out_len / tb_div_count);
cpu_fprintf(f, "cycles/op %0.1f\n",
qemu_printf("cycles/op %0.1f\n",
s->op_count ? (double)tot / s->op_count : 0);
cpu_fprintf(f, "cycles/in byte %0.1f\n",
qemu_printf("cycles/in byte %0.1f\n",
s->code_in_len ? (double)tot / s->code_in_len : 0);
cpu_fprintf(f, "cycles/out byte %0.1f\n",
qemu_printf("cycles/out byte %0.1f\n",
s->code_out_len ? (double)tot / s->code_out_len : 0);
cpu_fprintf(f, "cycles/search byte %0.1f\n",
qemu_printf("cycles/search byte %0.1f\n",
s->search_out_len ? (double)tot / s->search_out_len : 0);
if (tot == 0) {
tot = 1;
}
cpu_fprintf(f, " gen_interm time %0.1f%%\n",
qemu_printf(" gen_interm time %0.1f%%\n",
(double)s->interm_time / tot * 100.0);
cpu_fprintf(f, " gen_code time %0.1f%%\n",
qemu_printf(" gen_code time %0.1f%%\n",
(double)s->code_time / tot * 100.0);
cpu_fprintf(f, "optim./code time %0.1f%%\n",
qemu_printf("optim./code time %0.1f%%\n",
(double)s->opt_time / (s->code_time ? s->code_time : 1)
* 100.0);
cpu_fprintf(f, "liveness/code time %0.1f%%\n",
qemu_printf("liveness/code time %0.1f%%\n",
(double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
qemu_printf("cpu_restore count %" PRId64 "\n",
s->restore_count);
cpu_fprintf(f, " avg cycles %0.1f\n",
qemu_printf(" avg cycles %0.1f\n",
s->restore_count ? (double)s->restore_time / s->restore_count : 0);
dump_op_count();
#endif
}
#else
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
void tcg_dump_info(void)
{
cpu_fprintf(f, "[TCG profiler not compiled]\n");
// Unicorn: commented out
//qemu_printf("[TCG profiler not compiled]\n");
}
#endif

View File

@@ -349,12 +349,11 @@ typedef enum TCGMemOp {
*
* MO_ALIGN supposes the alignment size is the size of a memory access.
*
* than an access size.
* There are three options:
* - unaligned access permitted (MO_UNALN).
* - an alignment to the size of an access (MO_ALIGN);
* - an alignment to a specified size, which may be more or less than
* the access size (MO_ALIGN_x where 'x' is a size in bytes);
* - unaligned access permitted (MO_UNALN).
*/
MO_ASHIFT = 4,
MO_AMASK = 7 << MO_ASHIFT,
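
The three options in the comment above are packed into the memop value through this MO_ASHIFT/MO_AMASK field. A hedged sketch of the encoding and a decoder, assuming (as in qemu's scheme) that a field value of n means 2^n-byte alignment; the MO_ALIGN_x constants below are written out to mirror the excerpt, not copied from it:

#include <stdio.h>

enum {
    MO_ASHIFT  = 4,
    MO_AMASK   = 7 << MO_ASHIFT,
    MO_UNALN   = 0 << MO_ASHIFT,   /* no alignment requirement */
    MO_ALIGN_2 = 1 << MO_ASHIFT,
    MO_ALIGN_4 = 2 << MO_ASHIFT,
    MO_ALIGN_8 = 3 << MO_ASHIFT,
};

static unsigned alignment_bytes(int memop)
{
    unsigned a = (memop & MO_AMASK) >> MO_ASHIFT;
    return a ? 1u << a : 1;   /* field n => 2^n bytes; 0 => unaligned ok */
}

int main(void)
{
    printf("%u %u %u\n",
           alignment_bytes(MO_UNALN),     /* 1 */
           alignment_bytes(MO_ALIGN_4),   /* 4 */
           alignment_bytes(MO_ALIGN_8));  /* 8 */
    return 0;
}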
@@ -591,8 +590,8 @@ typedef struct TCGTemp {
unsigned int temp_allocated:1;
tcg_target_long val;
intptr_t mem_offset;
struct TCGTemp *mem_base;
intptr_t mem_offset;
const char *name;
/* Pass-specific information that can be stored for a temporary.
@@ -621,7 +620,6 @@ typedef uint16_t TCGLifeData;
typedef struct TCGOp {
TCGOpcode opc : 8; /* 8 */
/* The number of out and in parameter for a call. */
/* Parameters for this opcode. See below. */
unsigned param1 : 4; /* 12 */
unsigned param2 : 4; /* 16 */
@@ -648,6 +646,27 @@ typedef struct TCGOp {
/* Make sure operands fit in the bitfields above. */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
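
QEMU_BUILD_BUG_ON above is a compile-time guard: opc is an 8-bit bitfield, so the opcode count must never exceed 1 << 8. The same check written with C11 _Static_assert (FakeOp and FAKE_NB_OPS are hypothetical):

enum { FAKE_NB_OPS = 200 };   /* hypothetical opcode count */

typedef struct FakeOp {
    unsigned opc : 8;         /* mirrors TCGOp.opc */
} FakeOp;

_Static_assert(FAKE_NB_OPS <= (1 << 8),
               "opcode enum no longer fits in the 8-bit opc field");

int main(void) { return 0; }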
typedef struct TCGProfile {
int64_t cpu_exec_time;
int64_t tb_count1;
int64_t tb_count;
int64_t op_count; /* total insn count */
int op_count_max; /* max insn per TB */
int temp_count_max;
int64_t temp_count;
int64_t del_op_count;
int64_t code_in_len;
int64_t code_out_len;
int64_t search_out_len;
int64_t interm_time;
int64_t code_time;
int64_t la_time;
int64_t opt_time;
int64_t restore_count;
int64_t restore_time;
int64_t table_op_count[NB_OPS];
} TCGProfile;
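
The new TCGProfile above collects the counters that tcg_gen_code now updates through atomic_set on prof->..., letting a reader snapshot them safely (tcg_profile_snapshot_counters in the dump path). A simplified, illustrative version of that update pattern (Profile, account_tb and the relaxed-atomic wrappers are assumptions, not qemu's definitions):

#include <stdio.h>
#include <stdint.h>

#define atomic_set(ptr, val) __atomic_store_n(ptr, val, __ATOMIC_RELAXED)
#define atomic_read(ptr)     __atomic_load_n(ptr, __ATOMIC_RELAXED)

typedef struct Profile {
    int64_t op_count;      /* total ops translated */
    int     op_count_max;  /* largest single TB */
} Profile;

static void account_tb(Profile *prof, int n_ops)
{
    /* Same shape as the prof->op_count updates in the hunks above. */
    atomic_set(&prof->op_count, atomic_read(&prof->op_count) + n_ops);
    if (n_ops > atomic_read(&prof->op_count_max)) {
        atomic_set(&prof->op_count_max, n_ops);
    }
}

int main(void)
{
    Profile prof = {0};
    account_tb(&prof, 12);
    account_tb(&prof, 30);
    printf("ops=%lld max=%d\n",
           (long long)atomic_read(&prof.op_count),
           atomic_read(&prof.op_count_max));
    return 0;
}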
/* pool based memory allocation */
/* tb_lock must be held for tcg_malloc_internal. */
@@ -677,7 +696,7 @@ int tcg_check_temp_count(void);
#define tcg_check_temp_count() 0
#endif
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf);
void tcg_dump_info(void);
#define TCG_CT_ALIAS 0x80
#define TCG_CT_IALIAS 0x40
@@ -760,23 +779,7 @@ struct TCGContext {
GHashTable *helpers;
#ifdef CONFIG_PROFILER
/* profiling info */
int64_t tb_count1;
int64_t tb_count;
int64_t op_count; /* total insn count */
int op_count_max; /* max insn per TB */
int temp_count_max;
int64_t temp_count;
int64_t del_op_count;
int64_t code_in_len;
int64_t code_out_len;
int64_t search_out_len;
int64_t interm_time;
int64_t code_time;
int64_t la_time;
int64_t opt_time;
int64_t restore_count;
int64_t restore_time;
TCGProfile prof;
#endif
#ifdef CONFIG_DEBUG_TCG
@@ -1369,9 +1372,9 @@ static inline unsigned get_mmuidx(TCGMemOpIdx oi)
* returned is the TB we were about to execute, and the caller must
* arrange to execute the remaining count of instructions.
* 3: we stopped because the CPU's exit_request flag was set
* (usually meaning that there is an interrupt that needs to be
* handled). The pointer returned is the TB we were about to execute
* when we noticed the pending exit request.
* about to execute when we noticed the pending exit request.
*
* If the bottom two bits indicate an exit-via-index then the CPU
* state is correctly synchronised and ready for execution of the next
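
The "bottom two bits" convention described above works because TB pointers are at least 4-byte aligned, leaving the two low bits free to carry an exit code. A minimal sketch of that pointer-tagging scheme (tag_exit is made up; qemu keeps the real mask and index values in its TCG headers):

#include <stdio.h>
#include <stdint.h>

#define TB_EXIT_MASK 3   /* low two bits of the returned value */

static uintptr_t tag_exit(void *tb, unsigned idx)
{
    return (uintptr_t)tb | (idx & TB_EXIT_MASK);
}

int main(void)
{
    static _Alignas(4) char tb_code;         /* stand-in for a TB */
    uintptr_t ret = tag_exit(&tb_code, 3);   /* 3: pending exit request */

    printf("tb=%p idx=%u\n",
           (void *)(ret & ~(uintptr_t)TB_EXIT_MASK),
           (unsigned)(ret & TB_EXIT_MASK));
    return 0;
}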
@@ -1491,67 +1494,6 @@ uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
uint64_t cmpv, uint64_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
uint64_t cmpv, uint64_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu \
(CPUArchState *env, target_ulong addr, TYPE val, \
TCGMemOpIdx oi, uintptr_t retaddr);
#define GEN_ATOMIC_HELPER_ALL(NAME) \
GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)
GEN_ATOMIC_HELPER_ALL(fetch_smin)
GEN_ATOMIC_HELPER_ALL(fetch_umin)
GEN_ATOMIC_HELPER_ALL(fetch_smax)
GEN_ATOMIC_HELPER_ALL(fetch_umax)
GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)
GEN_ATOMIC_HELPER_ALL(smin_fetch)
GEN_ATOMIC_HELPER_ALL(umin_fetch)
GEN_ATOMIC_HELPER_ALL(smax_fetch)
GEN_ATOMIC_HELPER_ALL(umax_fetch)
GEN_ATOMIC_HELPER_ALL(xchg)
#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER
/* Temporary aliases until backends are converted. */
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_ldsw_mmu helper_be_ldsw_mmu
@@ -1581,6 +1523,75 @@ GEN_ATOMIC_HELPER_ALL(xchg)
# define helper_ret_ldq_cmmu helper_le_ldq_cmmu
#endif
uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
uint64_t cmpv, uint64_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
uint32_t cmpv, uint32_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
uint64_t cmpv, uint64_t newv,
TCGMemOpIdx oi, uintptr_t retaddr);
#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \
TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu \
(CPUArchState *env, target_ulong addr, TYPE val, \
TCGMemOpIdx oi, uintptr_t retaddr);
#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPER_ALL(NAME) \
GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \
GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \
GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)
#else
#define GEN_ATOMIC_HELPER_ALL(NAME) \
GEN_ATOMIC_HELPER(NAME, uint32_t, b) \
GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \
GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \
GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \
GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)
#endif
GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)
GEN_ATOMIC_HELPER_ALL(fetch_smin)
GEN_ATOMIC_HELPER_ALL(fetch_umin)
GEN_ATOMIC_HELPER_ALL(fetch_smax)
GEN_ATOMIC_HELPER_ALL(fetch_umax)
GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)
GEN_ATOMIC_HELPER_ALL(smin_fetch)
GEN_ATOMIC_HELPER_ALL(umin_fetch)
GEN_ATOMIC_HELPER_ALL(smax_fetch)
GEN_ATOMIC_HELPER_ALL(umax_fetch)
GEN_ATOMIC_HELPER_ALL(xchg)
#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER
#endif /* CONFIG_SOFTMMU */
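
GEN_ATOMIC_HELPER_ALL above is a two-level macro list: the outer macro enumerates the (type, width/endianness-suffix) rows, dropping the q_le/q_be rows when CONFIG_ATOMIC64 is absent, and the inner GEN_ATOMIC_HELPER stamps out one declaration per row. A condensed, runnable sketch of that expansion; the helper bodies and names below are invented, since qemu's pattern only declares the functions:

#include <stdio.h>
#include <stdint.h>

#define GEN_HELPER(NAME, TYPE, SUFFIX)                        \
    static TYPE helper_##NAME##_##SUFFIX(TYPE *p, TYPE v)     \
    {                                                         \
        TYPE old = *p;                                        \
        *p = old + v;   /* "fetch_add" body for the sketch */ \
        return old;                                           \
    }

#define GEN_HELPER_ALL(NAME)       \
    GEN_HELPER(NAME, uint32_t, l)  \
    GEN_HELPER(NAME, uint64_t, q)

GEN_HELPER_ALL(fetch_add)  /* expands to helper_fetch_add_l and helper_fetch_add_q */

int main(void)
{
    uint32_t x = 5;
    uint32_t old = helper_fetch_add_l(&x, 3);
    printf("old=%u new=%u\n", old, x);   /* old=5 new=8 */
    return 0;
}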
/*