target/arm: Expand read/write_neon_element32 to all MemOp

We can then use this to improve VMOV (scalar to gp) and
VMOV (gp to scalar) so that we simply perform the memory
operation that we wanted, rather than inserting or
extracting from a 32-bit quantity.

These were the last uses of neon_load/store_reg, so remove them.

Backports 4d5fa5a80ac28f34b8497be1e85371272413a12e
This commit is contained in:
Richard Henderson 2021-03-02 12:26:38 -05:00 committed by Lioncash
parent d21316d639
commit 011d9ab061
2 changed files with 37 additions and 87 deletions

View File

@ -518,11 +518,9 @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
/* VMOV scalar to general purpose register */
TCGContext *tcg_ctx = s->uc->tcg_ctx;
TCGv_i32 tmp;
int pass;
uint32_t offset;
/* SIZE == 2 is a VFP instruction; otherwise NEON. */
if (a->size == 2
/* SIZE == MO_32 is a VFP instruction; otherwise NEON. */
if (a->size == MO_32
? !dc_isar_feature(aa32_fpsp_v2, s)
: !arm_dc_feature(s, ARM_FEATURE_NEON)) {
return false;
@ -533,44 +531,12 @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
return false;
}
offset = a->index << a->size;
pass = extract32(offset, 2, 1);
offset = extract32(offset, 0, 2) * 8;
if (!vfp_access_check(s)) {
return true;
}
tmp = neon_load_reg(s, a->vn, pass);
switch (a->size) {
case 0:
if (offset) {
tcg_gen_shri_i32(tcg_ctx, tmp, tmp, offset);
}
if (a->u) {
gen_uxtb(tmp);
} else {
gen_sxtb(tmp);
}
break;
case 1:
if (a->u) {
if (offset) {
tcg_gen_shri_i32(tcg_ctx, tmp, tmp, 16);
} else {
gen_uxth(tmp);
}
} else {
if (offset) {
tcg_gen_sari_i32(tcg_ctx, tmp, tmp, 16);
} else {
gen_sxth(tmp);
}
}
break;
case 2:
break;
}
tmp = tcg_temp_new_i32(tcg_ctx);
read_neon_element32(s, tmp, a->vn, a->index, a->size | (a->u ? 0 : MO_SIGN));
store_reg(s, a->rt, tmp);
return true;
@ -580,12 +546,10 @@ static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
{
/* VMOV general purpose register to scalar */
TCGContext *tcg_ctx = s->uc->tcg_ctx;
TCGv_i32 tmp, tmp2;
int pass;
uint32_t offset;
TCGv_i32 tmp;
/* SIZE == 2 is a VFP instruction; otherwise NEON. */
if (a->size == 2
/* SIZE == MO_32 is a VFP instruction; otherwise NEON. */
if (a->size == MO_32
? !dc_isar_feature(aa32_fpsp_v2, s)
: !arm_dc_feature(s, ARM_FEATURE_NEON)) {
return false;
@ -596,30 +560,13 @@ static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
return false;
}
offset = a->index << a->size;
pass = extract32(offset, 2, 1);
offset = extract32(offset, 0, 2) * 8;
if (!vfp_access_check(s)) {
return true;
}
tmp = load_reg(s, a->rt);
switch (a->size) {
case 0:
tmp2 = neon_load_reg(s, a->vn, pass);
tcg_gen_deposit_i32(tcg_ctx, tmp, tmp2, tmp, offset, 8);
tcg_temp_free_i32(tcg_ctx, tmp2);
break;
case 1:
tmp2 = neon_load_reg(s, a->vn, pass);
tcg_gen_deposit_i32(tcg_ctx, tmp, tmp2, tmp, offset, 16);
tcg_temp_free_i32(tcg_ctx, tmp2);
break;
case 2:
break;
}
neon_store_reg(s, a->vn, pass, tmp);
write_neon_element32(s, tmp, a->vn, a->index, a->size);
tcg_temp_free_i32(tcg_ctx, tmp);
return true;
}

View File

@ -1150,10 +1150,9 @@ static long neon_full_reg_offset(unsigned reg)
* Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
* where 0 is the least significant end of the register.
*/
static inline long
neon_element_offset(int reg, int element, MemOp size)
static long neon_element_offset(int reg, int element, MemOp memop)
{
int element_size = 1 << size;
int element_size = 1 << (memop & MO_SIZE);
int ofs = element * element_size;
#ifdef HOST_WORDS_BIGENDIAN
/*
@ -1177,21 +1176,6 @@ static long vfp_reg_offset(bool dp, unsigned reg)
}
}
/*
 * Load one 32-bit pass of NEON register REG into a fresh i32 temp.
 * Ownership of the returned temp passes to the caller, who must free it.
 */
static TCGv_i32 neon_load_reg(DisasContext *s, int reg, int pass)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    long off = neon_element_offset(reg, pass, MO_32);
    TCGv_i32 ret = tcg_temp_new_i32(tcg_ctx);

    tcg_gen_ld_i32(tcg_ctx, ret, tcg_ctx->cpu_env, off);
    return ret;
}
/*
 * Store the i32 temp VAR into one 32-bit pass of NEON register REG.
 * Consumes VAR: the temp is freed here, so the caller must not reuse it.
 */
static void neon_store_reg(DisasContext *s, int reg, int pass, TCGv_i32 var)
{
    TCGContext *tcg_ctx = s->uc->tcg_ctx;
    long off = neon_element_offset(reg, pass, MO_32);

    tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, off);
    tcg_temp_free_i32(tcg_ctx, var);
}
static inline void neon_load_reg64(DisasContext *s, TCGv_i64 var, int reg)
{
TCGContext *tcg_ctx = s->uc->tcg_ctx;
@ -1216,13 +1200,26 @@ static inline void neon_store_reg32(DisasContext *s, TCGv_i32 var, int reg)
tcg_gen_st_i32(tcg_ctx, var, tcg_ctx->cpu_env, vfp_reg_offset(false, reg));
}
static void read_neon_element32(DisasContext *s, TCGv_i32 dest, int reg, int ele, MemOp size)
static void read_neon_element32(DisasContext *s, TCGv_i32 dest, int reg, int ele, MemOp memop)
{
TCGContext *tcg_ctx = s->uc->tcg_ctx;
long off = neon_element_offset(reg, ele, size);
long off = neon_element_offset(reg, ele, memop);
switch (size) {
case MO_32:
switch (memop) {
case MO_SB:
tcg_gen_ld8s_i32(tcg_ctx, dest, tcg_ctx->cpu_env, off);
break;
case MO_UB:
tcg_gen_ld8u_i32(tcg_ctx, dest, tcg_ctx->cpu_env, off);
break;
case MO_SW:
tcg_gen_ld16s_i32(tcg_ctx, dest, tcg_ctx->cpu_env, off);
break;
case MO_UW:
tcg_gen_ld16u_i32(tcg_ctx, dest, tcg_ctx->cpu_env, off);
break;
case MO_UL:
case MO_SL:
tcg_gen_ld_i32(tcg_ctx, dest, tcg_ctx->cpu_env, off);
break;
default:
@ -1230,12 +1227,18 @@ static void read_neon_element32(DisasContext *s, TCGv_i32 dest, int reg, int ele
}
}
static void write_neon_element32(DisasContext *s, TCGv_i32 src, int reg, int ele, MemOp size)
static void write_neon_element32(DisasContext *s, TCGv_i32 src, int reg, int ele, MemOp memop)
{
TCGContext *tcg_ctx = s->uc->tcg_ctx;
long off = neon_element_offset(reg, ele, size);
long off = neon_element_offset(reg, ele, memop);
switch (size) {
switch (memop) {
case MO_8:
tcg_gen_st8_i32(tcg_ctx, src, tcg_ctx->cpu_env, off);
break;
case MO_16:
tcg_gen_st16_i32(tcg_ctx, src, tcg_ctx->cpu_env, off);
break;
case MO_32:
tcg_gen_st_i32(tcg_ctx, src, tcg_ctx->cpu_env, off);
break;