target/arm: Convert Neon 3-reg-diff narrowing ops to decodetree

Convert the narrow-to-high-half insns VADDHN, VSUBHN, VRADDHN,
VRSUBHN in the Neon 3-registers-different-lengths group to
decodetree.

Backports commit 0fa1ab0302badabc3581aefcbb2f189ef52c4985 from qemu
Peter Maydell 2020-06-16 23:36:15 -04:00 committed by Lioncash
parent d25998ba7d
commit 34418f1998
3 changed files with 106 additions and 81 deletions
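
As background for the diffs below: each of these insns adds or subtracts two vectors of double-width elements and keeps only the high half of every result element, with the VR* variants rounding before truncation. The stand-alone C sketch below is purely illustrative (it is not part of the commit, and the helper names are invented); it shows the intended arithmetic for the 64-bit-element case, which is what the new gen_narrow_round_high_u32() in this patch implements directly.

#include <stdint.h>

/* VADDHN for one 64-bit lane: keep the high 32 bits of the sum. */
static uint32_t addhn_lane_u64(uint64_t n, uint64_t m)
{
    return (uint32_t)((n + m) >> 32);
}

/* VRADDHN for one 64-bit lane: add half the weight of the lowest retained
 * bit (1 << 31) before narrowing, mirroring the tcg_gen_addi_i64() /
 * tcg_gen_extrh_i64_i32() sequence added by this patch. */
static uint32_t raddhn_lane_u64(uint64_t n, uint64_t m)
{
    return (uint32_t)((n + m + (1ULL << 31)) >> 32);
}

VSUBHN and VRSUBHN behave the same way except that the initial operation is a subtraction.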

@@ -438,5 +438,11 @@ Vimm_1r      1111 001 . 1 . 000 ... .... cmode:4 0 . op:1 1 .... @1reg_imm
 VSUBW_S_3d   1111 001 0 1 . .. .... .... 0011 . 0 . 0 .... @3diff
 VSUBW_U_3d   1111 001 1 1 . .. .... .... 0011 . 0 . 0 .... @3diff
 
+VADDHN_3d    1111 001 0 1 . .. .... .... 0100 . 0 . 0 .... @3diff
+VRADDHN_3d   1111 001 1 1 . .. .... .... 0100 . 0 . 0 .... @3diff
+
+VSUBHN_3d    1111 001 0 1 . .. .... .... 0110 . 0 . 0 .... @3diff
+VRSUBHN_3d   1111 001 1 1 . .. .... .... 0110 . 0 . 0 .... @3diff
+
   ]
 }
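The patterns above reuse the existing @3diff format, so the generated decoder hands each trans_*_3d function in the translator (next diff) an arg_3diff carrying the register numbers and element size. As a rough sketch of that generated interface (the struct decodetree emits is not shown in this commit, so treat the layout and types as an assumption; only fields actually used below are listed):

/* Hypothetical sketch of the decodetree-generated argument struct. */
typedef struct {
    int vd;     /* destination D register */
    int vn;     /* first source; a pair of D registers, so must be even */
    int vm;     /* second source; a pair of D registers, so must be even */
    int size;   /* element size; 3 selects a different insn group */
} arg_3diff;

/* The generated decoder then calls hooks such as: */
bool trans_VADDHN_3d(DisasContext *s, arg_3diff *a);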


@@ -1859,6 +1859,7 @@ static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
                            bool src1_wide)
 {
     /* 3-regs different lengths, prewidening case (VADDL/VSUBL/VAADW/VSUBW) */
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
     TCGv_i64 rn0_64, rn1_64, rm_64;
     TCGv_i32 rm;
@@ -1956,3 +1957,91 @@ DO_PREWIDEN(VADDW_S, s, ext, add, true)
 DO_PREWIDEN(VADDW_U, u, extu, add, true)
 DO_PREWIDEN(VSUBW_S, s, ext, sub, true)
 DO_PREWIDEN(VSUBW_U, u, extu, sub, true)
+
+static bool do_narrow_3d(DisasContext *s, arg_3diff *a,
+                         NeonGenTwo64OpFn *opfn, NeonGenNarrowFn *narrowfn)
+{
+    /* 3-regs different lengths, narrowing (VADDHN/VSUBHN/VRADDHN/VRSUBHN) */
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv_i64 rn_64, rm_64;
+    TCGv_i32 rd0, rd1;
+
+    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_simd_r32, s) &&
+        ((a->vd | a->vn | a->vm) & 0x10)) {
+        return false;
+    }
+
+    if (!opfn || !narrowfn) {
+        /* size == 3 case, which is an entirely different insn group */
+        return false;
+    }
+
+    if ((a->vn | a->vm) & 1) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    rn_64 = tcg_temp_new_i64(tcg_ctx);
+    rm_64 = tcg_temp_new_i64(tcg_ctx);
+    rd0 = tcg_temp_new_i32(tcg_ctx);
+    rd1 = tcg_temp_new_i32(tcg_ctx);
+
+    neon_load_reg64(s, rn_64, a->vn);
+    neon_load_reg64(s, rm_64, a->vm);
+    opfn(tcg_ctx, rn_64, rn_64, rm_64);
+    narrowfn(tcg_ctx, rd0, rn_64);
+
+    neon_load_reg64(s, rn_64, a->vn + 1);
+    neon_load_reg64(s, rm_64, a->vm + 1);
+    opfn(tcg_ctx, rn_64, rn_64, rm_64);
+    narrowfn(tcg_ctx, rd1, rn_64);
+
+    neon_store_reg(s, a->vd, 0, rd0);
+    neon_store_reg(s, a->vd, 1, rd1);
+
+    tcg_temp_free_i64(tcg_ctx, rn_64);
+    tcg_temp_free_i64(tcg_ctx, rm_64);
+
+    return true;
+}
+
+#define DO_NARROW_3D(INSN, OP, NARROWTYPE, EXTOP)                       \
+    static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a)        \
+    {                                                                   \
+        static NeonGenTwo64OpFn * const addfn[] = {                     \
+            gen_helper_neon_##OP##l_u16,                                \
+            gen_helper_neon_##OP##l_u32,                                \
+            tcg_gen_##OP##_i64,                                         \
+            NULL,                                                       \
+        };                                                              \
+        static NeonGenNarrowFn * const narrowfn[] = {                   \
+            gen_helper_neon_##NARROWTYPE##_high_u8,                     \
+            gen_helper_neon_##NARROWTYPE##_high_u16,                    \
+            EXTOP,                                                      \
+            NULL,                                                       \
+        };                                                              \
+        return do_narrow_3d(s, a, addfn[a->size], narrowfn[a->size]);   \
+    }
+
+static void gen_narrow_round_high_u32(TCGContext *s, TCGv_i32 rd, TCGv_i64 rn)
+{
+    tcg_gen_addi_i64(s, rn, rn, 1u << 31);
+    tcg_gen_extrh_i64_i32(s, rd, rn);
+}
+
+DO_NARROW_3D(VADDHN, add, narrow, tcg_gen_extrh_i64_i32)
+DO_NARROW_3D(VSUBHN, sub, narrow, tcg_gen_extrh_i64_i32)
+DO_NARROW_3D(VRADDHN, add, narrow_round, gen_narrow_round_high_u32)
+DO_NARROW_3D(VRSUBHN, sub, narrow_round, gen_narrow_round_high_u32)
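
To see what the macro produces, here is DO_NARROW_3D(VRADDHN, add, narrow_round, gen_narrow_round_high_u32) expanded by hand; whitespace aside, this is the same code the preprocessor generates from the hunk above, not an extra addition to the commit:

static bool trans_VRADDHN_3d(DisasContext *s, arg_3diff *a)
{
    static NeonGenTwo64OpFn * const addfn[] = {
        gen_helper_neon_addl_u16,   /* size 0: 16-bit lanes */
        gen_helper_neon_addl_u32,   /* size 1: 32-bit lanes */
        tcg_gen_add_i64,            /* size 2: one 64-bit add */
        NULL,                       /* size 3: different insn group */
    };
    static NeonGenNarrowFn * const narrowfn[] = {
        gen_helper_neon_narrow_round_high_u8,
        gen_helper_neon_narrow_round_high_u16,
        gen_narrow_round_high_u32,
        NULL,
    };
    return do_narrow_3d(s, a, addfn[a->size], narrowfn[a->size]);
}

The NULL entries feed the !opfn || !narrowfn check in do_narrow_3d(), which bounces the size == 3 encodings to their own decode group.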


@@ -3353,17 +3353,6 @@ static inline void gen_neon_addl(DisasContext *s, int size)
     }
 }
 
-static inline void gen_neon_subl(DisasContext *s, int size)
-{
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
-    switch (size) {
-    case 0: gen_helper_neon_subl_u16(tcg_ctx, CPU_V001); break;
-    case 1: gen_helper_neon_subl_u32(tcg_ctx, CPU_V001); break;
-    case 2: tcg_gen_sub_i64(tcg_ctx, CPU_V001); break;
-    default: abort();
-    }
-}
-
 static inline void gen_neon_negl(DisasContext *s, TCGv_i64 var, int size)
 {
     TCGContext *tcg_ctx = s->uc->tcg_ctx;
@@ -5364,8 +5353,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
         op = (insn >> 8) & 0xf;
         if ((insn & (1 << 6)) == 0) {
             /* Three registers of different lengths. */
-            int src1_wide;
-            int src2_wide;
             /* undefreq: bit 0 : UNDEF if size == 0
              * bit 1 : UNDEF if size == 1
              * bit 2 : UNDEF if size == 2
@@ -5379,9 +5366,9 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 {0, 0, 0, 7}, /* VADDW: handled by decodetree */
                 {0, 0, 0, 7}, /* VSUBL: handled by decodetree */
                 {0, 0, 0, 7}, /* VSUBW: handled by decodetree */
-                {0, 1, 1, 0}, /* VADDHN */
+                {0, 0, 0, 7}, /* VADDHN: handled by decodetree */
                 {0, 0, 0, 0}, /* VABAL */
-                {0, 1, 1, 0}, /* VSUBHN */
+                {0, 0, 0, 7}, /* VSUBHN: handled by decodetree */
                 {0, 0, 0, 0}, /* VABDL */
                 {0, 0, 0, 0}, /* VMLAL */
                 {0, 0, 0, 9}, /* VQDMLAL */
@@ -5393,17 +5380,13 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 {0, 0, 0, 7}, /* Reserved: always UNDEF */
             };
 
-            src1_wide = neon_3reg_wide[op][1];
-            src2_wide = neon_3reg_wide[op][2];
             undefreq = neon_3reg_wide[op][3];
 
             if ((undefreq & (1 << size)) ||
                 ((undefreq & 8) && u)) {
                 return 1;
             }
-            if ((src1_wide && (rn & 1)) ||
-                (src2_wide && (rm & 1)) ||
-                (!src2_wide && (rd & 1))) {
+            if (rd & 1) {
                 return 1;
             }
@@ -5427,42 +5410,26 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
             /* Avoid overlapping operands. Wide source operands are
                always aligned so will never overlap with wide
                destinations in problematic ways. */
-            if (rd == rm && !src2_wide) {
+            if (rd == rm) {
                 tmp = neon_load_reg(s, rm, 1);
                 neon_store_scratch(s, 2, tmp);
-            } else if (rd == rn && !src1_wide) {
+            } else if (rd == rn) {
                 tmp = neon_load_reg(s, rn, 1);
                 neon_store_scratch(s, 2, tmp);
             }
             tmp3 = NULL;
             for (pass = 0; pass < 2; pass++) {
-                if (src1_wide) {
-                    neon_load_reg64(s, s->V0, rn + pass);
-                    tmp = NULL;
+                if (pass == 1 && rd == rn) {
+                    tmp = neon_load_scratch(s, 2);
                 } else {
-                    if (pass == 1 && rd == rn) {
-                        tmp = neon_load_scratch(s, 2);
-                    } else {
-                        tmp = neon_load_reg(s, rn, pass);
-                    }
+                    tmp = neon_load_reg(s, rn, pass);
                 }
-                if (src2_wide) {
-                    neon_load_reg64(s, s->V1, rm + pass);
-                    tmp2 = NULL;
+                if (pass == 1 && rd == rm) {
+                    tmp2 = neon_load_scratch(s, 2);
                 } else {
-                    if (pass == 1 && rd == rm) {
-                        tmp2 = neon_load_scratch(s, 2);
-                    } else {
-                        tmp2 = neon_load_reg(s, rm, pass);
-                    }
+                    tmp2 = neon_load_reg(s, rm, pass);
                 }
                 switch (op) {
-                case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
-                    gen_neon_addl(s, size);
-                    break;
-                case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
-                    gen_neon_subl(s, size);
-                    break;
                 case 5: case 7: /* VABAL, VABDL */
                     switch ((size << 1) | u) {
                     case 0:
@@ -5520,43 +5487,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                         abort();
                     }
                     neon_store_reg64(s, s->V0, rd + pass);
-                } else if (op == 4 || op == 6) {
-                    /* Narrowing operation. */
-                    tmp = tcg_temp_new_i32(tcg_ctx);
-                    if (!u) {
-                        switch (size) {
-                        case 0:
-                            gen_helper_neon_narrow_high_u8(tcg_ctx, tmp, s->V0);
-                            break;
-                        case 1:
-                            gen_helper_neon_narrow_high_u16(tcg_ctx, tmp, s->V0);
-                            break;
-                        case 2:
-                            tcg_gen_extrh_i64_i32(tcg_ctx, tmp, s->V0);
-                            break;
-                        default: abort();
-                        }
-                    } else {
-                        switch (size) {
-                        case 0:
-                            gen_helper_neon_narrow_round_high_u8(tcg_ctx, tmp, s->V0);
-                            break;
-                        case 1:
-                            gen_helper_neon_narrow_round_high_u16(tcg_ctx, tmp, s->V0);
-                            break;
-                        case 2:
-                            tcg_gen_addi_i64(tcg_ctx, s->V0, s->V0, 1u << 31);
-                            tcg_gen_extrh_i64_i32(tcg_ctx, tmp, s->V0);
-                            break;
-                        default: abort();
-                        }
-                    }
-                    if (pass == 0) {
-                        tmp3 = tmp;
-                    } else {
-                        neon_store_reg(s, rd, 0, tmp3);
-                        neon_store_reg(s, rd, 1, tmp);
-                    }
                 } else {
                     /* Write back the result. */
                     neon_store_reg64(s, s->V0, rd + pass);