target/arm: Convert Neon 3-reg-diff VABAL, VABDL to decodetree

Convert the Neon 3-reg-diff insns VABAL and VABDL to decodetree.
Like almost all the remaining insns in this group, these combine a
two-input operation that returns a double-width result with an
optional accumulation of that double-width result into the
destination.
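
The per-element semantics being implemented are a widening absolute
difference, with VABAL additionally accumulating into the double-width
destination. As a rough orientation only, a scalar model of the unsigned
16-bit case might look like the following (abdl_u16 is a hypothetical
name; the actual translation uses the gen_helper_neon_abdl_* helpers
shown in the diff below):

#include <stdint.h>

/* Widening absolute difference of one 16-bit lane pair. */
static uint32_t abdl_u16(uint16_t n, uint16_t m)
{
    return n >= m ? (uint32_t)(n - m) : (uint32_t)(m - n);
}

/* VABDL.U16: d[i]  = abdl_u16(n[i], m[i])   (double-width result)
 * VABAL.U16: d[i] += abdl_u16(n[i], m[i])   (accumulate into the
 *                                            double-width destination) */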

Backports commit f5b28401200ec95ba89552df3ecdcdc342f6b90b from qemu
Peter Maydell, 2020-06-16 23:41:18 -04:00; committed by Lioncash
parent 34418f1998
commit 21044a1d11
4 changed files with 143 additions and 28 deletions

@@ -442,7 +442,13 @@ Vimm_1r 1111 001 . 1 . 000 ... .... cmode:4 0 . op:1 1 .... @1reg_imm
 VADDHN_3d    1111 001 0 1 . .. .... .... 0100 . 0 . 0 .... @3diff
 VRADDHN_3d   1111 001 1 1 . .. .... .... 0100 . 0 . 0 .... @3diff
+VABAL_S_3d   1111 001 0 1 . .. .... .... 0101 . 0 . 0 .... @3diff
+VABAL_U_3d   1111 001 1 1 . .. .... .... 0101 . 0 . 0 .... @3diff
 VSUBHN_3d    1111 001 0 1 . .. .... .... 0110 . 0 . 0 .... @3diff
 VRSUBHN_3d   1111 001 1 1 . .. .... .... 0110 . 0 . 0 .... @3diff
+VABDL_S_3d   1111 001 0 1 . .. .... .... 0111 . 0 . 0 .... @3diff
+VABDL_U_3d   1111 001 1 1 . .. .... .... 0111 . 0 . 0 .... @3diff
 ]
 }

@@ -2045,3 +2045,136 @@ DO_NARROW_3D(VADDHN, add, narrow, tcg_gen_extrh_i64_i32)
 DO_NARROW_3D(VSUBHN, sub, narrow, tcg_gen_extrh_i64_i32)
 DO_NARROW_3D(VRADDHN, add, narrow_round, gen_narrow_round_high_u32)
 DO_NARROW_3D(VRSUBHN, sub, narrow_round, gen_narrow_round_high_u32)
+
+static bool do_long_3d(DisasContext *s, arg_3diff *a,
+                       NeonGenTwoOpWidenFn *opfn,
+                       NeonGenTwo64OpFn *accfn)
+{
+    /*
+     * 3-regs different lengths, long operations.
+     * These perform an operation on two inputs that returns a double-width
+     * result, and then possibly perform an accumulation operation of
+     * that result into the double-width destination.
+     */
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    TCGv_i64 rd0, rd1, tmp;
+    TCGv_i32 rn, rm;
+
+    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+        return false;
+    }
+
+    /* UNDEF accesses to D16-D31 if they don't exist. */
+    if (!dc_isar_feature(aa32_simd_r32, s) &&
+        ((a->vd | a->vn | a->vm) & 0x10)) {
+        return false;
+    }
+
+    if (!opfn) {
+        /* size == 3 case, which is an entirely different insn group */
+        return false;
+    }
+
+    if (a->vd & 1) {
+        return false;
+    }
+
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    rd0 = tcg_temp_new_i64(tcg_ctx);
+    rd1 = tcg_temp_new_i64(tcg_ctx);
+
+    rn = neon_load_reg(s, a->vn, 0);
+    rm = neon_load_reg(s, a->vm, 0);
+    opfn(tcg_ctx, rd0, rn, rm);
+    tcg_temp_free_i32(tcg_ctx, rn);
+    tcg_temp_free_i32(tcg_ctx, rm);
+
+    rn = neon_load_reg(s, a->vn, 1);
+    rm = neon_load_reg(s, a->vm, 1);
+    opfn(tcg_ctx, rd1, rn, rm);
+    tcg_temp_free_i32(tcg_ctx, rn);
+    tcg_temp_free_i32(tcg_ctx, rm);
+
+    /* Don't store results until after all loads: they might overlap */
+    if (accfn) {
+        tmp = tcg_temp_new_i64(tcg_ctx);
+        neon_load_reg64(s, tmp, a->vd);
+        accfn(tcg_ctx, tmp, tmp, rd0);
+        neon_store_reg64(s, tmp, a->vd);
+        neon_load_reg64(s, tmp, a->vd + 1);
+        accfn(tcg_ctx, tmp, tmp, rd1);
+        neon_store_reg64(s, tmp, a->vd + 1);
+        tcg_temp_free_i64(tcg_ctx, tmp);
+    } else {
+        neon_store_reg64(s, rd0, a->vd);
+        neon_store_reg64(s, rd1, a->vd + 1);
+    }
+
+    tcg_temp_free_i64(tcg_ctx, rd0);
+    tcg_temp_free_i64(tcg_ctx, rd1);
+
+    return true;
+}
+
+static bool trans_VABDL_S_3d(DisasContext *s, arg_3diff *a)
+{
+    static NeonGenTwoOpWidenFn * const opfn[] = {
+        gen_helper_neon_abdl_s16,
+        gen_helper_neon_abdl_s32,
+        gen_helper_neon_abdl_s64,
+        NULL,
+    };
+
+    return do_long_3d(s, a, opfn[a->size], NULL);
+}
+
+static bool trans_VABDL_U_3d(DisasContext *s, arg_3diff *a)
+{
+    static NeonGenTwoOpWidenFn * const opfn[] = {
+        gen_helper_neon_abdl_u16,
+        gen_helper_neon_abdl_u32,
+        gen_helper_neon_abdl_u64,
+        NULL,
+    };
+
+    return do_long_3d(s, a, opfn[a->size], NULL);
+}
+
+static bool trans_VABAL_S_3d(DisasContext *s, arg_3diff *a)
+{
+    static NeonGenTwoOpWidenFn * const opfn[] = {
+        gen_helper_neon_abdl_s16,
+        gen_helper_neon_abdl_s32,
+        gen_helper_neon_abdl_s64,
+        NULL,
+    };
+    static NeonGenTwo64OpFn * const addfn[] = {
+        gen_helper_neon_addl_u16,
+        gen_helper_neon_addl_u32,
+        tcg_gen_add_i64,
+        NULL,
+    };
+
+    return do_long_3d(s, a, opfn[a->size], addfn[a->size]);
+}
+
+static bool trans_VABAL_U_3d(DisasContext *s, arg_3diff *a)
+{
+    static NeonGenTwoOpWidenFn * const opfn[] = {
+        gen_helper_neon_abdl_u16,
+        gen_helper_neon_abdl_u32,
+        gen_helper_neon_abdl_u64,
+        NULL,
+    };
+    static NeonGenTwo64OpFn * const addfn[] = {
+        gen_helper_neon_addl_u16,
+        gen_helper_neon_addl_u32,
+        tcg_gen_add_i64,
+        NULL,
+    };
+
+    return do_long_3d(s, a, opfn[a->size], addfn[a->size]);
+}

@@ -5367,9 +5367,9 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
         {0, 0, 0, 7}, /* VSUBL: handled by decodetree */
         {0, 0, 0, 7}, /* VSUBW: handled by decodetree */
         {0, 0, 0, 7}, /* VADDHN: handled by decodetree */
-        {0, 0, 0, 0}, /* VABAL */
+        {0, 0, 0, 7}, /* VABAL */
         {0, 0, 0, 7}, /* VSUBHN: handled by decodetree */
-        {0, 0, 0, 0}, /* VABDL */
+        {0, 0, 0, 7}, /* VABDL */
         {0, 0, 0, 0}, /* VMLAL */
         {0, 0, 0, 9}, /* VQDMLAL */
         {0, 0, 0, 0}, /* VMLSL */
@@ -5430,31 +5430,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 tmp2 = neon_load_reg(s, rm, pass);
             }
             switch (op) {
-            case 5: case 7: /* VABAL, VABDL */
-                switch ((size << 1) | u) {
-                case 0:
-                    gen_helper_neon_abdl_s16(tcg_ctx, s->V0, tmp, tmp2);
-                    break;
-                case 1:
-                    gen_helper_neon_abdl_u16(tcg_ctx, s->V0, tmp, tmp2);
-                    break;
-                case 2:
-                    gen_helper_neon_abdl_s32(tcg_ctx, s->V0, tmp, tmp2);
-                    break;
-                case 3:
-                    gen_helper_neon_abdl_u32(tcg_ctx, s->V0, tmp, tmp2);
-                    break;
-                case 4:
-                    gen_helper_neon_abdl_s64(tcg_ctx, s->V0, tmp, tmp2);
-                    break;
-                case 5:
-                    gen_helper_neon_abdl_u64(tcg_ctx, s->V0, tmp, tmp2);
-                    break;
-                default: abort();
-                }
-                tcg_temp_free_i32(tcg_ctx, tmp2);
-                tcg_temp_free_i32(tcg_ctx, tmp);
-                break;
             case 8: case 9: case 10: case 11: case 12: case 13:
                 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
                 gen_neon_mull(s, s->V0, tmp, tmp2, size, u);
@@ -5473,7 +5448,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 case 10: /* VMLSL */
                     gen_neon_negl(s, s->V0, size);
                     /* Fall through */
-                case 5: case 8: /* VABAL, VMLAL */
+                case 8: /* VABAL, VMLAL */
                     gen_neon_addl(s, size);
                     break;
                 case 9: case 11: /* VQDMLAL, VQDMLSL */

@@ -378,6 +378,7 @@ typedef void NeonGenTwo64OpEnvFn(TCGContext *t, TCGv_i64, TCGv_ptr, TCGv_i64, TC
 typedef void NeonGenNarrowFn(TCGContext *t, TCGv_i32, TCGv_i64);
 typedef void NeonGenNarrowEnvFn(TCGContext *t, TCGv_i32, TCGv_ptr, TCGv_i64);
 typedef void NeonGenWidenFn(TCGContext *t, TCGv_i64, TCGv_i32);
+typedef void NeonGenTwoOpWidenFn(TCGContext *t, TCGv_i64, TCGv_i32, TCGv_i32);
 typedef void NeonGenTwoSingleOPFn(TCGContext *t, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
 typedef void NeonGenTwoDoubleOPFn(TCGContext *t, TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
 typedef void NeonGenOneOpFn(TCGContext *t, TCGv_i64, TCGv_i64);