target/arm: Use vector infrastructure for aa64 add/sub/logic

Backports commit bc48092f5865c20893bb19200a7a320feac99eeb from qemu
Author: Richard Henderson, 2018-03-06 15:35:28 -05:00 (committed by Lioncash)
Parent: f30abaea66
Commit: 03b76589b4


@@ -22,6 +22,7 @@
 #include "cpu.h"
 #include "exec/exec-all.h"
 #include "tcg-op.h"
+#include "tcg-op-gvec.h"
 #include "qemu/log.h"
 #include "arm_ldst.h"
 #include "translate.h"
@@ -81,6 +82,10 @@ typedef void CryptoTwoOpFn(TCGContext *, TCGv_ptr, TCGv_ptr);
 typedef void CryptoThreeOpIntFn(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void CryptoThreeOpFn(TCGContext *, TCGv_ptr, TCGv_ptr, TCGv_ptr);
 
+/* Note that the gvec expanders operate on offsets + sizes. */
+typedef void GVecGen3Fn(TCGContext *, unsigned, uint32_t, uint32_t,
+                        uint32_t, uint32_t, uint32_t);
+
 /* initialize TCG globals. */
 void a64_translate_init(struct uc_struct *uc)
 {
@@ -577,6 +582,14 @@ static TCGv_ptr vec_full_reg_ptr(DisasContext *s, int regno)
     return ret;
 }
 
+/* Return the byte size of the "whole" vector register, VL / 8. */
+static inline int vec_full_reg_size(DisasContext *s)
+{
+    /* FIXME SVE: We should put the composite ZCR_EL* value into tb->flags.
+       In the meantime this is just the AdvSIMD length of 128. */
+    return 128 / 8;
+}
+
 /* Return the offset into CPUARMState of a slice (from
  * the least significant end) of FP register Qn (ie
  * Dn, Sn, Hn or Bn).
@@ -651,6 +664,27 @@ static TCGv_ptr get_fpstatus_ptr(TCGContext *tcg_ctx)
     return statusptr;
 }
 
+/* Expand a 3-operand AdvSIMD vector operation using an expander function. */
+static void gen_gvec_fn3(DisasContext *s, bool is_q, int rd, int rn, int rm,
+                         GVecGen3Fn *gvec_fn, int vece)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    gvec_fn(tcg_ctx, vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
+            vec_full_reg_offset(s, rm), is_q ? 16 : 8, vec_full_reg_size(s));
+}
+
+/* Expand a 3-operand AdvSIMD vector operation using an op descriptor. */
+static void gen_gvec_op3(DisasContext *s, bool is_q, int rd,
+                         int rn, int rm, const GVecGen3 *gvec_op)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    tcg_gen_gvec_3(tcg_ctx, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
+                   vec_full_reg_offset(s, rm), is_q ? 16 : 8,
+                   vec_full_reg_size(s), gvec_op);
+}
+
 /* Set ZF and NF based on a 64 bit result. This is alas fiddlier
  * than the 32 bit equivalent.
  */
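
For context (not part of the patch): the gvec expanders named by GVecGen3Fn take no TCG temporaries. They operate on byte offsets into CPUARMState plus two sizes: oprsz, the bytes actually operated on (8 for the 64-bit half-register forms, 16 when is_q is set), and maxsz, the size of the whole register from vec_full_reg_size(). Bytes between oprsz and maxsz are cleared, which is how the architectural zeroing of a Q register's high half falls out for free. A minimal reference model of that contract, with hypothetical names:

    /* Editorial sketch of the gvec oprsz/maxsz contract -- not QEMU code. */
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    static uint8_t op_and(uint8_t a, uint8_t b) { return a & b; }

    static void model_gvec_3(uint8_t *d, const uint8_t *n, const uint8_t *m,
                             size_t oprsz, size_t maxsz,
                             uint8_t (*op)(uint8_t, uint8_t))
    {
        size_t i;
        for (i = 0; i < oprsz; i++) {
            d[i] = op(n[i], m[i]);   /* element-wise operation */
        }
        for (; i < maxsz; i++) {
            d[i] = 0;                /* clear up to the full register size */
        }
    }

    int main(void)
    {
        uint8_t d[16], n[16], m[16];
        for (int i = 0; i < 16; i++) { n[i] = 0xff; m[i] = (uint8_t)i; }
        model_gvec_3(d, n, m, 8, 16, op_and);  /* !is_q: oprsz = 8 */
        assert(d[7] == 7);                     /* low half computed */
        assert(d[15] == 0);                    /* high half cleared */
        return 0;
    }
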
@@ -9203,86 +9237,109 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
     }
 }
 
+static void gen_bsl_i64(TCGContext *s, TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+    tcg_gen_xor_i64(s, rn, rn, rm);
+    tcg_gen_and_i64(s, rn, rn, rd);
+    tcg_gen_xor_i64(s, rd, rm, rn);
+}
+
+static void gen_bit_i64(TCGContext *s, TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+    tcg_gen_xor_i64(s, rn, rn, rd);
+    tcg_gen_and_i64(s, rn, rn, rm);
+    tcg_gen_xor_i64(s, rd, rd, rn);
+}
+
+static void gen_bif_i64(TCGContext *s, TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
+{
+    tcg_gen_xor_i64(s, rn, rn, rd);
+    tcg_gen_andc_i64(s, rn, rn, rm);
+    tcg_gen_xor_i64(s, rd, rd, rn);
+}
+
+static void gen_bsl_vec(TCGContext *s, unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
+{
+    tcg_gen_xor_vec(s, vece, rn, rn, rm);
+    tcg_gen_and_vec(s, vece, rn, rn, rd);
+    tcg_gen_xor_vec(s, vece, rd, rm, rn);
+}
+
+static void gen_bit_vec(TCGContext *s, unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
+{
+    tcg_gen_xor_vec(s, vece, rn, rn, rd);
+    tcg_gen_and_vec(s, vece, rn, rn, rm);
+    tcg_gen_xor_vec(s, vece, rd, rd, rn);
+}
+
+static void gen_bif_vec(TCGContext *s, unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
+{
+    tcg_gen_xor_vec(s, vece, rn, rn, rd);
+    tcg_gen_andc_vec(s, vece, rn, rn, rm);
+    tcg_gen_xor_vec(s, vece, rd, rd, rn);
+}
+
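For context (not part of the patch): BSL, BIT and BIF are all the same bitwise multiplex, result = (a & sel) | (b & ~sel), with only the selector differing (rd for BSL, rm for BIT, ~rm for BIF). Rewriting the mux as b ^ ((a ^ b) & sel) needs just one scratch value, and the helpers above reuse rn for it; since rd is also an input, the GVecGen3 descriptors below set .load_dest. The three identities can be checked in plain C:

    /* Editorial self-check of the BSL/BIT/BIF identities -- not QEMU code. */
    #include <assert.h>
    #include <stdint.h>

    static uint64_t mux(uint64_t sel, uint64_t a, uint64_t b)
    {
        return (a & sel) | (b & ~sel);  /* a where sel is set, else b */
    }

    int main(void)
    {
        uint64_t rd = 0xF0F0F0F0F0F0F0F0ull;  /* arbitrary test patterns */
        uint64_t rn = 0x123456789ABCDEF0ull;
        uint64_t rm = 0x0FEDCBA987654321ull;

        /* BSL: selector is the destination rd */
        assert((rm ^ ((rn ^ rm) & rd)) == mux(rd, rn, rm));
        /* BIT: insert rn into rd where rm is set */
        assert((rd ^ ((rn ^ rd) & rm)) == mux(rm, rn, rd));
        /* BIF: insert rn into rd where rm is clear */
        assert((rd ^ ((rn ^ rd) & ~rm)) == mux(~rm, rn, rd));
        return 0;
    }
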
 /* Logic op (opcode == 3) subgroup of C3.6.16. */
 static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
 {
-    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+    static const GVecGen3 bsl_op = {
+        .fni8 = gen_bsl_i64,
+        .fniv = gen_bsl_vec,
+        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+        .load_dest = true
+    };
+    static const GVecGen3 bit_op = {
+        .fni8 = gen_bit_i64,
+        .fniv = gen_bit_vec,
+        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+        .load_dest = true
+    };
+    static const GVecGen3 bif_op = {
+        .fni8 = gen_bif_i64,
+        .fniv = gen_bif_vec,
+        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
+        .load_dest = true
+    };
     int rd = extract32(insn, 0, 5);
     int rn = extract32(insn, 5, 5);
     int rm = extract32(insn, 16, 5);
     int size = extract32(insn, 22, 2);
     bool is_u = extract32(insn, 29, 1);
     bool is_q = extract32(insn, 30, 1);
-    TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
-    int pass;
 
     if (!fp_access_check(s)) {
         return;
     }
 
-    tcg_op1 = tcg_temp_new_i64(tcg_ctx);
-    tcg_op2 = tcg_temp_new_i64(tcg_ctx);
-    tcg_res[0] = tcg_temp_new_i64(tcg_ctx);
-    tcg_res[1] = tcg_temp_new_i64(tcg_ctx);
-
-    for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
-        read_vec_element(s, tcg_op1, rn, pass, MO_64);
-        read_vec_element(s, tcg_op2, rm, pass, MO_64);
-
-        if (!is_u) {
-            switch (size) {
-            case 0: /* AND */
-                tcg_gen_and_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2);
-                break;
-            case 1: /* BIC */
-                tcg_gen_andc_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2);
-                break;
-            case 2: /* ORR */
-                tcg_gen_or_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2);
-                break;
-            case 3: /* ORN */
-                tcg_gen_orc_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2);
-                break;
-            }
-        } else {
-            if (size != 0) {
-                /* B* ops need res loaded to operate on */
-                read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
-            }
-
-            switch (size) {
-            case 0: /* EOR */
-                tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_op1, tcg_op2);
-                break;
-            case 1: /* BSL bitwise select */
-                tcg_gen_xor_i64(tcg_ctx, tcg_op1, tcg_op1, tcg_op2);
-                tcg_gen_and_i64(tcg_ctx, tcg_op1, tcg_op1, tcg_res[pass]);
-                tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_op2, tcg_op1);
-                break;
-            case 2: /* BIT, bitwise insert if true */
-                tcg_gen_xor_i64(tcg_ctx, tcg_op1, tcg_op1, tcg_res[pass]);
-                tcg_gen_and_i64(tcg_ctx, tcg_op1, tcg_op1, tcg_op2);
-                tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_op1);
-                break;
-            case 3: /* BIF, bitwise insert if false */
-                tcg_gen_xor_i64(tcg_ctx, tcg_op1, tcg_op1, tcg_res[pass]);
-                tcg_gen_andc_i64(tcg_ctx, tcg_op1, tcg_op1, tcg_op2);
-                tcg_gen_xor_i64(tcg_ctx, tcg_res[pass], tcg_res[pass], tcg_op1);
-                break;
-            }
-        }
-    }
-
-    write_vec_element(s, tcg_res[0], rd, 0, MO_64);
-    if (!is_q) {
-        tcg_gen_movi_i64(tcg_ctx, tcg_res[1], 0);
-    }
-    write_vec_element(s, tcg_res[1], rd, 1, MO_64);
-
-    tcg_temp_free_i64(tcg_ctx, tcg_op1);
-    tcg_temp_free_i64(tcg_ctx, tcg_op2);
-    tcg_temp_free_i64(tcg_ctx, tcg_res[0]);
-    tcg_temp_free_i64(tcg_ctx, tcg_res[1]);
+    switch (size + 4 * is_u) {
+    case 0: /* AND */
+        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_and, 0);
+        return;
+    case 1: /* BIC */
+        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_andc, 0);
+        return;
+    case 2: /* ORR */
+        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_or, 0);
+        return;
+    case 3: /* ORN */
+        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_orc, 0);
+        return;
+    case 4: /* EOR */
+        gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_xor, 0);
+        return;
+    case 5: /* BSL bitwise select */
+        gen_gvec_op3(s, is_q, rd, rn, rm, &bsl_op);
+        return;
+    case 6: /* BIT, bitwise insert if true */
+        gen_gvec_op3(s, is_q, rd, rn, rm, &bit_op);
+        return;
+    case 7: /* BIF, bitwise insert if false */
+        gen_gvec_op3(s, is_q, rd, rn, rm, &bif_op);
+        return;
+    default:
+        g_assert_not_reached();
+    }
 }
 
 /* Helper functions for 32 bit comparisons */
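
For context (not part of the patch): size is insn[23:22] and is_u is insn[29], so size + 4 * is_u reassembles the 3-bit U:size field that the C3.6.16 logic subgroup decodes as 0-7 = AND, BIC, ORR, ORN, EOR, BSL, BIT, BIF. The same extraction as a standalone sketch (hypothetical helper, not in the patch):

    #include <stdint.h>

    /* Hypothetical mirror of the switch key in disas_simd_3same_logic:
     * U:size selects among AND, BIC, ORR, ORN, EOR, BSL, BIT, BIF. */
    static inline int simd_logic_selector(uint32_t insn)
    {
        int size = (insn >> 22) & 3;  /* insn[23:22] */
        int is_u = (insn >> 29) & 1;  /* insn[29] */
        return size + 4 * is_u;       /* 0..7 */
    }
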
@@ -9584,6 +9641,16 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
         return;
     }
 
+    switch (opcode) {
+    case 0x10: /* ADD, SUB */
+        if (u) {
+            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_sub, size);
+        } else {
+            gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_add, size);
+        }
+        return;
+    }
+
     if (size == 3) {
         assert(is_q);
         for (pass = 0; pass < 2; pass++) {
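
For context (not part of the patch): the logic ops pass vece = 0 because they are bitwise and the element size is irrelevant, while ADD/SUB passes vece = size so the expander splits the register into independent 8 << size-bit lanes. That per-lane wrapping is the property the per-pass Neon helpers removed below (gen_helper_neon_add_u8/u16 and friends) existed to provide. An illustrative model for the 16-bit case:

    #include <stddef.h>
    #include <stdint.h>

    /* Editorial model of per-lane add for vece = 1 (16-bit elements):
     * each lane wraps modulo 2^16, so no carry crosses a lane boundary. */
    static void model_add_lanes_u16(uint16_t *d, const uint16_t *n,
                                    const uint16_t *m, size_t lanes)
    {
        for (size_t i = 0; i < lanes; i++) {
            d[i] = (uint16_t)(n[i] + m[i]);
        }
    }
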
@@ -9756,16 +9823,6 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
         genfn = fns[size][u];
         break;
     }
-    case 0x10: /* ADD, SUB */
-    {
-        static NeonGenTwoOpFn * const fns[3][2] = {
-            { gen_helper_neon_add_u8, gen_helper_neon_sub_u8 },
-            { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
-            { tcg_gen_add_i32, tcg_gen_sub_i32 },
-        };
-        genfn = fns[size][u];
-        break;
-    }
     case 0x11: /* CMTST, CMEQ */
     {
         static NeonGenTwoOpFn * const fns[3][2] = {