target/arm: Use tcg_gen_gvec_bitsel

This replaces 3 target-specific implementations for BIT, BIF, and BSL.

Backports commit 3a7a2b4e5cf0d49cd8b14e8225af0310068b7d20 from qemu
Author:    Richard Henderson
Date:      2019-06-13 16:12:20 -04:00
Committer: Lioncash
Parent:    a1396b12f6
Commit:    7c32498b7f
7 changed files with 22 additions and 87 deletions
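
For reference, tcg_gen_gvec_bitsel computes d = (b & a) | (c & ~a) element-wise, so BSL, BIT and BIF all reduce to that one primitive with different operand orders. Below is a minimal scalar sketch of the mapping, checked against a bit-by-bit reference; the bitsel()/select_bits() helpers and the sample values are illustrative only and not part of this change. The argument orders in the asserts mirror the gen_gvec_fn4/tcg_gen_gvec_bitsel calls in the hunks that follow.

/* Illustrative check of the operand mapping; not part of the commit. */
#include <assert.h>
#include <stdint.h>

/* Word-wise model of the gvec primitive: d = (b & a) | (c & ~a). */
static uint64_t bitsel(uint64_t a, uint64_t b, uint64_t c)
{
    return (b & a) | (c & ~a);
}

/* Bit-by-bit reference: take x where sel is 1, y where sel is 0. */
static uint64_t select_bits(uint64_t sel, uint64_t x, uint64_t y)
{
    uint64_t r = 0;
    for (int i = 0; i < 64; i++) {
        uint64_t bit = (sel >> i) & 1 ? (x >> i) & 1 : (y >> i) & 1;
        r |= bit << i;
    }
    return r;
}

int main(void)
{
    uint64_t rd = 0xff00ff00ff00ff00ull;
    uint64_t rn = 0x0123456789abcdefull;
    uint64_t rm = 0xfedcba9876543210ull;

    /* BSL (call order rd, rd, rn, rm): old rd selects rn over rm.    */
    assert(bitsel(rd, rn, rm) == select_bits(rd, rn, rm));
    /* BIT (call order rd, rm, rn, rd): insert rn bits where rm is 1. */
    assert(bitsel(rm, rn, rd) == select_bits(rm, rn, rd));
    /* BIF (call order rd, rm, rd, rn): insert rn bits where rm is 0. */
    assert(bitsel(rm, rd, rn) == select_bits(rm, rd, rn));
    return 0;
}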

qemu/arm.h

@@ -3396,9 +3396,6 @@
 #define arm_set_cpu_off arm_set_cpu_off_arm
 #define arm_set_cpu_on arm_set_cpu_on_arm
 #define arm_stage1_mmu_idx arm_stage1_mmu_idx_arm
-#define bif_op bif_op_arm
-#define bit_op bit_op_arm
-#define bsl_op bsl_op_arm
 #define cmtst_op cmtst_op_arm
 #define cpu_mmu_index cpu_mmu_index_arm
 #define fp_exception_el fp_exception_el_arm

qemu/armeb.h

@@ -3396,9 +3396,6 @@
 #define arm_set_cpu_off arm_set_cpu_off_armeb
 #define arm_set_cpu_on arm_set_cpu_on_armeb
 #define arm_stage1_mmu_idx arm_stage1_mmu_idx_armeb
-#define bif_op bif_op_armeb
-#define bit_op bit_op_armeb
-#define bsl_op bsl_op_armeb
 #define cmtst_op cmtst_op_armeb
 #define cpu_mmu_index cpu_mmu_index_armeb
 #define fp_exception_el fp_exception_el_armeb

qemu/header_gen.py

@@ -3405,9 +3405,6 @@ arm_symbols = (
     'arm_set_cpu_off',
     'arm_set_cpu_on',
     'arm_stage1_mmu_idx',
-    'bif_op',
-    'bit_op',
-    'bsl_op',
     'cmtst_op',
     'cpu_mmu_index',
     'fp_exception_el',

qemu/target/arm/translate-a64.c

@@ -717,6 +717,17 @@ static void gen_gvec_fn2(DisasContext *s, bool is_q, int rd, int rn,
                          is_q ? 16 : 8, vec_full_reg_size(s));
 }
 
+/* Expand a 4-operand AdvSIMD vector operation using an expander function. */
+static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn, int rm,
+                         int rx, GVecGen4Fn *gvec_fn, int vece)
+{
+    TCGContext *tcg_ctx = s->uc->tcg_ctx;
+
+    gvec_fn(tcg_ctx, vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
+            vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
+            is_q ? 16 : 8, vec_full_reg_size(s));
+}
+
 /* Expand a 2-operand + immediate AdvSIMD vector operation using
  * an expander function.
  */
@@ -11091,13 +11102,13 @@ static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
         return;
     case 5: /* BSL bitwise select */
-        gen_gvec_op3(s, is_q, rd, rn, rm, &bsl_op);
+        gen_gvec_fn4(s, is_q, rd, rd, rn, rm, tcg_gen_gvec_bitsel, 0);
         return;
     case 6: /* BIT, bitwise insert if true */
-        gen_gvec_op3(s, is_q, rd, rn, rm, &bit_op);
+        gen_gvec_fn4(s, is_q, rd, rm, rn, rd, tcg_gen_gvec_bitsel, 0);
         return;
     case 7: /* BIF, bitwise insert if false */
-        gen_gvec_op3(s, is_q, rd, rn, rm, &bif_op);
+        gen_gvec_fn4(s, is_q, rd, rm, rd, rn, tcg_gen_gvec_bitsel, 0);
         return;
     default:

qemu/target/arm/translate-a64.h

@@ -123,5 +123,7 @@ typedef void GVecGen2iFn(TCGContext *, unsigned, uint32_t, uint32_t, int64_t,
                          uint32_t, uint32_t);
 typedef void GVecGen3Fn(TCGContext *, unsigned, uint32_t, uint32_t,
                         uint32_t, uint32_t, uint32_t);
+typedef void GVecGen4Fn(TCGContext *, unsigned, uint32_t, uint32_t, uint32_t,
+                        uint32_t, uint32_t, uint32_t);
 
 #endif /* TARGET_ARM_TRANSLATE_A64_H */

qemu/target/arm/translate.c

@@ -5909,72 +5909,6 @@ static int do_v81_helper(DisasContext *s, gen_helper_gvec_3_ptr *fn,
     return 1;
 }
 
-/*
- * Expanders for VBitOps_VBIF, VBIT, VBSL.
- */
-static void gen_bsl_i64(TCGContext* s, TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
-{
-    tcg_gen_xor_i64(s, rn, rn, rm);
-    tcg_gen_and_i64(s, rn, rn, rd);
-    tcg_gen_xor_i64(s, rd, rm, rn);
-}
-
-static void gen_bit_i64(TCGContext* s, TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
-{
-    tcg_gen_xor_i64(s, rn, rn, rd);
-    tcg_gen_and_i64(s, rn, rn, rm);
-    tcg_gen_xor_i64(s, rd, rd, rn);
-}
-
-static void gen_bif_i64(TCGContext *s, TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
-{
-    tcg_gen_xor_i64(s, rn, rn, rd);
-    tcg_gen_andc_i64(s, rn, rn, rm);
-    tcg_gen_xor_i64(s, rd, rd, rn);
-}
-
-static void gen_bsl_vec(TCGContext* s, unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
-{
-    tcg_gen_xor_vec(s, vece, rn, rn, rm);
-    tcg_gen_and_vec(s, vece, rn, rn, rd);
-    tcg_gen_xor_vec(s, vece, rd, rm, rn);
-}
-
-static void gen_bit_vec(TCGContext* s, unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
-{
-    tcg_gen_xor_vec(s, vece, rn, rn, rd);
-    tcg_gen_and_vec(s, vece, rn, rn, rm);
-    tcg_gen_xor_vec(s, vece, rd, rd, rn);
-}
-
-static void gen_bif_vec(TCGContext *s, unsigned vece, TCGv_vec rd, TCGv_vec rn, TCGv_vec rm)
-{
-    tcg_gen_xor_vec(s, vece, rn, rn, rd);
-    tcg_gen_andc_vec(s, vece, rn, rn, rm);
-    tcg_gen_xor_vec(s, vece, rd, rd, rn);
-}
-
-const GVecGen3 bsl_op = {
-    .fni8 = gen_bsl_i64,
-    .fniv = gen_bsl_vec,
-    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-    .load_dest = true
-};
-
-const GVecGen3 bit_op = {
-    .fni8 = gen_bit_i64,
-    .fniv = gen_bit_vec,
-    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-    .load_dest = true
-};
-
-const GVecGen3 bif_op = {
-    .fni8 = gen_bif_i64,
-    .fniv = gen_bif_vec,
-    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
-    .load_dest = true
-};
-
 static void gen_ssra8_i64(TCGContext *s, TCGv_i64 d, TCGv_i64 a, int64_t shift)
 {
     tcg_gen_vec_sar8i_i64(s, a, a, shift);
@@ -6725,16 +6659,16 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                            vec_size, vec_size);
             break;
         case 5: /* VBSL */
-            tcg_gen_gvec_3(tcg_ctx, rd_ofs, rn_ofs, rm_ofs,
-                           vec_size, vec_size, &bsl_op);
+            tcg_gen_gvec_bitsel(tcg_ctx, MO_8, rd_ofs, rd_ofs, rn_ofs, rm_ofs,
+                                vec_size, vec_size);
             break;
         case 6: /* VBIT */
-            tcg_gen_gvec_3(tcg_ctx, rd_ofs, rn_ofs, rm_ofs,
-                           vec_size, vec_size, &bit_op);
+            tcg_gen_gvec_bitsel(tcg_ctx, MO_8, rd_ofs, rm_ofs, rn_ofs, rd_ofs,
+                                vec_size, vec_size);
             break;
         case 7: /* VBIF */
-            tcg_gen_gvec_3(tcg_ctx, rd_ofs, rn_ofs, rm_ofs,
-                           vec_size, vec_size, &bif_op);
+            tcg_gen_gvec_bitsel(tcg_ctx, MO_8, rd_ofs, rm_ofs, rd_ofs, rn_ofs,
+                                vec_size, vec_size);
             break;
         }
         return 0;

qemu/target/arm/translate.h

@@ -243,9 +243,6 @@ static inline void gen_ss_advance(DisasContext *s)
 }
 
 /* Vector operations shared between ARM and AArch64. */
-extern const GVecGen3 bsl_op;
-extern const GVecGen3 bit_op;
-extern const GVecGen3 bif_op;
 extern const GVecGen3 cmtst_op[4];
 extern const GVecGen3 mla_op[4];
 extern const GVecGen3 mls_op[4];