diff --git a/qemu/aarch64.h b/qemu/aarch64.h
index 432e9fd5..bbbc06ac 100644
--- a/qemu/aarch64.h
+++ b/qemu/aarch64.h
@@ -3426,11 +3426,15 @@
 #define gen_gvec_mla gen_gvec_mla_aarch64
 #define gen_gvec_mls gen_gvec_mls_aarch64
 #define gen_gvec_sli gen_gvec_sli_aarch64
-#define gen_gvec_sshl gen_gvec_sshl_aarch64
-#define gen_gvec_ssra gen_gvec_ssra_aarch64
+#define gen_gvec_sqadd_qc gen_gvec_sqadd_qc_aarch64
+#define gen_gvec_sqsub_qc gen_gvec_sqsub_qc_aarch64
 #define gen_gvec_sri gen_gvec_sri_aarch64
 #define gen_gvec_srshr gen_gvec_srshr_aarch64
 #define gen_gvec_srsra gen_gvec_srsra_aarch64
+#define gen_gvec_sshl gen_gvec_sshl_aarch64
+#define gen_gvec_ssra gen_gvec_ssra_aarch64
+#define gen_gvec_uqadd_qc gen_gvec_uqadd_qc_aarch64
+#define gen_gvec_uqsub_qc gen_gvec_uqsub_qc_aarch64
 #define gen_gvec_ursra gen_gvec_ursra_aarch64
 #define gen_gvec_urshr gen_gvec_urshr_aarch64
 #define gen_gvec_ushl gen_gvec_ushl_aarch64
@@ -4468,16 +4472,10 @@
 #define raise_exception_ra raise_exception_ra_aarch64
 #define read_cpu_reg read_cpu_reg_aarch64
 #define read_cpu_reg_sp read_cpu_reg_sp_aarch64
-#define sqadd_op sqadd_op_aarch64
-#define sqsub_op sqsub_op_aarch64
-#define sshl_op sshl_op_aarch64
 #define sve_access_check sve_access_check_aarch64
 #define sve_exception_el sve_exception_el_aarch64
 #define sve_zcr_len_for_el sve_zcr_len_for_el_aarch64
 #define unallocated_encoding unallocated_encoding_aarch64
-#define uqadd_op uqadd_op_aarch64
-#define uqsub_op uqsub_op_aarch64
-#define ushl_op ushl_op_aarch64
 #define v8m_security_lookup v8m_security_lookup_aarch64
 #define vfp_expand_imm vfp_expand_imm_aarch64
 #define write_fp_dreg write_fp_dreg_aarch64
diff --git a/qemu/aarch64eb.h b/qemu/aarch64eb.h
index 2966c2d8..3e8ac583 100644
--- a/qemu/aarch64eb.h
+++ b/qemu/aarch64eb.h
@@ -3426,11 +3426,15 @@
 #define gen_gvec_mla gen_gvec_mla_aarch64eb
 #define gen_gvec_mls gen_gvec_mls_aarch64eb
 #define gen_gvec_sli gen_gvec_sli_aarch64eb
-#define gen_gvec_sshl gen_gvec_sshl_aarch64eb
-#define gen_gvec_ssra gen_gvec_ssra_aarch64eb
+#define gen_gvec_sqadd_qc gen_gvec_sqadd_qc_aarch64eb
+#define gen_gvec_sqsub_qc gen_gvec_sqsub_qc_aarch64eb
 #define gen_gvec_sri gen_gvec_sri_aarch64eb
 #define gen_gvec_srshr gen_gvec_srshr_aarch64eb
 #define gen_gvec_srsra gen_gvec_srsra_aarch64eb
+#define gen_gvec_sshl gen_gvec_sshl_aarch64eb
+#define gen_gvec_ssra gen_gvec_ssra_aarch64eb
+#define gen_gvec_uqadd_qc gen_gvec_uqadd_qc_aarch64eb
+#define gen_gvec_uqsub_qc gen_gvec_uqsub_qc_aarch64eb
 #define gen_gvec_ursra gen_gvec_ursra_aarch64eb
 #define gen_gvec_urshr gen_gvec_urshr_aarch64eb
 #define gen_gvec_ushl gen_gvec_ushl_aarch64eb
@@ -4468,16 +4472,10 @@
 #define raise_exception_ra raise_exception_ra_aarch64eb
 #define read_cpu_reg read_cpu_reg_aarch64eb
 #define read_cpu_reg_sp read_cpu_reg_sp_aarch64eb
-#define sqadd_op sqadd_op_aarch64eb
-#define sqsub_op sqsub_op_aarch64eb
-#define sshl_op sshl_op_aarch64eb
 #define sve_access_check sve_access_check_aarch64eb
 #define sve_exception_el sve_exception_el_aarch64eb
 #define sve_zcr_len_for_el sve_zcr_len_for_el_aarch64eb
 #define unallocated_encoding unallocated_encoding_aarch64eb
-#define uqadd_op uqadd_op_aarch64eb
-#define uqsub_op uqsub_op_aarch64eb
-#define ushl_op ushl_op_aarch64eb
 #define v8m_security_lookup v8m_security_lookup_aarch64eb
 #define vfp_expand_imm vfp_expand_imm_aarch64eb
 #define write_fp_dreg write_fp_dreg_aarch64eb
diff --git a/qemu/arm.h b/qemu/arm.h
index b04b850d..29cc2e6a 100644
--- a/qemu/arm.h
+++ b/qemu/arm.h
@@ -3411,11 +3411,15 @@
 #define gen_gvec_mla gen_gvec_mla_arm
 #define gen_gvec_mls gen_gvec_mls_arm
 #define gen_gvec_sli gen_gvec_sli_arm
-#define gen_gvec_sshl gen_gvec_sshl_arm
-#define gen_gvec_ssra gen_gvec_ssra_arm
+#define gen_gvec_sqadd_qc gen_gvec_sqadd_qc_arm
+#define gen_gvec_sqsub_qc gen_gvec_sqsub_qc_arm
 #define gen_gvec_sri gen_gvec_sri_arm
 #define gen_gvec_srshr gen_gvec_srshr_arm
 #define gen_gvec_srsra gen_gvec_srsra_arm
+#define gen_gvec_sshl gen_gvec_sshl_arm
+#define gen_gvec_ssra gen_gvec_ssra_arm
+#define gen_gvec_uqadd_qc gen_gvec_uqadd_qc_arm
+#define gen_gvec_uqsub_qc gen_gvec_uqsub_qc_arm
 #define gen_gvec_ursra gen_gvec_ursra_arm
 #define gen_gvec_urshr gen_gvec_urshr_arm
 #define gen_gvec_ushl gen_gvec_ushl_arm
diff --git a/qemu/armeb.h b/qemu/armeb.h
index b4eb67c0..f9fa16fa 100644
--- a/qemu/armeb.h
+++ b/qemu/armeb.h
@@ -3411,11 +3411,15 @@
 #define gen_gvec_mla gen_gvec_mla_armeb
 #define gen_gvec_mls gen_gvec_mls_armeb
 #define gen_gvec_sli gen_gvec_sli_armeb
-#define gen_gvec_sshl gen_gvec_sshl_armeb
-#define gen_gvec_ssra gen_gvec_ssra_armeb
+#define gen_gvec_sqadd_qc gen_gvec_sqadd_qc_armeb
+#define gen_gvec_sqsub_qc gen_gvec_sqsub_qc_armeb
 #define gen_gvec_sri gen_gvec_sri_armeb
 #define gen_gvec_srshr gen_gvec_srshr_armeb
 #define gen_gvec_srsra gen_gvec_srsra_armeb
+#define gen_gvec_sshl gen_gvec_sshl_armeb
+#define gen_gvec_ssra gen_gvec_ssra_armeb
+#define gen_gvec_uqadd_qc gen_gvec_uqadd_qc_armeb
+#define gen_gvec_uqsub_qc gen_gvec_uqsub_qc_armeb
 #define gen_gvec_ursra gen_gvec_ursra_armeb
 #define gen_gvec_urshr gen_gvec_urshr_armeb
 #define gen_gvec_ushl gen_gvec_ushl_armeb
diff --git a/qemu/header_gen.py b/qemu/header_gen.py
index 4f843f50..24c9e4c5 100644
--- a/qemu/header_gen.py
+++ b/qemu/header_gen.py
@@ -3420,11 +3420,15 @@ arm_symbols = (
     'gen_gvec_mla',
     'gen_gvec_mls',
     'gen_gvec_sli',
-    'gen_gvec_sshl',
-    'gen_gvec_ssra',
+    'gen_gvec_sqadd_qc',
+    'gen_gvec_sqsub_qc',
     'gen_gvec_sri',
     'gen_gvec_srshr',
     'gen_gvec_srsra',
+    'gen_gvec_sshl',
+    'gen_gvec_ssra',
+    'gen_gvec_uqadd_qc',
+    'gen_gvec_uqsub_qc',
     'gen_gvec_ursra',
     'gen_gvec_urshr',
     'gen_gvec_ushl',
@@ -3533,11 +3537,15 @@ aarch64_symbols = (
     'gen_gvec_mla',
     'gen_gvec_mls',
     'gen_gvec_sli',
-    'gen_gvec_sshl',
-    'gen_gvec_ssra',
+    'gen_gvec_sqadd_qc',
+    'gen_gvec_sqsub_qc',
     'gen_gvec_sri',
     'gen_gvec_srshr',
     'gen_gvec_srsra',
+    'gen_gvec_sshl',
+    'gen_gvec_ssra',
+    'gen_gvec_uqadd_qc',
+    'gen_gvec_uqsub_qc',
     'gen_gvec_ursra',
     'gen_gvec_urshr',
     'gen_gvec_ushl',
@@ -4575,16 +4583,10 @@ aarch64_symbols = (
     'raise_exception_ra',
     'read_cpu_reg',
     'read_cpu_reg_sp',
-    'sqadd_op',
-    'sqsub_op',
-    'sshl_op',
     'sve_access_check',
     'sve_exception_el',
     'sve_zcr_len_for_el',
     'unallocated_encoding',
-    'uqadd_op',
-    'uqsub_op',
-    'ushl_op',
     'v8m_security_lookup',
     'vfp_expand_imm',
     'write_fp_dreg',
diff --git a/qemu/target/arm/translate-a64.c b/qemu/target/arm/translate-a64.c
index 90e28265..44d8282a 100644
--- a/qemu/target/arm/translate-a64.c
+++ b/qemu/target/arm/translate-a64.c
@@ -11460,20 +11460,18 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
 
     switch (opcode) {
     case 0x01: /* SQADD, UQADD */
-        tcg_gen_gvec_4(tcg_ctx, vec_full_reg_offset(s, rd),
-                       offsetof(CPUARMState, vfp.qc),
-                       vec_full_reg_offset(s, rn),
-                       vec_full_reg_offset(s, rm),
-                       is_q ? 16 : 8, vec_full_reg_size(s),
-                       (u ? uqadd_op : sqadd_op) + size);
+        if (u) {
+            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqadd_qc, size);
+        } else {
+            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqadd_qc, size);
+        }
         return;
     case 0x05: /* SQSUB, UQSUB */
-        tcg_gen_gvec_4(tcg_ctx, vec_full_reg_offset(s, rd),
-                       offsetof(CPUARMState, vfp.qc),
-                       vec_full_reg_offset(s, rn),
-                       vec_full_reg_offset(s, rm),
-                       is_q ? 16 : 8, vec_full_reg_size(s),
-                       (u ? uqsub_op : sqsub_op) + size);
+        if (u) {
+            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_uqsub_qc, size);
+        } else {
+            gen_gvec_fn3(s, is_q, rd, rn, rm, gen_gvec_sqsub_qc, size);
+        }
         return;
     case 0x08: /* SSHL, USHL */
         if (u) {
diff --git a/qemu/target/arm/translate-neon.inc.c b/qemu/target/arm/translate-neon.inc.c
index b4a65645..08c81fa3 100644
--- a/qemu/target/arm/translate-neon.inc.c
+++ b/qemu/target/arm/translate-neon.inc.c
@@ -618,6 +618,10 @@ DO_3SAME(VORN, tcg_gen_gvec_orc)
 DO_3SAME(VEOR, tcg_gen_gvec_xor)
 DO_3SAME(VSHL_S, gen_gvec_sshl)
 DO_3SAME(VSHL_U, gen_gvec_ushl)
+DO_3SAME(VQADD_S, gen_gvec_sqadd_qc)
+DO_3SAME(VQADD_U, gen_gvec_uqadd_qc)
+DO_3SAME(VQSUB_S, gen_gvec_sqsub_qc)
+DO_3SAME(VQSUB_U, gen_gvec_uqsub_qc)
 
 /* These insns are all gvec_bitsel but with the inputs in various orders. */
 #define DO_3SAME_BITSEL(INSN, O1, O2, O3) \
@@ -666,21 +670,6 @@ DO_3SAME_CMP(VCGE_S, TCG_COND_GE)
 DO_3SAME_CMP(VCGE_U, TCG_COND_GEU)
 DO_3SAME_CMP(VCEQ, TCG_COND_EQ)
 
-#define DO_3SAME_GVEC4(INSN, OPARRAY)                                      \
-    static void gen_##INSN##_3s(TCGContext *s, unsigned vece, uint32_t rd_ofs, \
-                                uint32_t rn_ofs, uint32_t rm_ofs,          \
-                                uint32_t oprsz, uint32_t maxsz)            \
-    {                                                                      \
-        tcg_gen_gvec_4(s, rd_ofs, offsetof(CPUARMState, vfp.qc),           \
-                       rn_ofs, rm_ofs, oprsz, maxsz, &OPARRAY[vece]);      \
-    }                                                                      \
-    DO_3SAME(INSN, gen_##INSN##_3s)
-
-DO_3SAME_GVEC4(VQADD_S, sqadd_op)
-DO_3SAME_GVEC4(VQADD_U, uqadd_op)
-DO_3SAME_GVEC4(VQSUB_S, sqsub_op)
-DO_3SAME_GVEC4(VQSUB_U, uqsub_op)
-
 static void gen_VMUL_p_3s(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                           uint32_t rm_ofs, uint32_t oprsz, uint32_t maxsz)
 {
diff --git a/qemu/target/arm/translate.c b/qemu/target/arm/translate.c
index 7e894f92..20e517bc 100644
--- a/qemu/target/arm/translate.c
+++ b/qemu/target/arm/translate.c
@@ -5053,32 +5053,37 @@ static void gen_uqadd_vec(TCGContext *s, unsigned vece, TCGv_vec t, TCGv_vec sat
     tcg_temp_free_vec(s, x);
 }
 
-static const TCGOpcode vecop_list_uqadd[] = {
-    INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
-};
-
-const GVecGen4 uqadd_op[4] = {
-    { .fniv = gen_uqadd_vec,
-      .fno = gen_helper_gvec_uqadd_b,
-      .opt_opc = vecop_list_uqadd,
-      .write_aofs = true,
-      .vece = MO_8 },
-    { .fniv = gen_uqadd_vec,
-      .fno = gen_helper_gvec_uqadd_h,
-      .opt_opc = vecop_list_uqadd,
-      .write_aofs = true,
-      .vece = MO_16 },
-    { .fniv = gen_uqadd_vec,
-      .fno = gen_helper_gvec_uqadd_s,
-      .opt_opc = vecop_list_uqadd,
-      .write_aofs = true,
-      .vece = MO_32 },
-    { .fniv = gen_uqadd_vec,
-      .fno = gen_helper_gvec_uqadd_d,
-      .opt_opc = vecop_list_uqadd,
-      .write_aofs = true,
-      .vece = MO_64 },
-};
+void gen_gvec_uqadd_qc(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen4 ops[4] = {
+        { .fniv = gen_uqadd_vec,
+          .fno = gen_helper_gvec_uqadd_b,
+          .write_aofs = true,
+          .opt_opc = vecop_list,
+          .vece = MO_8 },
+        { .fniv = gen_uqadd_vec,
+          .fno = gen_helper_gvec_uqadd_h,
+          .write_aofs = true,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fniv = gen_uqadd_vec,
+          .fno = gen_helper_gvec_uqadd_s,
+          .write_aofs = true,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+        { .fniv = gen_uqadd_vec,
+          .fno = gen_helper_gvec_uqadd_d,
+          .write_aofs = true,
+          .opt_opc = vecop_list,
+          .vece = MO_64 },
+    };
+    tcg_gen_gvec_4(s, rd_ofs, offsetof(CPUARMState, vfp.qc),
+                   rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
 
 static void gen_sqadd_vec(TCGContext *s, unsigned vece, TCGv_vec t, TCGv_vec sat,
                           TCGv_vec a, TCGv_vec b)
@@ -5091,32 +5096,37 @@ static void gen_sqadd_vec(TCGContext *s, unsigned vece, TCGv_vec t, TCGv_vec sat
     tcg_temp_free_vec(s, x);
 }
 
-static const TCGOpcode vecop_list_sqadd[] = {
-    INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
-};
-
-const GVecGen4 sqadd_op[4] = {
-    { .fniv = gen_sqadd_vec,
-      .fno = gen_helper_gvec_sqadd_b,
-      .opt_opc = vecop_list_sqadd,
-      .write_aofs = true,
-      .vece = MO_8 },
-    { .fniv = gen_sqadd_vec,
-      .fno = gen_helper_gvec_sqadd_h,
-      .opt_opc = vecop_list_sqadd,
-      .write_aofs = true,
-      .vece = MO_16 },
-    { .fniv = gen_sqadd_vec,
-      .fno = gen_helper_gvec_sqadd_s,
-      .opt_opc = vecop_list_sqadd,
-      .write_aofs = true,
-      .vece = MO_32 },
-    { .fniv = gen_sqadd_vec,
-      .fno = gen_helper_gvec_sqadd_d,
-      .opt_opc = vecop_list_sqadd,
-      .write_aofs = true,
-      .vece = MO_64 },
-};
+void gen_gvec_sqadd_qc(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen4 ops[4] = {
+        { .fniv = gen_sqadd_vec,
+          .fno = gen_helper_gvec_sqadd_b,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_8 },
+        { .fniv = gen_sqadd_vec,
+          .fno = gen_helper_gvec_sqadd_h,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_16 },
+        { .fniv = gen_sqadd_vec,
+          .fno = gen_helper_gvec_sqadd_s,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_32 },
+        { .fniv = gen_sqadd_vec,
+          .fno = gen_helper_gvec_sqadd_d,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_64 },
+    };
+    tcg_gen_gvec_4(s, rd_ofs, offsetof(CPUARMState, vfp.qc),
+                   rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
 
 static void gen_uqsub_vec(TCGContext *s, unsigned vece, TCGv_vec t, TCGv_vec sat,
                           TCGv_vec a, TCGv_vec b)
@@ -5129,32 +5139,37 @@ static void gen_uqsub_vec(TCGContext *s, unsigned vece, TCGv_vec t, TCGv_vec sat
     tcg_temp_free_vec(s, x);
 }
 
-static const TCGOpcode vecop_list_uqsub[] = {
-    INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
-};
-
-const GVecGen4 uqsub_op[4] = {
-    { .fniv = gen_uqsub_vec,
-      .fno = gen_helper_gvec_uqsub_b,
-      .opt_opc = vecop_list_uqsub,
-      .write_aofs = true,
-      .vece = MO_8 },
-    { .fniv = gen_uqsub_vec,
-      .fno = gen_helper_gvec_uqsub_h,
-      .opt_opc = vecop_list_uqsub,
-      .write_aofs = true,
-      .vece = MO_16 },
-    { .fniv = gen_uqsub_vec,
-      .fno = gen_helper_gvec_uqsub_s,
-      .opt_opc = vecop_list_uqsub,
-      .write_aofs = true,
-      .vece = MO_32 },
-    { .fniv = gen_uqsub_vec,
-      .fno = gen_helper_gvec_uqsub_d,
-      .opt_opc = vecop_list_uqsub,
-      .write_aofs = true,
-      .vece = MO_64 },
-};
+void gen_gvec_uqsub_qc(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
+    };
+    static const GVecGen4 ops[4] = {
+        { .fniv = gen_uqsub_vec,
+          .fno = gen_helper_gvec_uqsub_b,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_8 },
+        { .fniv = gen_uqsub_vec,
+          .fno = gen_helper_gvec_uqsub_h,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_16 },
+        { .fniv = gen_uqsub_vec,
+          .fno = gen_helper_gvec_uqsub_s,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_32 },
+        { .fniv = gen_uqsub_vec,
+          .fno = gen_helper_gvec_uqsub_d,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_64 },
+    };
+    tcg_gen_gvec_4(s, rd_ofs, offsetof(CPUARMState, vfp.qc),
+                   rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
 
 static void gen_sqsub_vec(TCGContext *s, unsigned vece, TCGv_vec t, TCGv_vec sat,
                           TCGv_vec a, TCGv_vec b)
@@ -5167,32 +5182,37 @@ static void gen_sqsub_vec(TCGContext *s, unsigned vece, TCGv_vec t, TCGv_vec sat
     tcg_temp_free_vec(s, x);
 }
 
-static const TCGOpcode vecop_list_sqsub[] = {
-    INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
-};
-
-const GVecGen4 sqsub_op[4] = {
-    { .fniv = gen_sqsub_vec,
-      .fno = gen_helper_gvec_sqsub_b,
-      .opt_opc = vecop_list_sqsub,
-      .write_aofs = true,
-      .vece = MO_8 },
-    { .fniv = gen_sqsub_vec,
-      .fno = gen_helper_gvec_sqsub_h,
-      .opt_opc = vecop_list_sqsub,
-      .write_aofs = true,
-      .vece = MO_16 },
-    { .fniv = gen_sqsub_vec,
-      .fno = gen_helper_gvec_sqsub_s,
-      .opt_opc = vecop_list_sqsub,
-      .write_aofs = true,
-      .vece = MO_32 },
-    { .fniv = gen_sqsub_vec,
-      .fno = gen_helper_gvec_sqsub_d,
-      .opt_opc = vecop_list_sqsub,
-      .write_aofs = true,
-      .vece = MO_64 },
-};
+void gen_gvec_sqsub_qc(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
+    };
+    static const GVecGen4 ops[4] = {
+        { .fniv = gen_sqsub_vec,
+          .fno = gen_helper_gvec_sqsub_b,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_8 },
+        { .fniv = gen_sqsub_vec,
+          .fno = gen_helper_gvec_sqsub_h,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_16 },
+        { .fniv = gen_sqsub_vec,
+          .fno = gen_helper_gvec_sqsub_s,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_32 },
+        { .fniv = gen_sqsub_vec,
+          .fno = gen_helper_gvec_sqsub_d,
+          .opt_opc = vecop_list,
+          .write_aofs = true,
+          .vece = MO_64 },
+    };
+    tcg_gen_gvec_4(s, rd_ofs, offsetof(CPUARMState, vfp.qc),
+                   rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
+}
 
 /* Translate a NEON data processing instruction.  Return nonzero if the
    instruction is invalid.
diff --git a/qemu/target/arm/translate.h b/qemu/target/arm/translate.h
index 8d1f9872..b2208513 100644
--- a/qemu/target/arm/translate.h
+++ b/qemu/target/arm/translate.h
@@ -305,16 +305,21 @@ void gen_gvec_sshl(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rn_of
 void gen_gvec_ushl(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
 
-extern const GVecGen4 uqadd_op[4];
-extern const GVecGen4 sqadd_op[4];
-extern const GVecGen4 uqsub_op[4];
-extern const GVecGen4 sqsub_op[4];
 void gen_cmtst_i64(TCGContext* tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
 void gen_ushl_i32(TCGContext* tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
 void gen_sshl_i32(TCGContext* tcg_ctx, TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
 void gen_ushl_i64(TCGContext* tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
 void gen_sshl_i64(TCGContext* tcg_ctx, TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
 
+void gen_gvec_uqadd_qc(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_sqadd_qc(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_uqsub_qc(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+void gen_gvec_sqsub_qc(TCGContext *s, unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
+
 void gen_gvec_ssra(TCGContext* tcg_ctx, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
 void gen_gvec_usra(TCGContext *tcg_ctx, unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,