target-i386: exception handling for seg_helper functions

This patch fixes exception handling for seg_helper functions.

Backports commit 100ec0991958d0c1b61f140e64dbe92991c6dd2c from qemu
Authored by Pavel Dovgalyuk on 2018-02-15 14:29:41 -05:00; committed by Lioncash
parent 57b96e16af
commit 08f93c3fe6
3 changed files with 335 additions and 329 deletions
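For context on why the translate.c hunks below delete so many gen_update_cc_op()/gen_jmp_im() pairs: per the QEMU commit being backported, the seg_helper.c exception paths now carry the helper's return address (QEMU's GETPC()), so guest EIP and condition-code state can be reconstructed lazily on the fault path instead of being synced eagerly before every helper call. The following is a minimal standalone sketch of that pattern, not QEMU code and not part of this diff; FakeEnv, restore_state_from_retaddr, raise_gpf_ra and the retaddr-to-EIP mapping are all invented for illustration.

/*
 * Minimal standalone sketch (NOT QEMU code) of the backported pattern:
 * instead of the translator syncing EIP/cc state before every segment
 * helper call, the helper captures its own return address and the rare
 * fault path reconstructs guest state from it on demand.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct {
    uint32_t eip;      /* guest program counter */
    int cc_synced;     /* whether condition codes were flushed */
} FakeEnv;

/* Stand-in for cpu_restore_state(): map the host return address back to
 * the guest EIP that was executing when the helper was entered. */
static void restore_state_from_retaddr(FakeEnv *env, uintptr_t retaddr)
{
    env->eip = (uint32_t)(retaddr & 0xfffu); /* placeholder mapping */
    env->cc_synced = 1;
}

/* Stand-in for raise_exception_err_ra(): guest state is reconstructed
 * only when a fault is actually raised. */
static void raise_gpf_ra(FakeEnv *env, int selector, uintptr_t retaddr)
{
    restore_state_from_retaddr(env, retaddr);
    printf("#GP(0x%x) at eip=0x%x\n",
           (unsigned)(selector & 0xfffc), (unsigned)env->eip);
}

/* Stand-in for a seg_helper function after the change: translated code
 * no longer pre-syncs EIP; the helper forwards its own return address
 * (QEMU's GETPC() is essentially __builtin_return_address(0)). */
static void helper_load_seg(FakeEnv *env, int selector)
{
    if (selector & 0x4) { /* pretend LDT selectors fault, for the demo */
        raise_gpf_ra(env, selector, (uintptr_t)__builtin_return_address(0));
        return;
    }
    /* common case: nothing to sync at all */
}

int main(void)
{
    FakeEnv env = { 0, 0 };
    helper_load_seg(&env, 0x10); /* fast path: no state sync */
    helper_load_seg(&env, 0x17); /* fault path: state restored lazily */
    return 0;
}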


@@ -32,9 +32,9 @@ DEF_HELPER_2(verw, void, env, tl)
DEF_HELPER_2(lldt, void, env, int)
DEF_HELPER_2(ltr, void, env, int)
DEF_HELPER_3(load_seg, void, env, int, int)
-DEF_HELPER_4(ljmp_protected, void, env, int, tl, int)
+DEF_HELPER_4(ljmp_protected, void, env, int, tl, tl)
DEF_HELPER_5(lcall_real, void, env, int, tl, int, int)
-DEF_HELPER_5(lcall_protected, void, env, int, tl, int, int)
+DEF_HELPER_5(lcall_protected, void, env, int, tl, int, tl)
DEF_HELPER_2(iret_real, void, env, int)
DEF_HELPER_3(iret_protected, void, env, int, int)
DEF_HELPER_3(lret_protected, void, env, int, int)

File diff suppressed because it is too large


@@ -794,7 +794,6 @@ static void gen_helper_out_func(TCGContext *s, TCGMemOp ot, TCGv_i32 v, TCGv_i32
static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
uint32_t svm_flags)
{
-int state_saved;
target_ulong next_eip;
TCGContext *tcg_ctx = s->uc->tcg_ctx;
TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32;
@@ -803,11 +802,7 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
// Unicorn: allow all I/O instructions
return;
-state_saved = 0;
if (s->pe && (s->cpl > s->iopl || s->vm86)) {
-gen_update_cc_op(s);
-gen_jmp_im(s, cur_eip);
-state_saved = 1;
tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]);
switch (ot) {
case MO_8:
@@ -824,10 +819,8 @@ static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
}
}
if(s->flags & HF_SVMI_MASK) {
-if (!state_saved) {
-gen_update_cc_op(s);
-gen_jmp_im(s, cur_eip);
-}
+gen_update_cc_op(s);
+gen_jmp_im(s, cur_eip);
svm_flags |= (1 << (4 + ot));
next_eip = s->pc - s->cs_base;
tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]);
@@ -2586,16 +2579,13 @@ static inline void gen_op_movl_seg_T0_vm(TCGContext *s, int seg_reg)
/* move T0 to seg_reg and compute if the CPU state may change. Never
call this function with seg_reg == R_CS */
-static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
+static void gen_movl_seg_T0(DisasContext *s, int seg_reg)
{
TCGContext *tcg_ctx = s->uc->tcg_ctx;
TCGv_i32 cpu_tmp2_i32 = tcg_ctx->cpu_tmp2_i32;
TCGv **cpu_T = (TCGv **)tcg_ctx->cpu_T;
if (s->pe && !s->vm86) {
/* XXX: optimize by finding processor state dynamically */
-gen_update_cc_op(s);
-gen_jmp_im(s, cur_eip);
tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]);
gen_helper_load_seg(tcg_ctx, tcg_ctx->cpu_env, tcg_const_i32(tcg_ctx, seg_reg), cpu_tmp2_i32);
/* abort translation because the addseg value may change or
@@ -5577,12 +5567,10 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_op_ld_v(s, MO_16, *cpu_T[0], cpu_A0);
do_lcall:
if (s->pe && !s->vm86) {
-gen_update_cc_op(s);
-gen_jmp_im(s, pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]);
gen_helper_lcall_protected(tcg_ctx, cpu_env, cpu_tmp2_i32, *cpu_T[1],
tcg_const_i32(tcg_ctx, dflag - 1),
-tcg_const_i32(tcg_ctx, s->pc - pc_start));
+tcg_const_tl(tcg_ctx, s->pc - s->cs_base));
} else {
tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]);
gen_helper_lcall_real(tcg_ctx, cpu_env, cpu_tmp2_i32, *cpu_T[1],
@@ -5604,11 +5592,9 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_op_ld_v(s, MO_16, *cpu_T[0], cpu_A0);
do_ljmp:
if (s->pe && !s->vm86) {
-gen_update_cc_op(s);
-gen_jmp_im(s, pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]);
gen_helper_ljmp_protected(tcg_ctx, cpu_env, cpu_tmp2_i32, *cpu_T[1],
-tcg_const_i32(tcg_ctx, s->pc - pc_start));
+tcg_const_tl(tcg_ctx, s->pc - s->cs_base));
} else {
gen_op_movl_seg_T0_vm(tcg_ctx, R_CS);
gen_op_jmp_v(tcg_ctx, *cpu_T[1]);
@@ -5947,7 +5933,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
goto illegal_op;
reg = b >> 3;
ot = gen_pop_T0(s);
-gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
+gen_movl_seg_T0(s, reg);
gen_pop_update(s, ot);
if (reg == R_SS) {
/* if reg == SS, inhibit interrupts/trace. */
@@ -5965,7 +5951,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
case 0x1a1: /* pop fs */
case 0x1a9: /* pop gs */
ot = gen_pop_T0(s);
-gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
+gen_movl_seg_T0(s, (b >> 3) & 7);
gen_pop_update(s, ot);
if (s->is_jmp) {
gen_jmp_im(s, s->pc - s->cs_base);
@@ -6022,7 +6008,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (reg >= 6 || reg == R_CS)
goto illegal_op;
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
-gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
+gen_movl_seg_T0(s, reg);
if (reg == R_SS) {
/* if reg == SS, inhibit interrupts/trace */
/* If several instructions disable interrupts, only the
@@ -6237,7 +6223,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_add_A0_im(s, 1 << ot);
/* load the segment first to handle exceptions properly */
gen_op_ld_v(s, MO_16, *cpu_T[0], cpu_A0);
-gen_movl_seg_T0(s, op, pc_start - s->cs_base);
+gen_movl_seg_T0(s, op);
/* then put the data */
gen_op_mov_reg_v(tcg_ctx, ot, reg, *cpu_T[1]);
if (s->is_jmp) {
@@ -7063,8 +7049,6 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
set_cc_op(s, CC_OP_EFLAGS);
}
} else {
-gen_update_cc_op(s);
-gen_jmp_im(s, pc_start - s->cs_base);
gen_helper_iret_protected(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, dflag - 1),
tcg_const_i32(tcg_ctx, s->pc - s->cs_base));
set_cc_op(s, CC_OP_EFLAGS);
@@ -7718,8 +7702,6 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
-gen_update_cc_op(s);
-gen_jmp_im(s, pc_start - s->cs_base);
gen_helper_sysenter(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start));
gen_eob(s);
}
@@ -7731,8 +7713,6 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
-gen_update_cc_op(s);
-gen_jmp_im(s, pc_start - s->cs_base);
gen_helper_sysexit(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, dflag - 1));
gen_eob(s);
}
@@ -7740,8 +7720,6 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
#ifdef TARGET_X86_64
case 0x105: /* syscall */
/* XXX: is it usable in real mode ? */
-gen_update_cc_op(s);
-gen_jmp_im(s, pc_start - s->cs_base);
gen_helper_syscall(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, s->pc - pc_start));
gen_eob(s);
break;
@@ -7749,8 +7727,6 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
-gen_update_cc_op(s);
-gen_jmp_im(s, pc_start - s->cs_base);
gen_helper_sysret(tcg_ctx, cpu_env, tcg_const_i32(tcg_ctx, dflag - 1));
/* condition codes are modified only in long mode */
if (s->lma) {
@@ -7796,7 +7772,6 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
-gen_jmp_im(s, pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]);
gen_helper_lldt(tcg_ctx, cpu_env, cpu_tmp2_i32);
}
@@ -7817,7 +7792,6 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
-gen_jmp_im(s, pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(tcg_ctx, cpu_tmp2_i32, *cpu_T[0]);
gen_helper_ltr(tcg_ctx, cpu_env, cpu_tmp2_i32);
}