target/mips: use *ctx for DisasContext

No changes to the logic here; this is just to make the diff
that follows easier to read.
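
The pattern is purely mechanical: the DisasContext still lives on the stack, but every access is routed through a pointer, so '.' becomes '->' and '&ctx' becomes 'ctx'. A minimal sketch of the idea, with made-up fields rather than the real DisasContext layout:

    #include <stdio.h>

    typedef struct DisasContext {
        unsigned long pc;   /* illustrative fields only */
        int mem_idx;
    } DisasContext;

    /* Before: a value, accessed with '.', passed to helpers as '&ctx'. */
    static void translate_by_value(void)
    {
        DisasContext ctx;
        ctx.pc = 0x1000;
        ctx.mem_idx = 0;
        printf("pc=%#lx idx=%d\n", ctx.pc, ctx.mem_idx);
    }

    /* After: same stack storage, accessed through a pointer with '->'.
     * Helpers now take 'ctx' directly, so a later switch to caller-
     * provided or heap storage only touches the two declaration lines. */
    static void translate_by_pointer(void)
    {
        DisasContext ctx1;
        DisasContext *ctx = &ctx1;
        ctx->pc = 0x1000;
        ctx->mem_idx = 0;
        printf("pc=%#lx idx=%d\n", ctx->pc, ctx->mem_idx);
    }

    int main(void)
    {
        translate_by_value();
        translate_by_pointer();
        return 0;
    }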

While at it, remove the unnecessary 'struct' in
'struct TranslationBlock'.
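
QEMU forward-typedefs its structs, so the tag-qualified and plain spellings name the same type and the 'struct' keyword adds nothing. A minimal sketch of why the two prototypes are interchangeable (illustrative fields, not the real TranslationBlock definition):

    /* QEMU-style forward typedef: tag and typedef name coexist. */
    typedef struct TranslationBlock TranslationBlock;

    struct TranslationBlock {
        unsigned long pc;   /* illustrative fields only */
        int size;
    };

    /* Both declare the same function type; the second is the tidier
     * spelling once the typedef is in scope. */
    void gen_code_a(struct TranslationBlock *tb);
    void gen_code_b(TranslationBlock *tb);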

Note that checkpatch complains with a false positive:
ERROR: space prohibited after that '&' (ctx:WxW)
#75: FILE: target/mips/translate.c:20220:
+    ctx->kscrexist = (env->CP0_Config4 >> CP0C4_KScrExist) & 0xff;
                                                            ^
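
The complaint is spurious: this '&' is the binary bitwise AND, correctly spaced on both sides; checkpatch has presumably misclassified it as a unary address-of, where a trailing space really would be an error. A contrived comparison of the two readings, with hypothetical variables:

    static unsigned split_field(unsigned config4)
    {
        unsigned low = (config4 >> 16) & 0xff;  /* binary AND: spaces belong here */
        unsigned *p = &config4;                 /* address-of: no space after '&' */
        return low + *p;
    }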

Backports commit 12be92588cf26a192f1b62846906983fc1e102a7 from qemu
Emilio G. Cota 2018-05-12 16:24:10 -04:00, committed by Lioncash
commit 2c93423acd, parent dae6b84c14

@@ -20366,10 +20366,11 @@ static void decode_opc(CPUMIPSState *env, DisasContext *ctx, bool *insn_need_pat
     }
 }
 
-void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
 {
     CPUMIPSState *env = cs->env_ptr;
-    DisasContext ctx;
+    DisasContext ctx1;
+    DisasContext *ctx = &ctx1;
     target_ulong page_start;
     int max_insns;
     int insn_bytes;
@@ -20379,46 +20380,46 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
     //int save_opparam_idx = -1;
     bool block_full = false;
-    ctx.base.tb = tb;
-    ctx.base.pc_first = tb->pc;
-    ctx.base.pc_next = tb->pc;
-    ctx.base.is_jmp = DISAS_NEXT;
-    ctx.base.singlestep_enabled = cs->singlestep_enabled;
-    ctx.base.num_insns = 0;
+    ctx->base.tb = tb;
+    ctx->base.pc_first = tb->pc;
+    ctx->base.pc_next = tb->pc;
+    ctx->base.is_jmp = DISAS_NEXT;
+    ctx->base.singlestep_enabled = cs->singlestep_enabled;
+    ctx->base.num_insns = 0;
 
-    page_start = ctx.base.pc_first & TARGET_PAGE_MASK;
-    ctx.uc = env->uc;
-    ctx.saved_pc = -1;
-    ctx.insn_flags = env->insn_flags;
-    ctx.CP0_Config1 = env->CP0_Config1;
-    ctx.btarget = 0;
-    ctx.kscrexist = (env->CP0_Config4 >> CP0C4_KScrExist) & 0xff;
-    ctx.rxi = (env->CP0_Config3 >> CP0C3_RXI) & 1;
-    ctx.ie = (env->CP0_Config4 >> CP0C4_IE) & 3;
-    ctx.bi = (env->CP0_Config3 >> CP0C3_BI) & 1;
-    ctx.bp = (env->CP0_Config3 >> CP0C3_BP) & 1;
-    ctx.PAMask = env->PAMask;
-    ctx.mvh = (env->CP0_Config5 >> CP0C5_MVH) & 1;
-    ctx.eva = (env->CP0_Config5 >> CP0C5_EVA) & 1;
-    ctx.sc = (env->CP0_Config3 >> CP0C3_SC) & 1;
-    ctx.CP0_LLAddr_shift = env->CP0_LLAddr_shift;
-    ctx.cmgcr = (env->CP0_Config3 >> CP0C3_CMGCR) & 1;
+    page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
+    ctx->uc = env->uc;
+    ctx->saved_pc = -1;
+    ctx->insn_flags = env->insn_flags;
+    ctx->CP0_Config1 = env->CP0_Config1;
+    ctx->btarget = 0;
+    ctx->kscrexist = (env->CP0_Config4 >> CP0C4_KScrExist) & 0xff;
+    ctx->rxi = (env->CP0_Config3 >> CP0C3_RXI) & 1;
+    ctx->ie = (env->CP0_Config4 >> CP0C4_IE) & 3;
+    ctx->bi = (env->CP0_Config3 >> CP0C3_BI) & 1;
+    ctx->bp = (env->CP0_Config3 >> CP0C3_BP) & 1;
+    ctx->PAMask = env->PAMask;
+    ctx->mvh = (env->CP0_Config5 >> CP0C5_MVH) & 1;
+    ctx->eva = (env->CP0_Config5 >> CP0C5_EVA) & 1;
+    ctx->sc = (env->CP0_Config3 >> CP0C3_SC) & 1;
+    ctx->CP0_LLAddr_shift = env->CP0_LLAddr_shift;
+    ctx->cmgcr = (env->CP0_Config3 >> CP0C3_CMGCR) & 1;
     /* Restore delay slot state from the tb context. */
-    ctx.hflags = (uint32_t)ctx.base.tb->flags; /* FIXME: maybe use 64 bits? */
-    ctx.ulri = (env->CP0_Config3 >> CP0C3_ULRI) & 1;
-    ctx.ps = ((env->active_fpu.fcr0 >> FCR0_PS) & 1) ||
+    ctx->hflags = (uint32_t)ctx->base.tb->flags; /* FIXME: maybe use 64 bits? */
+    ctx->ulri = (env->CP0_Config3 >> CP0C3_ULRI) & 1;
+    ctx->ps = ((env->active_fpu.fcr0 >> FCR0_PS) & 1) ||
              (env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F));
-    ctx.vp = (env->CP0_Config5 >> CP0C5_VP) & 1;
-    ctx.mrp = (env->CP0_Config5 >> CP0C5_MRP) & 1;
-    ctx.nan2008 = (env->active_fpu.fcr31 >> FCR31_NAN2008) & 1;
-    ctx.abs2008 = (env->active_fpu.fcr31 >> FCR31_ABS2008) & 1;
-    restore_cpu_state(env, &ctx);
+    ctx->vp = (env->CP0_Config5 >> CP0C5_VP) & 1;
+    ctx->mrp = (env->CP0_Config5 >> CP0C5_MRP) & 1;
+    ctx->nan2008 = (env->active_fpu.fcr31 >> FCR31_NAN2008) & 1;
+    ctx->abs2008 = (env->active_fpu.fcr31 >> FCR31_ABS2008) & 1;
+    restore_cpu_state(env, ctx);
 #ifdef CONFIG_USER_ONLY
-    ctx.mem_idx = MIPS_HFLAG_UM;
+    ctx->mem_idx = MIPS_HFLAG_UM;
 #else
-    ctx.mem_idx = hflags_mmu_index(ctx.hflags);
+    ctx->mem_idx = hflags_mmu_index(ctx->hflags);
 #endif
-    ctx.default_tcg_memop_mask = (ctx.insn_flags & ISA_MIPS32R6) ?
+    ctx->default_tcg_memop_mask = (ctx->insn_flags & ISA_MIPS32R6) ?
                                  MO_UNALN : MO_ALIGN;
     max_insns = tb->cflags & CF_COUNT_MASK;
@@ -20428,59 +20429,59 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
     if (max_insns > TCG_MAX_INSNS) {
         max_insns = TCG_MAX_INSNS;
     }
-    LOG_DISAS("\ntb %p idx %d hflags %04x\n", tb, ctx.mem_idx, ctx.hflags);
+    LOG_DISAS("\ntb %p idx %d hflags %04x\n", tb, ctx->mem_idx, ctx->hflags);
     // Unicorn: early check to see if the address of this block is the until address
     if (tb->pc == env->uc->addr_end) {
         gen_tb_start(tcg_ctx, tb);
         gen_helper_wait(tcg_ctx, tcg_ctx->cpu_env);
-        ctx.base.is_jmp = DISAS_EXIT;
+        ctx->base.is_jmp = DISAS_EXIT;
         goto done_generating;
     }
     // Unicorn: trace this block on request
     // Only hook this block if it is not broken from previous translation due to
     // full translation cache
-    if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, ctx.base.pc_first)) {
+    if (!env->uc->block_full && HOOK_EXISTS_BOUNDED(env->uc, UC_HOOK_BLOCK, ctx->base.pc_first)) {
         // Unicorn: FIXME: Amend to work with the new TCG API
 #if 0
         int arg_i = tcg_ctx->gen_op_buf[tcg_ctx->gen_op_buf[0].prev].args;
         // save block address to see if we need to patch block size later
-        env->uc->block_addr = ctx.base.pc_first;
+        env->uc->block_addr = ctx->base.pc_first;
         env->uc->size_arg = arg_i + 1;
-        gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, ctx.base.pc_first);
+        gen_uc_tracecode(tcg_ctx, 0xf8f8f8f8, UC_HOOK_BLOCK_IDX, env->uc, ctx->base.pc_first);
 #endif
     } else {
         env->uc->size_arg = -1;
     }
     gen_tb_start(tcg_ctx, tb);
-    while (ctx.base.is_jmp == DISAS_NEXT) {
-        tcg_gen_insn_start(tcg_ctx, ctx.base.pc_next, ctx.hflags & MIPS_HFLAG_BMASK,
-                           ctx.btarget);
-        ctx.base.num_insns++;
+    while (ctx->base.is_jmp == DISAS_NEXT) {
+        tcg_gen_insn_start(tcg_ctx, ctx->base.pc_next, ctx->hflags & MIPS_HFLAG_BMASK,
+                           ctx->btarget);
+        ctx->base.num_insns++;
 
-        if (unlikely(cpu_breakpoint_test(cs, ctx.base.pc_next, BP_ANY))) {
-            save_cpu_state(&ctx, 1);
-            ctx.base.is_jmp = DISAS_NORETURN;
+        if (unlikely(cpu_breakpoint_test(cs, ctx->base.pc_next, BP_ANY))) {
+            save_cpu_state(ctx, 1);
+            ctx->base.is_jmp = DISAS_NORETURN;
             gen_helper_raise_exception_debug(tcg_ctx, tcg_ctx->cpu_env);
             /* The address covered by the breakpoint must be included in
                [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
-            ctx.base.pc_next += 4;
+            ctx->base.pc_next += 4;
             goto done_generating;
         }
         // Unicorn: Commented out
-        //if (ctx.base.num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+        //if (ctx->base.num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
         //    gen_io_start();
         //}
         // Unicorn: end address tells us to stop emulation
-        if (ctx.base.pc_next == ctx.uc->addr_end) {
+        if (ctx->base.pc_next == ctx->uc->addr_end) {
             gen_helper_wait(tcg_ctx, tcg_ctx->cpu_env);
-            ctx.base.is_jmp = DISAS_EXIT;
+            ctx->base.is_jmp = DISAS_EXIT;
             break;
         } else {
             bool insn_need_patch = false;
@@ -20494,20 +20495,20 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
             }
 #endif
-            is_slot = ctx.hflags & MIPS_HFLAG_BMASK;
+            is_slot = ctx->hflags & MIPS_HFLAG_BMASK;
 
-            if (!(ctx.hflags & MIPS_HFLAG_M16)) {
-                ctx.opcode = cpu_ldl_code(env, ctx.base.pc_next);
+            if (!(ctx->hflags & MIPS_HFLAG_M16)) {
+                ctx->opcode = cpu_ldl_code(env, ctx->base.pc_next);
                 insn_bytes = 4;
-                decode_opc(env, &ctx, &insn_need_patch, &insn_patch_offset);
-            } else if (ctx.insn_flags & ASE_MICROMIPS) {
-                ctx.opcode = cpu_lduw_code(env, ctx.base.pc_next);
-                insn_bytes = decode_micromips_opc(env, &ctx, &insn_need_patch);
-            } else if (ctx.insn_flags & ASE_MIPS16) {
-                ctx.opcode = cpu_lduw_code(env, ctx.base.pc_next);
-                insn_bytes = decode_mips16_opc(env, &ctx, &insn_need_patch);
+                decode_opc(env, ctx, &insn_need_patch, &insn_patch_offset);
+            } else if (ctx->insn_flags & ASE_MICROMIPS) {
+                ctx->opcode = cpu_lduw_code(env, ctx->base.pc_next);
+                insn_bytes = decode_micromips_opc(env, ctx, &insn_need_patch);
+            } else if (ctx->insn_flags & ASE_MIPS16) {
+                ctx->opcode = cpu_lduw_code(env, ctx->base.pc_next);
+                insn_bytes = decode_mips16_opc(env, ctx, &insn_need_patch);
             } else {
-                generate_exception_end(&ctx, EXCP_RI);
+                generate_exception_end(ctx, EXCP_RI);
                 break;
             }
@@ -20524,35 +20525,35 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
             }
         }
-        if (ctx.hflags & MIPS_HFLAG_BMASK) {
-            if (!(ctx.hflags & (MIPS_HFLAG_BDS16 | MIPS_HFLAG_BDS32 |
+        if (ctx->hflags & MIPS_HFLAG_BMASK) {
+            if (!(ctx->hflags & (MIPS_HFLAG_BDS16 | MIPS_HFLAG_BDS32 |
                                 MIPS_HFLAG_FBNSLOT))) {
                 /* force to generate branch as there is neither delay nor
                    forbidden slot */
                 is_slot = 1;
             }
-            if ((ctx.hflags & MIPS_HFLAG_M16) &&
-                (ctx.hflags & MIPS_HFLAG_FBNSLOT)) {
+            if ((ctx->hflags & MIPS_HFLAG_M16) &&
+                (ctx->hflags & MIPS_HFLAG_FBNSLOT)) {
                 /* Force to generate branch as microMIPS R6 doesn't restrict
                    branches in the forbidden slot. */
                 is_slot = 1;
             }
         }
         if (is_slot) {
-            gen_branch(&ctx, insn_bytes);
+            gen_branch(ctx, insn_bytes);
         }
-        ctx.base.pc_next += insn_bytes;
+        ctx->base.pc_next += insn_bytes;
         /* Execute a branch and its delay slot as a single instruction.
            This is what GDB expects and is consistent with what the
           hardware does (e.g. if a delay slot instruction faults, the
           reported PC is the PC of the branch). */
-        if (ctx.base.singlestep_enabled &&
-            (ctx.hflags & MIPS_HFLAG_BMASK) == 0) {
+        if (ctx->base.singlestep_enabled &&
+            (ctx->hflags & MIPS_HFLAG_BMASK) == 0) {
             break;
         }
-        if (ctx.base.pc_next - page_start >= TARGET_PAGE_SIZE) {
+        if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE) {
             break;
         }
@@ -20560,7 +20561,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
             break;
         }
 
-        if (ctx.base.num_insns >= max_insns) {
+        if (ctx->base.num_insns >= max_insns) {
             break;
         }
 
@@ -20568,26 +20569,26 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
         // break;
     }
-    if (tcg_op_buf_full(tcg_ctx) || ctx.base.num_insns >= max_insns) {
+    if (tcg_op_buf_full(tcg_ctx) || ctx->base.num_insns >= max_insns) {
        block_full = true;
     }
     //if (tb->cflags & CF_LAST_IO) {
     //    gen_io_end();
     //}
-    if (ctx.base.singlestep_enabled && ctx.base.is_jmp != DISAS_NORETURN) {
-        save_cpu_state(&ctx, ctx.base.is_jmp != DISAS_EXIT);
+    if (ctx->base.singlestep_enabled && ctx->base.is_jmp != DISAS_NORETURN) {
+        save_cpu_state(ctx, ctx->base.is_jmp != DISAS_EXIT);
         gen_helper_raise_exception_debug(tcg_ctx, tcg_ctx->cpu_env);
     } else {
-        switch (ctx.base.is_jmp) {
+        switch (ctx->base.is_jmp) {
         case DISAS_STOP:
-            gen_save_pc(&ctx, ctx.base.pc_next);
+            gen_save_pc(ctx, ctx->base.pc_next);
             tcg_gen_lookup_and_goto_ptr(tcg_ctx);
-            env->uc->next_pc = ctx.base.pc_next;
+            env->uc->next_pc = ctx->base.pc_next;
             break;
         case DISAS_NEXT:
-            save_cpu_state(&ctx, 0);
-            gen_goto_tb(&ctx, 0, ctx.base.pc_next);
+            save_cpu_state(ctx, 0);
+            gen_goto_tb(ctx, 0, ctx->base.pc_next);
             break;
         case DISAS_EXIT:
             tcg_gen_exit_tb(tcg_ctx, 0);
@@ -20598,10 +20599,10 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
         }
     }
 done_generating:
-    gen_tb_end(tcg_ctx, tb, ctx.base.num_insns);
+    gen_tb_end(tcg_ctx, tb, ctx->base.num_insns);
 
-    tb->size = ctx.base.pc_next - ctx.base.pc_first;
-    tb->icount = ctx.base.num_insns;
+    tb->size = ctx->base.pc_next - ctx->base.pc_first;
+    tb->icount = ctx->base.num_insns;
     env->uc->block_full = block_full;
 }