tcg: Merge tb_find_slow() and tb_find_fast()

These functions are not too big and can be merged together. This makes
the locking scheme clearer and easier to follow.

Backports commit bd2710d5da06ad7706d4864f65b3f0c9f7cb4d7f from qemu
This commit is contained in:
Sergey Fedorov 2018-02-26 02:05:14 -05:00 committed by Lioncash
parent 9b6f287488
commit ab0c87bc6f
No known key found for this signature in database
GPG Key ID: 4E3C3CC1031BA9C7

View File

@ -137,46 +137,9 @@ static TranslationBlock *tb_find_physical(CPUState *cpu,
return tb; return tb;
} }
static TranslationBlock *tb_find_slow(CPUState *cpu, static inline TranslationBlock *tb_find(CPUState *cpu,
target_ulong pc, TranslationBlock *last_tb,
target_ulong cs_base, int tb_exit)
uint32_t flags,
bool *have_tb_lock)
{
TranslationBlock *tb;
tb = tb_find_physical(cpu, pc, cs_base, flags);
if (!tb) {
/* mmap_lock is needed by tb_gen_code, and mmap_lock must be
* taken outside tb_lock. As system emulation is currently
* single threaded the locks are NOPs.
*/
mmap_lock();
// Unicorn: commented out
//tb_lock();
*have_tb_lock = true;
/* There's a chance that our desired tb has been translated while
* taking the locks so we check again inside the lock.
*/
tb = tb_find_physical(cpu, pc, cs_base, flags);
if (!tb) {
/* if no translated code available, then translate it now */
tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
}
// Unicorn: commented out
//tb_unlock();
mmap_unlock();
}
/* We add the TB in the virtual pc hash table for the fast lookup */
atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
return tb;
}
static inline TranslationBlock *tb_find_fast(CPUState *cpu,
TranslationBlock *last_tb,
int tb_exit)
{ {
CPUArchState *env = (CPUArchState *)cpu->env_ptr; CPUArchState *env = (CPUArchState *)cpu->env_ptr;
TranslationBlock *tb; TranslationBlock *tb;
@ -192,7 +155,32 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
tb = atomic_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]); tb = atomic_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base || if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
tb->flags != flags)) { tb->flags != flags)) {
tb = tb_find_slow(cpu, pc, cs_base, flags, &have_tb_lock); tb = tb_find_physical(cpu, pc, cs_base, flags);
if (!tb) {
/* mmap_lock is needed by tb_gen_code, and mmap_lock must be
* taken outside tb_lock. As system emulation is currently
* single threaded the locks are NOPs.
*/
mmap_lock();
// Unicorn: commented out
//tb_lock();
have_tb_lock = true;
/* There's a chance that our desired tb has been translated while
* taking the locks so we check again inside the lock.
*/
tb = tb_find_physical(cpu, pc, cs_base, flags);
if (!tb) {
/* if no translated code available, then translate it now */
tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
}
// Unicorn: commented out
//tb_unlock();
mmap_unlock();
}
/* We add the TB in the virtual pc hash table for the fast lookup */
atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
} }
#ifndef CONFIG_USER_ONLY #ifndef CONFIG_USER_ONLY
/* We don't take care of direct jumps when address mapping changes in /* We don't take care of direct jumps when address mapping changes in
@ -444,7 +432,7 @@ int cpu_exec(struct uc_struct *uc, CPUState *cpu)
atomic_mb_set(&cpu->tb_flushed, false); /* reset before first TB lookup */ atomic_mb_set(&cpu->tb_flushed, false); /* reset before first TB lookup */
for(;;) { for(;;) {
cpu_handle_interrupt(cpu, &last_tb); cpu_handle_interrupt(cpu, &last_tb);
tb = tb_find_fast(cpu, last_tb, tb_exit); tb = tb_find(cpu, last_tb, tb_exit);
if (!tb) { // invalid TB due to invalid code? if (!tb) { // invalid TB due to invalid code?
uc->invalid_error = UC_ERR_FETCH_UNMAPPED; uc->invalid_error = UC_ERR_FETCH_UNMAPPED;
ret = EXCP_HLT; ret = EXCP_HLT;