unicorn/qemu/unicorn_common.h
Bharata B Rao 309b85548f
cpu: Convert cpu_index into a bitmap
Currently CPUState::cpu_index is monotonically increasing and a newly
created CPU always gets the next higher index. The next available
index is calculated by counting the existing number of CPUs. This is
fine as long as we only add CPUs, but there are architectures which
are starting to support CPU removal, too. For an architecture like PowerPC
which derives its CPU identifier (device tree ID) from cpu_index, the
existing logic of generating cpu_index values causes problems.

With the currently proposed method of handling vCPU removal by parking
the vCPU fd in QEMU
(Ref: http://lists.gnu.org/archive/html/qemu-devel/2015-02/msg02604.html),
generating cpu_index this way will not work for PowerPC.

This patch changes the way cpu_index is handed out by maintaining
a bitmap of CPUs that tracks both addition and removal of CPUs.

The CPU bitmap allocation logic is part of cpu_exec_init(), which is
called by instance_init routines of various CPU targets. Newly added
cpu_exec_exit() API handles the deallocation part and this routine is
called from generic CPU instance_finalize.

Note: This new CPU enumeration is for !CONFIG_USER_ONLY only.
CONFIG_USER_ONLY continues to have the old enumeration logic.

Backports commit b7bca7333411bd19c449147e8202ae6b0e4a8e09 from qemu
2018-03-21 08:06:07 -04:00
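
For reference, a minimal sketch of the bitmap-based allocator this commit
describes, assuming QEMU's bitmap helpers (DECLARE_BITMAP, find_first_zero_bit,
bitmap_set, bitmap_clear) and the MAX_CPUMASK_BITS limit; identifiers are
illustrative rather than the exact backported code:

#include "qemu/bitmap.h"

static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

/* Hand out the lowest free index; called from cpu_exec_init(). */
static int cpu_get_free_index(void)
{
    int index = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (index >= MAX_CPUMASK_BITS) {
        return -1; /* all indices in use */
    }
    bitmap_set(cpu_index_map, index, 1);
    return index;
}

/* Return an index to the pool; called from the new cpu_exec_exit(). */
static void cpu_release_index(CPUState *cpu)
{
    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
}

Because removal clears the bit, an unplugged CPU's index can be handed out
again, so identifiers derived from cpu_index (such as PowerPC's device tree
IDs) stay within the expected range after hot-unplug.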


#ifndef UNICORN_COMMON_H_
#define UNICORN_COMMON_H_
#include "tcg.h"
// This header defines common patterns/code that will be included in all
// arch-specific code for unicorn's purposes.
// Return true on success, false on failure.
static inline bool cpu_physical_mem_read(AddressSpace *as, hwaddr addr,
                                         uint8_t *buf, int len)
{
    return !cpu_physical_memory_rw(as, addr, (void *)buf, len, 0);
}

static inline bool cpu_physical_mem_write(AddressSpace *as, hwaddr addr,
                                          const uint8_t *buf, int len)
{
    return !cpu_physical_memory_rw(as, addr, (void *)buf, len, 1);
}
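
// These two helpers back uc->read_mem and uc->write_mem (wired up in
// uc_common_init() below). Illustrative call, assuming a mapped guest
// range at 0x1000:
//
//     uint8_t buf[4];
//     if (!uc->read_mem(&uc->as, 0x1000, buf, sizeof(buf))) {
//         /* range was not mapped */
//     }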
void tb_cleanup(struct uc_struct *uc);
void free_code_gen_buffer(struct uc_struct *uc);
static inline void free_address_spaces(struct uc_struct *uc)
{
    int i;

    address_space_destroy(&uc->as);
    for (i = 0; i < uc->cpu->num_ases; i++) {
        AddressSpace *as = uc->cpu->cpu_ases[i].as;
        address_space_destroy(as);
        g_free(as);
    }
}
/* This is *supposed* to be done by the class finalizer but it never executes */
static inline void free_machine_class_name(struct uc_struct *uc) {
    MachineClass *mc = MACHINE_GET_CLASS(uc, uc->machine_state);

    g_free(mc->name);
    mc->name = NULL;
}
static inline void free_tcg_temp_names(TCGContext *s)
{
#if TCG_TARGET_REG_BITS == 32
    int i;
    for (i = 0; i < s->nb_globals; i++) {
        TCGTemp *ts = &s->temps[i];
        if (ts->base_type == TCG_TYPE_I64) {
            // On 32-bit hosts a 64-bit global is split into two halves whose
            // names were heap-allocated with "_0"/"_1" suffixes; free them here.
            if (ts->name && ((strcmp(ts->name + (strlen(ts->name) - 2), "_0") == 0) ||
                             (strcmp(ts->name + (strlen(ts->name) - 2), "_1") == 0))) {
                free((void *)ts->name);
            }
        }
    }
#endif
}
/** Free common resources. */
static void release_common(void *t)
{
    TCGPool *po, *to;
    TCGContext *s = (TCGContext *)t;

    // Clean up TCG state.
    TCGOpDef *def = &s->tcg_op_defs[0];
    g_free(def->args_ct);
    g_free(def->sorted_args);
    g_free(s->tcg_op_defs);
    for (po = s->pool_first; po; po = to) {
        to = po->next;
        g_free(po);
    }
    tcg_pool_reset(s);
    g_hash_table_destroy(s->helpers);

    // Destroy the flat view hash table.
    g_hash_table_destroy(s->uc->flat_views);
    unicorn_free_empty_flat_view(s->uc);

    // TODO(danghvu): these functions are not available outside qemu,
    // so we keep them here instead of outside uc_close.
    free_address_spaces(s->uc);
    memory_free(s->uc);
    tb_cleanup(s->uc);
    free_code_gen_buffer(s->uc);
    free_machine_class_name(s->uc);
    free_tcg_temp_names(s);
}
static inline void uc_common_init(struct uc_struct *uc)
{
    memory_register_types(uc);
    uc->write_mem = cpu_physical_mem_write;
    uc->read_mem = cpu_physical_mem_read;
    uc->tcg_enabled = tcg_enabled;
    uc->tcg_exec_init = tcg_exec_init;
    uc->cpu_exec_init_all = cpu_exec_init_all;
    uc->cpu_exec_exit = cpu_exec_exit;
    uc->vm_start = vm_start;
    uc->memory_map = memory_map;
    uc->memory_map_ptr = memory_map_ptr;
    uc->memory_unmap = memory_unmap;
    uc->readonly_mem = memory_region_set_readonly;
    uc->target_page_size = TARGET_PAGE_SIZE;
    uc->target_page_align = TARGET_PAGE_SIZE - 1;

    if (!uc->release) {
        uc->release = release_common;
    }
}
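
// uc_common_init() is expected to run once per uc_struct from the
// arch-specific unicorn glue while the instance is being set up; the
// release_common() hook installed above is later invoked through
// uc->release (with the TCGContext as its argument) when the instance
// is torn down.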
#endif