unicorn/qemu/target-i386/cpu.c


/*
* i386 CPUID helper functions
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "unicorn/platform.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/cpus.h"
#include "sysemu/kvm.h"
#include "qapi/qmp/qerror.h"
#include "qapi-types.h"
#include "qapi-visit.h"
#include "qapi/visitor.h"
#include "hw/hw.h"
#include "sysemu/sysemu.h"
#include "topology.h"
#include "hw/cpu/icc_bus.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/i386/apic_internal.h"
#endif
/* Cache topology CPUID constants: */
/* CPUID Leaf 2 Descriptors */
#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B 0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d
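/* Illustrative note (not in the original source): each CPUID_2_* value
 * above is a one-byte descriptor from Intel's CPUID leaf 2 cache table
 * (e.g. 0x2c = 32KB, 8-way, 64-byte-line L1 data cache). Leaf 2 returns
 * several such bytes packed into EAX..EDX, which is where the code later
 * in this file places these descriptors.
 */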
/* CPUID Leaf 4 constants: */
/* EAX: */
#define CPUID_4_TYPE_DCACHE 1
#define CPUID_4_TYPE_ICACHE 2
#define CPUID_4_TYPE_UNIFIED 3
#define CPUID_4_LEVEL(l) ((l) << 5)
#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC (1 << 9)
/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE (1 << 1)
#define CPUID_4_COMPLEX_IDX (1 << 2)
#define ASSOC_FULL 0xFF
/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
a == 2 ? 0x2 : \
a == 4 ? 0x4 : \
a == 8 ? 0x6 : \
a == 16 ? 0x8 : \
a == 32 ? 0xA : \
a == 48 ? 0xB : \
a == 64 ? 0xC : \
a == 96 ? 0xD : \
a == 128 ? 0xE : \
a == ASSOC_FULL ? 0xF : \
0 /* invalid value */)
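/* Worked example (illustrative): AMD_ENC_ASSOC(16) == 0x8, the 4-bit
 * associativity code that CPUID leaf 0x80000006 expects; a value with no
 * defined encoding (e.g. 3) falls through to 0, marking the field invalid.
 */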
/* Definitions of the hardcoded cache entries we expose: */
/* L1 data cache: */
#define L1D_LINE_SIZE 64
#define L1D_ASSOCIATIVITY 8
#define L1D_SETS 64
#define L1D_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG 1
#define L1D_SIZE_KB_AMD 64
#define L1D_ASSOCIATIVITY_AMD 2
/* L1 instruction cache: */
#define L1I_LINE_SIZE 64
#define L1I_ASSOCIATIVITY 8
#define L1I_SETS 64
#define L1I_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG 1
#define L1I_SIZE_KB_AMD 64
#define L1I_ASSOCIATIVITY_AMD 2
/* Level 2 unified cache: */
#define L2_LINE_SIZE 64
#define L2_ASSOCIATIVITY 16
#define L2_SETS 4096
#define L2_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG 1
#define L2_SIZE_KB_AMD 512
/* Level 3 unified cache: */
#define L3_SIZE_KB 0 /* disabled */
#define L3_ASSOCIATIVITY 0 /* disabled */
#define L3_LINES_PER_TAG 0 /* disabled */
#define L3_LINE_SIZE 0 /* disabled */
#define L3_N_LINE_SIZE 64
#define L3_N_ASSOCIATIVITY 16
#define L3_N_SETS 16384
#define L3_N_PARTITIONS 1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG 1
#define L3_N_SIZE_KB_AMD 16384
/* TLB definitions: */
#define L1_DTLB_2M_ASSOC 1
#define L1_DTLB_2M_ENTRIES 255
#define L1_DTLB_4K_ASSOC 1
#define L1_DTLB_4K_ENTRIES 255
#define L1_ITLB_2M_ASSOC 1
#define L1_ITLB_2M_ENTRIES 255
#define L1_ITLB_4K_ASSOC 1
#define L1_ITLB_4K_ENTRIES 255
#define L2_DTLB_2M_ASSOC 0 /* disabled */
#define L2_DTLB_2M_ENTRIES 0 /* disabled */
#define L2_DTLB_4K_ASSOC 4
#define L2_DTLB_4K_ENTRIES 512
#define L2_ITLB_2M_ASSOC 0 /* disabled */
#define L2_ITLB_2M_ENTRIES 0 /* disabled */
#define L2_ITLB_4K_ASSOC 4
#define L2_ITLB_4K_ENTRIES 512
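/* Sketch (assumption, mirroring how cpu_x86_cpuid() builds AMD leaf
 * 0x80000005 later in this file): each TLB is packed one byte per field,
 * roughly
 *
 *   eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) |
 *         (L1_ITLB_2M_ASSOC << 8)  |  L1_ITLB_2M_ENTRIES;
 *
 * while leaf 0x80000006 packs the L2 TLBs using the 4-bit AMD_ENC_ASSOC()
 * encoding defined above.
 */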
void x86_cpu_register_types(void *);
static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
uint32_t vendor2, uint32_t vendor3)
{
int i;
for (i = 0; i < 4; i++) {
dst[i] = vendor1 >> (8 * i);
dst[i + 4] = vendor2 >> (8 * i);
dst[i + 8] = vendor3 >> (8 * i);
}
dst[CPUID_VENDOR_SZ] = '\0';
}
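/* Worked example (illustrative): the AMD vendor words vendor1 = 0x68747541
 * ("Auth"), vendor2 = 0x69746e65 ("enti") and vendor3 = 0x444d4163 ("cAMD")
 * are stored byte-by-byte, little-endian, yielding "AuthenticAMD".
 */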
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
CPUID_PAE | CPUID_SEP | CPUID_APIC)
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
/* partly implemented:
CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
/* missing:
CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
/* missing:
CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
CPUID_EXT_F16C, CPUID_EXT_RDRAND */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif
#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
CPUID_7_0_EBX_ERMS)
/* missing:
CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
/* missing:
CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
typedef struct FeatureWordInfo {
/* feature flag names are taken from "Intel Processor Identification and
* the CPUID Instruction" and AMD's "CPUID Specification".
* In cases of disagreement between feature naming conventions,
* aliases may be added.
*/
const char *feat_names[32];
uint32_t cpuid_eax; /* Input EAX for CPUID */
bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
uint32_t cpuid_ecx; /* Input ECX value for CPUID */
int cpuid_reg; /* output register (R_* constant) */
uint32_t tcg_features; /* Feature flags supported by TCG */
uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
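/* Illustrative note: an entry reads as "run CPUID with EAX = cpuid_eax
 * (and ECX = cpuid_ecx when cpuid_needs_ecx is set) and take the flags
 * from cpuid_reg". E.g. the FEAT_7_0_EBX entry below describes
 * CPUID.(EAX=7,ECX=0):EBX.
 */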
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
// FEAT_1_EDX
{
{
"fpu", "vme", "de", "pse",
"tsc", "msr", "pae", "mce",
"cx8", "apic", NULL, "sep",
"mtrr", "pge", "mca", "cmov",
"pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
NULL, "ds" /* Intel dts */, "acpi", "mmx",
"fxsr", "sse", "sse2", "ss",
"ht" /* Intel htt */, "tm", "ia64", "pbe",
},
1,
false, 0,
R_EDX,
TCG_FEATURES,
},
// FEAT_1_ECX
{
{
"pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
"ds_cpl", "vmx", "smx", "est",
"tm2", "ssse3", "cid", NULL,
"fma", "cx16", "xtpr", "pdcm",
NULL, "pcid", "dca", "sse4.1|sse4_1",
"sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
"tsc-deadline", "aes", "xsave", "osxsave",
"avx", "f16c", "rdrand", "hypervisor",
},
1,
false, 0,
R_ECX,
TCG_EXT_FEATURES,
},
// FEAT_7_0_EBX
{
{
"fsgsbase", "tsc_adjust", NULL, "bmi1",
"hle", "avx2", NULL, "smep",
"bmi2", "erms", "invpcid", "rtm",
NULL, NULL, "mpx", NULL,
"avx512f", "avx512dq", "rdseed", "adx",
"smap", "avx512ifma", "pcommit", "clflushopt",
"clwb", NULL, "avx512pf", "avx512er",
"avx512cd", NULL, "avx512bw", "avx512vl",
},
7,
true, 0,
R_EBX,
TCG_7_0_EBX_FEATURES,
},
// FEAT_7_0_ECX
{
{
NULL, "avx512vbmi", "umip", "pku",
"ospke", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, "rdpid", NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
7,
true, 0,
R_ECX,
TCG_7_0_ECX_FEATURES,
},
/* Feature names that are already defined in feature_name[] but are
 * also set in CPUID[8000_0001].EDX on AMD CPUs are not repeated in
 * feat_names below. They are copied automatically to
 * features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
 */
// FEAT_8000_0001_EDX
{
{
NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
"nx|xd", NULL, "mmxext", NULL /* mmx */,
NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb", "rdtscp",
NULL, "lm|i64", "3dnowext", "3dnow",
},
0x80000001,
false, 0,
R_EDX,
TCG_EXT2_FEATURES,
},
// FEAT_8000_0001_ECX
{
{
"lahf_lm", "cmp_legacy", "svm", "extapic",
"cr8legacy", "abm", "sse4a", "misalignsse",
"3dnowprefetch", "osvw", "ibs", "xop",
"skinit", "wdt", NULL, "lwp",
"fma4", "tce", NULL, "nodeid_msr",
NULL, "tbm", "topoext", "perfctr_core",
"perfctr_nb", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
0x80000001,
false, 0,
R_ECX,
TCG_EXT3_FEATURES,
},
// FEAT_8000_0007_EDX
{
{
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
"invtsc", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
0x80000007,
false, 0,
R_EDX,
TCG_APM_FEATURES,
CPUID_APM_INVTSC,
},
// FEAT_C000_0001_EDX
{
{
NULL, NULL, "xstore", "xstore-en",
NULL, NULL, "xcrypt", "xcrypt-en",
"ace2", "ace2-en", "phe", "phe-en",
"pmm", "pmm-en", NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
0xC0000001,
false, 0,
R_EDX,
TCG_EXT4_FEATURES,
},
// FEAT_KVM
{{NULL},
/* Unicorn: commented out
{
"kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
"kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
"kvmclock-stable-bit", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
KVM_CPUID_FEATURES,
false, 0,
R_EAX,
TCG_KVM_FEATURES,*/
},
// FEAT_HYPERV_EAX
{
{
NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
0x40000003,
false, 0,
R_EAX,
},
// FEAT_HYPERV_EBX
{
{
NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
NULL /* hv_post_messages */, NULL /* hv_signal_events */,
NULL /* hv_create_port */, NULL /* hv_connect_port */,
NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
0x40000003,
false, 0,
R_EBX,
},
// FEAT_HYPERV_EDX
{
{
NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
NULL, NULL,
NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
0x40000003,
false, 0,
R_EDX,
},
// FEAT_SVM
{
{
"npt", "lbrv", "svm_lock", "nrip_save",
"tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
NULL, NULL, "pause_filter", NULL,
"pfthreshold", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
0x8000000A,
false, 0,
R_EDX,
0,
TCG_SVM_FEATURES,
},
// FEAT_XSAVE
{
{
"xsaveopt", "xsavec", "xgetbv1", "xsaves",
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
0xd,
true, 1,
R_EAX,
0,
TCG_XSAVE_FEATURES,
},
// FEAT_ARAT
{
{
NULL, NULL, "arat", NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
6,
false, 0,
R_EAX,
TCG_6_EAX_FEATURES,
}
};
typedef struct X86RegisterInfo32 {
/* Name of register */
const char *name;
/* QAPI enum value register */
X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
#define REGISTER(reg) \
{ #reg, X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
REGISTER(EAX),
REGISTER(ECX),
REGISTER(EDX),
REGISTER(EBX),
REGISTER(ESP),
REGISTER(EBP),
REGISTER(ESI),
REGISTER(EDI),
};
#undef REGISTER
typedef struct ExtSaveArea {
uint32_t feature, bits;
uint32_t offset, size;
} ExtSaveArea;
static const ExtSaveArea x86_ext_save_areas[] = {
// XSTATE_FP_BIT
{
0, 0,
0, 0
},
// XSTATE_SSE_BIT
{
0, 0,
0, 0,
},
// XSTATE_YMM_BIT
{
FEAT_1_ECX, CPUID_EXT_AVX,
offsetof(X86XSaveArea, avx_state),
sizeof(XSaveAVX),
},
// XSTATE_BNDREGS_BIT
{
FEAT_7_0_EBX, CPUID_7_0_EBX_MPX,
offsetof(X86XSaveArea, bndreg_state),
sizeof(XSaveBNDREG),
},
// XSTATE_BNDCSR_BIT
{
FEAT_7_0_EBX, CPUID_7_0_EBX_MPX,
offsetof(X86XSaveArea, bndcsr_state),
sizeof(XSaveBNDCSR),
},
// XSTATE_OPMASK_BIT
{
FEAT_7_0_EBX, CPUID_7_0_EBX_AVX512F,
offsetof(X86XSaveArea, opmask_state),
sizeof(XSaveOpmask),
},
// XSTATE_ZMM_Hi256_BIT
{
FEAT_7_0_EBX, CPUID_7_0_EBX_AVX512F,
offsetof(X86XSaveArea, zmm_hi256_state),
sizeof(XSaveZMM_Hi256),
},
// XSTATE_Hi16_ZMM_BIT
{
FEAT_7_0_EBX, CPUID_7_0_EBX_AVX512F,
offsetof(X86XSaveArea, hi16_zmm_state),
sizeof(XSaveHi16_ZMM),
},
// XSTATE_PKRU_BIT
{
FEAT_7_0_ECX, CPUID_7_0_ECX_PKU,
offsetof(X86XSaveArea, pkru_state),
sizeof(XSavePKRU),
},
};
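/* Illustrative note: each offset/size pair above locates one XSAVE state
 * component inside X86XSaveArea; these are the values guests read back
 * from CPUID leaf 0xD, sub-leaf i (EAX = size, EBX = offset) for every
 * enabled component i >= 2.
 */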
const char *get_register_name_32(unsigned int reg)
{
if (reg >= CPU_NB_REGS32) {
return NULL;
}
return x86_reg_info_32[reg].name;
}
#ifdef _MSC_VER
#include <intrin.h>
#endif
/*
* Returns the set of feature flags that are supported and migratable by
* QEMU, for a given FeatureWord.
*/
static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
{
FeatureWordInfo *wi = &feature_word_info[w];
uint32_t r = 0;
int i;
for (i = 0; i < 32; i++) {
uint32_t f = 1U << i;
/* If the feature name is unknown, it is not supported by QEMU yet */
if (!wi->feat_names[i]) {
continue;
}
/* Skip features known to QEMU, but explicitly marked as unmigratable */
if (wi->unmigratable_flags & f) {
continue;
}
r |= f;
}
return r;
}
void host_cpuid(uint32_t function, uint32_t count,
uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
uint32_t vec[4];
#ifdef _MSC_VER
__cpuidex((int*)vec, function, count);
#else
#ifdef __x86_64__
asm volatile("cpuid"
: "=a"(vec[0]), "=b"(vec[1]),
"=c"(vec[2]), "=d"(vec[3])
: "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
asm volatile("pusha \n\t"
"cpuid \n\t"
"mov %%eax, 0(%2) \n\t"
"mov %%ebx, 4(%2) \n\t"
"mov %%ecx, 8(%2) \n\t"
"mov %%edx, 12(%2) \n\t"
"popa"
: : "a"(function), "c"(count), "S"(vec)
: "memory", "cc");
#else
abort();
#endif
#endif // _MSC_VER
if (eax)
*eax = vec[0];
if (ebx)
*ebx = vec[1];
if (ecx)
*ecx = vec[2];
if (edx)
*edx = vec[3];
}
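/* Usage sketch (illustrative, not called here): reading the host vendor
 * string with the helpers above:
 *
 *   uint32_t ebx, ecx, edx;
 *   char vendor[CPUID_VENDOR_SZ + 1];
 *   host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
 *   x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
 *
 * Note the EBX/EDX/ECX order: CPUID leaf 0 returns the vendor string in
 * that register order.
 */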
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
/* General substring compare of *[s1..e1) and *[s2..e2). sx is the start
 * of a substring. If ex is non-NULL it points to the first char after the
 * substring, otherwise the string is assumed to be sized by a terminating
 * NUL. Returns the lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
const char *s2, const char *e2)
{
for (;;) {
if (!*s1 || !*s2 || *s1 != *s2)
return (*s1 - *s2);
++s1, ++s2;
if (s1 == e1 && s2 == e2)
return (0);
else if (s1 == e1)
return (*s2);
else if (s2 == e2)
return (*s1);
}
}
/* Compare *[s..e) to *altstr. *altstr may be a simple string or multiple
 * '|'-delimited (possibly empty) strings, in which case the search for a
 * match within the alternatives proceeds left to right. Return 0 for
 * success, non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
const char *p, *q;
for (q = p = altstr; ; ) {
while (*p && *p != '|')
++p;
if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
return (0);
if (!*p)
return (1);
else
q = ++p;
}
}
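/* Worked example (illustrative): with altstr = "sse4.1|sse4_1", either
 * spelling of the flag matches and altcmp() returns 0; anything else
 * returns 1.
 */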
/* Search featureset for the flag *[s..e); if found, set the corresponding
 * bit in *pval and return true, otherwise return false.
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
const char **featureset)
{
uint32_t mask;
const char **ppc;
bool found = false;
for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
if (*ppc && !altcmp(s, e, *ppc)) {
*pval |= mask;
found = true;
}
}
return found;
}
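/* Worked example (illustrative): looking up "popcnt" against the
 * FEAT_1_ECX name table sets bit 23 (CPUID_EXT_POPCNT) in *pval and
 * returns true.
 */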
static void add_flagname_to_bitmaps(const char *flagname,
FeatureWordArray words,
Error **errp)
{
FeatureWord w;
for (w = 0; w < FEATURE_WORDS; w++) {
FeatureWordInfo *wi = &feature_word_info[w];
if (lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
break;
}
}
if (w == FEATURE_WORDS) {
error_setg(errp, "CPU feature %s not found", flagname);
}
}
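/* Usage sketch (illustrative; variable names are hypothetical): a
 * "+sse4.2"-style flag parsed from a CPU model string would land here as
 *
 *   FeatureWordArray plus_features = { 0 };
 *   add_flagname_to_bitmaps("sse4.2", plus_features, &err);
 *
 * which sets CPUID_EXT_SSE42 in plus_features[FEAT_1_ECX]; an unknown
 * name reports "CPU feature ... not found" through errp.
 */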
/* CPU class name definitions: */
#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
/* Return type name for a given CPU model name
* Caller is responsible for freeing the returned string.
*/
static char *x86_cpu_type_name(const char *model_name)
{
return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
static ObjectClass *x86_cpu_class_by_name(struct uc_struct *uc, const char *cpu_model)
{
ObjectClass *oc;
char *typename;
if (cpu_model == NULL) {
return NULL;
}
typename = x86_cpu_type_name(cpu_model);
oc = object_class_by_name(uc, typename);
g_free(typename);
return oc;
}
struct X86CPUDefinition {
const char *name;
uint32_t level;
uint32_t xlevel;
/* vendor is zero-terminated, 12 character ASCII string */
char vendor[CPUID_VENDOR_SZ + 1];
int family;
int model;
int stepping;
FeatureWordArray features;
char model_id[48];
bool cache_info_passthrough;
};
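/* Illustrative key for the positional initializers below: e.g. the
 * "qemu64" entry reads name = "qemu64", level = 0xd, xlevel = 0x8000000A,
 * vendor = CPUID_VENDOR_AMD, family = 6, model = 6, stepping = 3, then
 * the per-FeatureWord flag words, and finally model_id.
 */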
static X86CPUDefinition builtin_x86_defs[] = {
{
"qemu64",
0xd, 0x8000000A,
CPUID_VENDOR_AMD,
6, 6, 3,
{
// FEAT_1_EDX
PPRO_FEATURES |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
CPUID_PSE36,
// FEAT_1_ECX
CPUID_EXT_SSE3 | CPUID_EXT_CX16,
// FEAT_7_0_EBX
0,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
// FEAT_8000_0001_ECX
CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
},
"QEMU Virtual CPU version " QEMU_HW_VERSION
},
{
"phenom",
5, 0x8000001A,
CPUID_VENDOR_AMD,
16, 2, 3,
{
/* Missing: CPUID_HT */
// FEAT_1_EDX
PPRO_FEATURES |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
CPUID_PSE36 | CPUID_VME,
// FEAT_1_ECX
CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
CPUID_EXT_POPCNT,
// FEAT_7_0_EBX
0,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
/* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
CPUID_EXT3_CR8LEG,
CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
// FEAT_8000_0001_ECX
CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
// FEAT_8000_0007_EDX
0,
// FEAT_C000_0001_EDX
0,
// FEAT_KVM
0,
// FEAT_HYPERV_EAX
0,
// FEAT_HYPERV_EBX
0,
// FEAT_HYPERV_EDX
0,
/* Missing: CPUID_SVM_LBRV */
// FEAT_SVM
CPUID_SVM_NPT,
},
"AMD Phenom(tm) 9550 Quad-Core Processor",
},
{
"core2duo",
10, 0x80000008,
CPUID_VENDOR_INTEL,
6, 15, 11,
{
/* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
// FEAT_1_EDX
PPRO_FEATURES |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
/* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
* CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
// FEAT_1_ECX
CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
CPUID_EXT_CX16,
// FEAT_7_0_EBX
0,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
// FEAT_8000_0001_ECX
CPUID_EXT3_LAHF_LM,
},
"Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
},
{
"kvm64",
0xd, 0x80000008,
CPUID_VENDOR_INTEL,
15, 6, 1,
{
/* Missing: CPUID_HT */
// FEAT_1_EDX
PPRO_FEATURES | CPUID_VME |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
CPUID_PSE36,
/* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
// FEAT_1_ECX
CPUID_EXT_SSE3 | CPUID_EXT_CX16,
// FEAT_7_0_EBX
0,
// FEAT_7_0_ECX
0,
/* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
/* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
// FEAT_8000_0001_ECX
0,
},
"Common KVM processor",
},
{
"qemu32",
4, 0x80000004,
CPUID_VENDOR_INTEL,
6, 6, 3,
{
// FEAT_1_EDX
PPRO_FEATURES,
// FEAT_1_ECX
CPUID_EXT_SSSE3,
},
"QEMU Virtual CPU version " QEMU_HW_VERSION
},
{
"kvm32",
5, 0x80000008,
CPUID_VENDOR_INTEL,
15, 6, 1,
{
// FEAT_1_EDX
PPRO_FEATURES | CPUID_VME |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
// FEAT_1_ECX
CPUID_EXT_SSE3,
// FEAT_7_0_EBX
0,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
// FEAT_8000_0001_ECX
0,
},
"Common 32-bit KVM processor",
},
{
"coreduo",
10, 0x80000008,
CPUID_VENDOR_INTEL,
6, 14, 8,
{
/* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
// FEAT_1_EDX
PPRO_FEATURES | CPUID_VME |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
CPUID_SS,
/* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
* CPUID_EXT_PDCM, CPUID_EXT_VMX */
// FEAT_1_ECX
CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
// FEAT_7_0_EBX
0,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_NX,
},
"Genuine Intel(R) CPU T2600 @ 2.16GHz",
},
{
"486",
1, 0,
CPUID_VENDOR_INTEL,
4, 8, 0,
{
// FEAT_1_EDX
I486_FEATURES,
},
},
{
"pentium",
1, 0,
CPUID_VENDOR_INTEL,
5, 4, 3,
{
// FEAT_1_EDX
PENTIUM_FEATURES,
},
},
{
"pentium2",
2, 0,
CPUID_VENDOR_INTEL,
6, 5, 2,
{
// FEAT_1_EDX
PENTIUM2_FEATURES,
},
},
{
"pentium3",
3, 0,
CPUID_VENDOR_INTEL,
6, 7, 3,
{
// FEAT_1_EDX
PENTIUM3_FEATURES,
},
},
{
"athlon",
2, 0x80000008,
CPUID_VENDOR_AMD,
6, 2, 3,
{
// FEAT_1_EDX
PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
CPUID_MCA,
// FEAT_1_ECX
0,
// FEAT_7_0_EBX
0,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
2015-08-21 09:04:50 +02:00
CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
},
"QEMU Virtual CPU version " QEMU_HW_VERSION
},
{
"n270",
10, 0x80000008,
CPUID_VENDOR_INTEL,
6, 28, 2,
{
/* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
// FEAT_1_EDX
PPRO_FEATURES |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
CPUID_ACPI | CPUID_SS,
/* Some CPUs lack CPUID_SEP */
/* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
* CPUID_EXT_XTPR */
// FEAT_1_ECX
CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
CPUID_EXT_MOVBE,
// FEAT_7_0_EBX
0,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_NX,
// FEAT_8000_0001_ECX
CPUID_EXT3_LAHF_LM,
},
"Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
},
{
"Conroe",
10, 0x80000008,
CPUID_VENDOR_INTEL,
6, 15, 3,
{
// FEAT_1_EDX
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
CPUID_DE | CPUID_FP87,
// FEAT_1_ECX
CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
// FEAT_7_0_EBX
0,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
// FEAT_8000_0001_ECX
CPUID_EXT3_LAHF_LM,
},
"Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
},
{
"Penryn",
10, 0x80000008,
CPUID_VENDOR_INTEL,
6, 23, 3,
{
// FEAT_1_EDX
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
CPUID_DE | CPUID_FP87,
// FEAT_1_ECX
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
CPUID_EXT_SSE3,
// FEAT_7_0_EBX
0,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
// FEAT_8000_0001_ECX
CPUID_EXT3_LAHF_LM,
},
"Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
},
{
"Nehalem",
11, 0x80000008,
CPUID_VENDOR_INTEL,
6, 26, 3,
{
// FEAT_1_EDX
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
CPUID_DE | CPUID_FP87,
// FEAT_1_ECX
CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
// FEAT_7_0_EBX
0,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
// FEAT_8000_0001_ECX
CPUID_EXT3_LAHF_LM,
},
"Intel Core i7 9xx (Nehalem Class Core i7)",
},
{
"Westmere",
11, 0x80000008,
CPUID_VENDOR_INTEL,
6, 44, 1,
{
// FEAT_1_EDX
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
CPUID_DE | CPUID_FP87,
// FEAT_1_ECX
CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
// FEAT_7_0_EBX
0,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
2015-08-21 09:04:50 +02:00
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
// FEAT_8000_0001_ECX
CPUID_EXT3_LAHF_LM,
// FEAT_8000_0007_EDX
0,
// FEAT_C000_0001_EDX
0,
// FEAT_KVM
0,
// FEAT_HYPERV_EAX
0,
// FEAT_HYPERV_EBX
0,
// FEAT_HYPERV_EDX
0,
// FEAT_SVM
0,
// FEAT_XSAVE
0,
// FEAT_ARAT
CPUID_6_EAX_ARAT,
},
"Westmere E56xx/L56xx/X56xx (Nehalem-C)",
},
{
"SandyBridge",
0xd, 0x80000008,
CPUID_VENDOR_INTEL,
6, 42, 1,
{
// FEAT_1_EDX
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
CPUID_DE | CPUID_FP87,
// FEAT_1_ECX
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
CPUID_EXT_SSE3,
// FEAT_7_0_EBX
0,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
CPUID_EXT2_SYSCALL,
// FEAT_8000_0001_ECX
CPUID_EXT3_LAHF_LM,
// FEAT_8000_0007_EDX
0,
// FEAT_C000_0001_EDX
0,
// FEAT_KVM
0,
// FEAT_HYPERV_EAX
0,
// FEAT_HYPERV_EBX
0,
// FEAT_HYPERV_EDX
0,
// FEAT_SVM
0,
// FEAT_XSAVE
CPUID_XSAVE_XSAVEOPT,
// FEAT_ARAT
CPUID_6_EAX_ARAT,
},
"Intel Xeon E312xx (Sandy Bridge)",
},
{
"IvyBridge",
0xd, 0x80000008,
CPUID_VENDOR_INTEL,
6, 58, 9,
{
// FEAT_1_EDX
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
CPUID_DE | CPUID_FP87,
// FEAT_1_ECX
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
// FEAT_7_0_EBX
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
CPUID_7_0_EBX_ERMS,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
CPUID_EXT2_SYSCALL,
// FEAT_8000_0001_ECX
CPUID_EXT3_LAHF_LM,
// FEAT_8000_0007_EDX
0,
// FEAT_C000_0001_EDX
0,
// FEAT_KVM
0,
// FEAT_HYPERV_EAX
0,
// FEAT_HYPERV_EBX
0,
// FEAT_HYPERV_EDX
0,
// FEAT_SVM
0,
// FEAT_XSAVE
CPUID_XSAVE_XSAVEOPT,
// FEAT_ARAT
CPUID_6_EAX_ARAT,
},
"Intel Xeon E3-12xx v2 (Ivy Bridge)",
},
{
"Haswell-noTSX",
0xd, 0x80000008,
CPUID_VENDOR_INTEL,
6, 60, 1,
{
// FEAT_1_EDX
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
CPUID_DE | CPUID_FP87,
// FEAT_1_ECX
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
// FEAT_7_0_EBX
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
CPUID_EXT2_SYSCALL,
// FEAT_8000_0001_ECX
CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
// FEAT_8000_0007_EDX
0,
// FEAT_C000_0001_EDX
0,
// FEAT_KVM
0,
// FEAT_HYPERV_EAX
0,
// FEAT_HYPERV_EBX
0,
// FEAT_HYPERV_EDX
0,
// FEAT_SVM
0,
// FEAT_XSAVE
CPUID_XSAVE_XSAVEOPT,
// FEAT_ARAT
CPUID_6_EAX_ARAT,
},
"Intel Core Processor (Haswell, no TSX)",
},
{
"Haswell",
0xd, 0x80000008,
CPUID_VENDOR_INTEL,
6, 60, 1,
{
// FEAT_1_EDX
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
CPUID_DE | CPUID_FP87,
// FEAT_1_ECX
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
// FEAT_7_0_EBX
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
CPUID_7_0_EBX_RTM,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
CPUID_EXT2_SYSCALL,
// FEAT_8000_0001_ECX
CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
// FEAT_8000_0007_EDX
0,
// FEAT_C000_0001_EDX
0,
// FEAT_KVM
0,
// FEAT_HYPERV_EAX
0,
// FEAT_HYPERV_EBX
0,
// FEAT_HYPERV_EDX
0,
// FEAT_SVM
0,
// FEAT_XSAVE
CPUID_XSAVE_XSAVEOPT,
// FEAT_ARAT
CPUID_6_EAX_ARAT,
},
"Intel Core Processor (Haswell)",
},
{
"Broadwell-noTSX",
0xd, 0x80000008,
CPUID_VENDOR_INTEL,
6, 61, 2,
{
// FEAT_1_EDX
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
CPUID_DE | CPUID_FP87,
// FEAT_1_ECX
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
// FEAT_7_0_EBX
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
CPUID_7_0_EBX_SMAP,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
CPUID_EXT2_SYSCALL,
// FEAT_8000_0001_ECX
CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
// FEAT_8000_0007_EDX
0,
// FEAT_C000_0001_EDX
0,
// FEAT_KVM
0,
// FEAT_HYPERV_EAX
0,
// FEAT_HYPERV_EBX
0,
// FEAT_HYPERV_EDX
0,
// FEAT_SVM
0,
// FEAT_XSAVE
CPUID_XSAVE_XSAVEOPT,
// FEAT_ARAT
CPUID_6_EAX_ARAT,
},
"Intel Core Processor (Broadwell, no TSX)",
},
{
"Broadwell",
0xd, 0x80000008,
CPUID_VENDOR_INTEL,
6, 61, 2,
{
// FEAT_1_EDX
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
CPUID_DE | CPUID_FP87,
// FEAT_1_ECX
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
// FEAT_7_0_EBX
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
CPUID_7_0_EBX_SMAP,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
CPUID_EXT2_SYSCALL,
// FEAT_8000_0001_ECX
CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
// FEAT_8000_0007_EDX
0,
// FEAT_C000_0001_EDX
0,
// FEAT_KVM
0,
// FEAT_HYPERV_EAX
0,
// FEAT_HYPERV_EBX
0,
// FEAT_HYPERV_EDX
0,
// FEAT_SVM
0,
// FEAT_XSAVE
CPUID_XSAVE_XSAVEOPT,
// FEAT_ARAT
CPUID_6_EAX_ARAT,
},
"Intel Core Processor (Broadwell)",
},
{
"Skylake-Client",
0xd, 0x80000008,
CPUID_VENDOR_INTEL,
6, 94, 3,
{
// FEAT_1_EDX
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
CPUID_DE | CPUID_FP87,
// FEAT_1_ECX
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
// FEAT_7_0_EBX
CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
CPUID_EXT2_SYSCALL,
// FEAT_8000_0001_ECX
CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
// FEAT_8000_0007_EDX
0,
// FEAT_C000_0001_EDX
0,
// FEAT_KVM
0,
// FEAT_HYPERV_EAX
0,
// FEAT_HYPERV_EBX
0,
// FEAT_HYPERV_EDX
0,
// FEAT_SVM
0,
/* Missing: XSAVES (not supported by some Linux versions,
* including v4.1 to v4.6).
* KVM doesn't yet expose any XSAVES state save component,
* and the only one defined in Skylake (processor tracing)
* probably will block migration anyway.
*/
// FEAT_XSAVE
CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
CPUID_XSAVE_XGETBV1,
// FEAT_ARAT
CPUID_6_EAX_ARAT,
},
"Intel Core Processor (Skylake)",
},
{
"Opteron_G1",
5, 0x80000008,
CPUID_VENDOR_AMD,
15, 6, 1,
{
// FEAT_1_EDX
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
CPUID_DE | CPUID_FP87,
// FEAT_1_ECX
CPUID_EXT_SSE3,
// FEAT_7_0_EBX
0,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
},
"AMD Opteron 240 (Gen 1 Class Opteron)",
},
{
"Opteron_G2",
5, 0x80000008,
CPUID_VENDOR_AMD,
15, 6, 1,
{
// FEAT_1_EDX
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
CPUID_DE | CPUID_FP87,
// FEAT_1_ECX
CPUID_EXT_CX16 | CPUID_EXT_SSE3,
// FEAT_7_0_EBX
0,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
CPUID_EXT2_DE | CPUID_EXT2_FPU,
// FEAT_8000_0001_ECX
CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
},
"AMD Opteron 22xx (Gen 2 Class Opteron)",
},
{
"Opteron_G3",
5, 0x80000008,
CPUID_VENDOR_AMD,
15, 6, 1,
{
// FEAT_1_EDX
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
CPUID_DE | CPUID_FP87,
// FEAT_1_ECX
CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
CPUID_EXT_SSE3,
// FEAT_7_0_EBX
0,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
CPUID_EXT2_DE | CPUID_EXT2_FPU,
// FEAT_8000_0001_ECX
CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
},
"AMD Opteron 23xx (Gen 3 Class Opteron)",
},
{
"Opteron_G4",
0xd, 0x8000001A,
CPUID_VENDOR_AMD,
21, 1, 2,
{
// FEAT_1_EDX
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
CPUID_DE | CPUID_FP87,
// FEAT_1_ECX
CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
CPUID_EXT_SSE3,
// FEAT_7_0_EBX
0,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
// FEAT_8000_0001_ECX
2015-08-21 09:04:50 +02:00
CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
CPUID_EXT3_LAHF_LM,
},
"AMD Opteron 62xx class CPU",
},
{
"Opteron_G5",
0xd, 0x8000001A,
CPUID_VENDOR_AMD,
21, 2, 0,
{
// FEAT_1_EDX
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
CPUID_DE | CPUID_FP87,
// FEAT_1_ECX
CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
// FEAT_7_0_EBX
0,
// FEAT_7_0_ECX
0,
// FEAT_8000_0001_EDX
CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
// FEAT_8000_0001_ECX
2015-08-21 09:04:50 +02:00
CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
CPUID_EXT3_LAHF_LM,
},
"AMD Opteron 63xx class CPU",
},
};
static uint32_t x86_cpu_get_supported_feature_word(struct uc_struct *uc,
FeatureWord w, bool migratable);
static void report_unavailable_features(FeatureWord w, uint32_t mask)
{
FeatureWordInfo *f = &feature_word_info[w];
int i;
for (i = 0; i < 32; ++i) {
if ((1UL << i) & mask) {
const char *reg = get_register_name_32(f->cpuid_reg);
assert(reg);
fprintf(stderr, "warning: %s doesn't support requested feature: "
"CPUID.%02XH:%s%s%s [bit %d]\n",
"TCG",
f->cpuid_eax, reg,
f->feat_names[i] ? "." : "",
f->feat_names[i] ? f->feat_names[i] : "", i);
}
}
}
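/* Illustrative output: requesting e.g. "avx" when TCG_EXT_FEATURES lacks
 * CPUID_EXT_AVX would print something like
 *
 *   warning: TCG doesn't support requested feature: CPUID.01H:ECX.avx [bit 28]
 */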
static void x86_cpuid_version_get_family(struct uc_struct *uc,
Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
X86CPU *cpu = X86_CPU(uc, obj);
CPUX86State *env = &cpu->env;
int64_t value;
value = (env->cpuid_version >> 8) & 0xf;
if (value == 0xf) {
value += (env->cpuid_version >> 20) & 0xff;
}
visit_type_int(v, name, &value, errp);
}
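/* Worked example (illustrative): for family 16 (the "phenom" definition
 * above), the setter below stores 0xf in bits 11:8 and 16 - 0xf = 1 in
 * the extended-family field (bits 27:20); the getter above reads 0xf,
 * sees it saturated, adds the extended family and returns 16.
 */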
static void x86_cpuid_version_set_family(struct uc_struct *uc,
Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
X86CPU *cpu = X86_CPU(uc, obj);
CPUX86State *env = &cpu->env;
const int64_t min = 0;
const int64_t max = 0xff + 0xf;
Error *local_err = NULL;
int64_t value;
visit_type_int(v, name, &value, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
if (value < min || value > max) {
error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
name ? name : "null", value, min, max);
return;
}
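/* Family is bits 11:8 of CPUID[1].EAX; values above 0x0f set the base
* field to 0xf and store the remainder in extended family (bits 27:20). */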
env->cpuid_version &= ~0xff00f00;
if (value > 0x0f) {
env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
} else {
env->cpuid_version |= value << 8;
}
}
static void x86_cpuid_version_get_model(struct uc_struct *uc,
Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
X86CPU *cpu = X86_CPU(uc, obj);
CPUX86State *env = &cpu->env;
int64_t value;
value = (env->cpuid_version >> 4) & 0xf;
value |= ((env->cpuid_version >> 16) & 0xf) << 4;
visit_type_int(v, name, &value, errp);
}
static void x86_cpuid_version_set_model(struct uc_struct *uc,
Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
X86CPU *cpu = X86_CPU(uc, obj);
CPUX86State *env = &cpu->env;
const int64_t min = 0;
const int64_t max = 0xff;
Error *local_err = NULL;
int64_t value;
visit_type_int(v, name, &value, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
if (value < min || value > max) {
error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
name ? name : "null", value, min, max);
return;
}
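/* The low nibble goes into the model field (bits 7:4), the high nibble
* into the extended-model field (bits 19:16). */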
env->cpuid_version &= ~0xf00f0;
env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
static void x86_cpuid_version_get_stepping(struct uc_struct *uc,
Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
X86CPU *cpu = X86_CPU(uc, obj);
CPUX86State *env = &cpu->env;
int64_t value;
value = env->cpuid_version & 0xf;
visit_type_int(v, name, &value, errp);
}
static void x86_cpuid_version_set_stepping(struct uc_struct *uc,
Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
X86CPU *cpu = X86_CPU(uc, obj);
CPUX86State *env = &cpu->env;
const int64_t min = 0;
const int64_t max = 0xf;
Error *local_err = NULL;
int64_t value;
visit_type_int(v, name, &value, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
if (value < min || value > max) {
error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
name ? name : "null", value, min, max);
return;
}
env->cpuid_version &= ~0xf;
env->cpuid_version |= value & 0xf;
}
static char *x86_cpuid_get_vendor(struct uc_struct *uc, Object *obj, Error **errp)
{
X86CPU *cpu = X86_CPU(uc, obj);
CPUX86State *env = &cpu->env;
char *value;
value = (char *)g_malloc(CPUID_VENDOR_SZ + 1);
x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
env->cpuid_vendor3);
return value;
}
static int x86_cpuid_set_vendor(struct uc_struct *uc, Object *obj,
const char *value, Error **errp)
{
X86CPU *cpu = X86_CPU(uc, obj);
CPUX86State *env = &cpu->env;
int i;
if (strlen(value) != CPUID_VENDOR_SZ) {
error_setg(errp, QERR_PROPERTY_VALUE_BAD, "",
"vendor", value);
return -1;
}
env->cpuid_vendor1 = 0;
env->cpuid_vendor2 = 0;
env->cpuid_vendor3 = 0;
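/* Pack the 12-character vendor string four bytes per word, least
* significant byte first, matching the EBX/EDX/ECX layout of CPUID leaf 0. */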
for (i = 0; i < 4; i++) {
env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
}
return 0;
}
static char *x86_cpuid_get_model_id(struct uc_struct *uc, Object *obj, Error **errp)
{
X86CPU *cpu = X86_CPU(uc, obj);
CPUX86State *env = &cpu->env;
char *value;
int i;
value = g_malloc(48 + 1);
for (i = 0; i < 48; i++) {
value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
}
value[48] = '\0';
return value;
}
static int x86_cpuid_set_model_id(struct uc_struct *uc, Object *obj,
const char *model_id, Error **errp)
{
X86CPU *cpu = X86_CPU(uc, obj);
CPUX86State *env = &cpu->env;
int c, len, i;
if (model_id == NULL) {
model_id = "";
}
len = strlen(model_id);
memset(env->cpuid_model, 0, 48);
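/* The 48-character brand string is packed four characters per 32-bit
* word, as returned by CPUID leaves 0x80000002..0x80000004. */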
for (i = 0; i < 48; i++) {
if (i >= len) {
c = '\0';
} else {
c = (uint8_t)model_id[i];
}
env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
}
return 0;
}
static void x86_cpuid_get_tsc_freq(struct uc_struct *uc,
Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
X86CPU *cpu = X86_CPU(uc, obj);
int64_t value;
value = cpu->env.tsc_khz * 1000;
visit_type_int(v, name, &value, errp);
}
static void x86_cpuid_set_tsc_freq(struct uc_struct *uc,
Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
X86CPU *cpu = X86_CPU(uc, obj);
const int64_t min = 0;
const int64_t max = INT64_MAX;
Error *local_err = NULL;
int64_t value;
visit_type_int(v, name, &value, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
if (value < min || value > max) {
error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
name ? name : "null", value, min, max);
return;
}
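/* tsc_khz is stored in kHz; any sub-kHz remainder of the requested
* frequency is discarded. */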
cpu->env.tsc_khz = (int)(value / 1000);
}
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(struct uc_struct *uc,
Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
{
uint32_t *array = (uint32_t *)opaque;
FeatureWord w;
// These all get set up below, so no need to initialise them here.
X86CPUFeatureWordInfo word_infos[FEATURE_WORDS];
X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS];
X86CPUFeatureWordInfoList *list = NULL;
for (w = 0; w < FEATURE_WORDS; w++) {
FeatureWordInfo *wi = &feature_word_info[w];
X86CPUFeatureWordInfo *qwi = &word_infos[w];
qwi->cpuid_input_eax = wi->cpuid_eax;
qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
qwi->cpuid_input_ecx = wi->cpuid_ecx;
qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
qwi->features = array[w];
/* List will be in reverse order, but order shouldn't matter */
list_entries[w].next = list;
list_entries[w].value = &word_infos[w];
list = &list_entries[w];
}
visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
/* Convert all '_' in a feature string option name to '-', to make feature
* name conform to QOM property naming rule, which uses '-' instead of '_'.
*/
static inline void feat2prop(char *s)
{
while ((s = strchr(s, '_'))) {
*s = '-';
}
}
/* Parse "+feature,-feature,feature=foo" CPU feature string
*/
static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
Error **errp)
{
X86CPU *cpu = X86_CPU(cs->uc, cs);
char *featurestr; /* Single "key=value" string being parsed */
Error *local_err = NULL;
// Unicorn: added for consistent zeroing out
memset(cpu->plus_features, 0, sizeof(cpu->plus_features));
memset(cpu->minus_features, 0, sizeof(cpu->minus_features));
if (!features) {
return;
}
for (featurestr = strtok(features, ",");
featurestr && !local_err;
featurestr = strtok(NULL, ",")) {
const char *name;
const char *val = NULL;
char *eq = NULL;
char num[32];
/* Compatibility syntax: */
if (featurestr[0] == '+') {
add_flagname_to_bitmaps(featurestr + 1, cpu->plus_features, &local_err);
continue;
} else if (featurestr[0] == '-') {
add_flagname_to_bitmaps(featurestr + 1, cpu->minus_features, &local_err);
continue;
}
eq = strchr(featurestr, '=');
if (eq) {
*eq++ = 0;
val = eq;
} else {
val = "on";
}
feat2prop(featurestr);
name = featurestr;
/* Special case: */
if (!strcmp(name, "tsc-freq")) {
int64_t tsc_freq;
char *err;
tsc_freq = qemu_strtosz_suffix_unit(val, &err,
QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
if (tsc_freq < 0 || *err) {
error_setg(errp, "bad numerical value %s", val);
return;
}
snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
val = num;
name = "tsc-frequency";
}
object_property_parse(cs->uc, OBJECT(cpu), val, name, &local_err);
}
if (local_err) {
error_propagate(errp, local_err);
}
}
static uint32_t x86_cpu_get_supported_feature_word(struct uc_struct *uc,
FeatureWord w, bool migratable_only)
{
FeatureWordInfo *wi = &feature_word_info[w];
uint32_t r;
if (tcg_enabled(uc)) {
r = wi->tcg_features;
} else {
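/* Without TCG there is no feature filtering here; report every
* feature bit as supported. */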
return ~0;
}
if (migratable_only) {
r &= x86_cpu_get_migratable_flags(w);
}
return r;
}
/*
* Filters CPU feature words based on host availability of each feature.
*
* Returns: 0 if all flags are supported by the host, non-zero otherwise.
*/
static int x86_cpu_filter_features(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
FeatureWord w;
int rv = 0;
for (w = 0; w < FEATURE_WORDS; w++) {
uint32_t host_feat = x86_cpu_get_supported_feature_word(env->uc, w, cpu->migratable);
uint32_t requested_features = env->features[w];
env->features[w] &= host_feat;
cpu->filtered_features[w] = requested_features & ~env->features[w];
if (cpu->filtered_features[w]) {
if (cpu->check_cpuid || cpu->enforce_cpuid) {
report_unavailable_features(w, cpu->filtered_features[w]);
}
rv = 1;
}
}
return rv;
}
/* Load data from X86CPUDefinition
*/
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
CPUX86State *env = &cpu->env;
const char *vendor;
FeatureWord w;
object_property_set_int(env->uc, OBJECT(cpu), def->level, "level", errp);
object_property_set_int(env->uc, OBJECT(cpu), def->family, "family", errp);
object_property_set_int(env->uc, OBJECT(cpu), def->model, "model", errp);
object_property_set_int(env->uc, OBJECT(cpu), def->stepping, "stepping", errp);
object_property_set_int(env->uc, OBJECT(cpu), def->xlevel, "xlevel", errp);
cpu->cache_info_passthrough = def->cache_info_passthrough;
object_property_set_str(env->uc, OBJECT(cpu), def->model_id, "model-id", errp);
for (w = 0; w < FEATURE_WORDS; w++) {
env->features[w] = def->features[w];
}
env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
/* sysenter isn't supported in compatibility mode on AMD,
* syscall isn't supported in compatibility mode on Intel.
* Normally we advertise the actual CPU vendor, but you can
* override this using the 'vendor' property if you want to use
* KVM's sysenter/syscall emulation in compatibility mode and
* when doing cross vendor migration
*/
vendor = def->vendor;
object_property_set_str(env->uc, OBJECT(cpu), vendor, "vendor", errp);
}
X86CPU *cpu_x86_create(struct uc_struct *uc, const char *cpu_model, Error **errp)
{
X86CPU *cpu = NULL;
ObjectClass *oc;
gchar **model_pieces;
char *name, *features;
Error *error = NULL;
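/* cpu_model has the form "name[,feature,...]"; split the optional
* feature list off the model name. */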
model_pieces = g_strsplit(cpu_model, ",", 2);
if (!model_pieces[0]) {
error_setg(&error, "Invalid/empty CPU model name");
goto out;
}
name = model_pieces[0];
features = model_pieces[1];
oc = x86_cpu_class_by_name(uc, name);
if (oc == NULL) {
error_setg(&error, "Unable to find CPU definition: %s", name);
goto out;
}
cpu = X86_CPU(uc, object_new(uc, object_class_get_name(oc)));
x86_cpu_parse_featurestr(CPU(cpu), features, &error);
if (error) {
goto out;
}
out:
if (error != NULL) {
error_propagate(errp, error);
if (cpu) {
object_unref(uc, OBJECT(cpu));
cpu = NULL;
}
}
g_strfreev(model_pieces);
return cpu;
}
CPUX86State *cpu_x86_init_user(struct uc_struct *uc, const char *cpu_model)
{
Error *error = NULL;
X86CPU *cpu;
cpu = cpu_x86_create(uc, cpu_model, &error);
if (error) {
goto error;
}
object_property_set_int(uc, OBJECT(cpu), CPU(cpu)->cpu_index, "apic-id",
&error);
if (error) {
goto error;
}
object_property_set_bool(uc, OBJECT(cpu), true, "realized", &error);
if (error) {
goto error;
}
return &cpu->env;
error:
error_free(error);
if (cpu != NULL) {
object_unref(uc, OBJECT(cpu));
}
return NULL;
}
static void x86_cpu_cpudef_class_init(struct uc_struct *uc, ObjectClass *oc, void *data)
{
X86CPUDefinition *cpudef = data;
X86CPUClass *xcc = X86_CPU_CLASS(uc, oc);
xcc->cpu_def = cpudef;
}
static void x86_register_cpudef_type(struct uc_struct *uc, X86CPUDefinition *def)
{
char *typename = x86_cpu_type_name(def->name);
TypeInfo ti = {
typename,
TYPE_X86_CPU,
0,
0,
NULL,
NULL,
NULL,
NULL,
def,
x86_cpu_cpudef_class_init,
};
type_register(uc, &ti);
g_free(typename);
}
#if !defined(CONFIG_USER_ONLY)
void cpu_clear_apic_feature(CPUX86State *env)
{
env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
#endif /* !CONFIG_USER_ONLY */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx)
{
X86CPU *cpu = x86_env_get_cpu(env);
CPUState *cs = CPU(cpu);
uint32_t pkg_offset;
/* test if maximum index reached */
if (index & 0x80000000) {
if (index > env->cpuid_xlevel) {
if (env->cpuid_xlevel2 > 0) {
/* Handle the Centaur's CPUID instruction. */
if (index > env->cpuid_xlevel2) {
index = env->cpuid_xlevel2;
} else if (index < 0xC0000000) {
index = env->cpuid_xlevel;
}
} else {
/* Intel documentation states that invalid EAX input will
* return the same information as EAX=cpuid_level
* (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
*/
index = env->cpuid_level;
}
}
} else {
if (index > env->cpuid_level)
index = env->cpuid_level;
}
switch(index) {
case 0:
*eax = env->cpuid_level;
*ebx = env->cpuid_vendor1;
*edx = env->cpuid_vendor2;
*ecx = env->cpuid_vendor3;
break;
case 1:
*eax = env->cpuid_version;
*ebx = (cpu->apic_id << 24) |
8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
*ecx = env->features[FEAT_1_ECX];
if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
*ecx |= CPUID_EXT_OSXSAVE;
}
*edx = env->features[FEAT_1_EDX];
if (cs->nr_cores * cs->nr_threads > 1) {
*ebx |= (cs->nr_cores * cs->nr_threads) << 16;
*edx |= CPUID_HT;
}
break;
case 2:
/* cache info: needed for Pentium Pro compatibility */
if (cpu->cache_info_passthrough) {
host_cpuid(index, 0, eax, ebx, ecx, edx);
break;
}
*eax = 1; /* Number of CPUID[EAX=2] calls required */
*ebx = 0;
if (!cpu->enable_l3_cache) {
*ecx = 0;
} else {
*ecx = L3_N_DESCRIPTOR;
}
*edx = (L1D_DESCRIPTOR << 16) | \
(L1I_DESCRIPTOR << 8) | \
(L2_DESCRIPTOR);
break;
case 4:
/* cache info: needed for Core compatibility */
if (cpu->cache_info_passthrough) {
host_cpuid(index, count, eax, ebx, ecx, edx);
*eax &= ~0xFC000000;
} else {
*eax = 0;
switch (count) {
case 0: /* L1 dcache info */
*eax |= CPUID_4_TYPE_DCACHE | \
CPUID_4_LEVEL(1) | \
CPUID_4_SELF_INIT_LEVEL;
*ebx = (L1D_LINE_SIZE - 1) | \
((L1D_PARTITIONS - 1) << 12) | \
((L1D_ASSOCIATIVITY - 1) << 22);
*ecx = L1D_SETS - 1;
*edx = CPUID_4_NO_INVD_SHARING;
break;
case 1: /* L1 icache info */
*eax |= CPUID_4_TYPE_ICACHE | \
CPUID_4_LEVEL(1) | \
CPUID_4_SELF_INIT_LEVEL;
*ebx = (L1I_LINE_SIZE - 1) | \
((L1I_PARTITIONS - 1) << 12) | \
((L1I_ASSOCIATIVITY - 1) << 22);
*ecx = L1I_SETS - 1;
*edx = CPUID_4_NO_INVD_SHARING;
break;
case 2: /* L2 cache info */
*eax |= CPUID_4_TYPE_UNIFIED | \
CPUID_4_LEVEL(2) | \
CPUID_4_SELF_INIT_LEVEL;
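/* EAX bits 25:14: maximum number of logical processors sharing
* this cache, minus one. */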
if (cs->nr_threads > 1) {
*eax |= (cs->nr_threads - 1) << 14;
}
*ebx = (L2_LINE_SIZE - 1) | \
((L2_PARTITIONS - 1) << 12) | \
((L2_ASSOCIATIVITY - 1) << 22);
*ecx = L2_SETS - 1;
*edx = CPUID_4_NO_INVD_SHARING;
break;
case 3: /* L3 cache info */
if (!cpu->enable_l3_cache) {
*eax = 0;
*ebx = 0;
*ecx = 0;
*edx = 0;
break;
}
*eax |= CPUID_4_TYPE_UNIFIED | \
CPUID_4_LEVEL(3) | \
CPUID_4_SELF_INIT_LEVEL;
pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
*eax |= ((1 << pkg_offset) - 1) << 14;
*ebx = (L3_N_LINE_SIZE - 1) | \
((L3_N_PARTITIONS - 1) << 12) | \
((L3_N_ASSOCIATIVITY - 1) << 22);
*ecx = L3_N_SETS - 1;
*edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
break;
default: /* end of info */
*eax = 0;
*ebx = 0;
*ecx = 0;
*edx = 0;
break;
}
}
/* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
if ((*eax & 31) && cs->nr_cores > 1) {
*eax |= (cs->nr_cores - 1) << 26;
}
break;
case 5:
/* mwait info: needed for Core compatibility */
*eax = 0; /* Smallest monitor-line size in bytes */
*ebx = 0; /* Largest monitor-line size in bytes */
*ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
*edx = 0;
break;
case 6:
/* Thermal and Power Leaf */
*eax = env->features[FEAT_6_EAX];
*ebx = 0;
*ecx = 0;
*edx = 0;
break;
case 7:
/* Structured Extended Feature Flags Enumeration Leaf */
if (count == 0) {
*eax = 0; /* Maximum ECX value for sub-leaves */
*ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
*ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
*ecx |= CPUID_7_0_ECX_OSPKE;
}
*edx = 0; /* Reserved */
} else {
*eax = 0;
*ebx = 0;
*ecx = 0;
*edx = 0;
}
break;
case 9:
/* Direct Cache Access Information Leaf */
*eax = 0; /* Bits 0-31 in DCA_CAP MSR */
*ebx = 0;
*ecx = 0;
*edx = 0;
break;
case 0xA:
/* Architectural Performance Monitoring Leaf */
*eax = 0;
*ebx = 0;
*ecx = 0;
*edx = 0;
break;
case 0xB:
/* Extended Topology Enumeration Leaf */
if (!cpu->enable_cpuid_0xb) {
*eax = *ebx = *ecx = *edx = 0;
break;
}
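/* ECX[7:0] echoes the requested sub-leaf; EDX returns the 32-bit
* x2APIC ID of the current logical processor. */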
*ecx = count & 0xff;
*edx = cpu->apic_id;
switch (count) {
case 0:
*eax = apicid_core_offset(smp_cores, smp_threads);
*ebx = smp_threads;
*ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
break;
case 1:
*eax = apicid_pkg_offset(smp_cores, smp_threads);
*ebx = smp_cores * smp_threads;
*ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
break;
default:
*eax = 0;
*ebx = 0;
*ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
}
assert(!(*eax & ~0x1f));
*ebx &= 0xffff; /* The count doesn't need to be reliable. */
break;
case 0xD: {
uint64_t ena_mask;
int i;
/* Processor Extended State */
*eax = 0;
*ebx = 0;
*ecx = 0;
*edx = 0;
if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
break;
}
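/* Build the mask of XSAVE state components enabled by the configured
* features; x87 FP and SSE are always present. */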
ena_mask = (XSTATE_FP_MASK | XSTATE_SSE_MASK);
for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
const ExtSaveArea *esa = &x86_ext_save_areas[i];
if (env->features[esa->feature] & esa->bits) {
ena_mask |= (1ULL << i);
}
}
/* Unicorn: commented out
if (kvm_enabled()) {
KVMState *s = cs->kvm_state;
uint64_t kvm_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
kvm_mask <<= 32;
kvm_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
ena_mask &= kvm_mask;
} */
if (count == 0) {
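/* 0x240 = 512-byte legacy FXSAVE area + 64-byte XSAVE header,
* the minimum XSAVE buffer size. */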
*ecx = 0x240;
for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
const ExtSaveArea *esa = &x86_ext_save_areas[i];
if ((ena_mask >> i) & 1) {
*ecx = MAX(*ecx, esa->offset + esa->size);
}
}
*eax = ena_mask;
*edx = ena_mask >> 32;
*ebx = *ecx;
} else if (count == 1) {
*eax = env->features[FEAT_XSAVE];
} else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
const ExtSaveArea *esa = &x86_ext_save_areas[count];
if ((ena_mask >> count) & 1) {
*eax = esa->size;
*ebx = esa->offset;
}
}
break;
}
case 0x80000000:
*eax = env->cpuid_xlevel;
*ebx = env->cpuid_vendor1;
*edx = env->cpuid_vendor2;
*ecx = env->cpuid_vendor3;
break;
case 0x80000001:
*eax = env->cpuid_version;
*ebx = 0;
*ecx = env->features[FEAT_8000_0001_ECX];
*edx = env->features[FEAT_8000_0001_EDX];
/* The Linux kernel checks for the CMPLegacy bit and
* discards multiple thread information if it is set.
* So don't set it here for Intel to make Linux guests happy.
*/
if (cs->nr_cores * cs->nr_threads > 1) {
if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
*ecx |= 1 << 1; /* CmpLegacy bit */
}
}
break;
case 0x80000002:
case 0x80000003:
case 0x80000004:
*eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
*ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
*ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
*edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
break;
case 0x80000005:
/* cache info (L1 cache) */
if (cpu->cache_info_passthrough) {
host_cpuid(index, 0, eax, ebx, ecx, edx);
break;
}
*eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
(L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
*ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
(L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
*ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
(L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
*edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
(L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
break;
case 0x80000006:
/* cache info (L2 cache) */
if (cpu->cache_info_passthrough) {
host_cpuid(index, 0, eax, ebx, ecx, edx);
break;
}
*eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
(L2_DTLB_2M_ENTRIES << 16) | \
(AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
(L2_ITLB_2M_ENTRIES);
*ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
(L2_DTLB_4K_ENTRIES << 16) | \
(AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
(L2_ITLB_4K_ENTRIES);
*ecx = (L2_SIZE_KB_AMD << 16) | \
(AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
(L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
if (!cpu->enable_l3_cache) {
*edx = ((L3_SIZE_KB / 512) << 18) | \
(AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
(L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
} else {
*edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
(AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
(L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
}
break;
case 0x80000007:
*eax = 0;
*ebx = 0;
*ecx = 0;
*edx = env->features[FEAT_8000_0007_EDX];
break;
case 0x80000008:
/* virtual & phys address size in low 2 bytes. */
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
/* 64 bit processor, 48 bits virtual, configurable
* physical bits.
*/
*eax = 0x00003000 + cpu->phys_bits;
} else {
*eax = cpu->phys_bits;
}
*ebx = 0;
*ecx = 0;
*edx = 0;
if (cs->nr_cores * cs->nr_threads > 1) {
*ecx |= (cs->nr_cores * cs->nr_threads) - 1;
}
break;
case 0x8000000A:
if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
*eax = 0x00000001; /* SVM Revision */
*ebx = 0x00000010; /* nr of ASIDs */
*ecx = 0;
*edx = env->features[FEAT_SVM]; /* optional features */
} else {
*eax = 0;
*ebx = 0;
*ecx = 0;
*edx = 0;
}
break;
case 0xC0000000:
*eax = env->cpuid_xlevel2;
*ebx = 0;
*ecx = 0;
*edx = 0;
break;
case 0xC0000001:
/* Support for VIA CPU's CPUID instruction */
*eax = env->cpuid_version;
*ebx = 0;
*ecx = 0;
*edx = env->features[FEAT_C000_0001_EDX];
break;
case 0xC0000002:
case 0xC0000003:
case 0xC0000004:
/* Reserved for future use; filled with zeros for now */
*eax = 0;
*ebx = 0;
*ecx = 0;
*edx = 0;
break;
default:
/* reserved values: zero */
*eax = 0;
*ebx = 0;
*ecx = 0;
*edx = 0;
break;
}
}
/* CPUClass::reset() */
static void x86_cpu_reset(CPUState *s)
{
X86CPU *cpu = X86_CPU(s->uc, s);
X86CPUClass *xcc = X86_CPU_GET_CLASS(s->uc, cpu);
CPUX86State *env = &cpu->env;
int i;
target_ulong cr4;
uint64_t xcr0;
xcc->parent_reset(s);
memset(env, 0, offsetof(CPUX86State, end_reset_fields));
tlb_flush(s, 1);
env->old_exception = -1;
/* init to reset state */
env->hflags2 |= HF2_GIF_MASK;
cpu_x86_update_cr0(env, 0x60000010);
env->a20_mask = ~0x0;
env->smbase = 0x30000;
env->idt.limit = 0xffff;
env->gdt.limit = 0xffff;
env->ldt.limit = 0xffff;
env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
env->tr.limit = 0xffff;
env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
DESC_R_MASK | DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
DESC_A_MASK);
env->eip = 0xfff0;
env->regs[R_EDX] = env->cpuid_version;
env->eflags = 0x2;
/* FPU init */
for (i = 0; i < 8; i++) {
env->fptags[i] = 1;
}
cpu_set_fpuc(env, 0x37f);
env->mxcsr = 0x1f80;
/* All units are in INIT state. */
env->xstate_bv = 0;
env->pat = 0x0007040600070406ULL;
env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
memset(env->dr, 0, sizeof(env->dr));
env->dr[6] = DR6_FIXED_1;
env->dr[7] = DR7_FIXED_1;
cpu_breakpoint_remove_all(s, BP_CPU);
cpu_watchpoint_remove_all(s, BP_CPU);
cr4 = 0;
xcr0 = XSTATE_FP_MASK;
#ifdef CONFIG_USER_ONLY
/* Enable all the features for user-mode. */
if (env->features[FEAT_1_EDX] & CPUID_SSE) {
xcr0 |= XSTATE_SSE_MASK;
}
for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
const ExtSaveArea *esa = &x86_ext_save_areas[i];
if (env->features[esa->feature] & esa->bits) {
xcr0 |= 1ull << i;
}
}
if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
}
if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
cr4 |= CR4_FSGSBASE_MASK;
}
#endif
env->xcr0 = xcr0;
cpu_x86_update_cr4(env, cr4);
/*
* SDM 11.11.5 requires:
* - IA32_MTRR_DEF_TYPE MSR.E = 0
* - IA32_MTRR_PHYSMASKn.V = 0
* All other bits are undefined. For simplification, zero it all.
*/
env->mtrr_deftype = 0;
memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
#if !defined(CONFIG_USER_ONLY)
/* We hard-wire the BSP to the first CPU. */
apic_designate_bsp(env->uc, cpu->apic_state, s->cpu_index == 0);
s->halted = !cpu_is_bsp(cpu);
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_is_bsp(X86CPU *cpu)
{
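    /* Bit 8 (BSP) of IA32_APIC_BASE marks the bootstrap processor. */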
    return (cpu_get_apic_base(cpu->env.uc, cpu->apic_state) & MSR_IA32_APICBASE_BSP) != 0;
}
#endif
static void mce_init(X86CPU *cpu)
{
CPUX86State *cenv = &cpu->env;
unsigned int bank;
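    /* MCE/MCA is only set up for family >= 6 parts that advertise both
     * CPUID bits; mcg_cap then reports MCE_BANKS_DEF banks, plus LMCE
     * when enabled. */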
if (((cenv->cpuid_version >> 8) & 0xf) >= 6
&& (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
(CPUID_MCE | CPUID_MCA)) {
cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
(cpu->enable_lmce ? MCG_LMCE_P : 0);
cenv->mcg_ctl = ~(uint64_t)0;
for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
cenv->mce_banks[bank * 4] = ~(uint64_t)0;
}
}
}
#ifndef CONFIG_USER_ONLY
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
#if 0
DeviceState *dev = DEVICE(cpu);
APICCommonState *apic;
const char *apic_type = "apic";
cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
if (cpu->apic_state == NULL) {
error_setg(errp, "APIC device '%s' could not be created", apic_type);
return;
}
object_property_add_child(OBJECT(cpu), "lapic",
OBJECT(cpu->apic_state), &error_abort);
object_unref(OBJECT(cpu->apic_state));
//qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
/* TODO: convert to link<> */
apic = APIC_COMMON(cpu->apic_state);
apic->cpu = cpu;
#endif
}
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
if (cpu->apic_state == NULL) {
return;
}
if (qdev_init(cpu->apic_state)) {
error_setg(errp, "APIC device '%s' could not be initialized",
object_get_typename(OBJECT(cpu->apic_state)));
return;
}
}
#else
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif
/* Note: Only safe for use on x86(-64) hosts */
static QEMU_UNUSED_FUNC uint32_t x86_host_phys_bits(void)
{
uint32_t eax;
uint32_t host_phys_bits;
host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
if (eax >= 0x80000008) {
host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
        /* Note: according to AMD doc 25481 rev 2.34, leaf 0x80000008 has
         * a field at bits 23:16 that can specify a maximum physical
         * address width for the guest, overriding this value; we have
         * not seen it set in practice.
         */
host_phys_bits = eax & 0xff;
} else {
        /* It is an odd 64-bit machine that lacks the leaf reporting
         * physical address bits; fall back to 36, which matches most
         * older Intel CPUs.
         */
host_phys_bits = 36;
}
return host_phys_bits;
}
static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
{
if (*min < value) {
*min = value;
}
}
/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
CPUX86State *env = &cpu->env;
FeatureWordInfo *fi = &feature_word_info[w];
uint32_t eax = fi->cpuid_eax;
uint32_t region = eax & 0xF0000000;
if (!env->features[w]) {
return;
}
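    /* The top nibble of the defining leaf selects which maximum-level
     * field must be raised to cover it. */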
switch (region) {
case 0x00000000:
x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
break;
case 0x80000000:
x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
break;
case 0xC0000000:
x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
break;
}
}
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
(env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
(env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
(env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
(env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
static int x86_cpu_realizefn(struct uc_struct *uc, DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
X86CPU *cpu = X86_CPU(uc, dev);
X86CPUClass *xcc = X86_CPU_GET_CLASS(uc, dev);
CPUX86State *env = &cpu->env;
Error *local_err = NULL;
FeatureWord w;
if (cpu->apic_id == UNASSIGNED_APIC_ID) {
error_setg(errp, "apic-id property was not initialized properly");
return -1;
}
    /* CPUID[EAX=7,ECX=0].EBX always raises the minimum level automatically: */
x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
if (cpu->full_cpuid_auto_level) {
x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
/* SVM requires CPUID[0x8000000A] */
if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
}
}
/* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
if (env->cpuid_level == UINT32_MAX) {
env->cpuid_level = env->cpuid_min_level;
}
if (env->cpuid_xlevel == UINT32_MAX) {
env->cpuid_xlevel = env->cpuid_min_xlevel;
}
if (env->cpuid_xlevel2 == UINT32_MAX) {
env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
}
/*TODO: cpu->host_features incorrectly overwrites features
* set using "feat=on|off". Once we fix this, we can convert
* plus_features & minus_features to global properties
* inside x86_cpu_parse_featurestr() too.
*/
if (cpu->host_features) {
for (w = 0; w < FEATURE_WORDS; w++) {
env->features[w] =
x86_cpu_get_supported_feature_word(uc, w, cpu->migratable);
}
}
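    /* Apply explicit +feat/-feat overrides after host_features so the
     * user's choices take precedence. */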
for (w = 0; w < FEATURE_WORDS; w++) {
cpu->env.features[w] |= cpu->plus_features[w];
cpu->env.features[w] &= ~cpu->minus_features[w];
}
/* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
* CPUID[1].EDX.
*/
if (IS_AMD_CPU(env)) {
env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
& CPUID_EXT2_AMD_ALIASES);
}
    /* For 64-bit systems, think about the number of physical bits to
     * present. Ideally this should match the host; anything other than
     * matching the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits, which corresponds
     * to consumer AMD devices but nothing else.
     */
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
// Unicorn: removed KVM checks
if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
error_setg(errp, "TCG only supports phys-bits=%u",
TCG_PHYS_ADDR_BITS);
return -1;
}
/* 0 means it was not explicitly set by the user (or by machine
* compat_props or by the host code above). In this case, the default
* is the value used by TCG (40).
*/
if (cpu->phys_bits == 0) {
cpu->phys_bits = TCG_PHYS_ADDR_BITS;
}
} else {
        /* For 32-bit systems, don't use a user-set value; keep
         * phys_bits consistent with what we tell the guest.
         */
if (cpu->phys_bits != 0) {
error_setg(errp, "phys-bits is not user-configurable in 32 bit");
return -1;
}
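        /* PSE36 lets 4MB page entries address 36 physical bits even
         * without long mode. */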
if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
cpu->phys_bits = 36;
} else {
cpu->phys_bits = 32;
}
}
if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
error_setg(&local_err,
"TCG doesn't support requested features");
goto out;
}
if (tcg_enabled(env->uc)) {
tcg_x86_init(env->uc);
}
#ifndef CONFIG_USER_ONLY
//qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
x86_cpu_apic_create(cpu, &local_err);
if (local_err != NULL) {
goto out;
}
}
#endif
mce_init(cpu);
#ifndef CONFIG_USER_ONLY
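    /* Give the CPU its own address space, rooted in an alias of system
     * memory, so per-CPU views can diverge from the global one later. */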
if (tcg_enabled(uc)) {
AddressSpace *newas = g_new(AddressSpace, 1);
cpu->cpu_as_root = g_new(MemoryRegion, 1);
memory_region_init_alias(uc, cpu->cpu_as_root, OBJECT(cpu), "memory",
get_system_memory(uc), 0, ~0ull);
memory_region_set_enabled(cpu->cpu_as_root, true);
cs->num_ases = 1;
address_space_init(uc, newas, cpu->cpu_as_root, "CPU");
cpu_address_space_init(cs, newas, 0);
}
#endif
    if (qemu_init_vcpu(cs)) {
        return -1;
    }
x86_cpu_apic_realize(cpu, &local_err);
if (local_err != NULL) {
goto out;
}
cpu_reset(cs);
xcc->parent_realize(uc, dev, &local_err);
out:
if (local_err != NULL) {
error_propagate(errp, local_err);
return -1;
}
return 0;
}
static void x86_cpu_unrealizefn(struct uc_struct *uc, DeviceState *dev, Error **errp)
{
/* Unicorn: commented out
X86CPU *cpu = X86_CPU(uc, dev);
#ifndef CONFIG_USER_ONLY
cpu_remove_sync(CPU(dev));
qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif
if (cpu->apic_state) {
object_unparent(OBJECT(cpu->apic_state));
cpu->apic_state = NULL;
}*/
}
static void x86_cpu_initfn(struct uc_struct *uc, Object *obj, void *opaque)
{
//printf("... X86 initialize (object)\n");
CPUState *cs = CPU(obj);
X86CPU *cpu = X86_CPU(cs->uc, obj);
X86CPUClass *xcc = X86_CPU_GET_CLASS(uc, obj);
CPUX86State *env = &cpu->env;
cs->env_ptr = env;
cpu_exec_init(cs, opaque);
object_property_add(uc, obj, "family", "int",
x86_cpuid_version_get_family,
x86_cpuid_version_set_family, NULL, NULL, NULL);
object_property_add(uc, obj, "model", "int",
x86_cpuid_version_get_model,
x86_cpuid_version_set_model, NULL, NULL, NULL);
object_property_add(uc, obj, "stepping", "int",
x86_cpuid_version_get_stepping,
x86_cpuid_version_set_stepping, NULL, NULL, NULL);
object_property_add_str(uc, obj, "vendor",
x86_cpuid_get_vendor,
x86_cpuid_set_vendor, NULL);
object_property_add_str(uc, obj, "model-id",
x86_cpuid_get_model_id,
x86_cpuid_set_model_id, NULL);
object_property_add(uc, obj, "tsc-frequency", "int",
x86_cpuid_get_tsc_freq,
x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
object_property_add(uc, obj, "feature-words", "X86CPUFeatureWordInfo",
x86_cpu_get_feature_words,
NULL, NULL, (void *)env->features, NULL);
object_property_add(uc, obj, "filtered-features", "X86CPUFeatureWordInfo",
x86_cpu_get_feature_words,
NULL, NULL, (void *)cpu->filtered_features, NULL);
cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
// Unicorn: Should be removed with the commit backporting 2da00e3176abac34ca7a6aab1f5bbb94a0d03fc5
// from qemu, but left this in to keep the member value initialized
cpu->apic_id = UNASSIGNED_APIC_ID;
x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
static int64_t x86_cpu_get_arch_id(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs->uc, cs);
return cpu->apic_id;
}
static bool x86_cpu_get_paging_enabled(const CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs->uc, cs);
return (cpu->env.cr[0] & CR0_PG_MASK) != 0;
}
static void x86_cpu_set_pc(CPUState *cs, vaddr value)
{
X86CPU *cpu = X86_CPU(cs->uc, cs);
cpu->env.eip = value;
}
static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
X86CPU *cpu = X86_CPU(cs->uc, cs);
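    /* tb->pc holds the linear address; subtract the CS base to recover EIP. */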
cpu->env.eip = tb->pc - tb->cs_base;
}
static bool x86_cpu_has_work(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs->uc, cs);
CPUX86State *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
apic_poll_irq(cpu->apic_state);
cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
}
#endif
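    /* Runnable when a hard interrupt is pending with IF set, when an
     * NMI/INIT/SIPI/MCE is pending, or when an SMI arrives outside SMM. */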
return ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) ||
(cs->interrupt_request & (CPU_INTERRUPT_NMI |
CPU_INTERRUPT_INIT |
CPU_INTERRUPT_SIPI |
CPU_INTERRUPT_MCE)) ||
((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
!(env->hflags & HF_SMM_MASK));
}
static void x86_cpu_common_class_init(struct uc_struct *uc, ObjectClass *oc, void *data)
{
//printf("... init X86 cpu common class\n");
X86CPUClass *xcc = X86_CPU_CLASS(uc, oc);
CPUClass *cc = CPU_CLASS(uc, oc);
DeviceClass *dc = DEVICE_CLASS(uc, oc);
xcc->parent_realize = dc->realize;
dc->realize = x86_cpu_realizefn;
dc->unrealize = x86_cpu_unrealizefn;
dc->bus_type = TYPE_ICC_BUS;
xcc->parent_reset = cc->reset;
cc->reset = x86_cpu_reset;
cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
cc->class_by_name = x86_cpu_class_by_name;
cc->parse_features = x86_cpu_parse_featurestr;
cc->has_work = x86_cpu_has_work;
cc->do_interrupt = x86_cpu_do_interrupt;
cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
cc->dump_state = x86_cpu_dump_state;
cc->set_pc = x86_cpu_set_pc;
cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
cc->get_arch_id = x86_cpu_get_arch_id;
cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
cc->get_memory_mapping = x86_cpu_get_memory_mapping;
cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
#endif
#ifndef CONFIG_USER_ONLY
cc->debug_excp_handler = breakpoint_handler;
#endif
cc->cpu_exec_enter = x86_cpu_exec_enter;
cc->cpu_exec_exit = x86_cpu_exec_exit;
}
void x86_cpu_register_types(void *opaque)
{
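    /* Positional initializer; the field order is assumed to follow
     * unicorn's TypeInfo declaration: name, parent, class_size,
     * instance_size, instance_userdata, instance init/post-init/finalize
     * hooks, class base-init/init/finalize hooks, class_data, abstract. */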
const TypeInfo x86_cpu_type_info = {
TYPE_X86_CPU,
TYPE_CPU,
sizeof(X86CPUClass),
sizeof(X86CPU),
opaque,
x86_cpu_initfn,
NULL,
NULL,
NULL,
x86_cpu_common_class_init,
NULL,
NULL,
true,
};
//printf("... register X86 cpu\n");
int i;
type_register_static(opaque, &x86_cpu_type_info);
for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
x86_register_cpudef_type(opaque, &builtin_x86_defs[i]);
}
//printf("... END OF register X86 cpu\n");
}