Lines 428-434
vmx_allow_x2apic_msrs(struct vmx *vmx)

 
 	for (i = 0; i < 8; i++)
 		error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);
 
 	for (i = 0; i < 8; i++)
 		error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);
 
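
Reviewer note, not part of the patch: the two loops above expose the x2APIC TMR and IRR banks read-only — each is a 256-bit register split across eight 32-bit MSRs. A minimal userland sketch of the vector-to-MSR mapping, assuming the standard x2APIC base values (TMR0 at MSR 0x818, IRR0 at 0x820); the macro names stand in for the FreeBSD constants:

#include <stdint.h>
#include <stdio.h>

#define MSR_APIC_TMR0	0x818	/* x2APIC TMR, first of 8 MSRs */
#define MSR_APIC_IRR0	0x820	/* x2APIC IRR, first of 8 MSRs */

int
main(void)
{
	int vector = 0x41;	/* example interrupt vector */

	/* Each of the 8 MSRs covers 32 of the 256 vectors. */
	printf("vector 0x%x -> IRR MSR 0x%x, bit %d\n",
	    vector, MSR_APIC_IRR0 + vector / 32, vector % 32);
	return (0);
}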

Lines 576-582
vmx_disable(void *arg __unused)

 static int
 vmx_cleanup(void)
 {
 
 	if (pirvec >= 0)
 		lapic_ipi_free(pirvec);
 
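
Reviewer note, not part of the patch: the pirvec >= 0 guard assumes -1 is the "never allocated" sentinel for the posted-interrupt vector, so cleanup only frees a vector that initialization actually obtained. The pairing as a userland stand-in:

#include <stdio.h>

static int pirvec = -1;		/* -1: no vector allocated */

/* Stand-ins for lapic_ipi_alloc()/lapic_ipi_free(). */
static int  ipi_alloc(void) { return (251); }
static void ipi_free(int vec) { printf("freed vector %d\n", vec); }

int
main(void)
{
	pirvec = ipi_alloc();
	if (pirvec >= 0)	/* mirrors the guard in vmx_cleanup() */
		ipi_free(pirvec);
	return (0);
}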

Lines 816-822
vmx_init(int ipinum)

 
 	guest_l1d_flush = (cpu_ia32_arch_caps &
 	    IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0;
-	TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);
+	TUNABLE_INT_FETCH("hw.vmm.vmx.l1d_flush", &guest_l1d_flush);
 
 	/*
 	 * L1D cache flush is enabled. Use IA32_FLUSH_CMD MSR when
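
The rename above means an existing hw.vmm.l1d_flush line in loader.conf silently stops taking effect; the setting has to move to hw.vmm.vmx.l1d_flush. For reference, a userland stand-in for the fetch semantics — TUNABLE_INT_FETCH leaves the variable at its computed default when the tunable is unset (the helper below is a sketch, not the kernel macro):

#include <stdio.h>
#include <stdlib.h>

static int
tunable_int_fetch(const char *name, int *var)
{
	const char *val = getenv(name);	/* loader env in the kernel */

	if (val == NULL)
		return (0);	/* unset: keep the default */
	*var = atoi(val);
	return (1);
}

int
main(void)
{
	int guest_l1d_flush = 1;	/* default from CPU capabilities */

	tunable_int_fetch("hw.vmm.vmx.l1d_flush", &guest_l1d_flush);
	printf("guest_l1d_flush = %d\n", guest_l1d_flush);
	return (0);
}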

Lines 828-834
vmx_init(int ipinum)

 	if (guest_l1d_flush) {
 		if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) {
 			guest_l1d_flush_sw = 1;
-			TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw",
+			TUNABLE_INT_FETCH("hw.vmm.vmx.l1d_flush_sw",
 			    &guest_l1d_flush_sw);
 		}
 		if (guest_l1d_flush_sw) {
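
Same rename for the software-flush knob. Distilled, the two hunks configure this decision tree (a sketch with stand-in variables for the CPU feature bits, not the patch's code):

#include <stdio.h>

int
main(void)
{
	int skip_l1dfl_vmentry = 0;	/* IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY */
	int has_l1d_flush_msr = 1;	/* CPUID_STDEXT3_L1D_FLUSH */

	/* Flush unless the CPU says VM entry already does it. */
	int guest_l1d_flush = !skip_l1dfl_vmentry;
	int guest_l1d_flush_sw = 0;

	/* No IA32_FLUSH_CMD MSR: fall back to a software flush. */
	if (guest_l1d_flush && !has_l1d_flush_msr)
		guest_l1d_flush_sw = 1;

	printf("flush=%d sw=%d\n", guest_l1d_flush, guest_l1d_flush_sw);
	return (0);
}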

Lines 1097-1103
static int

 vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
 {
 	int handled, func;
 
 	func = vmxctx->guest_rax;
 
 	handled = x86_emulate_cpuid(vm, vcpu,
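
Reviewer note, not part of the patch: guest_rax holds the CPUID leaf because the guest executed CPUID with the function number in %eax, and x86_emulate_cpuid() answers on its behalf. What the guest side looks like, as a runnable x86-only sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	uint32_t eax = 0, ebx, ecx = 0, edx;	/* leaf 0 in %eax */
	char vendor[13];

	__asm__ volatile ("cpuid"
	    : "+a" (eax), "=b" (ebx), "+c" (ecx), "=d" (edx));

	/* Leaf 0 returns the vendor string in EBX:EDX:ECX. */
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';
	printf("max basic leaf %u, vendor %s\n", eax, vendor);
	return (0);
}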

Lines 3096-3102
vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)

 	uint64_t gi;
 	int error;
 
 	error = vmcs_getreg(&vmx->vmcs[vcpu], running,
 	    VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
 	*retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
 	return (error);
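
Reviewer note, not part of the patch: the guest interruptibility field is collapsed to a boolean here. Per the Intel SDM, bit 0 of that field is "blocking by STI" and bit 1 is "blocking by MOV SS"; HWINTR_BLOCKING is assumed to OR those two together. A sketch with stand-in constants:

#include <stdint.h>
#include <stdio.h>

#define STI_BLOCKING	0x1ULL	/* interruptibility bit 0 */
#define MOVSS_BLOCKING	0x2ULL	/* interruptibility bit 1 */
#define HWINTR_BLOCKING	(STI_BLOCKING | MOVSS_BLOCKING)

int
main(void)
{
	uint64_t gi = MOVSS_BLOCKING;	/* example VMCS field value */

	printf("interrupt shadow: %d\n", (gi & HWINTR_BLOCKING) ? 1 : 0);
	return (0);
}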

Lines 3212-3218
vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)

 		if (shadow > 0) {
 			/*
 			 * Store the unmodified value in the shadow
 			 */
 			error = vmcs_setreg(&vmx->vmcs[vcpu], running,
 			    VMCS_IDENT(shadow), val);
 		}
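
Reviewer note, not part of the patch: "shadow" here is the CR0/CR4 read shadow. For bits covered by the guest/host mask, guest reads return the shadow, so the unmodified guest value has to be stored there for the guest to read back exactly what it wrote. The VMX read semantics, sketched with example values:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t mask = 0x2000;		/* example: host owns CR4.VMXE */
	uint64_t cr = 0x2000;		/* value actually in effect */
	uint64_t shadow = 0x0;		/* unmodified guest write */

	/* Masked bits come from the shadow, the rest from the real CR. */
	uint64_t guest_view = (shadow & mask) | (cr & ~mask);

	printf("guest reads %#jx\n", (uintmax_t)guest_view);
	return (0);
}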

Lines 3698-3704
vmx_vlapic_init(void *arg, int vcpuid)

 	struct vmx *vmx;
 	struct vlapic *vlapic;
 	struct vlapic_vtx *vlapic_vtx;
 
 	vmx = arg;
 
 	vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC, M_WAITOK | M_ZERO);
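
Reviewer note, not part of the patch: one allocation of struct vlapic_vtx serves both layers because the generic struct vlapic is assumed to be its first member, so the two pointers alias. The pattern in isolation, with a hypothetical VT-x field:

#include <stdio.h>
#include <stdlib.h>

struct vlapic { int vcpuid; };

struct vlapic_vtx {
	struct vlapic vlapic;	/* assumed first member */
	int vtx_state;		/* hypothetical VT-x-specific field */
};

int
main(void)
{
	/* calloc stands in for malloc(..., M_WAITOK | M_ZERO). */
	struct vlapic *vlapic = calloc(1, sizeof(struct vlapic_vtx));

	if (vlapic == NULL)
		return (1);

	struct vlapic_vtx *vlapic_vtx = (struct vlapic_vtx *)vlapic;
	vlapic_vtx->vtx_state = 1;
	printf("vcpuid %d, vtx_state %d\n", vlapic->vcpuid,
	    vlapic_vtx->vtx_state);
	free(vlapic);
	return (0);
}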