diff --git sys/amd64/vmm/intel/vmx.c sys/amd64/vmm/intel/vmx.c
index 8deafb2..5c0abd3 100644
--- sys/amd64/vmm/intel/vmx.c
+++ sys/amd64/vmm/intel/vmx.c
@@ -428,7 +428,7 @@ vmx_allow_x2apic_msrs(struct vmx *vmx)
 
 	for (i = 0; i < 8; i++)
 		error += guest_msr_ro(vmx, MSR_APIC_TMR0 + i);
-	
+
 	for (i = 0; i < 8; i++)
 		error += guest_msr_ro(vmx, MSR_APIC_IRR0 + i);
 
@@ -576,7 +576,7 @@ vmx_disable(void *arg __unused)
 static int
 vmx_cleanup(void)
 {
-	
+
 	if (pirvec >= 0)
 		lapic_ipi_free(pirvec);
 
@@ -816,7 +816,7 @@ vmx_init(int ipinum)
 
 	guest_l1d_flush = (cpu_ia32_arch_caps &
 	    IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0;
-	TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);
+	TUNABLE_INT_FETCH("hw.vmm.vmx.l1d_flush", &guest_l1d_flush);
 
 	/*
 	 * L1D cache flush is enabled.  Use IA32_FLUSH_CMD MSR when
@@ -828,7 +828,7 @@ vmx_init(int ipinum)
 	if (guest_l1d_flush) {
 		if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) {
 			guest_l1d_flush_sw = 1;
-			TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw",
+			TUNABLE_INT_FETCH("hw.vmm.vmx.l1d_flush_sw",
 			    &guest_l1d_flush_sw);
 		}
 		if (guest_l1d_flush_sw) {
@@ -1097,7 +1097,7 @@ static int
 vmx_handle_cpuid(struct vm *vm, int vcpu, struct vmxctx *vmxctx)
 {
 	int handled, func;
-	
+
 	func = vmxctx->guest_rax;
 
 	handled = x86_emulate_cpuid(vm, vcpu,
@@ -3096,7 +3096,7 @@ vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
 	uint64_t gi;
 	int error;
 
-	error = vmcs_getreg(&vmx->vmcs[vcpu], running, 
+	error = vmcs_getreg(&vmx->vmcs[vcpu], running,
 	    VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
 	*retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
 	return (error);
@@ -3212,7 +3212,7 @@ vmx_setreg(void *arg, int vcpu, int reg, uint64_t val)
 		if (shadow > 0) {
 			/*
 			 * Store the unmodified value in the shadow
-			 */	
+			 */
 			error = vmcs_setreg(&vmx->vmcs[vcpu], running,
 			    VMCS_IDENT(shadow), val);
 		}
@@ -3698,7 +3698,7 @@ vmx_vlapic_init(void *arg, int vcpuid)
 	struct vmx *vmx;
 	struct vlapic *vlapic;
 	struct vlapic_vtx *vlapic_vtx;
-	
+
 	vmx = arg;
 	vlapic = malloc(sizeof(struct vlapic_vtx), M_VLAPIC,
 	    M_WAITOK | M_ZERO);
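
Note: besides whitespace cleanup, this patch renames the L1D flush tunables from hw.vmm.l1d_flush and hw.vmm.l1d_flush_sw to hw.vmm.vmx.l1d_flush and hw.vmm.vmx.l1d_flush_sw. Because both are read with TUNABLE_INT_FETCH() during module initialization, they are consumed as loader tunables; a minimal sketch of how they would be set in /boot/loader.conf after this change (example values only, not part of the patch):

	hw.vmm.vmx.l1d_flush="1"	# force-enable the L1D flush on VM entry
	hw.vmm.vmx.l1d_flush_sw="0"	# disable the software fallback flush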