Diffstat (limited to 'src/VBox/VMM/VMMR0/HWACCMR0.cpp')
-rw-r--r-- | src/VBox/VMM/VMMR0/HWACCMR0.cpp | 176
1 files changed, 131 insertions, 45 deletions
diff --git a/src/VBox/VMM/VMMR0/HWACCMR0.cpp b/src/VBox/VMM/VMMR0/HWACCMR0.cpp
index 30b6f362c..d8e225327 100644
--- a/src/VBox/VMM/VMMR0/HWACCMR0.cpp
+++ b/src/VBox/VMM/VMMR0/HWACCMR0.cpp
@@ -50,8 +50,8 @@
 /*******************************************************************************
 *   Internal Functions                                                         *
 *******************************************************************************/
-static DECLCALLBACK(void) hwaccmR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
-static DECLCALLBACK(void) hwaccmR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
+static DECLCALLBACK(void) hwaccmR0EnableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2);
+static DECLCALLBACK(void) hwaccmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2);
 static DECLCALLBACK(void) HWACCMR0InitCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2);
 static int hwaccmR0CheckCpuRcArray(int *paRc, unsigned cErrorCodes, RTCPUID *pidCpu);
 static DECLCALLBACK(void) hwaccmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvUser);
@@ -132,6 +132,7 @@ static struct
     HWACCMSTATE enmHwAccmState;
+    bool fGlobalInit;
     volatile bool fSuspended;
 } HWACCMR0Globals;
@@ -163,6 +164,9 @@ VMMR0DECL(int) HWACCMR0Init(void)
     HWACCMR0Globals.pfnTermVM = HWACCMR0DummyTermVM;
     HWACCMR0Globals.pfnSetupVM = HWACCMR0DummySetupVM;
 
+    /* Default is global VT-x/AMD-V init */
+    HWACCMR0Globals.fGlobalInit = true;
+
     /*
      * Check for VT-x and AMD-V capabilities
      */
@@ -454,6 +458,7 @@ VMMR0DECL(int) HWACCMR0Term(void)
     if (   HWACCMR0Globals.vmx.fSupported
         && HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
     {
+        Assert(HWACCMR0Globals.fGlobalInit);
         rc = SUPR0EnableVTx(false /* fEnable */);
         for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo); iCpu++)
         {
@@ -463,23 +468,32 @@ VMMR0DECL(int) HWACCMR0Term(void)
     }
     else
     {
-        int aRc[RTCPUSET_MAX_CPUS];
-
         Assert(!HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);
         if (!HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
        {
             rc = RTPowerNotificationDeregister(hwaccmR0PowerCallback, 0);
             Assert(RT_SUCCESS(rc));
         }
+        else
+            rc = VINF_SUCCESS;
 
-        memset(aRc, 0, sizeof(aRc));
-        rc = RTMpOnAll(hwaccmR0DisableCPU, aRc, NULL);
-        Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
+        /* Only disable VT-x/AMD-V on all CPUs if we enabled it before. */
+        if (HWACCMR0Globals.fGlobalInit)
+        {
+            int aRc[RTCPUSET_MAX_CPUS];
+
+            memset(aRc, 0, sizeof(aRc));
+            rc = RTMpOnAll(hwaccmR0DisableCpuCallback, aRc, NULL);
+            Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
+#ifdef VBOX_STRICT
+            for (unsigned i=0;i<RT_ELEMENTS(HWACCMR0Globals.aCpuInfo);i++)
+                AssertMsgRC(aRc[i], ("hwaccmR0DisableCpuCallback failed for cpu %d with rc=%d\n", i, aRc[i]));
+#endif
+        }
 
         /* Free the per-cpu pages used for VT-x and AMD-V */
         for (unsigned i=0;i<RT_ELEMENTS(HWACCMR0Globals.aCpuInfo);i++)
         {
-            AssertMsgRC(aRc[i], ("hwaccmR0DisableCPU failed for cpu %d with rc=%d\n", i, aRc[i]));
             if (HWACCMR0Globals.aCpuInfo[i].pMemObj != NIL_RTR0MEMOBJ)
             {
                 RTR0MemObjFree(HWACCMR0Globals.aCpuInfo[i].pMemObj, false);
@@ -583,6 +597,8 @@ VMMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM)
 {
     int rc;
 
+    HWACCMR0Globals.fGlobalInit = pVM->hwaccm.s.fGlobalInit;
+
     if (   HWACCMR0Globals.vmx.fSupported
         && HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx)
     {
@@ -594,6 +610,8 @@ VMMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM)
             HWACCMR0Globals.aCpuInfo[iCpu].fConfigured = true;
             Assert(HWACCMR0Globals.aCpuInfo[iCpu].pMemObj == NIL_RTR0MEMOBJ);
         }
+        /* If the host provides a VT-x init API, then we'll rely on that for global init. */
+        HWACCMR0Globals.fGlobalInit = pVM->hwaccm.s.fGlobalInit = true;
     }
     else
         AssertMsgFailed(("HWACCMR0EnableAllCpus/SUPR0EnableVTx: rc=%Rrc\n", rc));
@@ -627,13 +645,18 @@ VMMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM)
 #endif
             }
         }
-        /* First time, so initialize each cpu/core */
-        rc = RTMpOnAll(hwaccmR0EnableCPU, (void *)pVM, aRc);
+        if (HWACCMR0Globals.fGlobalInit)
+        {
+            /* First time, so initialize each cpu/core */
+            rc = RTMpOnAll(hwaccmR0EnableCpuCallback, (void *)pVM, aRc);
 
-        /* Check the return code of all invocations. */
-        if (RT_SUCCESS(rc))
-            rc = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
-        AssertMsgRC(rc, ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", idCpu, rc));
+            /* Check the return code of all invocations. */
+            if (RT_SUCCESS(rc))
+                rc = hwaccmR0CheckCpuRcArray(aRc, RT_ELEMENTS(aRc), &idCpu);
+            AssertMsgRC(rc, ("HWACCMR0EnableAllCpus failed for cpu %d with rc=%d\n", idCpu, rc));
+        }
+        else
+            rc = VINF_SUCCESS;
     }
 
     return rc;
@@ -642,17 +665,14 @@ VMMR0DECL(int) HWACCMR0EnableAllCpus(PVM pVM)
 }
 
 /**
- * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
- * is to be called on the target cpus.
+ * Disable VT-x or AMD-V on the current CPU
  *
+ * @returns VBox status code.
+ * @param   pVM         VM handle (can be 0!)
  * @param   idCpu       The identifier for the CPU the function is called on.
- * @param   pvUser1     The 1st user argument.
- * @param   pvUser2     The 2nd user argument.
  */
-static DECLCALLBACK(void) hwaccmR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+static int hwaccmR0EnableCpu(PVM pVM, RTCPUID idCpu)
 {
-    PVM pVM = (PVM)pvUser1; /* can be NULL! */
-    int *paRc = (int *)pvUser2;
     void *pvPageCpu;
     RTHCPHYS pPageCpuPhys;
     PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
@@ -661,7 +681,7 @@ static DECLCALLBACK(void) hwaccmR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *
     Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
     Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
     Assert(!pCpu->fConfigured);
-    Assert(ASMAtomicReadBool(&pCpu->fInUse) == false);
+    Assert(!HWACCMR0Globals.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);
 
     pCpu->idCpu = idCpu;
 
@@ -675,21 +695,21 @@ static DECLCALLBACK(void) hwaccmR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *
     if (!pCpu->pMemObj)
     {
         AssertFailed();
-        paRc[idCpu] = VERR_INTERNAL_ERROR;
-        return;
+        return VERR_INTERNAL_ERROR;
     }
 
     pvPageCpu = RTR0MemObjAddress(pCpu->pMemObj);
     pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
 
-    paRc[idCpu] = HWACCMR0Globals.pfnEnableCpu(pCpu, pVM, pvPageCpu, pPageCpuPhys);
-    AssertRC(paRc[idCpu]);
-    if (RT_SUCCESS(paRc[idCpu]))
+    int rc = HWACCMR0Globals.pfnEnableCpu(pCpu, pVM, pvPageCpu, pPageCpuPhys);
+    AssertRC(rc);
+    if (RT_SUCCESS(rc))
         pCpu->fConfigured = true;
 
-    return;
+    return rc;
 }
 
+
 /**
  * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
  * is to be called on the target cpus.
@@ -698,36 +718,66 @@ static DECLCALLBACK(void) hwaccmR0EnableCPU(RTCPUID idCpu, void *pvUser1, void *
  * @param   pvUser1     The 1st user argument.
  * @param   pvUser2     The 2nd user argument.
  */
-static DECLCALLBACK(void) hwaccmR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+static DECLCALLBACK(void) hwaccmR0EnableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+{
+    PVM pVM = (PVM)pvUser1; /* can be NULL! */
+    int *paRc = (int *)pvUser2;
+
+    paRc[idCpu] = hwaccmR0EnableCpu(pVM, idCpu);
+}
+
+
+/**
+ * Disable VT-x or AMD-V on the current CPU
+ *
+ * @returns VBox status code.
+ * @param   idCpu       The identifier for the CPU the function is called on.
+ */
+static int hwaccmR0DisableCpu(RTCPUID idCpu)
 {
     void *pvPageCpu;
     RTHCPHYS pPageCpuPhys;
-    int *paRc = (int *)pvUser1;
+    int rc;
     PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
 
     Assert(!HWACCMR0Globals.vmx.fSupported || !HWACCMR0Globals.vmx.fUsingSUPR0EnableVTx);
     Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
     Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));
-    Assert(ASMAtomicReadBool(&pCpu->fInUse) == false);
+    Assert(!HWACCMR0Globals.fGlobalInit || ASMAtomicReadBool(&pCpu->fInUse) == false);
     Assert(!pCpu->fConfigured || pCpu->pMemObj);
 
     if (!pCpu->pMemObj)
-        return;
+        return (pCpu->fConfigured) ? VERR_NO_MEMORY : VINF_SUCCESS /* not initialized. */;
 
     pvPageCpu = RTR0MemObjAddress(pCpu->pMemObj);
     pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
 
     if (pCpu->fConfigured)
     {
-        paRc[idCpu] = HWACCMR0Globals.pfnDisableCpu(pCpu, pvPageCpu, pPageCpuPhys);
-        AssertRC(paRc[idCpu]);
+        rc = HWACCMR0Globals.pfnDisableCpu(pCpu, pvPageCpu, pPageCpuPhys);
+        AssertRC(rc);
         pCpu->fConfigured = false;
     }
     else
-        paRc[idCpu] = VINF_SUCCESS; /* nothing to do */
+        rc = VINF_SUCCESS; /* nothing to do */
 
     pCpu->uCurrentASID = 0;
-    return;
+    return rc;
+}
+
+/**
+ * Worker function passed to RTMpOnAll, RTMpOnOthers and RTMpOnSpecific that
+ * is to be called on the target cpus.
+ *
+ * @param   idCpu       The identifier for the CPU the function is called on.
+ * @param   pvUser1     The 1st user argument.
+ * @param   pvUser2     The 2nd user argument.
+ */
+static DECLCALLBACK(void) hwaccmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+{
+    int *paRc = (int *)pvUser1;
+
+    paRc[idCpu] = hwaccmR0DisableCpu(idCpu);
 }
 
 /**
@@ -751,7 +801,8 @@ static DECLCALLBACK(void) hwaccmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvU
     if (enmEvent == RTPOWEREVENT_SUSPEND)
         ASMAtomicWriteBool(&HWACCMR0Globals.fSuspended, true);
 
-    if (HWACCMR0Globals.enmHwAccmState == HWACCMSTATE_ENABLED)
+    if (   HWACCMR0Globals.enmHwAccmState == HWACCMSTATE_ENABLED
+        && HWACCMR0Globals.fGlobalInit)
     {
         int aRc[RTCPUSET_MAX_CPUS];
         int rc;
@@ -761,7 +812,7 @@ static DECLCALLBACK(void) hwaccmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvU
         if (enmEvent == RTPOWEREVENT_SUSPEND)
         {
             /* Turn off VT-x or AMD-V on all CPUs. */
-            rc = RTMpOnAll(hwaccmR0DisableCPU, aRc, NULL);
+            rc = RTMpOnAll(hwaccmR0DisableCpuCallback, aRc, NULL);
             Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
         }
         else
@@ -778,7 +829,7 @@ static DECLCALLBACK(void) hwaccmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvU
 #endif
             /* Turn VT-x or AMD-V back on on all CPUs. */
-            rc = RTMpOnAll(hwaccmR0EnableCPU, NULL, aRc);
+            rc = RTMpOnAll(hwaccmR0EnableCpuCallback, NULL, aRc);
             Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
         }
     }
@@ -846,7 +897,7 @@ VMMR0DECL(int) HWACCMR0InitVM(PVM pVM)
 #endif
     }
 
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
 
@@ -931,15 +982,29 @@ VMMR0DECL(int) HWACCMR0SetupVM(PVM pVM)
     ASMAtomicWriteBool(&pCpu->fInUse, true);
 
-    for (unsigned i=0;i<pVM->cCPUs;i++)
+    for (VMCPUID i = 0; i < pVM->cCPUs; i++)
     {
         /* On first entry we'll sync everything. */
         pVM->aCpus[i].hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
     }
 
+    /* Enable VT-x or AMD-V if local init is required. */
+    if (!HWACCMR0Globals.fGlobalInit)
+    {
+        rc = hwaccmR0EnableCpu(pVM, idCpu);
+        AssertRCReturn(rc, rc);
+    }
+
     /* Setup VT-x or AMD-V. */
     rc = HWACCMR0Globals.pfnSetupVM(pVM);
 
+    /* Disable VT-x or AMD-V if local init was done before. */
+    if (!HWACCMR0Globals.fGlobalInit)
+    {
+        rc = hwaccmR0DisableCpu(idCpu);
+        AssertRC(rc);
+    }
+
     ASMAtomicWriteBool(&pCpu->fInUse, false);
 
     return rc;
@@ -984,6 +1049,13 @@ VMMR0DECL(int) HWACCMR0Enter(PVM pVM, PVMCPU pVCpu)
     else
         pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFF);
 
+    /* Enable VT-x or AMD-V if local init is required. */
+    if (!HWACCMR0Globals.fGlobalInit)
+    {
+        rc = hwaccmR0EnableCpu(pVM, idCpu);
+        AssertRCReturn(rc, rc);
+    }
+
     rc = HWACCMR0Globals.pfnEnterSession(pVM, pVCpu, pCpu);
     AssertRC(rc);
     /* We must save the host context here (VT-x) as we might be rescheduled on a different cpu after a long jump back to ring 3. */
@@ -1050,6 +1122,18 @@ VMMR0DECL(int) HWACCMR0Leave(PVM pVM, PVMCPU pVCpu)
 #endif
     pVCpu->hwaccm.s.idEnteredCpu = NIL_RTCPUID;
 
+    /* Disable VT-x or AMD-V if local init was done before. */
+    if (!HWACCMR0Globals.fGlobalInit)
+    {
+        rc = hwaccmR0DisableCpu(idCpu);
+        AssertRC(rc);
+
+        /* Reset these to force a TLB flush for the next entry. (-> EXPENSIVE) */
+        pVCpu->hwaccm.s.idLastCpu = NIL_RTCPUID;
+        pVCpu->hwaccm.s.uCurrentASID = 0;
+        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
+    }
+
     ASMAtomicWriteBool(&pCpu->fInUse, false);
     return rc;
 }
@@ -1064,9 +1148,9 @@ VMMR0DECL(int) HWACCMR0Leave(PVM pVM, PVMCPU pVCpu)
 VMMR0DECL(int) HWACCMR0RunGuestCode(PVM pVM, PVMCPU pVCpu)
 {
     CPUMCTX *pCtx;
-    RTCPUID idCpu = RTMpCpuId(); NOREF(idCpu);
     int rc;
 #ifdef VBOX_STRICT
+    RTCPUID idCpu = RTMpCpuId(); NOREF(idCpu);
     PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];
 #endif
 
@@ -1195,7 +1279,7 @@ VMMR0DECL(PVMCPU) HWACCMR0GetVMCPU(PVM pVM)
     RTCPUID idHostCpu = RTMpCpuId();
 
     /** @todo optimize for large number of VCPUs when that becomes more common. */
-    for (unsigned idCpu=0;idCpu<pVM->cCPUs;idCpu++)
+    for (VMCPUID idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
     {
         PVMCPU pVCpu = &pVM->aCpus[idCpu];
 
@@ -1274,7 +1358,8 @@ VMMR0DECL(int) HWACCMR0EnterSwitcher(PVM pVM, bool *pfVTxDisabled)
     *pfVTxDisabled = false;
 
     if (    HWACCMR0Globals.enmHwAccmState != HWACCMSTATE_ENABLED
-        ||  !HWACCMR0Globals.vmx.fSupported /* no such issues with AMD-V */)
+        ||  !HWACCMR0Globals.vmx.fSupported /* no such issues with AMD-V */
+        ||  !pVM->hwaccm.s.fGlobalInit /* Local init implies the CPU is currently not in VMX root mode. */)
         return VINF_SUCCESS;    /* nothing to do */
 
     switch(VMMGetSwitcher(pVM))
@@ -1321,7 +1406,8 @@ VMMR0DECL(int) HWACCMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled)
         return VINF_SUCCESS;    /* nothing to do */
 
     Assert(   HWACCMR0Globals.enmHwAccmState == HWACCMSTATE_ENABLED
-           && HWACCMR0Globals.vmx.fSupported);
+           && HWACCMR0Globals.vmx.fSupported
+           && pVM->hwaccm.s.fGlobalInit);
 
     PHWACCM_CPUINFO pCpu = HWACCMR0GetCurrentCpu();
     void *pvPageCpu;
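The change above distinguishes global from local VT-x/AMD-V initialization: with HWACCMR0Globals.fGlobalInit set, hardware virtualization is switched on once for every CPU via RTMpOnAll(hwaccmR0EnableCpuCallback, ...), while with local init each entry point (HWACCMR0SetupVM, HWACCMR0Enter/HWACCMR0Leave) brackets its work with hwaccmR0EnableCpu()/hwaccmR0DisableCpu() on the current CPU only. The sketch below is not VirtualBox code; it is a minimal, self-contained C model of that bracketing pattern, with hypothetical stand-ins (enable_hw_virt_on_cpu, disable_hw_virt_on_cpu, run_guest_work) in place of the real pfnEnableCpu/pfnDisableCpu/pfnEnterSession callbacks.

/* Minimal model of the global vs. local init pattern in the diff above.
 * Every function here is a hypothetical stand-in, not a VirtualBox API. */
#include <stdbool.h>
#include <stdio.h>

static bool g_fGlobalInit = true;   /* plays the role of HWACCMR0Globals.fGlobalInit */
static bool g_fCpuEnabled = false;  /* plays the role of pCpu->fConfigured for one CPU */

/* Stand-in for pfnEnableCpu: enter VMX root mode / set EFER.SVME on this CPU. */
static int enable_hw_virt_on_cpu(void)
{
    if (!g_fCpuEnabled)
    {
        printf("enable hw virt on this CPU\n");
        g_fCpuEnabled = true;
    }
    return 0;
}

/* Stand-in for pfnDisableCpu: leave VMX root mode / clear EFER.SVME on this CPU. */
static int disable_hw_virt_on_cpu(void)
{
    if (g_fCpuEnabled)
    {
        printf("disable hw virt on this CPU\n");
        g_fCpuEnabled = false;
    }
    return 0;
}

/* Stand-in for the work done between HWACCMR0Enter and HWACCMR0Leave. */
static int run_guest_work(void)
{
    printf("run guest code (hw virt enabled: %d)\n", g_fCpuEnabled);
    return 0;
}

/* With local init every session brackets the work with enable/disable, the way
 * HWACCMR0Enter/HWACCMR0Leave do when !HWACCMR0Globals.fGlobalInit; with global
 * init the enable is assumed to have happened once for all CPUs up front. */
static int do_session(void)
{
    int rc = 0;
    if (!g_fGlobalInit)
        rc = enable_hw_virt_on_cpu();
    if (rc == 0)
        rc = run_guest_work();
    if (!g_fGlobalInit)
        disable_hw_virt_on_cpu();   /* the real code also resets idLastCpu/uCurrentASID
                                       here to force a TLB flush on the next entry */
    return rc;
}

int main(void)
{
    g_fGlobalInit = true;           /* global init: turned on once, stays on */
    enable_hw_virt_on_cpu();        /* the real code does this on all CPUs via RTMpOnAll */
    do_session();
    disable_hw_virt_on_cpu();       /* only at HWACCMR0Term time */

    g_fGlobalInit = false;          /* local init: turned on only around each session */
    do_session();
    return 0;
}

The diff itself records the trade-off: with local init the CPU leaves VMX root mode between sessions (which is why HWACCMR0EnterSwitcher can skip disabling VT-x), but HWACCMR0Leave then has to reset idLastCpu and uCurrentASID and set VMCPU_FF_TLB_FLUSH, forcing a full TLB flush on the next entry, which its comment marks as expensive.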