author     Felix Geyer <debfx-pkg@fobos.de>  2009-11-23 11:32:17 +0100
committer  Felix Geyer <debfx-pkg@fobos.de>  2009-11-23 11:32:17 +0100
commit     541f51c4dab24f1decc1cb44888af9d45d619338 (patch)
tree       54e9fa6ac80d775650d0ce66c7c674163a567e0c
parent     44ee97ac270d79c70093cbdc4ad6d9d55eb0c679 (diff)
download   virtualbox-541f51c4dab24f1decc1cb44888af9d45d619338.tar.gz
Imported Upstream version 3.0.12-dfsg (upstream/3.0.12-dfsg)
-rw-r--r--  Config.kmk  6
-rw-r--r--  configure.vbs  28
-rw-r--r--  include/iprt/initterm.h  8
-rw-r--r--  include/iprt/memobj.h  20
-rw-r--r--  src/VBox/Additions/common/VBoxGuestLib/SysHlp.cpp  8
-rw-r--r--  src/VBox/Additions/common/VBoxService/VBoxServiceTimeSync.cpp  335
-rw-r--r--  src/VBox/Additions/common/VBoxService/VBoxServiceVMInfo.cpp  6
-rw-r--r--  src/VBox/Additions/linux/drm/Makefile.kmk  3
-rw-r--r--  src/VBox/Additions/linux/module/Makefile.kmk  3
-rw-r--r--  src/VBox/Additions/linux/sharedfolders/Makefile.kmk  3
-rw-r--r--  src/VBox/Additions/linux/sharedfolders/regops.c  12
-rw-r--r--  src/VBox/Additions/linux/sharedfolders/utils.c  2
-rw-r--r--  src/VBox/Devices/Network/DevE1000.cpp  7
-rw-r--r--  src/VBox/Devices/Network/DevPCNet.cpp  27
-rw-r--r--  src/VBox/Devices/PC/BIOS/rombios.c  70
-rw-r--r--  src/VBox/Devices/PC/DevAPIC.cpp  33
-rw-r--r--  src/VBox/Devices/Serial/DrvRawFile.cpp  3
-rw-r--r--  src/VBox/Devices/Storage/VmdkHDDCore.cpp  4
-rw-r--r--  src/VBox/Frontends/VirtualBox/src/VBoxConsoleWnd.cpp  1
-rw-r--r--  src/VBox/Frontends/VirtualBox/src/VBoxGlobal.cpp  10
-rw-r--r--  src/VBox/Frontends/VirtualBox/src/VBoxSelectorWnd.cpp  2
-rw-r--r--  src/VBox/Frontends/VirtualBox/src/hardenedmain.cpp  7
-rw-r--r--  src/VBox/Frontends/VirtualBox/src/main.cpp  7
-rw-r--r--  src/VBox/HostDrivers/Support/SUPDrv.c  6
-rw-r--r--  src/VBox/HostDrivers/Support/SUPDrvIOC.h  7
-rw-r--r--  src/VBox/HostDrivers/Support/SUPLib.cpp  12
-rw-r--r--  src/VBox/HostDrivers/Support/solaris/SUPDrv-solaris.c  4
-rw-r--r--  src/VBox/HostDrivers/Support/win/SUPDrvA-win.asm  2
-rw-r--r--  src/VBox/HostServices/SharedFolders/vbsf.cpp  21
-rwxr-xr-x  src/VBox/Installer/solaris/vboxdrv.sh  4
-rw-r--r--  src/VBox/Main/MachineImpl.cpp  4
-rw-r--r--  src/VBox/Main/include/VirtualBoxImpl.h  20
-rw-r--r--  src/VBox/Runtime/include/internal/memobj.h  10
-rw-r--r--  src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp  21
-rw-r--r--  src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c  6
-rw-r--r--  src/VBox/Runtime/r0drv/initterm-r0drv.cpp  50
-rw-r--r--  src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c  7
-rw-r--r--  src/VBox/Runtime/r0drv/memobj-r0drv.cpp  29
-rw-r--r--  src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp  35
-rw-r--r--  src/VBox/Runtime/r0drv/os2/memobj-r0drv-os2.cpp  10
-rw-r--r--  src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c  21
-rw-r--r--  src/VBox/Runtime/r0drv/solaris/vbi/memobj-r0drv-solaris.c  7
-rw-r--r--  src/VBox/Runtime/r0drv/solaris/vbi/mpnotification-r0drv-solaris.c  7
-rw-r--r--  src/VBox/VMM/HWACCM.cpp  122
-rw-r--r--  src/VBox/VMM/HWACCMInternal.h  23
-rw-r--r--  src/VBox/VMM/PDMDevHlp.cpp  20
-rw-r--r--  src/VBox/VMM/PGM.cpp  3
-rw-r--r--  src/VBox/VMM/VMMR0/GMMR0.cpp  2
-rw-r--r--  src/VBox/VMM/VMMR0/HWACCMR0.cpp  39
-rw-r--r--  src/VBox/VMM/VMMR0/HWSVMR0.cpp  16
-rw-r--r--  src/VBox/VMM/VMMR0/HWVMXR0.cpp  100
-rw-r--r--  src/VBox/VMM/testcase/tstAnimate.cpp  2
52 files changed, 806 insertions, 409 deletions
diff --git a/Config.kmk b/Config.kmk
index bf989666e..20f5971c8 100644
--- a/Config.kmk
+++ b/Config.kmk
@@ -154,7 +154,7 @@ VBOX_VERSION_MINOR = 0
# This is the current build number. It should be increased every time we publish a
# new build. The define is available in every source file. Only even build numbers
# will be published, odd numbers are set during development.
-VBOX_VERSION_BUILD = 10
+VBOX_VERSION_BUILD = 12
# Full version string (may include more than just x.y.z, but no spaces or other problematic chars).
VBOX_VERSION_STRING = $(VBOX_VERSION_MAJOR).$(VBOX_VERSION_MINOR).$(VBOX_VERSION_BUILD)
# Force the additions.sh script to get an exact additions build when we're doing the release.
@@ -420,7 +420,7 @@ if1of ($(KBUILD_TARGET), darwin solaris linux win freebsd)
endif
# Enables the Python<->XPCOM and Python<->COM bindings.
VBOX_WITH_PYTHON ?= 1
-# Build multiple Python<->XPCOM bridges for different Python version
+# Build multiple Python<->XPCOM bridges for different Python version
ifn1of ($(KBUILD_TARGET), darwin)
VBOX_WITH_MULTIVERSION_PYTHON ?= 1
endif
@@ -3789,7 +3789,7 @@ endif
SVN ?= svn$(HOSTSUFF_EXE)
VBOX_SVN_REV_KMK = $(PATH_OUT)/revision.kmk
ifndef VBOX_SVN_REV
- VBOX_SVN_REV_FALLBACK := $(patsubst %:,, $Rev: 54097 $ )
+ VBOX_SVN_REV_FALLBACK := $(patsubst %:,, $Rev: 54655 $ )
VBOX_SVN_DEP := $(wildcard $(PATH_ROOT)/.svn/entries)
ifeq ($(which $(SVN)),)
VBOX_SVN_DEP :=
diff --git a/configure.vbs b/configure.vbs
index dbba81be2..2da4f21c1 100644
--- a/configure.vbs
+++ b/configure.vbs
@@ -9,20 +9,20 @@
'
'
-' Copyright (C) 2006-2007 Sun Microsystems, Inc.
-'
-' This file is part of VirtualBox Open Source Edition (OSE), as
-' available from http://www.virtualbox.org. This file is free software;
-' you can redistribute it and/or modify it under the terms of the GNU
-' General Public License (GPL) as published by the Free Software
-' Foundation, in version 2 as it comes in the "COPYING" file of the
-' VirtualBox OSE distribution. VirtualBox OSE is distributed in the
-' hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
-'
-' Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
-' Clara, CA 95054 USA or visit http://www.sun.com if you need
-' additional information or have any questions.
-'
+' Copyright (C) 2006-2007 Sun Microsystems, Inc.
+'
+' This file is part of VirtualBox Open Source Edition (OSE), as
+' available from http://www.virtualbox.org. This file is free software;
+' you can redistribute it and/or modify it under the terms of the GNU
+' General Public License (GPL) as published by the Free Software
+' Foundation, in version 2 as it comes in the "COPYING" file of the
+' VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+' hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+'
+' Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
+' Clara, CA 95054 USA or visit http://www.sun.com if you need
+' additional information or have any questions.
+'
'*****************************************************************************
diff --git a/include/iprt/initterm.h b/include/iprt/initterm.h
index b02975d92..20cb7b890 100644
--- a/include/iprt/initterm.h
+++ b/include/iprt/initterm.h
@@ -114,6 +114,14 @@ RTR0DECL(int) RTR0Init(unsigned fReserved);
* Terminates the ring-0 driver runtime library.
*/
RTR0DECL(void) RTR0Term(void);
+
+/**
+ * Forcibily terminates the ring-0 driver runtime library.
+ *
+ * This should be used when statically linking the IPRT. Module using dynamic
+ * linking shall use RTR0Term. If you're not sure, use RTR0Term!
+ */
+RTR0DECL(void) RTR0TermForced(void);
#endif
#ifdef IN_RC
diff --git a/include/iprt/memobj.h b/include/iprt/memobj.h
index 38854a819..e50529aed 100644
--- a/include/iprt/memobj.h
+++ b/include/iprt/memobj.h
@@ -145,18 +145,24 @@ RTR0DECL(int) RTR0MemObjAllocCont(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutab
*
* @returns IPRT status code.
* @param pMemObj Where to store the ring-0 memory object handle.
- * @param R3Ptr User virtual address. This is rounded down to a page boundrary.
- * @param cb Number of bytes to lock. This is rounded up to nearest page boundrary.
- * @param R0Process The process to lock pages in. NIL_R0PROCESS is an alias for the current one.
+ * @param R3Ptr User virtual address. This is rounded down to a page
+ * boundrary.
+ * @param cb Number of bytes to lock. This is rounded up to
+ * nearest page boundrary.
+ * @param fAccess The desired access, a combination of RTMEM_PROT_READ
+ * and RTMEM_PROT_WRITE.
+ * @param R0Process The process to lock pages in. NIL_R0PROCESS is an
+ * alias for the current one.
*
- * @remarks RTR0MemGetAddressR3() and RTR0MemGetAddress() will return the rounded
+ * @remarks RTR0MemGetAddressR3() and RTR0MemGetAddress() will return therounded
* down address.
+ *
* @remarks Linux: This API requires that the memory begin locked is in a memory
* mapping that is not required in any forked off child process. This
* is not intented as permanent restriction, feel free to help out
* lifting it.
*/
-RTR0DECL(int) RTR0MemObjLockUser(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process);
+RTR0DECL(int) RTR0MemObjLockUser(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process);
/**
* Locks a range of kernel virtual memory.
@@ -165,10 +171,12 @@ RTR0DECL(int) RTR0MemObjLockUser(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb,
* @param pMemObj Where to store the ring-0 memory object handle.
* @param pv Kernel virtual address. This is rounded down to a page boundrary.
* @param cb Number of bytes to lock. This is rounded up to nearest page boundrary.
+ * @param fAccess The desired access, a combination of RTMEM_PROT_READ
+ * and RTMEM_PROT_WRITE.
*
* @remark RTR0MemGetAddress() will return the rounded down address.
*/
-RTR0DECL(int) RTR0MemObjLockKernel(PRTR0MEMOBJ pMemObj, void *pv, size_t cb);
+RTR0DECL(int) RTR0MemObjLockKernel(PRTR0MEMOBJ pMemObj, void *pv, size_t cb, uint32_t fAccess);
/**
* Allocates contiguous page aligned physical memory without (necessarily) any kernel mapping.
diff --git a/src/VBox/Additions/common/VBoxGuestLib/SysHlp.cpp b/src/VBox/Additions/common/VBoxGuestLib/SysHlp.cpp
index 554fbb7cc..4e1382dde 100644
--- a/src/VBox/Additions/common/VBoxGuestLib/SysHlp.cpp
+++ b/src/VBox/Additions/common/VBoxGuestLib/SysHlp.cpp
@@ -27,7 +27,8 @@
#include <iprt/assert.h>
#if !defined(RT_OS_WINDOWS) && !defined(RT_OS_LINUX)
-#include <iprt/memobj.h>
+# include <iprt/memobj.h>
+# include <iprt/mem.h>
#endif
@@ -92,8 +93,9 @@ int vbglLockLinear (void **ppvCtx, void *pv, uint32_t u32Size, bool fWriteAccess
#else
/* Default to IPRT - this ASSUMES that it is USER addresses we're locking. */
- RTR0MEMOBJ MemObj;
- rc = RTR0MemObjLockUser(&MemObj, (RTR3PTR)pv, u32Size, NIL_RTR0PROCESS);
+ RTR0MEMOBJ MemObj = NIL_RTR0MEMOBJ;
+ uint32_t fAccess = RTMEM_PROT_READ | (fWriteAccess ? RTMEM_PROT_WRITE : 0);
+ rc = RTR0MemObjLockUser(&MemObj, (RTR3PTR)pv, u32Size, fAccess, NIL_RTR0PROCESS);
if (RT_SUCCESS(rc))
*ppvCtx = MemObj;
else
diff --git a/src/VBox/Additions/common/VBoxService/VBoxServiceTimeSync.cpp b/src/VBox/Additions/common/VBoxService/VBoxServiceTimeSync.cpp
index 6746c8a9e..ac6882081 100644
--- a/src/VBox/Additions/common/VBoxService/VBoxServiceTimeSync.cpp
+++ b/src/VBox/Additions/common/VBoxService/VBoxServiceTimeSync.cpp
@@ -1,4 +1,4 @@
-/** $Id: VBoxServiceTimeSync.cpp $ */
+/* $Id: VBoxServiceTimeSync.cpp $ */
/** @file
* VBoxService - Guest Additions TimeSync Service.
*/
@@ -76,16 +76,16 @@
* calculate the dynamic minimum adjust factor.
* -# g_TimesyncMaxLatency - When to start discarding the data as utterly
* useless and take a rest (someone is too busy to give us good data).
+ * -# g_TimeSyncSetThreshold - The threshold at which we will just set the time
+ * instead of trying to adjust it (milliseconds).
*/
-
-
/*******************************************************************************
* Header Files *
*******************************************************************************/
#ifdef RT_OS_WINDOWS
-# include <windows.h>
-# include <winbase.h>
+# include <Windows.h>
+# include <winbase.h> /** @todo r=bird: Why is this here? Windows.h should include winbase.h... */
#else
# include <unistd.h>
# include <errno.h>
@@ -123,6 +123,14 @@ static uint32_t g_TimeSyncMinAdjust = 100;
static uint32_t g_TimeSyncLatencyFactor = 8;
/** @see pg_vboxservice_timesync */
static uint32_t g_TimeSyncMaxLatency = 250;
+/** @see pg_vboxservice_timesync */
+static uint32_t g_TimeSyncSetThreshold = 20*60*1000;
+/** Whether the next adjustment should just set the time instead of trying to
+ * adjust it. This is used to implement --timesync-set-start. */
+static bool volatile g_fTimeSyncSetNext = false;
+
+/** Current error count. Used to knowing when to bitch and when not to. */
+static uint32_t g_cTimeSyncErrors = 0;
/** The semaphore we're blocking on. */
static RTSEMEVENTMULTI g_TimeSyncEvent = NIL_RTSEMEVENTMULTI;
@@ -164,6 +172,15 @@ static DECLCALLBACK(int) VBoxServiceTimeSyncOption(const char **ppszShort, int a
else if (!strcmp(argv[*pi], "--timesync-max-latency"))
rc = VBoxServiceArgUInt32(argc, argv, "", pi,
&g_TimeSyncMaxLatency, 1, 3600000);
+ else if (!strcmp(argv[*pi], "--timesync-set-threshold"))
+ rc = VBoxServiceArgUInt32(argc, argv, "", pi,
+ &g_TimeSyncSetThreshold, 0, 7*24*60*1000); /* a week */
+ else if (!strcmp(argv[*pi], "--timesync-set-start"))
+ {
+ g_fTimeSyncSetNext = true;
+ rc = VINF_SUCCESS;
+ }
+
return rc;
}
@@ -197,7 +214,9 @@ static DECLCALLBACK(int) VBoxServiceTimeSyncInit(void)
if (LookupPrivilegeValue(NULL, SE_SYSTEMTIME_NAME, &tkPriv.Privileges[0].Luid))
{
DWORD cbRet = sizeof(g_TkOldPrivileges);
- if (!AdjustTokenPrivileges(g_hTokenProcess, FALSE, &tkPriv, sizeof(TOKEN_PRIVILEGES), &g_TkOldPrivileges, &cbRet))
+ if (AdjustTokenPrivileges(g_hTokenProcess, FALSE, &tkPriv, sizeof(TOKEN_PRIVILEGES), &g_TkOldPrivileges, &cbRet))
+ rc = VINF_SUCCESS;
+ else
{
DWORD dwErr = GetLastError();
rc = RTErrConvertFromWin32(dwErr);
@@ -210,7 +229,6 @@ static DECLCALLBACK(int) VBoxServiceTimeSyncInit(void)
rc = RTErrConvertFromWin32(dwErr);
VBoxServiceError("Looking up token privileges (SE_SYSTEMTIME_NAME) failed with status code %u/%Rrc!\n", dwErr, rc);
}
-
if (RT_FAILURE(rc))
{
CloseHandle(g_hTokenProcess);
@@ -226,20 +244,170 @@ static DECLCALLBACK(int) VBoxServiceTimeSyncInit(void)
}
}
- if (!::GetSystemTimeAdjustment(&g_dwWinTimeAdjustment, &g_dwWinTimeIncrement, &g_bWinTimeAdjustmentDisabled))
+ if (GetSystemTimeAdjustment(&g_dwWinTimeAdjustment, &g_dwWinTimeIncrement, &g_bWinTimeAdjustmentDisabled))
+ VBoxServiceVerbose(3, "Windows time adjustment: Initially %ld (100ns) units per %ld (100 ns) units interval, disabled=%d\n",
+ g_dwWinTimeAdjustment, g_dwWinTimeIncrement, g_bWinTimeAdjustmentDisabled ? 1 : 0);
+ else
{
DWORD dwErr = GetLastError();
rc = RTErrConvertFromWin32(dwErr);
VBoxServiceError("Could not get time adjustment values! Last error: %ld!\n", dwErr);
}
- else VBoxServiceVerbose(3, "Windows time adjustment: Initially %ld (100ns) units per %ld (100 ns) units interval, disabled=%d\n",
- g_dwWinTimeAdjustment, g_dwWinTimeIncrement, g_bWinTimeAdjustmentDisabled ? 1 : 0);
#endif /* RT_OS_WINDOWS */
return rc;
}
+/**
+ * Try adjust the time using adjtime or similar.
+ *
+ * @returns true on success, false on failure.
+ *
+ * @param pDrift The time adjustment.
+ */
+static bool VBoxServiceTimeSyncAdjust(PCRTTIMESPEC pDrift)
+{
+#ifdef RT_OS_WINDOWS
+/** @todo r=bird: NT4 doesn't have GetSystemTimeAdjustment. */
+ DWORD dwWinTimeAdjustment, dwWinNewTimeAdjustment, dwWinTimeIncrement;
+ BOOL fWinTimeAdjustmentDisabled;
+ if (GetSystemTimeAdjustment(&dwWinTimeAdjustment, &dwWinTimeIncrement, &fWinTimeAdjustmentDisabled))
+ {
+ DWORD dwDiffMax = g_dwWinTimeAdjustment * 0.50;
+ DWORD dwDiffNew = dwWinTimeAdjustment * 0.10;
+
+ if (RTTimeSpecGetMilli(pDrift) > 0)
+ {
+ dwWinNewTimeAdjustment = dwWinTimeAdjustment + dwDiffNew;
+ if (dwWinNewTimeAdjustment > (g_dwWinTimeAdjustment + dwDiffMax))
+ {
+ dwWinNewTimeAdjustment = g_dwWinTimeAdjustment + dwDiffMax;
+ dwDiffNew = dwDiffMax;
+ }
+ }
+ else
+ {
+ dwWinNewTimeAdjustment = dwWinTimeAdjustment - dwDiffNew;
+ if (dwWinNewTimeAdjustment < (g_dwWinTimeAdjustment - dwDiffMax))
+ {
+ dwWinNewTimeAdjustment = g_dwWinTimeAdjustment - dwDiffMax;
+ dwDiffNew = dwDiffMax;
+ }
+ }
+
+ VBoxServiceVerbose(3, "Windows time adjustment: Drift=%lldms\n", RTTimeSpecGetMilli(pDrift));
+ VBoxServiceVerbose(3, "Windows time adjustment: OrgTA=%ld, CurTA=%ld, NewTA=%ld, DiffNew=%ld, DiffMax=%ld\n",
+ g_dwWinTimeAdjustment, dwWinTimeAdjustment, dwWinNewTimeAdjustment, dwDiffNew, dwDiffMax);
+ if (SetSystemTimeAdjustment(dwWinNewTimeAdjustment, FALSE /* Periodic adjustments enabled. */))
+ {
+ g_cTimeSyncErrors = 0;
+ return true;
+ }
+
+ if (g_cTimeSyncErrors++ < 10)
+ VBoxServiceError("SetSystemTimeAdjustment failed, error=%u\n", GetLastError());
+ }
+ else if (g_cTimeSyncErrors++ < 10)
+ VBoxServiceError("GetSystemTimeAdjustment failed, error=%ld\n", GetLastError());
+
+#elif defined(RT_OS_OS2)
+ /* No API for doing gradual time adjustments. */
+
+#else /* PORTME */
+ /*
+ * Try use adjtime(), most unix-like systems have this.
+ */
+ struct timeval tv;
+ RTTimeSpecGetTimeval(pDrift, &tv);
+ if (adjtime(&tv, NULL) == 0)
+ {
+ if (g_cVerbosity >= 1)
+ VBoxServiceVerbose(1, "adjtime by %RDtimespec\n", pDrift);
+ g_cTimeSyncErrors = 0;
+ return true;
+ }
+#endif
+
+ /* failed */
+ return false;
+}
+
+
+/**
+ * Cancels any pending time adjustment.
+ *
+ * Called when we've caught up and before calls to VBoxServiceTimeSyncSet.
+ */
+static void VBoxServiceTimeSyncCancelAdjust(void)
+{
+#ifdef RT_OS_WINDOWS
+ if (SetSystemTimeAdjustment(0, TRUE /* Periodic adjustments disabled. */))
+ VBoxServiceVerbose(3, "Windows Time Adjustment is now disabled.\n");
+ else if (g_cTimeSyncErrors++ < 10)
+ VBoxServiceError("SetSystemTimeAdjustment(,disable) failed, error=%u\n", GetLastError());
+#endif /* !RT_OS_WINDOWS */
+}
+
+
+/**
+ * Try adjust the time using adjtime or similar.
+ *
+ * @returns true on success, false on failure.
+ *
+ * @param pDrift The time adjustment.
+ * @param pHostNow The host time at the time of the host query.
+ * REMOVE THIS ARGUMENT!
+ */
+static void VBoxServiceTimeSyncSet(PCRTTIMESPEC pDrift, PCRTTIMESPEC pHostNow)
+{
+ /*
+ * Query the current time, add the adjustment, then try it.
+ */
+#ifdef RT_OS_WINDOWS
+/** @todo r=bird: Get current time and add the adjustment, the host time is
+ * stale by now. */
+ FILETIME ft;
+ RTTimeSpecGetNtFileTime(pHostNow, &ft);
+ SYSTEMTIME st;
+ if (FileTimeToSystemTime(&ft, &st))
+ {
+ if (!SetSystemTime(&st))
+ VBoxServiceError("SetSystemTime failed, error=%u\n", GetLastError());
+ }
+ else
+ VBoxServiceError("Cannot convert system times, error=%u\n", GetLastError());
+
+#else /* !RT_OS_WINDOWS */
+ struct timeval tv;
+ errno = 0;
+ if (!gettimeofday(&tv, NULL))
+ {
+ RTTIMESPEC Tmp;
+ RTTimeSpecAdd(RTTimeSpecSetTimeval(&Tmp, &tv), pDrift);
+ if (!settimeofday(RTTimeSpecGetTimeval(&Tmp, &tv), NULL))
+ {
+ char sz[64];
+ RTTIME Time;
+ if (g_cVerbosity >= 1)
+ VBoxServiceVerbose(1, "settimeofday to %s\n",
+ RTTimeToString(RTTimeExplode(&Time, &Tmp), sz, sizeof(sz)));
+# ifdef DEBUG
+ if (g_cVerbosity >= 3)
+ VBoxServiceVerbose(2, " new time %s\n",
+ RTTimeToString(RTTimeExplode(&Time, RTTimeNow(&Tmp)), sz, sizeof(sz)));
+# endif
+ g_cTimeSyncErrors = 0;
+ }
+ else if (g_cTimeSyncErrors++ < 10)
+ VBoxServiceError("settimeofday failed; errno=%d: %s\n", errno, strerror(errno));
+ }
+ else if (g_cTimeSyncErrors++ < 10)
+ VBoxServiceError("gettimeofday failed; errno=%d: %s\n", errno, strerror(errno));
+#endif /* !RT_OS_WINDOWS */
+}
+
+
/** @copydoc VBOXSERVICE::pfnWorker */
DECLCALLBACK(int) VBoxServiceTimeSyncWorker(bool volatile *pfShutdown)
{
@@ -248,12 +416,13 @@ DECLCALLBACK(int) VBoxServiceTimeSyncWorker(bool volatile *pfShutdown)
int rc = VINF_SUCCESS;
/*
- * Tell the control thread that it can continue
- * spawning services.
+ * Tell the control thread that it can continue spawning services.
*/
RTThreadUserSignal(RTThreadSelf());
- unsigned cErrors = 0;
+ /*
+ * The Work Loop.
+ */
for (;;)
{
/*
@@ -268,7 +437,7 @@ DECLCALLBACK(int) VBoxServiceTimeSyncWorker(bool volatile *pfShutdown)
int rc2 = VbglR3GetHostTime(&HostNow);
if (RT_FAILURE(rc2))
{
- if (cErrors++ < 10)
+ if (g_cTimeSyncErrors++ < 10)
VBoxServiceError("VbglR3GetHostTime failed; rc2=%Rrc\n", rc2);
break;
}
@@ -306,119 +475,31 @@ DECLCALLBACK(int) VBoxServiceTimeSyncWorker(bool volatile *pfShutdown)
if (AbsDriftMilli > MinAdjust)
{
/*
- * The drift is too big, we have to make adjustments. :-/
- * If we've got adjtime around, try that first - most
- * *NIX systems have it. Fall back on settimeofday.
+ * Ok, the drift is above the threshold.
+ *
+ * Try a gradual adjustment first, if that fails or the drift is
+ * too big, fall back on just setting the time.
*/
-#ifdef RT_OS_WINDOWS
- DWORD dwWinTimeAdjustment, dwWinNewTimeAdjustment, dwWinTimeIncrement;
- BOOL bWinTimeAdjustmentDisabled;
- if (!::GetSystemTimeAdjustment(&dwWinTimeAdjustment, &dwWinTimeIncrement, &bWinTimeAdjustmentDisabled))
- {
- VBoxServiceError("GetSystemTimeAdjustment failed, error=%ld\n", GetLastError());
- }
- else
- {
- DWORD dwDiffMax = g_dwWinTimeAdjustment * 0.50;
- DWORD dwDiffNew = dwWinTimeAdjustment * 0.10;
-
- if (RTTimeSpecGetMilli(&Drift) > 0)
- {
- dwWinNewTimeAdjustment = dwWinTimeAdjustment + dwDiffNew;
- if (dwWinNewTimeAdjustment > (g_dwWinTimeAdjustment + dwDiffMax))
- {
- dwWinNewTimeAdjustment = g_dwWinTimeAdjustment + dwDiffMax;
- dwDiffNew = dwDiffMax;
- }
- }
- else
- {
- dwWinNewTimeAdjustment = dwWinTimeAdjustment - dwDiffNew;
- if (dwWinNewTimeAdjustment < (g_dwWinTimeAdjustment - dwDiffMax))
- {
- dwWinNewTimeAdjustment = g_dwWinTimeAdjustment - dwDiffMax;
- dwDiffNew = dwDiffMax;
- }
- }
-
- VBoxServiceVerbose(3, "Windows time adjustment: Drift=%ldms\n", RTTimeSpecGetMilli(&Drift));
- VBoxServiceVerbose(3, "Windows time adjustment: OrgTA=%ld, CurTA=%ld, NewTA=%ld, DiffNew=%ld, DiffMax=%ld\n",
- g_dwWinTimeAdjustment, dwWinTimeAdjustment, dwWinNewTimeAdjustment, dwDiffNew, dwDiffMax);
-
- /* Is AbsDrift way too big? Then a minimum adjustment via SetSystemTimeAdjustment() would take ages.
- So set the time in a hard manner. */
- if (AbsDriftMilli > (60 * 1000 * 20)) /** @todo 20 minutes here hardcoded here. Needs configurable parameter later. */
- {
- SYSTEMTIME st = {0};
- FILETIME ft = {0};
-
- VBoxServiceVerbose(3, "Windows time adjustment: Setting system time directly.\n");
-
- RTTimeSpecGetNtFileTime(&HostNow, &ft);
- if (FALSE == FileTimeToSystemTime(&ft,&st))
- VBoxServiceError("Cannot convert system times, error=%ld\n", GetLastError());
-
- if (!::SetSystemTime(&st))
- VBoxServiceError("SetSystemTime failed, error=%ld\n", GetLastError());
- }
- else
- {
- if (!::SetSystemTimeAdjustment(dwWinNewTimeAdjustment, FALSE /* Periodic adjustments enabled. */))
- VBoxServiceError("SetSystemTimeAdjustment failed, error=%ld\n", GetLastError());
- }
- }
-#else /* !RT_OS_WINDOWS */
- struct timeval tv;
-# if !defined(RT_OS_OS2) /* PORTME */
- RTTimeSpecGetTimeval(&Drift, &tv);
- if (adjtime(&tv, NULL) == 0)
+ if ( AbsDriftMilli > g_TimeSyncSetThreshold
+ || g_fTimeSyncSetNext
+ || !VBoxServiceTimeSyncAdjust(&Drift))
{
- if (g_cVerbosity >= 1)
- VBoxServiceVerbose(1, "adjtime by %RDtimespec\n", &Drift);
- cErrors = 0;
+ VBoxServiceTimeSyncCancelAdjust();
+ VBoxServiceTimeSyncSet(&Drift, &HostNow);
}
- else
-# endif
- {
- errno = 0;
- if (!gettimeofday(&tv, NULL))
- {
- RTTIMESPEC Tmp;
- RTTimeSpecAdd(RTTimeSpecSetTimeval(&Tmp, &tv), &Drift);
- if (!settimeofday(RTTimeSpecGetTimeval(&Tmp, &tv), NULL))
- {
- if (g_cVerbosity >= 1)
- VBoxServiceVerbose(1, "settimeofday to %s\n",
- RTTimeToString(RTTimeExplode(&Time, &Tmp), sz, sizeof(sz)));
-# ifdef DEBUG
- if (g_cVerbosity >= 3)
- VBoxServiceVerbose(2, " new time %s\n",
- RTTimeToString(RTTimeExplode(&Time, RTTimeNow(&Tmp)), sz, sizeof(sz)));
-# endif
- cErrors = 0;
- }
- else if (cErrors++ < 10)
- VBoxServiceError("settimeofday failed; errno=%d: %s\n", errno, strerror(errno));
- }
- else if (cErrors++ < 10)
- VBoxServiceError("gettimeofday failed; errno=%d: %s\n", errno, strerror(errno));
- }
-#endif /* !RT_OS_WINDOWS */
- }
- else /* The time delta is <= MinAdjust, so don't do anything here (anymore). */
- {
-#ifdef RT_OS_WINDOWS
- if (::SetSystemTimeAdjustment(0, TRUE /* Periodic adjustments disabled. */))
- VBoxServiceVerbose(3, "Windows Time Adjustment is now disabled.\n");
-#endif /* !RT_OS_WINDOWS */
}
+ else
+ VBoxServiceTimeSyncCancelAdjust();
break;
}
VBoxServiceVerbose(3, "%RDtimespec: latency too high (%RDtimespec) sleeping 1s\n", GuestElapsed);
RTThreadSleep(1000);
} while (--cTries > 0);
+ /* Clear the set-next/set-start flag. */
+ g_fTimeSyncSetNext = false;
+
/*
* Block for a while.
*
@@ -489,19 +570,29 @@ VBOXSERVICE g_TimeSync =
"Time synchronization",
/* pszUsage. */
"[--timesync-interval <ms>] [--timesync-min-adjust <ms>] "
- "[--timesync-latency-factor <x>] [--time-sync-max-latency <ms>]"
+ "[--timesync-latency-factor <x>] [--timesync-max-latency <ms>]"
+ "[--timesync-set-threshold <ms>] [--timesync-set-start]"
,
/* pszOptions. */
" --timesync-interval Specifies the interval at which to synchronize the\n"
" time with the host. The default is 10000 ms.\n"
- " --timesync-min-adjust The minimum absolute drift value measured\n"
- " in milliseconds to make adjustments for.\n"
+ " --timesync-min-adjust\n"
+ " The minimum absolute drift value measured in\n"
+ " milliseconds to make adjustments for.\n"
" The default is 1000 ms on OS/2 and 100 ms elsewhere.\n"
- " --timesync-latency-factor The factor to multiply the time query latency\n"
- " with to calculate the dynamic minimum adjust time.\n"
+ " --timesync-latency-factor\n"
+ " The factor to multiply the time query latency with to\n"
+ " calculate the dynamic minimum adjust time.\n"
" The default is 8 times.\n"
- " --timesync-max-latency The max host timer query latency to accept.\n"
+ " --timesync-max-latency\n"
+ " The max host timer query latency to accept.\n"
" The default is 250 ms.\n"
+ " --timesync-set-threshold\n"
+ " The absolute drift threshold, given as milliseconds,\n"
+ " where to start setting the time instead of trying to\n"
+ " adjust it. The default is 20 min.\n"
+ " --timesync-set-start\n"
+ " Set the time when starting the time sync service.\n"
,
/* methods */
VBoxServiceTimeSyncPreInit,
diff --git a/src/VBox/Additions/common/VBoxService/VBoxServiceVMInfo.cpp b/src/VBox/Additions/common/VBoxService/VBoxServiceVMInfo.cpp
index 55ce596f2..29c5590a6 100644
--- a/src/VBox/Additions/common/VBoxService/VBoxServiceVMInfo.cpp
+++ b/src/VBox/Additions/common/VBoxService/VBoxServiceVMInfo.cpp
@@ -240,9 +240,13 @@ DECLCALLBACK(int) VBoxServiceVMInfoWorker(bool volatile *pfShutdown)
#else
utmp* ut_user;
rc = utmpname(UTMP_FILE);
+ #ifdef RT_OS_SOLARIS
+ if (rc == 0)
+ #else
if (rc != 0)
+ #endif /* !RT_OS_SOLARIS */
{
- VBoxServiceError("Could not set UTMP file! Error: %ld", errno);
+ VBoxServiceError("Could not set UTMP file! Error: %ld\n", errno);
}
setutent();
while ((ut_user=getutent()))
diff --git a/src/VBox/Additions/linux/drm/Makefile.kmk b/src/VBox/Additions/linux/drm/Makefile.kmk
index 45f5118b3..e38cb45db 100644
--- a/src/VBox/Additions/linux/drm/Makefile.kmk
+++ b/src/VBox/Additions/linux/drm/Makefile.kmk
@@ -60,7 +60,8 @@ vboxvideo_drm_CFLAGS = -fshort-wchar
vboxvideo_drm_DEFS = \
MODULE IN_RT_R0 VBOXGUEST VBOX_WITH_HGCM \
KBUILD_MODNAME=KBUILD_STR\(vboxvideo\) \
- KBUILD_BASENAME=KBUILD_STR\(vboxvideo\)
+ KBUILD_BASENAME=KBUILD_STR\(vboxvideo\) \
+ DEBUG_HASH=2 DEBUG_HASH2=3
vboxvideo_drm_SOURCES = vboxvideo_drm.c
# detect fc6 2.6.18
diff --git a/src/VBox/Additions/linux/module/Makefile.kmk b/src/VBox/Additions/linux/module/Makefile.kmk
index e1875f247..8d27a63d9 100644
--- a/src/VBox/Additions/linux/module/Makefile.kmk
+++ b/src/VBox/Additions/linux/module/Makefile.kmk
@@ -57,7 +57,8 @@ vboxadd_NOINST = 1
vboxadd_DEFS = \
MODULE IN_RT_R0 VBGL_VBOXGUEST EXPORT_SYMTAB VBGL_HGCM VBOX_WITH_HGCM \
KBUILD_MODNAME=KBUILD_STR\(vboxadd\) \
- KBUILD_BASENAME=KBUILD_STR\(vboxadd\)
+ KBUILD_BASENAME=KBUILD_STR\(vboxadd\) \
+ DEBUG_HASH=2 DEBUG_HASH2=3
vboxadd_INCS = $(PATH_ROOT)/src/VBox/Runtime/r0drv/linux
vboxadd_SOURCES = \
vboxmod.c \
diff --git a/src/VBox/Additions/linux/sharedfolders/Makefile.kmk b/src/VBox/Additions/linux/sharedfolders/Makefile.kmk
index 7561e86d3..10b01aadc 100644
--- a/src/VBox/Additions/linux/sharedfolders/Makefile.kmk
+++ b/src/VBox/Additions/linux/sharedfolders/Makefile.kmk
@@ -59,7 +59,8 @@ vboxvfs_CFLAGS = -fshort-wchar
vboxvfs_DEFS = \
MODULE IN_RT_R0 VBOXGUEST VBOX_WITH_HGCM \
KBUILD_MODNAME=KBUILD_STR\(vboxadd\) \
- KBUILD_BASENAME=KBUILD_STR\(vboxadd\)
+ KBUILD_BASENAME=KBUILD_STR\(vboxadd\) \
+ DEBUG_HASH=2 DEBUG_HASH2=3
vboxvfs_INCS = \
$(PATH_ROOT)/src/VBox/Additions/common/VBoxGuestLib \
$(PATH_ROOT)/src/VBox/Runtime/r0drv/linux
diff --git a/src/VBox/Additions/linux/sharedfolders/regops.c b/src/VBox/Additions/linux/sharedfolders/regops.c
index c7f880553..aeb375b7f 100644
--- a/src/VBox/Additions/linux/sharedfolders/regops.c
+++ b/src/VBox/Additions/linux/sharedfolders/regops.c
@@ -150,7 +150,10 @@ sf_reg_write (struct file *file, const char *buf, size_t size, loff_t *off)
pos = *off;
if (file->f_flags & O_APPEND)
- pos += inode->i_size;
+ {
+ pos = inode->i_size;
+ *off = pos;
+ }
/** XXX Check write permission accoring to inode->i_mode! */
@@ -189,11 +192,10 @@ sf_reg_write (struct file *file, const char *buf, size_t size, loff_t *off)
break;
}
-#if 1 /* XXX: which way is correct? */
*off += total_bytes_written;
-#else
- file->f_pos += total_bytes_written;
-#endif
+ if (*off > inode->i_size)
+ inode->i_size = *off;
+
sf_i->force_restat = 1;
kfree (tmp);
return total_bytes_written;
diff --git a/src/VBox/Additions/linux/sharedfolders/utils.c b/src/VBox/Additions/linux/sharedfolders/utils.c
index df87bb69b..461a5b294 100644
--- a/src/VBox/Additions/linux/sharedfolders/utils.c
+++ b/src/VBox/Additions/linux/sharedfolders/utils.c
@@ -367,11 +367,9 @@ sf_setattr (struct dentry *dentry, struct iattr *iattr)
memset(&info, 0, sizeof(info));
info.cbObject = iattr->ia_size;
cbBuffer = sizeof(info);
- printk("set size %lld\n", (long long)info.cbObject);
rc = vboxCallFSInfo(&client_handle, &sf_g->map, params.Handle,
SHFL_INFO_SET | SHFL_INFO_SIZE, &cbBuffer,
(PSHFLDIRINFO)&info);
- printk(" => %d\n", rc);
if (VBOX_FAILURE (rc)) {
LogFunc(("vboxCallFSInfo(%s, SIZE) failed rc=%Rrc\n",
sf_i->path->String.utf8, rc));
diff --git a/src/VBox/Devices/Network/DevE1000.cpp b/src/VBox/Devices/Network/DevE1000.cpp
index d40002208..ee635a99a 100644
--- a/src/VBox/Devices/Network/DevE1000.cpp
+++ b/src/VBox/Devices/Network/DevE1000.cpp
@@ -863,7 +863,7 @@ struct E1kState_st
uint64_t u64AckedAt;
/** All: Used for eliminating spurious interrupts. */
bool fIntRaised;
- /** EMT: */
+ /** EMT: false if the cable is disconnected by the GUI. */
bool fCableConnected;
/** EMT: */
bool fR0Enabled;
@@ -2012,7 +2012,8 @@ static int e1kRegWriteCTRL(E1KSTATE* pState, uint32_t offset, uint32_t index, ui
}
else
{
- if (value & CTRL_SLU)
+ if ( (value & CTRL_SLU)
+ && pState->fCableConnected)
{
/* The driver indicates that we should bring up the link */
STATUS |= STATUS_LU;
@@ -4303,12 +4304,14 @@ static DECLCALLBACK(int) e1kSetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWO
if (fNewUp)
{
E1kLog(("%s Link is up\n", INSTANCE(pState)));
+ pState->fCableConnected = true;
STATUS |= STATUS_LU;
Phy::setLinkStatus(&pState->phy, true);
}
else
{
E1kLog(("%s Link is down\n", INSTANCE(pState)));
+ pState->fCableConnected = false;
STATUS &= ~STATUS_LU;
Phy::setLinkStatus(&pState->phy, false);
}
diff --git a/src/VBox/Devices/Network/DevPCNet.cpp b/src/VBox/Devices/Network/DevPCNet.cpp
index a5a20e649..0f2d97d67 100644
--- a/src/VBox/Devices/Network/DevPCNet.cpp
+++ b/src/VBox/Devices/Network/DevPCNet.cpp
@@ -618,6 +618,16 @@ static int pcnetSyncTransmit(PCNetState *pThis);
static void pcnetPollTimerStart(PCNetState *pThis);
/**
+ * Checks if the link is up.
+ * @returns true if the link is up.
+ * @returns false if the link is down.
+ */
+DECLINLINE(bool) pcnetIsLinkUp(PCNetState *pThis)
+{
+ return pThis->pDrv && !pThis->fLinkTempDown && pThis->fLinkUp;
+}
+
+/**
* Load transmit message descriptor
* Make sure we read the own flag first.
*
@@ -1838,6 +1848,12 @@ static void pcnetReceiveNoSync(PCNetState *pThis, const uint8_t *buf, size_t cbT
if (PDMDevHlpVMState(pDevIns) != VMSTATE_RUNNING)
return;
+ /*
+ * Drop packets if the cable is not connected
+ */
+ if (!pcnetIsLinkUp(pThis))
+ return;
+
Log(("#%d pcnetReceiveNoSync: size=%d\n", PCNET_INST_NR, cbToRecv));
/*
@@ -2030,17 +2046,6 @@ static void pcnetReceiveNoSync(PCNetState *pThis, const uint8_t *buf, size_t cbT
/**
- * Checks if the link is up.
- * @returns true if the link is up.
- * @returns false if the link is down.
- */
-DECLINLINE(bool) pcnetIsLinkUp(PCNetState *pThis)
-{
- return pThis->pDrv && !pThis->fLinkTempDown && pThis->fLinkUp;
-}
-
-
-/**
* Transmit queue consumer
* This is just a very simple way of delaying sending to R3.
*
diff --git a/src/VBox/Devices/PC/BIOS/rombios.c b/src/VBox/Devices/PC/BIOS/rombios.c
index 6da6067cc..fa48bc89f 100644
--- a/src/VBox/Devices/PC/BIOS/rombios.c
+++ b/src/VBox/Devices/PC/BIOS/rombios.c
@@ -11135,6 +11135,69 @@ rom_scan_increment:
mov ds, ax
ret
+#define LVT0 0xFEE00350
+#define LVT1 0xFEE00360
+
+;; Program LVT0/LVT1 entries in the local APIC. Some Linux kernels (e.g., RHEL4
+;; SMP 32-bit) expect the entries to be unmasked in virtual wire mode.
+
+setup_lapic:
+ pushf
+ cli ;; Interrupts would kill us!
+ call pmode_enter
+ mov esi, #LVT0 ;; Program LVT0 to ExtINT and unmask
+ mov eax, [esi]
+ and eax, #0xfffe00ff
+ or ah, #0x07
+ mov [esi], eax
+ mov esi, #LVT1 ;; Program LVT1 to NMI and unmask
+ mov eax, [esi]
+ and eax, #0xfffe00ff
+ or ah, #0x04
+ mov [esi], eax
+ call pmode_exit
+ popf
+ ret
+
+;; Enter and exit minimal protected-mode environment. May only be called from
+;; the F000 segment (16-bit). Does not switch stacks. Must be run with disabled
+;; interrupts(!). On return from pmode_enter, DS contains a selector which can
+;; address the entire 4GB address space.
+
+pmode_enter:
+ push cs
+ pop ds
+ lgdt [pmbios_gdt_desc]
+ mov eax, cr0
+ or al, #0x1
+ mov cr0, eax
+ JMP_AP(0x20, really_enter_pm)
+really_enter_pm:
+ mov ax, #0x18
+ mov ds, ax
+ ret
+
+pmode_exit:
+ mov eax, cr0
+ and al, #0xfe
+ mov cr0, eax
+ JMP_AP(0xF000, really_exit_pm)
+really_exit_pm:
+ ret
+
+pmbios_gdt_desc:
+ dw 0x30
+ dw pmbios_gdt
+ dw 0x000f
+
+pmbios_gdt:
+ dw 0, 0, 0, 0
+ dw 0, 0, 0, 0
+ dw 0xffff, 0, 0x9b00, 0x00cf ; 32 bit flat code segment (0x10)
+ dw 0xffff, 0, 0x9300, 0x00cf ; 32 bit flat data segment (0x18)
+ dw 0xffff, 0, 0x9b0f, 0x0000 ; 16 bit code segment base=0xf0000 limit=0xffff
+ dw 0xffff, 0, 0x9300, 0x0000 ; 16 bit data segment base=0x0 limit=0xffff
+
;; for 'C' strings and other data, insert them here with
;; a the following hack:
;; DATA_SEG_DEFS_HERE
@@ -11194,10 +11257,16 @@ post:
cmp al, #0x05
je eoi_jmp_post
+#ifdef VBOX
+ ;; just ignore all other CMOS shutdown status values (OpenSolaris sets it to 0xA for some reason in certain cases)
+ ;; (shutdown_status_panic just crashes the VM as it calls int 0x10 before the IDT table has been initialized)
+ jmp normal_post
+#else
;; Examine CMOS shutdown status.
;; 0x01,0x02,0x03,0x04,0x06,0x07,0x08, 0x0a, 0x0b, 0x0c = Unimplemented shutdown status.
push bx
call _shutdown_status_panic
+#endif
#if 0
HALT(__LINE__)
@@ -11463,6 +11532,7 @@ post_default_ints:
call pcibios_init_iomem_bases
call pcibios_init_irqs
#endif
+ call setup_lapic
call rom_scan
#if BX_USE_ATADRV
diff --git a/src/VBox/Devices/PC/DevAPIC.cpp b/src/VBox/Devices/PC/DevAPIC.cpp
index 8317f5098..ddbe244de 100644
--- a/src/VBox/Devices/PC/DevAPIC.cpp
+++ b/src/VBox/Devices/PC/DevAPIC.cpp
@@ -192,9 +192,9 @@ typedef struct APICState {
#ifdef VBOX
/* Task priority register (interrupt level) */
uint32_t tpr;
- /* Logical APIC id */
+ /* Logical APIC id - user programmable */
LogApicId id;
- /* Physical APIC id */
+ /* Physical APIC id - not visible to user, constant */
PhysApicId phys_id;
/** @todo: is it logical or physical? Not really used anyway now. */
PhysApicId arb_id;
@@ -386,6 +386,7 @@ static int apic_get_ppr(APICState *s);
static uint32_t apic_get_current_count(APICDeviceInfo* dev, APICState *s);
static void apicTimerSetInitialCount(APICDeviceInfo *dev, APICState *s, uint32_t initial_count);
static void apicTimerSetLvt(APICDeviceInfo *dev, APICState *pThis, uint32_t fNew);
+static void apicSendInitIpi(APICDeviceInfo* dev, APICState *s);
#endif /* VBOX */
@@ -532,7 +533,7 @@ static int apic_bus_deliver(APICDeviceInfo* dev,
#ifdef VBOX
#ifdef IN_RING3
foreach_apic(dev, deliver_bitmask,
- apic_init_ipi(dev, apic));
+ apicSendInitIpi(dev, apic));
return VINF_SUCCESS;
#else
/* We shall send init IPI only in R3, R0 calls should be
@@ -986,7 +987,13 @@ PDMBOTHCBDECL(int) apicLocalInterrupt(PPDMDEVINS pDevIns, uint8_t u8Pin, uint8_t
case APIC_DM_EXTINT:
Assert(u8Pin == 0); /* PIC should be wired to LINT0. */
enmType = PDMAPICIRQ_EXTINT;
- break;
+ /* ExtINT can be both set and cleared, NMI/SMI/INIT can only be set. */
+ LogFlow(("apicLocalInterrupt: %s ExtINT interrupt\n", u8Level ? "setting" : "clearing"));
+ if (u8Level)
+ cpuSetInterrupt(dev, s, enmType);
+ else
+ cpuClearInterrupt(dev, s, enmType);
+ return VINF_SUCCESS;
case APIC_DM_NMI:
Assert(u8Pin == 1); /* NMI should be wired to LINT1. */
enmType = PDMAPICIRQ_NMI;
@@ -1224,11 +1231,16 @@ static void apic_init_ipi(APICDeviceInfo* dev, APICState *s)
s->initial_count = 0;
s->initial_count_load_time = 0;
s->next_time = 0;
+}
+
#ifdef VBOX
+static void apicSendInitIpi(APICDeviceInfo* dev, APICState *s)
+{
+ apic_init_ipi(dev, s);
cpuSendInitIpi(dev, s);
-#endif
}
+#endif
/* send a SIPI message to the CPU to start it */
static void apic_startup(APICDeviceInfo* dev, APICState *s, int vector_num)
@@ -2459,14 +2471,11 @@ static DECLCALLBACK(void) apicReset(PPDMDEVINS pDevIns)
APICState *pApic = &dev->CTX_SUFF(paLapics)[i];
TMTimerStop(pApic->CTX_SUFF(pTimer));
- /* Do not send an init ipi to the VCPU; we take
- * care of the proper init ourselves.
+ /* Clear LAPIC state as if an INIT IPI was sent. */
apic_init_ipi(dev, pApic);
- */
-
- /* malc, I've removed the initing duplicated in apic_init_ipi(). This
- * arb_id was left over.. */
- pApic->arb_id = 0;
+ /* The IDs are not touched by apic_init_ipi() and must be reset now. */
+ pApic->arb_id = pApic->id = i;
+ Assert(pApic->id == pApic->phys_id); /* The two should match again. */
/* Reset should re-enable the APIC. */
pApic->apicbase = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
if (pApic->phys_id == 0)
diff --git a/src/VBox/Devices/Serial/DrvRawFile.cpp b/src/VBox/Devices/Serial/DrvRawFile.cpp
index 747bf92e2..e52e2ed7d 100644
--- a/src/VBox/Devices/Serial/DrvRawFile.cpp
+++ b/src/VBox/Devices/Serial/DrvRawFile.cpp
@@ -82,8 +82,11 @@ static DECLCALLBACK(int) drvRawFileWrite(PPDMISTREAM pInterface, const void *pvB
{
size_t cbWritten;
rc = RTFileWrite(pThis->OutputFile, pvBuf, *pcbWrite, &cbWritten);
+#if 0
+ /* don't flush here, takes too long and we will loose characters */
if (RT_SUCCESS(rc))
RTFileFlush(pThis->OutputFile);
+#endif
*pcbWrite = cbWritten;
}
diff --git a/src/VBox/Devices/Storage/VmdkHDDCore.cpp b/src/VBox/Devices/Storage/VmdkHDDCore.cpp
index a51f92a58..0c611d6ff 100644
--- a/src/VBox/Devices/Storage/VmdkHDDCore.cpp
+++ b/src/VBox/Devices/Storage/VmdkHDDCore.cpp
@@ -3649,7 +3649,7 @@ static int vmdkCreateRegularImage(PVMDKIMAGE pImage, uint64_t cbSize,
pExtent->enmAccess = VMDKACCESS_READWRITE;
pExtent->fUncleanShutdown = true;
pExtent->cNominalSectors = VMDK_BYTE2SECTOR(cbExtent);
- pExtent->uSectorOffset = VMDK_BYTE2SECTOR(cbOffset);
+ pExtent->uSectorOffset = 0;
pExtent->fMetaDirty = true;
if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
@@ -4033,7 +4033,7 @@ static int vmdkFindExtent(PVMDKIMAGE pImage, uint64_t offSector,
if (offSector < pImage->pExtents[i].cNominalSectors)
{
pExtent = &pImage->pExtents[i];
- *puSectorInExtent = offSector;
+ *puSectorInExtent = offSector + pImage->pExtents[i].uSectorOffset;
break;
}
offSector -= pImage->pExtents[i].cNominalSectors;
diff --git a/src/VBox/Frontends/VirtualBox/src/VBoxConsoleWnd.cpp b/src/VBox/Frontends/VirtualBox/src/VBoxConsoleWnd.cpp
index 37aee1d4d..8a6929f63 100644
--- a/src/VBox/Frontends/VirtualBox/src/VBoxConsoleWnd.cpp
+++ b/src/VBox/Frontends/VirtualBox/src/VBoxConsoleWnd.cpp
@@ -1378,6 +1378,7 @@ void VBoxConsoleWnd::closeEvent (QCloseEvent *e)
AssertWrapperOk (cmachine);
if (lastAction [0] == kSave)
{
+ dlg.mRbShutdown->setEnabled (isACPIEnabled);
dlg.mRbSave->setChecked (true);
dlg.mRbSave->setFocus();
}
diff --git a/src/VBox/Frontends/VirtualBox/src/VBoxGlobal.cpp b/src/VBox/Frontends/VirtualBox/src/VBoxGlobal.cpp
index 3dab745ea..fe9850b28 100644
--- a/src/VBox/Frontends/VirtualBox/src/VBoxGlobal.cpp
+++ b/src/VBox/Frontends/VirtualBox/src/VBoxGlobal.cpp
@@ -4694,11 +4694,11 @@ void VBoxGlobal::init()
while (i < argc)
{
const char *arg = qApp->argv() [i];
- if ( !::strcmp (arg, "--startvm")
- || !::strcmp (arg, "-startvm")
- || !::strcmp (arg, "-s")
- || !::strcmp (arg, "--vm")
- || !::strcmp (arg, "-vm"))
+ /* NOTE: the check here must match the corresponding check for the
+ * options to start a VM in main.cpp and hardenedmain.cpp exactly,
+ * otherwise there will be weird error messages. */
+ if ( !::strcmp (arg, "--startvm")
+ || !::strcmp (arg, "-startvm"))
{
if (++i < argc)
{
diff --git a/src/VBox/Frontends/VirtualBox/src/VBoxSelectorWnd.cpp b/src/VBox/Frontends/VirtualBox/src/VBoxSelectorWnd.cpp
index 60b94e668..2f266cf17 100644
--- a/src/VBox/Frontends/VirtualBox/src/VBoxSelectorWnd.cpp
+++ b/src/VBox/Frontends/VirtualBox/src/VBoxSelectorWnd.cpp
@@ -500,7 +500,7 @@ VBoxSelectorWnd (VBoxSelectorWnd **aSelf, QWidget* aParent,
/* Make non-possible to activate list elements by single click,
* this hack should disable the current possibility to do it if present */
if (mVMListView->style()->styleHint (QStyle::SH_ItemView_ActivateItemOnSingleClick, 0, mVMListView))
- mVMListView->setStyleSheet ("activate-on-singleclick");
+ mVMListView->setStyleSheet ("activate-on-singleclick : 0");
leftVLayout->addWidget (mVMListView);
diff --git a/src/VBox/Frontends/VirtualBox/src/hardenedmain.cpp b/src/VBox/Frontends/VirtualBox/src/hardenedmain.cpp
index b5120a7d2..9dbcbdd4a 100644
--- a/src/VBox/Frontends/VirtualBox/src/hardenedmain.cpp
+++ b/src/VBox/Frontends/VirtualBox/src/hardenedmain.cpp
@@ -30,8 +30,11 @@ int main(int argc, char **argv, char **envp)
*/
uint32_t fFlags = SUPSECMAIN_FLAGS_DONT_OPEN_DEV;
for (int i = 1; i < argc; i++)
- if ( !strcmp(argv[i], "--startvm")
- || !strcmp(argv[i], "-startvm"))
+ /* NOTE: the check here must match the corresponding check for the
+ * options to start a VM in main.cpp and VBoxGlobal.cpp exactly,
+ * otherwise there will be weird error messages. */
+ if ( !::strcmp(argv[i], "--startvm")
+ || !::strcmp(argv[i], "-startvm"))
{
fFlags &= ~SUPSECMAIN_FLAGS_DONT_OPEN_DEV;
break;
diff --git a/src/VBox/Frontends/VirtualBox/src/main.cpp b/src/VBox/Frontends/VirtualBox/src/main.cpp
index 72aa437ff..f873ea8bc 100644
--- a/src/VBox/Frontends/VirtualBox/src/main.cpp
+++ b/src/VBox/Frontends/VirtualBox/src/main.cpp
@@ -556,8 +556,11 @@ int main (int argc, char **argv, char **envp)
bool fInitSUPLib = false;
for (int i = 1; i < argc; i++)
{
- if ( !::strcmp(argv[i], "--startvm")
- || !::strcmp(argv[i], "-startvm"))
+ /* NOTE: the check here must match the corresponding check for the
+ * options to start a VM in hardenedmain.cpp and VBoxGlobal.cpp exactly,
+ * otherwise there will be weird error messages. */
+ if ( !::strcmp(argv[i], "--startvm")
+ || !::strcmp(argv[i], "-startvm"))
{
fInitSUPLib = true;
break;
diff --git a/src/VBox/HostDrivers/Support/SUPDrv.c b/src/VBox/HostDrivers/Support/SUPDrv.c
index e8dfab7b7..2dc403cd3 100644
--- a/src/VBox/HostDrivers/Support/SUPDrv.c
+++ b/src/VBox/HostDrivers/Support/SUPDrv.c
@@ -1,4 +1,4 @@
-/* $Revision: 53866 $ */
+/* $Revision: 54555 $ */
/** @file
* VBoxDrv - The VirtualBox Support Driver - Common code.
*/
@@ -201,7 +201,7 @@ DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPhys)(PRTR0MEMOBJ pMemObj, size_t cb,
DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPhysNC)(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest);
DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocCont)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
DECLASM(int) UNWIND_WRAP(RTR0MemObjEnterPhys)(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb);
-DECLASM(int) UNWIND_WRAP(RTR0MemObjLockUser)(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process);
+DECLASM(int) UNWIND_WRAP(RTR0MemObjLockUser)(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, uint32_t fFlags, RTR0PROCESS R0Process);
DECLASM(int) UNWIND_WRAP(RTR0MemObjMapKernel)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt);
DECLASM(int) UNWIND_WRAP(RTR0MemObjMapKernelEx)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt, size_t offSub, size_t cbSub);
DECLASM(int) UNWIND_WRAP(RTR0MemObjMapUser)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process);
@@ -2303,7 +2303,7 @@ SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPag
* Let IPRT do the job.
*/
Mem.eType = MEMREF_TYPE_LOCKED;
- rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTR0ProcHandleSelf());
+ rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE, RTR0ProcHandleSelf());
if (RT_SUCCESS(rc))
{
uint32_t iPage = cPages;
diff --git a/src/VBox/HostDrivers/Support/SUPDrvIOC.h b/src/VBox/HostDrivers/Support/SUPDrvIOC.h
index c524bc037..8bab36bc1 100644
--- a/src/VBox/HostDrivers/Support/SUPDrvIOC.h
+++ b/src/VBox/HostDrivers/Support/SUPDrvIOC.h
@@ -1,4 +1,4 @@
-/* $Revision: 53491 $ */
+/* $Revision: 54555 $ */
/** @file
* VirtualBox Support Driver - IOCtl definitions.
*/
@@ -192,8 +192,11 @@ typedef SUPREQHDR *PSUPREQHDR;
*
* @todo Pending work on next major version change:
* - Nothing.
+ *
+ * @remarks The current major version (0x0011YYYY) has never appeared on the
+ * trunk.
*/
-#define SUPDRV_IOC_VERSION 0x000e0001
+#define SUPDRV_IOC_VERSION 0x00110000
/** SUP_IOCTL_COOKIE. */
typedef struct SUPCOOKIE
diff --git a/src/VBox/HostDrivers/Support/SUPLib.cpp b/src/VBox/HostDrivers/Support/SUPLib.cpp
index 25df8cb41..e82d64c97 100644
--- a/src/VBox/HostDrivers/Support/SUPLib.cpp
+++ b/src/VBox/HostDrivers/Support/SUPLib.cpp
@@ -271,16 +271,16 @@ SUPR3DECL(int) SUPR3Init(PSUPDRVSESSION *ppSession)
CookieReq.Hdr.rc = VERR_INTERNAL_ERROR;
strcpy(CookieReq.u.In.szMagic, SUPCOOKIE_MAGIC);
CookieReq.u.In.u32ReqVersion = SUPDRV_IOC_VERSION;
- const uint32_t MinVersion = (SUPDRV_IOC_VERSION & 0xffff0000) == 0x000e0001
- ? 0x000e0001
- : SUPDRV_IOC_VERSION & 0xffff0000;
- CookieReq.u.In.u32MinVersion = MinVersion;
+ const uint32_t uMinVersion = (SUPDRV_IOC_VERSION & 0xffff0000) == 0x00110000
+ ? 0x00110000
+ : SUPDRV_IOC_VERSION & 0xffff0000;
+ CookieReq.u.In.u32MinVersion = uMinVersion;
rc = suplibOsIOCtl(&g_supLibData, SUP_IOCTL_COOKIE, &CookieReq, SUP_IOCTL_COOKIE_SIZE);
if ( RT_SUCCESS(rc)
&& RT_SUCCESS(CookieReq.Hdr.rc))
{
if ( (CookieReq.u.Out.u32SessionVersion & 0xffff0000) == (SUPDRV_IOC_VERSION & 0xffff0000)
- && CookieReq.u.Out.u32SessionVersion >= MinVersion)
+ && CookieReq.u.Out.u32SessionVersion >= uMinVersion)
{
/*
* Query the functions.
@@ -347,7 +347,7 @@ SUPR3DECL(int) SUPR3Init(PSUPDRVSESSION *ppSession)
else
{
LogRel(("Support driver version mismatch: SessionVersion=%#x DriverVersion=%#x ClientVersion=%#x MinVersion=%#x\n",
- CookieReq.u.Out.u32SessionVersion, CookieReq.u.Out.u32DriverVersion, SUPDRV_IOC_VERSION, MinVersion));
+ CookieReq.u.Out.u32SessionVersion, CookieReq.u.Out.u32DriverVersion, SUPDRV_IOC_VERSION, uMinVersion));
rc = VERR_VM_DRIVER_VERSION_MISMATCH;
}
}
diff --git a/src/VBox/HostDrivers/Support/solaris/SUPDrv-solaris.c b/src/VBox/HostDrivers/Support/solaris/SUPDrv-solaris.c
index 10cb33fcf..716456261 100644
--- a/src/VBox/HostDrivers/Support/solaris/SUPDrv-solaris.c
+++ b/src/VBox/HostDrivers/Support/solaris/SUPDrv-solaris.c
@@ -245,7 +245,7 @@ int _init(void)
}
else
LogRel((DEVICE_NAME ":VBoxDrvSolarisAttach: supdrvInitDevExt failed\n"));
- RTR0Term();
+ RTR0TermForced();
}
else
LogRel((DEVICE_NAME ":VBoxDrvSolarisAttach: failed to init R0Drv\n"));
@@ -272,7 +272,7 @@ int _fini(void)
AssertRC(rc);
g_Spinlock = NIL_RTSPINLOCK;
- RTR0Term();
+ RTR0TermForced();
memset(&g_DevExt, 0, sizeof(g_DevExt));
diff --git a/src/VBox/HostDrivers/Support/win/SUPDrvA-win.asm b/src/VBox/HostDrivers/Support/win/SUPDrvA-win.asm
index db27d5224..a14abe3cd 100644
--- a/src/VBox/HostDrivers/Support/win/SUPDrvA-win.asm
+++ b/src/VBox/HostDrivers/Support/win/SUPDrvA-win.asm
@@ -98,7 +98,7 @@ NtWrapDyn2DrvFunctionWithAllRegParams supdrvNtWrap, RTR0MemObjAllocPhys
NtWrapDyn2DrvFunctionWithAllRegParams supdrvNtWrap, RTR0MemObjAllocPhysNC
NtWrapDyn2DrvFunctionWithAllRegParams supdrvNtWrap, RTR0MemObjAllocCont
NtWrapDyn2DrvFunctionWithAllRegParams supdrvNtWrap, RTR0MemObjEnterPhys
-NtWrapDyn2DrvFunctionWithAllRegParams supdrvNtWrap, RTR0MemObjLockUser
+NtWrapDyn2DrvFunctionWith5Params supdrvNtWrap, RTR0MemObjLockUser
NtWrapDyn2DrvFunctionWith5Params supdrvNtWrap, RTR0MemObjMapKernel
NtWrapDyn2DrvFunctionWith7Params supdrvNtWrap, RTR0MemObjMapKernelEx
NtWrapDyn2DrvFunctionWith6Params supdrvNtWrap, RTR0MemObjMapUser
diff --git a/src/VBox/HostServices/SharedFolders/vbsf.cpp b/src/VBox/HostServices/SharedFolders/vbsf.cpp
index dbbe6845f..66ac03520 100644
--- a/src/VBox/HostServices/SharedFolders/vbsf.cpp
+++ b/src/VBox/HostServices/SharedFolders/vbsf.cpp
@@ -242,7 +242,8 @@ static int vbsfPathCheck(const char *pUtf8Path, size_t cbPath)
}
static int vbsfBuildFullPath (SHFLCLIENTDATA *pClient, SHFLROOT root, PSHFLSTRING pPath,
- uint32_t cbPath, char **ppszFullPath, uint32_t *pcbFullPathRoot, bool fWildCard = false)
+ uint32_t cbPath, char **ppszFullPath, uint32_t *pcbFullPathRoot,
+ bool fWildCard = false, bool fPreserveLastComponent = false)
{
int rc = VINF_SUCCESS;
@@ -457,11 +458,11 @@ static int vbsfBuildFullPath (SHFLCLIENTDATA *pClient, SHFLROOT root, PSHFLSTRIN
&& !vbsfIsGuestMappingCaseSensitive(root))
{
RTFSOBJINFO info;
- char *pszWildCardComponent = NULL;
+ char *pszLastComponent = NULL;
- if (fWildCard)
+ if (fWildCard || fPreserveLastComponent)
{
- /* strip off the last path component, that contains the wildcard(s) */
+ /* strip off the last path component, that has to be preserved: contains the wildcard(s) or a 'rename' target. */
uint32_t len = (uint32_t)strlen(pszFullPath);
char *src = pszFullPath + len - 1;
@@ -487,10 +488,10 @@ static int vbsfBuildFullPath (SHFLCLIENTDATA *pClient, SHFLROOT root, PSHFLSTRIN
temp++;
}
- if (fHaveWildcards)
+ if (fHaveWildcards || fPreserveLastComponent)
{
- pszWildCardComponent = src;
- *pszWildCardComponent = 0;
+ pszLastComponent = src;
+ *pszLastComponent = 0;
}
}
}
@@ -580,8 +581,8 @@ static int vbsfBuildFullPath (SHFLCLIENTDATA *pClient, SHFLROOT root, PSHFLSTRIN
rc = VERR_FILE_NOT_FOUND;
}
- if (pszWildCardComponent)
- *pszWildCardComponent = RTPATH_DELIMITER;
+ if (pszLastComponent)
+ *pszLastComponent = RTPATH_DELIMITER;
/* might be a new file so don't fail here! */
rc = VINF_SUCCESS;
@@ -2068,7 +2069,7 @@ int vbsfRename(SHFLCLIENTDATA *pClient, SHFLROOT root, SHFLSTRING *pSrc, SHFLSTR
if (rc != VINF_SUCCESS)
return rc;
- rc = vbsfBuildFullPath (pClient, root, pDest, pDest->u16Size, &pszFullPathDest, NULL);
+ rc = vbsfBuildFullPath (pClient, root, pDest, pDest->u16Size, &pszFullPathDest, NULL, false, true);
if (RT_SUCCESS (rc))
{
Log(("Rename %s to %s\n", pszFullPathSrc, pszFullPathDest));
diff --git a/src/VBox/Installer/solaris/vboxdrv.sh b/src/VBox/Installer/solaris/vboxdrv.sh
index 809273c65..9cde46ff5 100755
--- a/src/VBox/Installer/solaris/vboxdrv.sh
+++ b/src/VBox/Installer/solaris/vboxdrv.sh
@@ -154,8 +154,8 @@ check_root()
idbin=/usr/xpg4/bin/id
if test ! -f "$idbin"; then
found=`which id`
- if test ! -f "$found" || test ! -h "$found"; then
- abort "Failed to find a suitable user id binary! Aborting"
+ if test ! -x "$found"; then
+ abort "Failed to find a suitable user id binary or not executable! Aborting"
else
idbin=$found
fi
diff --git a/src/VBox/Main/MachineImpl.cpp b/src/VBox/Main/MachineImpl.cpp
index 4ed2f26b5..9f30e14d2 100644
--- a/src/VBox/Main/MachineImpl.cpp
+++ b/src/VBox/Main/MachineImpl.cpp
@@ -8908,7 +8908,9 @@ STDMETHODIMP SessionMachine::OnSessionEnd (ISession *aSession,
ComAssertRet (!control.isNull(), E_INVALIDARG);
- AutoWriteLock alock (this);
+ /* Creating a Progress object requires the VirtualBox children lock, and
+ * thus locking it here is required by the lock order rules. */
+ AutoMultiWriteLock2 alock(mParent->childrenLock(), this->lockHandle());
if (control.equalsTo (mData->mSession.mDirectControl))
{
diff --git a/src/VBox/Main/include/VirtualBoxImpl.h b/src/VBox/Main/include/VirtualBoxImpl.h
index 6474b9f0a..ed0a94bc1 100644
--- a/src/VBox/Main/include/VirtualBoxImpl.h
+++ b/src/VBox/Main/include/VirtualBoxImpl.h
@@ -361,6 +361,16 @@ public:
*/
RWLockHandle *hardDiskTreeLockHandle() { return &mHardDiskTreeLockHandle; }
+ /**
+ * Reimplements VirtualBoxWithTypedChildren::childrenLock() to return a
+ * dedicated lock instead of the main object lock. The dedicated lock for
+ * child map operations frees callers of init() methods of these children
+ * from acquiring a write parent (VirtualBox) lock (which would be mandatory
+ * otherwise). Since VirtualBox has a lot of heterogenous children which
+ * init() methods are called here and there, it definitely makes sense.
+ */
+ RWLockHandle *childrenLock() { return &mChildrenMapLockHandle; }
+
/* for VirtualBoxSupportErrorInfoImpl */
static const wchar_t *getComponentName() { return L"VirtualBox"; }
@@ -379,16 +389,6 @@ private:
typedef std::map <Guid, ComObjPtr<HardDisk> > HardDiskMap;
- /**
- * Reimplements VirtualBoxWithTypedChildren::childrenLock() to return a
- * dedicated lock instead of the main object lock. The dedicated lock for
- * child map operations frees callers of init() methods of these children
- * from acquiring a write parent (VirtualBox) lock (which would be mandatory
- * otherwise). Since VirtualBox has a lot of heterogenous children which
- * init() methods are called here and there, it definitely makes sense.
- */
- RWLockHandle *childrenLock() { return &mChildrenMapLockHandle; }
-
HRESULT checkMediaForConflicts2 (const Guid &aId, const Bstr &aLocation,
Utf8Str &aConflictType);
diff --git a/src/VBox/Runtime/include/internal/memobj.h b/src/VBox/Runtime/include/internal/memobj.h
index 96ecd8d72..dfd95cf31 100644
--- a/src/VBox/Runtime/include/internal/memobj.h
+++ b/src/VBox/Runtime/include/internal/memobj.h
@@ -1,4 +1,4 @@
-/* $Revision: 48527 $ */
+/* $Revision: 54555 $ */
/** @file
* IPRT - Ring-0 Memory Objects.
*/
@@ -334,9 +334,11 @@ int rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecu
* @param ppMem Where to store the ring-0 memory object handle.
* @param R3Ptr User virtual address, page aligned.
* @param cb Number of bytes to lock, page aligned.
+ * @param fAccess The desired access, a combination of RTMEM_PROT_READ
+ * and RTMEM_PROT_WRITE.
* @param R0Process The process to lock pages in.
*/
-int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process);
+int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process);
/**
* Locks a range of kernel virtual memory.
@@ -345,8 +347,10 @@ int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t c
* @param ppMem Where to store the ring-0 memory object handle.
* @param pv Kernel virtual address, page aligned.
* @param cb Number of bytes to lock, page aligned.
+ * @param fAccess The desired access, a combination of RTMEM_PROT_READ
+ * and RTMEM_PROT_WRITE.
*/
-int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb);
+int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess);
/**
* Allocates contiguous page aligned physical memory without (necessarily) any kernel mapping.
diff --git a/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp b/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp
index e363b82f2..9547d3ee4 100644
--- a/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp
+++ b/src/VBox/Runtime/r0drv/darwin/memobj-r0drv-darwin.cpp
@@ -655,13 +655,16 @@ int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t
*
* @return IPRT status code.
*
- * @param ppMem Where to store the memory object pointer.
- * @param pv First page.
- * @param cb Number of bytes.
- * @param Task The task \a pv and \a cb refers to.
+ * @param ppMem Where to store the memory object pointer.
+ * @param pv First page.
+ * @param cb Number of bytes.
+ * @param fAccess The desired access, a combination of RTMEM_PROT_READ
+ * and RTMEM_PROT_WRITE.
+ * @param Task The task \a pv and \a cb refers to.
*/
-static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, task_t Task)
+static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, task_t Task)
{
+ NOREF(fAccess);
#ifdef USE_VM_MAP_WIRE
vm_map_t Map = get_task_map(Task);
Assert(Map);
@@ -729,15 +732,15 @@ static int rtR0MemObjNativeLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb,
}
-int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
+int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
- return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, (task_t)R0Process);
+ return rtR0MemObjNativeLock(ppMem, (void *)R3Ptr, cb, fAccess, (task_t)R0Process);
}
-int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
+int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
- return rtR0MemObjNativeLock(ppMem, pv, cb, kernel_task);
+ return rtR0MemObjNativeLock(ppMem, pv, cb, fAccess, kernel_task);
}
diff --git a/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c b/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c
index 6e5b796fe..91e3e765f 100644
--- a/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c
+++ b/src/VBox/Runtime/r0drv/freebsd/memobj-r0drv-freebsd.c
@@ -370,9 +370,10 @@ int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t
}
-int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
+int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
int rc;
+ NOREF(fAccess);
/* create the object. */
PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
@@ -398,9 +399,10 @@ int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t c
}
-int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
+int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
int rc;
+ NOREF(fAccess);
/* create the object. */
PRTR0MEMOBJFREEBSD pMemFreeBSD = (PRTR0MEMOBJFREEBSD)rtR0MemObjNew(sizeof(*pMemFreeBSD), RTR0MEMOBJTYPE_LOCK, pv, cb);
diff --git a/src/VBox/Runtime/r0drv/initterm-r0drv.cpp b/src/VBox/Runtime/r0drv/initterm-r0drv.cpp
index 0e25a72c0..22633213c 100644
--- a/src/VBox/Runtime/r0drv/initterm-r0drv.cpp
+++ b/src/VBox/Runtime/r0drv/initterm-r0drv.cpp
@@ -77,7 +77,7 @@ RTR0DECL(int) RTR0Init(unsigned fReserved)
rc = rtR0InitNative();
if (RT_SUCCESS(rc))
{
-#if !defined(RT_OS_LINUX)
+#if !defined(RT_OS_LINUX) /** @todo implement thread2-r0drv-linux.c */
rc = rtThreadInit();
#endif
if (RT_SUCCESS(rc))
@@ -85,32 +85,29 @@ RTR0DECL(int) RTR0Init(unsigned fReserved)
#ifndef IN_GUEST /* play safe for now */
rc = rtR0MpNotificationInit();
if (RT_SUCCESS(rc))
+ {
rc = rtR0PowerNotificationInit();
-#endif
+ if (RT_SUCCESS(rc))
+ return rc;
+ rtR0MpNotificationTerm();
+ }
+#else
if (RT_SUCCESS(rc))
return rc;
+#endif
+#if !defined(RT_OS_LINUX) /** @todo implement thread2-r0drv-linux.c */
+ rtThreadTerm();
+#endif
}
-
rtR0TermNative();
}
return rc;
}
-/**
- * Terminates the ring-0 driver runtime library.
- */
-RTR0DECL(void) RTR0Term(void)
+static void rtR0Term(void)
{
- /*
- * Last user does the cleanup.
- */
- int32_t cNewUsers = ASMAtomicDecS32(&g_crtR0Users);
- Assert(cNewUsers >= 0);
- if (cNewUsers != 0)
- return;
-
-#if !defined(RT_OS_LINUX)
+#if !defined(RT_OS_LINUX) /** @todo implement thread2-r0drv-linux.c */
rtThreadTerm();
#endif
#ifndef IN_GUEST /* play safe for now */
@@ -120,3 +117,24 @@ RTR0DECL(void) RTR0Term(void)
rtR0TermNative();
}
+
+/**
+ * Terminates the ring-0 driver runtime library.
+ */
+RTR0DECL(void) RTR0Term(void)
+{
+ int32_t cNewUsers = ASMAtomicDecS32(&g_crtR0Users);
+ Assert(cNewUsers >= 0);
+ if (cNewUsers == 0)
+ rtR0Term();
+}
+
+
+/* Note! Should *not* be exported since it's only for static linking. */
+RTR0DECL(void) RTR0TermForced(void)
+{
+ AssertMsg(g_crtR0Users == 1, ("%d\n", g_crtR0Users));
+
+ rtR0Term();
+}
+
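The refactoring above splits the actual cleanup into a static rtR0Term() worker so that the reference-counted RTR0Term() and the new RTR0TermForced() (for statically linked users) can share it. A hedged sketch of the resulting driver-side usage pattern (hypothetical load/unload hooks, assuming iprt/initterm.h and iprt/err.h):

    #include <iprt/initterm.h>
    #include <iprt/err.h>

    static int myDriverLoad(void)           /* hypothetical load hook */
    {
        int rc = RTR0Init(0);               /* each successful call counts one user */
        if (RT_FAILURE(rc))
            return rc;
        /* ... driver specific initialization ... */
        return rc;
    }

    static void myDriverUnload(void)        /* hypothetical unload hook */
    {
        RTR0Term();                         /* the last user runs the shared rtR0Term() worker */
    }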
diff --git a/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c b/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
index 9d9abd5bc..f113ec199 100644
--- a/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
+++ b/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
@@ -1,4 +1,4 @@
-/* $Revision: 51699 $ */
+/* $Revision: 54555 $ */
/** @file
* IPRT - Ring-0 Memory Objects, Linux.
*/
@@ -734,13 +734,14 @@ int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t
}
-int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
+int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
const int cPages = cb >> PAGE_SHIFT;
struct task_struct *pTask = rtR0ProcessToLinuxTask(R0Process);
struct vm_area_struct **papVMAs;
PRTR0MEMOBJLNX pMemLnx;
int rc = VERR_NO_MEMORY;
+ NOREF(fAccess);
/*
* Check for valid task and size overflows.
@@ -830,7 +831,7 @@ int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t c
}
-int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
+int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
/* What is there to lock? Should/Can we fake this? */
return VERR_NOT_SUPPORTED;
diff --git a/src/VBox/Runtime/r0drv/memobj-r0drv.cpp b/src/VBox/Runtime/r0drv/memobj-r0drv.cpp
index 004ae4cb5..fac9f2032 100644
--- a/src/VBox/Runtime/r0drv/memobj-r0drv.cpp
+++ b/src/VBox/Runtime/r0drv/memobj-r0drv.cpp
@@ -1,4 +1,4 @@
-/* $Revision: 48527 $ */
+/* $Revision: 54555 $ */
/** @file
* IPRT - Ring-0 Memory Objects, Common Code.
*/
@@ -475,9 +475,14 @@ RTR0DECL(int) RTR0MemObjAllocCont(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutab
*
* @returns IPRT status code.
* @param pMemObj Where to store the ring-0 memory object handle.
- * @param R3Ptr User virtual address. This is rounded down to a page boundrary.
- * @param cb Number of bytes to lock. This is rounded up to nearest page boundrary.
- * @param R0Process The process to lock pages in. NIL_R0PROCESS is an alias for the current one.
+ * @param R3Ptr User virtual address. This is rounded down to a page
+ * boundary.
+ * @param cb Number of bytes to lock. This is rounded up to
+ * nearest page boundary.
+ * @param fAccess The desired access, a combination of RTMEM_PROT_READ
+ * and RTMEM_PROT_WRITE.
+ * @param R0Process The process to lock pages in. NIL_R0PROCESS is an
+ * alias for the current one.
*
* @remarks RTR0MemGetAddressR3() and RTR0MemGetAddress() will return the rounded
* down address.
@@ -487,7 +492,7 @@ RTR0DECL(int) RTR0MemObjAllocCont(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutab
* is not intended as a permanent restriction, feel free to help out
* lifting it.
*/
-RTR0DECL(int) RTR0MemObjLockUser(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
+RTR0DECL(int) RTR0MemObjLockUser(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
/* sanity checks. */
const size_t cbAligned = RT_ALIGN_Z(cb + (R3Ptr & PAGE_OFFSET_MASK), PAGE_SIZE);
@@ -499,8 +504,11 @@ RTR0DECL(int) RTR0MemObjLockUser(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb,
if (R0Process == NIL_RTR0PROCESS)
R0Process = RTR0ProcHandleSelf();
+ AssertReturn(!(fAccess & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE)), VERR_INVALID_PARAMETER);
+ AssertReturn(fAccess, VERR_INVALID_PARAMETER);
+
/* do the locking. */
- return rtR0MemObjNativeLockUser(pMemObj, R3PtrAligned, cbAligned, R0Process);
+ return rtR0MemObjNativeLockUser(pMemObj, R3PtrAligned, cbAligned, fAccess, R0Process);
}
@@ -511,10 +519,12 @@ RTR0DECL(int) RTR0MemObjLockUser(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb,
* @param pMemObj Where to store the ring-0 memory object handle.
* @param pv Kernel virtual address. This is rounded down to a page boundrary.
* @param cb Number of bytes to lock. This is rounded up to nearest page boundrary.
+ * @param fAccess The desired access, a combination of RTMEM_PROT_READ
+ * and RTMEM_PROT_WRITE.
*
* @remark RTR0MemGetAddress() will return the rounded down address.
*/
-RTR0DECL(int) RTR0MemObjLockKernel(PRTR0MEMOBJ pMemObj, void *pv, size_t cb)
+RTR0DECL(int) RTR0MemObjLockKernel(PRTR0MEMOBJ pMemObj, void *pv, size_t cb, uint32_t fAccess)
{
/* sanity checks. */
const size_t cbAligned = RT_ALIGN_Z(cb + ((uintptr_t)pv & PAGE_OFFSET_MASK), PAGE_SIZE);
@@ -525,8 +535,11 @@ RTR0DECL(int) RTR0MemObjLockKernel(PRTR0MEMOBJ pMemObj, void *pv, size_t cb)
AssertReturn(cb <= cbAligned, VERR_INVALID_PARAMETER);
AssertPtrReturn(pvAligned, VERR_INVALID_POINTER);
+ AssertReturn(!(fAccess & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE)), VERR_INVALID_PARAMETER);
+ AssertReturn(fAccess, VERR_INVALID_PARAMETER);
+
/* do the allocation. */
- return rtR0MemObjNativeLockKernel(pMemObj, pvAligned, cbAligned);
+ return rtR0MemObjNativeLockKernel(pMemObj, pvAligned, cbAligned, fAccess);
}
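With the widened public signatures, callers now state the access they need when locking. A minimal caller sketch (hypothetical helper, mirroring how GMMR0SeedChunk further down in this patch passes RTMEM_PROT_READ | RTMEM_PROT_WRITE):

    #include <iprt/memobj.h>
    #include <iprt/mem.h>       /* RTMEM_PROT_READ / RTMEM_PROT_WRITE */

    /* Lock a user-mode buffer of the calling process for read/write access. */
    static int sketchLockUserBuffer(RTR3PTR R3Ptr, size_t cb, PRTR0MEMOBJ phMemObj)
    {
        return RTR0MemObjLockUser(phMemObj, R3Ptr, cb,
                                  RTMEM_PROT_READ | RTMEM_PROT_WRITE,
                                  NIL_RTR0PROCESS /* alias for the current process */);
    }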
diff --git a/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp b/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp
index cbf6f616f..551f9c4d1 100644
--- a/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp
+++ b/src/VBox/Runtime/r0drv/nt/memobj-r0drv-nt.cpp
@@ -500,12 +500,14 @@ int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t
*
* @return IPRT status code.
*
- * @param ppMem Where to store the memory object pointer.
- * @param pv First page.
- * @param cb Number of bytes.
- * @param R0Process The process \a pv and \a cb refers to.
+ * @param ppMem Where to store the memory object pointer.
+ * @param pv First page.
+ * @param cb Number of bytes.
+ * @param fAccess The desired access, a combination of RTMEM_PROT_READ
+ * and RTMEM_PROT_WRITE.
+ * @param R0Process The process \a pv and \a cb refers to.
*/
-static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, RTR0PROCESS R0Process)
+static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
/*
* Calc the number of MDLs we need and allocate the memory object structure.
@@ -548,7 +550,13 @@ static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, RTR
*/
__try
{
- MmProbeAndLockPages(pMdl, R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode, IoModifyAccess);
+ MmProbeAndLockPages(pMdl,
+ R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
+ fAccess == RTMEM_PROT_READ
+ ? IoReadAccess
+ : fAccess == RTMEM_PROT_WRITE
+ ? IoWriteAccess
+ : IoModifyAccess);
pMemNt->apMdls[iMdl] = pMdl;
pMemNt->cMdls++;
@@ -560,10 +568,13 @@ static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, RTR
break;
}
- if (R0Process != NIL_RTR0PROCESS )
+ if (R0Process != NIL_RTR0PROCESS)
{
/* Make sure the user process can't change the allocation. */
- pMemNt->pvSecureMem = MmSecureVirtualMemory(pv, cb, PAGE_READWRITE);
+ pMemNt->pvSecureMem = MmSecureVirtualMemory(pv, cb,
+ fAccess & RTMEM_PROT_WRITE
+ ? PAGE_READWRITE
+ : PAGE_READONLY);
if (!pMemNt->pvSecureMem)
{
rc = VERR_NO_MEMORY;
@@ -603,17 +614,17 @@ static int rtR0MemObjNtLock(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, RTR
}
-int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
+int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
/* (Can use MmProbeAndLockProcessPages if we need to mess with other processes later.) */
- return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, R0Process);
+ return rtR0MemObjNtLock(ppMem, (void *)R3Ptr, cb, fAccess, R0Process);
}
-int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
+int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
- return rtR0MemObjNtLock(ppMem, pv, cb, NIL_RTR0PROCESS);
+ return rtR0MemObjNtLock(ppMem, pv, cb, fAccess, NIL_RTR0PROCESS);
}
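The NT backend now maps fAccess onto the MmProbeAndLockPages operation and the MmSecureVirtualMemory protection instead of always requesting modify/read-write access. A hedged restatement of the first mapping as a standalone helper (assumes the NT DDK headers; mirrors the ternary in rtR0MemObjNtLock above):

    #include <iprt/mem.h>
    #include <wdm.h>                     /* LOCK_OPERATION, Io*Access */

    static LOCK_OPERATION sketchNtLockOperation(uint32_t fAccess)
    {
        if (fAccess == RTMEM_PROT_READ)
            return IoReadAccess;         /* read-only locking */
        if (fAccess == RTMEM_PROT_WRITE)
            return IoWriteAccess;        /* write-only locking */
        return IoModifyAccess;           /* read + write */
    }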
diff --git a/src/VBox/Runtime/r0drv/os2/memobj-r0drv-os2.cpp b/src/VBox/Runtime/r0drv/os2/memobj-r0drv-os2.cpp
index 019e1933a..362824d54 100644
--- a/src/VBox/Runtime/r0drv/os2/memobj-r0drv-os2.cpp
+++ b/src/VBox/Runtime/r0drv/os2/memobj-r0drv-os2.cpp
@@ -241,7 +241,7 @@ int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t
}
-int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
+int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
@@ -253,7 +253,8 @@ int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t c
/* lock it. */
ULONG cPagesRet = cPages;
- int rc = KernVMLock(VMDHL_LONG | VMDHL_WRITE, (void *)R3Ptr, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
+ int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
+ (void *)R3Ptr, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
if (!rc)
{
rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
@@ -268,7 +269,7 @@ int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t c
}
-int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
+int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
/* create the object. */
const ULONG cPages = cb >> PAGE_SHIFT;
@@ -278,7 +279,8 @@ int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
/* lock it. */
ULONG cPagesRet = cPages;
- int rc = KernVMLock(VMDHL_LONG | VMDHL_WRITE, pv, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
+ int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
+ pv, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
if (!rc)
{
rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
diff --git a/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c
index a73bc53f5..99fdba856 100644
--- a/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c
+++ b/src/VBox/Runtime/r0drv/solaris/memobj-r0drv-solaris.c
@@ -56,8 +56,10 @@ typedef struct RTR0MEMOBJSOLARIS
RTR0MEMOBJINTERNAL Core;
/** Pointer to kernel memory cookie. */
ddi_umem_cookie_t Cookie;
+ /** Access during locking. */
+ int fAccess;
/** Shadow locked pages. */
- page_t **ppShadowPages;
+ page_t **ppShadowPages;
} RTR0MEMOBJSOLARIS, *PRTR0MEMOBJSOLARIS;
/**
@@ -116,7 +118,7 @@ int rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
if ((uintptr_t)pMemSolaris->Core.pv < kernelbase)
{
addrSpace = ((proc_t *)pMemSolaris->Core.u.Lock.R0Process)->p_as;
- as_pageunlock(addrSpace, pMemSolaris->ppShadowPages, pMemSolaris->Core.pv, pMemSolaris->Core.cb, S_WRITE);
+ as_fault(addrSpace->a_hat, addrSpace, (caddr_t)pMemSolaris->Core.pv, pMemSolaris->Core.cb, F_SOFTUNLOCK, pMemSolaris->fAccess);
}
/* Nothing to unlock for kernel addresses. */
break;
@@ -257,7 +259,7 @@ int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t
}
-int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
+int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_INVALID_PARAMETER);
@@ -270,17 +272,24 @@ int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t c
struct as *useras = userproc->p_as;
page_t **ppl;
+ int fPageAccess = S_READ;
+ if (fAccess & RTMEM_PROT_WRITE)
+ fPageAccess = S_WRITE;
+ if (fAccess & RTMEM_PROT_EXEC)
+ fPageAccess = S_EXEC;
+
/* Lock down user pages */
int rc;
ppl = NULL;
if ((uintptr_t)R3Ptr < kernelbase)
- rc = as_pagelock(useras, &ppl, (caddr_t)R3Ptr, cb, S_WRITE);
+ rc = as_fault(userproc->p_as->a_hat, userproc->p_as, (caddr_t)R3Ptr, cb, F_SOFTLOCK, fPageAccess);
else
rc = 0;
if (rc == 0)
{
pMemSolaris->Core.u.Lock.R0Process = (RTR0PROCESS)userproc;
pMemSolaris->ppShadowPages = ppl;
+ pMemSolaris->fAccess = fPageAccess;
*ppMem = &pMemSolaris->Core;
return VINF_SUCCESS;
}
@@ -291,8 +300,10 @@ int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t c
}
-int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
+int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
+ NOREF(fAccess);
+
/* Create the locking object */
PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb);
if (!pMemSolaris)
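The Solaris backend switches from as_pagelock/as_pageunlock to as_fault soft locking, which is why the chosen access type is now remembered in the memory object: the F_SOFTUNLOCK issued at free time must use the same seg_rw access as the F_SOFTLOCK taken at lock time, as rtR0MemObjNativeFree does above. A hedged sketch of the unlock half of that pairing (assumes the Solaris kernel VM headers):

    #include <vm/as.h>
    #include <vm/seg.h>

    /* enmAccess is the S_READ / S_WRITE / S_EXEC value saved when locking. */
    static void sketchSolarisSoftUnlock(struct as *pAddrSpace, caddr_t pv, size_t cb, enum seg_rw enmAccess)
    {
        (void)as_fault(pAddrSpace->a_hat, pAddrSpace, pv, cb, F_SOFTUNLOCK, enmAccess);
    }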
diff --git a/src/VBox/Runtime/r0drv/solaris/vbi/memobj-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/vbi/memobj-r0drv-solaris.c
index 90598bf70..c2a4ef6e3 100644
--- a/src/VBox/Runtime/r0drv/solaris/vbi/memobj-r0drv-solaris.c
+++ b/src/VBox/Runtime/r0drv/solaris/vbi/memobj-r0drv-solaris.c
@@ -203,9 +203,10 @@ int rtR0MemObjNativeEnterPhys(PPRTR0MEMOBJINTERNAL ppMem, RTHCPHYS Phys, size_t
}
-int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process)
+int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_INVALID_PARAMETER);
+ NOREF(fAccess);
/* Create the locking object */
PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
@@ -229,8 +230,10 @@ int rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t c
}
-int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb)
+int rtR0MemObjNativeLockKernel(PPRTR0MEMOBJINTERNAL ppMem, void *pv, size_t cb, uint32_t fAccess)
{
+ NOREF(fAccess);
+
PRTR0MEMOBJSOLARIS pMemSolaris = (PRTR0MEMOBJSOLARIS)rtR0MemObjNew(sizeof(*pMemSolaris), RTR0MEMOBJTYPE_LOCK, pv, cb);
if (!pMemSolaris)
return VERR_NO_MEMORY;
diff --git a/src/VBox/Runtime/r0drv/solaris/vbi/mpnotification-r0drv-solaris.c b/src/VBox/Runtime/r0drv/solaris/vbi/mpnotification-r0drv-solaris.c
index b6a775dfc..5ce3d89f8 100644
--- a/src/VBox/Runtime/r0drv/solaris/vbi/mpnotification-r0drv-solaris.c
+++ b/src/VBox/Runtime/r0drv/solaris/vbi/mpnotification-r0drv-solaris.c
@@ -68,7 +68,6 @@ static void rtMpNotificationSolarisCallback(void *pvUser, int iCpu, int online)
}
}
-static vbi_cpu_watch_t *watch_handle = NULL;
int rtR0MpNotificationNativeInit(void)
{
@@ -90,8 +89,8 @@ int rtR0MpNotificationNativeInit(void)
void rtR0MpNotificationNativeTerm(void)
{
- if (vbi_revision_level >= 2 && watch_handle != NULL)
- vbi_ignore_cpus(watch_handle);
- watch_handle = NULL;
+ if (vbi_revision_level >= 2 && g_hVbiCpuWatch != NULL)
+ vbi_ignore_cpus(g_hVbiCpuWatch);
+ g_hVbiCpuWatch = NULL;
}
diff --git a/src/VBox/VMM/HWACCM.cpp b/src/VBox/VMM/HWACCM.cpp
index 13a2c21d5..c0f671236 100644
--- a/src/VBox/VMM/HWACCM.cpp
+++ b/src/VBox/VMM/HWACCM.cpp
@@ -1117,6 +1117,31 @@ VMMR3DECL(int) HWACCMR3InitFinalizeR0(PVM pVM)
LogRel(("HWACCM: enmFlushPage %d\n", pVM->hwaccm.s.vmx.enmFlushPage));
LogRel(("HWACCM: enmFlushContext %d\n", pVM->hwaccm.s.vmx.enmFlushContext));
}
+
+ /* TPR patching status logging. */
+ if (pVM->hwaccm.s.fTRPPatchingAllowed)
+ {
+ if ( (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
+ && (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
+ {
+ pVM->hwaccm.s.fTRPPatchingAllowed = false; /* not necessary as we have a hardware solution. */
+ LogRel(("HWACCM: TPR Patching not required (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).\n"));
+ }
+ else
+ {
+ uint32_t u32Eax, u32Dummy;
+
+ /* TPR patching needs access to the MSR_K8_LSTAR msr. */
+ ASMCpuId(0x80000000, &u32Eax, &u32Dummy, &u32Dummy, &u32Dummy);
+ if ( u32Eax < 0x80000001
+ || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
+ {
+ pVM->hwaccm.s.fTRPPatchingAllowed = false;
+ LogRel(("HWACCM: TPR patching disabled (long mode not supported).\n"));
+ }
+ }
+ }
+ LogRel(("HWACCM: TPR Patching %s.\n", (pVM->hwaccm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
}
else
{
@@ -1464,13 +1489,13 @@ VMMR3DECL(void) HWACCMR3Reset(PVM pVM)
}
/* Clear all patch information. */
- pVM->hwaccm.s.pGuestPatchMem = 0;
- pVM->hwaccm.s.pFreeGuestPatchMem = 0;
- pVM->hwaccm.s.cbGuestPatchMem = 0;
- pVM->hwaccm.s.svm.cPatches = 0;
- pVM->hwaccm.s.svm.PatchTree = 0;
- pVM->hwaccm.s.svm.fTPRPatchingActive = false;
- ASMMemZero32(pVM->hwaccm.s.svm.aPatches, sizeof(pVM->hwaccm.s.svm.aPatches));
+ pVM->hwaccm.s.pGuestPatchMem = 0;
+ pVM->hwaccm.s.pFreeGuestPatchMem = 0;
+ pVM->hwaccm.s.cbGuestPatchMem = 0;
+ pVM->hwaccm.s.cPatches = 0;
+ pVM->hwaccm.s.PatchTree = 0;
+ pVM->hwaccm.s.fTPRPatchingActive = false;
+ ASMMemZero32(pVM->hwaccm.s.aPatches, sizeof(pVM->hwaccm.s.aPatches));
}
/**
@@ -1491,10 +1516,10 @@ DECLCALLBACK(int) hwaccmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
return VINF_SUCCESS;
Log(("hwaccmR3RemovePatches\n"));
- for (unsigned i = 0; i < pVM->hwaccm.s.svm.cPatches; i++)
+ for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
{
uint8_t szInstr[15];
- PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.svm.aPatches[i];
+ PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];
RTGCPTR pInstrGC = (RTGCPTR)pPatch->Core.Key;
int rc;
@@ -1529,10 +1554,10 @@ DECLCALLBACK(int) hwaccmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)
Log(("Original instr: %s\n", szOutput));
#endif
}
- pVM->hwaccm.s.svm.cPatches = 0;
- pVM->hwaccm.s.svm.PatchTree = 0;
+ pVM->hwaccm.s.cPatches = 0;
+ pVM->hwaccm.s.PatchTree = 0;
pVM->hwaccm.s.pFreeGuestPatchMem = pVM->hwaccm.s.pGuestPatchMem;
- pVM->hwaccm.s.svm.fTPRPatchingActive = false;
+ pVM->hwaccm.s.fTPRPatchingActive = false;
return VINF_SUCCESS;
}
@@ -1568,12 +1593,6 @@ VMMR3DECL(int) HWACMMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPa
{
Log(("HWACMMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));
- /* Current TPR patching only applies to AMD cpus.
- * Needs to be extended to Intel CPUs without the APIC TPR hardware optimization.
- */
- if (CPUMGetCPUVendor(pVM) != CPUMCPUVENDOR_AMD)
- return VERR_NOT_SUPPORTED;
-
if (pVM->cCPUs > 1)
{
/* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */
@@ -1583,8 +1602,7 @@ VMMR3DECL(int) HWACMMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPa
AssertRC(rc);
return rc;
}
- else
- return hwaccmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
+ return hwaccmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);
}
/**
@@ -1609,7 +1627,7 @@ VMMR3DECL(int) HWACMMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbP
pVM->hwaccm.s.pGuestPatchMem = 0;
pVM->hwaccm.s.pFreeGuestPatchMem = 0;
pVM->hwaccm.s.cbGuestPatchMem = 0;
- pVM->hwaccm.s.svm.fTPRPatchingActive = false;
+ pVM->hwaccm.s.fTPRPatchingActive = false;
return VINF_SUCCESS;
}
@@ -1638,11 +1656,11 @@ DECLCALLBACK(int) hwaccmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
Log(("hwaccmR3ReplaceTprInstr: %RGv\n", pCtx->rip));
/* Two or more VCPUs were racing to patch this instruction. */
- PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.svm.PatchTree, (AVLOU32KEY)pCtx->eip);
+ PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
if (pPatch)
return VINF_SUCCESS;
- Assert(pVM->hwaccm.s.svm.cPatches < RT_ELEMENTS(pVM->hwaccm.s.svm.aPatches));
+ Assert(pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches));
int rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), pDis, &cbOp);
AssertRC(rc);
@@ -1651,8 +1669,8 @@ DECLCALLBACK(int) hwaccmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
&& cbOp >= 3)
{
uint8_t aVMMCall[3] = { 0xf, 0x1, 0xd9};
- uint32_t idx = pVM->hwaccm.s.svm.cPatches;
- PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.svm.aPatches[idx];
+ uint32_t idx = pVM->hwaccm.s.cPatches;
+ PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[idx];
rc = PGMPhysSimpleReadGCPtr(pVCpu, pPatch->aOpcode, pCtx->rip, cbOp);
AssertRC(rc);
@@ -1702,7 +1720,7 @@ DECLCALLBACK(int) hwaccmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
&& pDis->param1.base.reg_gen == uMmioReg
&& pDis->param2.flags == USE_IMMEDIATE8
&& pDis->param2.parval == 4
- && oldcbOp + cbOp < sizeof(pVM->hwaccm.s.svm.aPatches[idx].aOpcode))
+ && oldcbOp + cbOp < sizeof(pVM->hwaccm.s.aPatches[idx].aOpcode))
{
uint8_t szInstr[15];
@@ -1743,16 +1761,16 @@ DECLCALLBACK(int) hwaccmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
}
pPatch->Core.Key = pCtx->eip;
- rc = RTAvloU32Insert(&pVM->hwaccm.s.svm.PatchTree, &pPatch->Core);
+ rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
AssertRC(rc);
- pVM->hwaccm.s.svm.cPatches++;
+ pVM->hwaccm.s.cPatches++;
STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceSuccess);
return VINF_SUCCESS;
}
/* Save invalid patch, so we will not try again. */
- uint32_t idx = pVM->hwaccm.s.svm.cPatches;
+ uint32_t idx = pVM->hwaccm.s.cPatches;
#ifdef LOG_ENABLED
char szOutput[256];
@@ -1761,12 +1779,12 @@ DECLCALLBACK(int) hwaccmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
Log(("Failed to patch instr: %s\n", szOutput));
#endif
- pPatch = &pVM->hwaccm.s.svm.aPatches[idx];
+ pPatch = &pVM->hwaccm.s.aPatches[idx];
pPatch->Core.Key = pCtx->eip;
pPatch->enmType = HWACCMTPRINSTR_INVALID;
- rc = RTAvloU32Insert(&pVM->hwaccm.s.svm.PatchTree, &pPatch->Core);
+ rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
AssertRC(rc);
- pVM->hwaccm.s.svm.cPatches++;
+ pVM->hwaccm.s.cPatches++;
STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRReplaceFailure);
return VINF_SUCCESS;
}
@@ -1796,10 +1814,10 @@ DECLCALLBACK(int) hwaccmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
if (pVCpu->idCpu != idCpu)
return VINF_SUCCESS;
- Assert(pVM->hwaccm.s.svm.cPatches < RT_ELEMENTS(pVM->hwaccm.s.svm.aPatches));
+ Assert(pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches));
/* Two or more VCPUs were racing to patch this instruction. */
- PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.svm.PatchTree, (AVLOU32KEY)pCtx->eip);
+ PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
if (pPatch)
{
Log(("hwaccmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));
@@ -1814,8 +1832,8 @@ DECLCALLBACK(int) hwaccmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
&& pDis->pCurInstr->opcode == OP_MOV
&& cbOp >= 5)
{
- uint32_t idx = pVM->hwaccm.s.svm.cPatches;
- PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.svm.aPatches[idx];
+ uint32_t idx = pVM->hwaccm.s.cPatches;
+ PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[idx];
uint8_t aPatch[64];
uint32_t off = 0;
@@ -1977,11 +1995,11 @@ DECLCALLBACK(int) hwaccmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
pPatch->cbNewOp = 5;
pPatch->Core.Key = pCtx->eip;
- rc = RTAvloU32Insert(&pVM->hwaccm.s.svm.PatchTree, &pPatch->Core);
+ rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
AssertRC(rc);
- pVM->hwaccm.s.svm.cPatches++;
- pVM->hwaccm.s.svm.fTPRPatchingActive = true;
+ pVM->hwaccm.s.cPatches++;
+ pVM->hwaccm.s.fTPRPatchingActive = true;
STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchSuccess);
return VINF_SUCCESS;
}
@@ -1990,7 +2008,7 @@ DECLCALLBACK(int) hwaccmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
}
/* Save invalid patch, so we will not try again. */
- uint32_t idx = pVM->hwaccm.s.svm.cPatches;
+ uint32_t idx = pVM->hwaccm.s.cPatches;
#ifdef LOG_ENABLED
rc = DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCtx->rip, 0, szOutput, sizeof(szOutput), 0);
@@ -1998,12 +2016,12 @@ DECLCALLBACK(int) hwaccmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)
Log(("Failed to patch instr: %s\n", szOutput));
#endif
- pPatch = &pVM->hwaccm.s.svm.aPatches[idx];
+ pPatch = &pVM->hwaccm.s.aPatches[idx];
pPatch->Core.Key = pCtx->eip;
pPatch->enmType = HWACCMTPRINSTR_INVALID;
- rc = RTAvloU32Insert(&pVM->hwaccm.s.svm.PatchTree, &pPatch->Core);
+ rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
AssertRC(rc);
- pVM->hwaccm.s.svm.cPatches++;
+ pVM->hwaccm.s.cPatches++;
STAM_COUNTER_INC(&pVM->hwaccm.s.StatTPRPatchFailure);
return VINF_SUCCESS;
}
@@ -2434,12 +2452,12 @@ static DECLCALLBACK(int) hwaccmR3Save(PVM pVM, PSSMHANDLE pSSM)
AssertRCReturn(rc, rc);
/* Store all the guest patch records too. */
- rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.svm.cPatches);
+ rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.cPatches);
AssertRCReturn(rc, rc);
- for (unsigned i = 0; i < pVM->hwaccm.s.svm.cPatches; i++)
+ for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
{
- PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.svm.aPatches[i];
+ PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];
rc = SSMR3PutU32(pSSM, pPatch->Core.Key);
AssertRCReturn(rc, rc);
@@ -2537,12 +2555,12 @@ static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Vers
AssertRCReturn(rc, rc);
/* Fetch all TPR patch records. */
- rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.svm.cPatches);
+ rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.cPatches);
AssertRCReturn(rc, rc);
- for (unsigned i = 0; i < pVM->hwaccm.s.svm.cPatches; i++)
+ for (unsigned i = 0; i < pVM->hwaccm.s.cPatches; i++)
{
- PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.svm.aPatches[i];
+ PHWACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];
rc = SSMR3GetU32(pSSM, &pPatch->Core.Key);
AssertRCReturn(rc, rc);
@@ -2563,9 +2581,9 @@ static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Vers
AssertRCReturn(rc, rc);
if (pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT)
- pVM->hwaccm.s.svm.fTPRPatchingActive = true;
+ pVM->hwaccm.s.fTPRPatchingActive = true;
- Assert(pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT || pVM->hwaccm.s.svm.fTPRPatchingActive == false);
+ Assert(pPatch->enmType == HWACCMTPRINSTR_JUMP_REPLACEMENT || pVM->hwaccm.s.fTPRPatchingActive == false);
rc = SSMR3GetU32(pSSM, &pPatch->uSrcOperand);
AssertRCReturn(rc, rc);
@@ -2589,7 +2607,7 @@ static DECLCALLBACK(int) hwaccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Vers
Log(("cFaults = %d\n", pPatch->cFaults));
Log(("target = %x\n", pPatch->pJumpTarget));
- rc = RTAvloU32Insert(&pVM->hwaccm.s.svm.PatchTree, &pPatch->Core);
+ rc = RTAvloU32Insert(&pVM->hwaccm.s.PatchTree, &pPatch->Core);
AssertRC(rc);
}
}
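The new gating logic above disables TPR patching when the MSR it relies on cannot exist: the patch code caches the TPR in MSR_K8_LSTAR, which is only architecturally present on long-mode capable CPUs, and patching is skipped entirely when the virtualized-APIC hardware assist is available. A hedged standalone restatement of the CPUID part of that check (hypothetical helper; assumes iprt/asm.h and iprt/x86.h):

    #include <iprt/asm.h>
    #include <iprt/x86.h>

    static bool sketchCpuSupportsLstarTprPatching(void)
    {
        uint32_t uMaxExtLeaf = ASMCpuId_EAX(0x80000000);
        if (uMaxExtLeaf < 0x80000001)
            return false;       /* no extended leaves, so no long mode and no LSTAR */
        return RT_BOOL(ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE);
    }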
diff --git a/src/VBox/VMM/HWACCMInternal.h b/src/VBox/VMM/HWACCMInternal.h
index 99f05863e..214b29f39 100644
--- a/src/VBox/VMM/HWACCMInternal.h
+++ b/src/VBox/VMM/HWACCMInternal.h
@@ -279,6 +279,10 @@ typedef struct HWACCM
/** Set when we initialize VT-x or AMD-V once for all CPUs. */
bool fGlobalInit;
+ /** Set when TPR patching is active. */
+ bool fTPRPatchingActive;
+ bool u8Alignment[7];
+
/** And mask for copying register contents. */
uint64_t u64RegisterMask;
@@ -412,8 +416,7 @@ typedef struct HWACCM
bool fEnabled;
/** Set if erratum 170 affects the AMD cpu. */
bool fAlwaysFlushTLB;
- /** Set when TPR patching is active. */
- bool fTPRPatchingActive;
+ bool u8Alignment;
/** R0 memory object for the IO bitmap (12kb). */
RTR0MEMOBJ pMemObjIOBitmap;
@@ -427,15 +430,15 @@ typedef struct HWACCM
/** SVM feature bits from cpuid 0x8000000a */
uint32_t u32Features;
-
- /**
- * AVL tree with all patches (active or disabled) sorted by guest instruction address
- */
- AVLOU32TREE PatchTree;
- uint32_t cPatches;
- HWACCMTPRPATCH aPatches[64];
} svm;
+ /**
+ * AVL tree with all patches (active or disabled) sorted by guest instruction address
+ */
+ AVLOU32TREE PatchTree;
+ uint32_t cPatches;
+ HWACCMTPRPATCH aPatches[64];
+
struct
{
uint32_t u32AMDFeatureECX;
@@ -447,7 +450,7 @@ typedef struct HWACCM
/** HWACCMR0Init was run */
bool fHWACCMR0Init;
- bool u8Alignment[7];
+ bool u8Alignment1[7];
STAMCOUNTER StatTPRPatchSuccess;
STAMCOUNTER StatTPRPatchFailure;
diff --git a/src/VBox/VMM/PDMDevHlp.cpp b/src/VBox/VMM/PDMDevHlp.cpp
index 9ba1aafe7..3598e233c 100644
--- a/src/VBox/VMM/PDMDevHlp.cpp
+++ b/src/VBox/VMM/PDMDevHlp.cpp
@@ -1756,12 +1756,8 @@ static DECLCALLBACK(int) pdmR3DevHlp_APICRegister(PPDMDEVINS pDevIns, PPDMAPICRE
}
if (RT_SUCCESS(rc))
{
-#if 0
rc = PDMR3LdrGetSymbolRCLazy(pVM, pDevIns->pDevReg->szRCMod, pApicReg->pszLocalInterruptRC, &pVM->pdm.s.Apic.pfnLocalInterruptRC);
AssertMsgRC(rc, ("%s::%s rc=%Rrc\n", pDevIns->pDevReg->szRCMod, pApicReg->pszLocalInterruptRC, rc));
-#else
- pVM->pdm.s.Apic.pfnLocalInterruptRC = NIL_RTRCPTR;
-#endif
}
if (RT_FAILURE(rc))
{
@@ -1834,12 +1830,8 @@ static DECLCALLBACK(int) pdmR3DevHlp_APICRegister(PPDMDEVINS pDevIns, PPDMAPICRE
}
if (RT_SUCCESS(rc))
{
-#if 0
rc = PDMR3LdrGetSymbolR0Lazy(pVM, pDevIns->pDevReg->szR0Mod, pApicReg->pszLocalInterruptR0, &pVM->pdm.s.Apic.pfnLocalInterruptR0);
AssertMsgRC(rc, ("%s::%s rc=%Rrc\n", pDevIns->pDevReg->szR0Mod, pApicReg->pszLocalInterruptR0, rc));
-#else
- pVM->pdm.s.Apic.pfnLocalInterruptR0 = NIL_RTR0PTR;
-#endif
}
if (RT_FAILURE(rc))
{
@@ -1877,21 +1869,9 @@ static DECLCALLBACK(int) pdmR3DevHlp_APICRegister(PPDMDEVINS pDevIns, PPDMAPICRE
pVM->pdm.s.Apic.pfnWriteMSRR3 = pApicReg->pfnWriteMSRR3;
pVM->pdm.s.Apic.pfnReadMSRR3 = pApicReg->pfnReadMSRR3;
pVM->pdm.s.Apic.pfnBusDeliverR3 = pApicReg->pfnBusDeliverR3;
-#if 0
pVM->pdm.s.Apic.pfnLocalInterruptR3 = pApicReg->pfnLocalInterruptR3;
-#else
- pVM->pdm.s.Apic.pfnLocalInterruptR3 = NULL;
-#endif
Log(("PDM: Registered APIC device '%s'/%d pDevIns=%p\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, pDevIns));
-
-#if 1
- /* Disable the APIC fix due to Linux SMP regressions. */
- pVM->pdm.s.Apic.pfnLocalInterruptR3 = 0;
- pVM->pdm.s.Apic.pfnLocalInterruptR0 = 0;
- pVM->pdm.s.Apic.pfnLocalInterruptRC = 0;
-#endif
-
/* set the helper pointer and return. */
*ppApicHlpR3 = &g_pdmR3DevApicHlp;
LogFlow(("pdmR3DevHlp_APICRegister: caller='%s'/%d: returns %Rrc\n", pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, VINF_SUCCESS));
diff --git a/src/VBox/VMM/PGM.cpp b/src/VBox/VMM/PGM.cpp
index 9d8701651..72f95bdb2 100644
--- a/src/VBox/VMM/PGM.cpp
+++ b/src/VBox/VMM/PGM.cpp
@@ -2870,7 +2870,6 @@ static int pgmR3LoadLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
/*
* Old format.
*/
- AssertLogRelReturn(!pVM->pgm.s.fRamPreAlloc, VERR_NOT_SUPPORTED); /* can't be detected. */
/* Of the page flags, pick up MMIO2 and ROM/RESERVED for the !fHaveBits case.
The rest is generally irrelevant and wrong since the stuff has to match registrations. */
@@ -3808,7 +3807,7 @@ VMMR3DECL(int) PGMR3ChangeMode(PVM pVM, PVMCPU pVCpu, PGMMODE enmGuestMode)
*/
#if HC_ARCH_BITS == 32
/* The nested shadow paging mode for AMD-V does change when running 64 bits guests on 32 bits hosts; typically PAE <-> AMD64 */
- const bool fForceShwEnterExit = ( fIsOldGuestPagingMode64Bits != fIsNewGuestPagingMode64Bits
+ const bool fForceShwEnterExit = ( fIsOldGuestPagingMode64Bits != fIsNewGuestPagingMode64Bits
&& enmShadowMode == PGMMODE_NESTED);
#else
const bool fForceShwEnterExit = false;
diff --git a/src/VBox/VMM/VMMR0/GMMR0.cpp b/src/VBox/VMM/VMMR0/GMMR0.cpp
index 16f60957e..a206c1afb 100644
--- a/src/VBox/VMM/VMMR0/GMMR0.cpp
+++ b/src/VBox/VMM/VMMR0/GMMR0.cpp
@@ -3104,7 +3104,7 @@ GMMR0DECL(int) GMMR0SeedChunk(PVM pVM, VMCPUID idCpu, RTR3PTR pvR3)
* Lock the memory before taking the semaphore.
*/
RTR0MEMOBJ MemObj;
- rc = RTR0MemObjLockUser(&MemObj, pvR3, GMM_CHUNK_SIZE, NIL_RTR0PROCESS);
+ rc = RTR0MemObjLockUser(&MemObj, pvR3, GMM_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
if (RT_SUCCESS(rc))
{
/*
diff --git a/src/VBox/VMM/VMMR0/HWACCMR0.cpp b/src/VBox/VMM/VMMR0/HWACCMR0.cpp
index d8e225327..5a6eae226 100644
--- a/src/VBox/VMM/VMMR0/HWACCMR0.cpp
+++ b/src/VBox/VMM/VMMR0/HWACCMR0.cpp
@@ -723,6 +723,13 @@ static DECLCALLBACK(void) hwaccmR0EnableCpuCallback(RTCPUID idCpu, void *pvUser1
PVM pVM = (PVM)pvUser1; /* can be NULL! */
int *paRc = (int *)pvUser2;
+ if (!HWACCMR0Globals.fGlobalInit)
+ {
+ paRc[idCpu] = VINF_SUCCESS;
+ AssertFailed();
+ return;
+ }
+
paRc[idCpu] = hwaccmR0EnableCpu(pVM, idCpu);
}
@@ -777,6 +784,13 @@ static DECLCALLBACK(void) hwaccmR0DisableCpuCallback(RTCPUID idCpu, void *pvUser
{
int *paRc = (int *)pvUser1;
+ if (!HWACCMR0Globals.fGlobalInit)
+ {
+ paRc[idCpu] = VINF_SUCCESS;
+ AssertFailed();
+ return;
+ }
+
paRc[idCpu] = hwaccmR0DisableCpu(idCpu);
}
@@ -801,8 +815,7 @@ static DECLCALLBACK(void) hwaccmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvU
if (enmEvent == RTPOWEREVENT_SUSPEND)
ASMAtomicWriteBool(&HWACCMR0Globals.fSuspended, true);
- if ( HWACCMR0Globals.enmHwAccmState == HWACCMSTATE_ENABLED
- && HWACCMR0Globals.fGlobalInit)
+ if (HWACCMR0Globals.enmHwAccmState == HWACCMSTATE_ENABLED)
{
int aRc[RTCPUSET_MAX_CPUS];
int rc;
@@ -811,13 +824,17 @@ static DECLCALLBACK(void) hwaccmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvU
memset(aRc, 0, sizeof(aRc));
if (enmEvent == RTPOWEREVENT_SUSPEND)
{
- /* Turn off VT-x or AMD-V on all CPUs. */
- rc = RTMpOnAll(hwaccmR0DisableCpuCallback, aRc, NULL);
- Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
+ if (HWACCMR0Globals.fGlobalInit)
+ {
+ /* Turn off VT-x or AMD-V on all CPUs. */
+ rc = RTMpOnAll(hwaccmR0DisableCpuCallback, aRc, NULL);
+ Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
+ }
+ /* else nothing to do here for the local init case */
}
else
{
- /* Reinit the CPUs from scratch as the suspend state has messed with the MSRs. */
+ /* Reinit the CPUs from scratch as the suspend state might have messed with the MSRs. (lousy BIOSes as usual) */
rc = RTMpOnAll(HWACCMR0InitCPU, (void *)((HWACCMR0Globals.vmx.fSupported) ? X86_CPUID_VENDOR_INTEL_EBX : X86_CPUID_VENDOR_AMD_EBX), aRc);
Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
@@ -828,9 +845,13 @@ static DECLCALLBACK(void) hwaccmR0PowerCallback(RTPOWEREVENT enmEvent, void *pvU
SUPR0Printf("hwaccmR0PowerCallback HWACCMR0InitCPU failed with %d\n", rc);
#endif
- /* Turn VT-x or AMD-V back on on all CPUs. */
- rc = RTMpOnAll(hwaccmR0EnableCpuCallback, NULL, aRc);
- Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
+ if (HWACCMR0Globals.fGlobalInit)
+ {
+ /* Turn VT-x or AMD-V back on on all CPUs. */
+ rc = RTMpOnAll(hwaccmR0EnableCpuCallback, NULL, aRc);
+ Assert(RT_SUCCESS(rc) || rc == VERR_NOT_SUPPORTED);
+ }
+ /* else nothing to do here for the local init case */
}
}
if (enmEvent == RTPOWEREVENT_RESUME)
diff --git a/src/VBox/VMM/VMMR0/HWSVMR0.cpp b/src/VBox/VMM/VMMR0/HWSVMR0.cpp
index acd54e780..8f72f0bec 100644
--- a/src/VBox/VMM/VMMR0/HWSVMR0.cpp
+++ b/src/VBox/VMM/VMMR0/HWSVMR0.cpp
@@ -1094,7 +1094,7 @@ ResumeExecution:
int rc = PDMApicGetTPR(pVCpu, &u8LastTPR, &fPending);
AssertRC(rc);
- if (pVM->hwaccm.s.svm.fTPRPatchingActive)
+ if (pVM->hwaccm.s.fTPRPatchingActive)
{
/* Our patch code uses LSTAR for TPR caching. */
pCtx->msrLSTAR = u8LastTPR;
@@ -1518,7 +1518,7 @@ ResumeExecution:
/* Sync back the TPR if it was changed. */
if (fSyncTPR)
{
- if (pVM->hwaccm.s.svm.fTPRPatchingActive)
+ if (pVM->hwaccm.s.fTPRPatchingActive)
{
if ((pCtx->msrLSTAR & 0xff) != u8LastTPR)
{
@@ -1654,7 +1654,7 @@ ResumeExecution:
&& !(errCode & X86_TRAP_PF_P) /* not present */
&& CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0
&& !CPUMIsGuestInLongModeEx(pCtx)
- && pVM->hwaccm.s.svm.cPatches < RT_ELEMENTS(pVM->hwaccm.s.svm.aPatches))
+ && pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches))
{
RTGCPHYS GCPhysApicBase, GCPhys;
PDMApicGetBase(pVM, &GCPhysApicBase); /* @todo cache this */
@@ -1665,7 +1665,7 @@ ResumeExecution:
&& GCPhys == GCPhysApicBase)
{
/* Only attempt to patch the instruction once. */
- PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.svm.PatchTree, (AVLOU32KEY)pCtx->eip);
+ PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
if (!pPatch)
{
rc = VINF_EM_HWACCM_PATCH_TPR_INSTR;
@@ -1822,7 +1822,7 @@ ResumeExecution:
&& !(errCode & X86_TRAP_PF_P) /* not present */
&& CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0
&& !CPUMIsGuestInLongModeEx(pCtx)
- && pVM->hwaccm.s.svm.cPatches < RT_ELEMENTS(pVM->hwaccm.s.svm.aPatches))
+ && pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches))
{
RTGCPHYS GCPhysApicBase;
PDMApicGetBase(pVM, &GCPhysApicBase); /* @todo cache this */
@@ -1831,7 +1831,7 @@ ResumeExecution:
if (uFaultAddress == GCPhysApicBase + 0x80)
{
/* Only attempt to patch the instruction once. */
- PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.svm.PatchTree, (AVLOU32KEY)pCtx->eip);
+ PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
if (!pPatch)
{
rc = VINF_EM_HWACCM_PATCH_TPR_INSTR;
@@ -2379,7 +2379,7 @@ ResumeExecution:
uint32_t cbSize;
/* When an interrupt is pending, we'll let MSR_K8_LSTAR writes fault in our TPR patch code. */
- if ( pVM->hwaccm.s.svm.fTPRPatchingActive
+ if ( pVM->hwaccm.s.fTPRPatchingActive
&& pCtx->ecx == MSR_K8_LSTAR
&& pVMCB->ctrl.u64ExitInfo1 == 1 /* wrmsr */)
{
@@ -2528,7 +2528,7 @@ static int svmR0EmulateTprVMMCall(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
bool fPending;
uint8_t u8Tpr;
- PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.svm.PatchTree, (AVLOU32KEY)pCtx->eip);
+ PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
if (!pPatch)
break;
diff --git a/src/VBox/VMM/VMMR0/HWVMXR0.cpp b/src/VBox/VMM/VMMR0/HWVMXR0.cpp
index 07d12201d..2dba9f9ed 100644
--- a/src/VBox/VMM/VMMR0/HWVMXR0.cpp
+++ b/src/VBox/VMM/VMMR0/HWVMXR0.cpp
@@ -2138,7 +2138,7 @@ static void vmxR0SetupTLBEPT(PVM pVM, PVMCPU pVCpu)
for (unsigned i=0;i<pVCpu->hwaccm.s.TlbShootdown.cPages;i++)
{
/* aTlbShootdownPages contains physical addresses in this case. */
- vmxR0FlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushContext, pVCpu->hwaccm.s.TlbShootdown.aPages[i]);
+ vmxR0FlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushPage, pVCpu->hwaccm.s.TlbShootdown.aPages[i]);
}
}
pVCpu->hwaccm.s.TlbShootdown.cPages= 0;
@@ -2215,7 +2215,7 @@ static void vmxR0SetupTLBVPID(PVM pVM, PVMCPU pVCpu)
/* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
for (unsigned i=0;i<pVCpu->hwaccm.s.TlbShootdown.cPages;i++)
- vmxR0FlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushContext, pVCpu->hwaccm.s.TlbShootdown.aPages[i]);
+ vmxR0FlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushPage, pVCpu->hwaccm.s.TlbShootdown.aPages[i]);
}
}
pVCpu->hwaccm.s.TlbShootdown.cPages = 0;
@@ -2258,8 +2258,8 @@ VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
RTGCUINTPTR intInfo = 0; /* shut up buggy gcc 4 */
RTGCUINTPTR errCode, instrInfo;
bool fSetupTPRCaching = false;
+ uint64_t u64OldLSTAR = 0;
uint8_t u8LastTPR = 0;
- PHWACCM_CPUINFO pCpu = 0;
RTCCUINTREG uOldEFlags = ~(RTCCUINTREG)0;
unsigned cResume = 0;
#ifdef VBOX_STRICT
@@ -2278,7 +2278,7 @@ VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
/* Check if we need to use TPR shadowing. */
if ( CPUMIsGuestInLongModeEx(pCtx)
- || ( (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
+ || ( ((pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) || pVM->hwaccm.s.fTRPPatchingAllowed)
&& pVM->hwaccm.s.fHasIoApic)
)
{
@@ -2490,6 +2490,26 @@ ResumeExecution:
*/
rc = VMXWriteVMCS(VMX_VMCS_CTRL_TPR_THRESHOLD, (fPending) ? (u8LastTPR >> 4) : 0); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
AssertRC(rc);
+
+ if (pVM->hwaccm.s.fTPRPatchingActive)
+ {
+ Assert(!CPUMIsGuestInLongModeEx(pCtx));
+ /* Our patch code uses LSTAR for TPR caching. */
+ pCtx->msrLSTAR = u8LastTPR;
+
+ if (fPending)
+ {
+ /* A TPR change could activate a pending interrupt, so catch lstar writes. */
+ vmxR0SetMSRPermission(pVCpu, MSR_K8_LSTAR, true, false);
+ }
+ else
+ {
+ /* No interrupts are pending, so we don't need to be explicitly notified.
+ * There are enough world switches for detecting pending interrupts.
+ */
+ vmxR0SetMSRPermission(pVCpu, MSR_K8_LSTAR, true, true);
+ }
+ }
}
#if defined(HWACCM_VTX_WITH_EPT) && defined(LOG_ENABLED)
@@ -2499,6 +2519,8 @@ ResumeExecution:
# endif /* HWACCM_VTX_WITH_VPID */
)
{
+ PHWACCM_CPUINFO pCpu;
+
pCpu = HWACCMR0GetCurrentCpu();
if ( pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
|| pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
@@ -2586,6 +2608,14 @@ ResumeExecution:
pVCpu->hwaccm.s.vmx.VMCSCache.u64TimeSwitch = RTTimeNanoTS();
#endif
+ /* Save the current TPR value in the LSTAR msr so our patches can access it. */
+ if (pVM->hwaccm.s.fTPRPatchingActive)
+ {
+ Assert(pVM->hwaccm.s.fTPRPatchingActive);
+ u64OldLSTAR = ASMRdMsr(MSR_K8_LSTAR);
+ ASMWrMsr(MSR_K8_LSTAR, u8LastTPR);
+ }
+
TMNotifyStartOfExecution(pVCpu);
#ifdef VBOX_WITH_KERNEL_USING_XMM
rc = hwaccmR0VMXStartVMWrapXMM(pVCpu->hwaccm.s.fResumeVM, pCtx, &pVCpu->hwaccm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hwaccm.s.vmx.pfnStartVM);
@@ -2601,6 +2631,15 @@ ResumeExecution:
TMNotifyEndOfExecution(pVCpu);
VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
Assert(!(ASMGetFlags() & X86_EFL_IF));
+
+ /* Restore the host LSTAR msr if the guest could have changed it. */
+ if (pVM->hwaccm.s.fTPRPatchingActive)
+ {
+ Assert(pVM->hwaccm.s.fTPRPatchingActive);
+ pVCpu->hwaccm.s.vmx.pVAPIC[0x80] = pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
+ ASMWrMsr(MSR_K8_LSTAR, u64OldLSTAR);
+ }
+
ASMSetFlags(uOldEFlags);
#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
uOldEFlags = ~(RTCCUINTREG)0;
@@ -2794,6 +2833,35 @@ ResumeExecution:
#endif
Assert(!pVM->hwaccm.s.fNestedPaging);
+#ifdef VBOX_HWACCM_WITH_GUEST_PATCHING
+ /* Shortcut for APIC TPR reads and writes; 32 bits guests only */
+ if ( pVM->hwaccm.s.fTRPPatchingAllowed
+ && pVM->hwaccm.s.pGuestPatchMem
+ && (exitQualification & 0xfff) == 0x080
+ && !(errCode & X86_TRAP_PF_P) /* not present */
+ && CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0
+ && !CPUMIsGuestInLongModeEx(pCtx)
+ && pVM->hwaccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches))
+ {
+ RTGCPHYS GCPhysApicBase, GCPhys;
+ PDMApicGetBase(pVM, &GCPhysApicBase); /* @todo cache this */
+ GCPhysApicBase &= PAGE_BASE_GC_MASK;
+
+ rc = PGMGstGetPage(pVCpu, (RTGCPTR)exitQualification, NULL, &GCPhys);
+ if ( rc == VINF_SUCCESS
+ && GCPhys == GCPhysApicBase)
+ {
+ /* Only attempt to patch the instruction once. */
+ PHWACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
+ if (!pPatch)
+ {
+ rc = VINF_EM_HWACCM_PATCH_TPR_INSTR;
+ break;
+ }
+ }
+ }
+#endif
+
Log2(("Page fault at %RGv error code %x\n", exitQualification, errCode));
/* Exit qualification contains the linear address of the page fault. */
TRPMAssertTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP);
@@ -3425,8 +3493,30 @@ ResumeExecution:
break;
}
- case VMX_EXIT_RDMSR: /* 31 RDMSR. Guest software attempted to execute RDMSR. */
case VMX_EXIT_WRMSR: /* 32 WRMSR. Guest software attempted to execute WRMSR. */
+ /* When an interrupt is pending, we'll let MSR_K8_LSTAR writes fault in our TPR patch code. */
+ if ( pVM->hwaccm.s.fTPRPatchingActive
+ && pCtx->ecx == MSR_K8_LSTAR)
+ {
+ Assert(!CPUMIsGuestInLongModeEx(pCtx));
+ if ((pCtx->eax & 0xff) != u8LastTPR)
+ {
+ Log(("VMX: Faulting MSR_K8_LSTAR write with new TPR value %x\n", pCtx->eax & 0xff));
+
+ /* Our patch code uses LSTAR for TPR caching. */
+ rc = PDMApicSetTPR(pVCpu, pCtx->eax & 0xff);
+ AssertRC(rc);
+ }
+
+ /* Skip the instruction and continue. */
+ pCtx->rip += cbInstr; /* wrmsr = [0F 30] */
+
+ /* Only resume if successful. */
+ STAM_PROFILE_ADV_STOP(&pVCpu->hwaccm.s.StatExit1, x);
+ goto ResumeExecution;
+ }
+ /* no break */
+ case VMX_EXIT_RDMSR: /* 31 RDMSR. Guest software attempted to execute RDMSR. */
{
uint32_t cbSize;
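The blocks added above bracket guest execution with an LSTAR save/restore: while a 32-bit TPR-patched guest runs, MSR_K8_LSTAR is reused as a scratch slot holding the guest TPR, and the WRMSR exit handler folds guest writes back into the virtual APIC. A hedged sketch of the bracket in isolation (hypothetical wrapper; ASMRdMsr/ASMWrMsr from iprt/asm.h, MSR_K8_LSTAR from iprt/x86.h):

    #include <iprt/asm.h>
    #include <iprt/x86.h>

    static uint8_t sketchRunGuestWithTprInLstar(uint8_t u8GuestTpr, void (*pfnRunGuest)(void))
    {
        uint64_t const u64HostLstar = ASMRdMsr(MSR_K8_LSTAR);   /* save the host value */
        ASMWrMsr(MSR_K8_LSTAR, u8GuestTpr);                     /* expose the TPR to the patched guest */
        pfnRunGuest();                                          /* world switch happens here */
        u8GuestTpr = (uint8_t)(ASMRdMsr(MSR_K8_LSTAR) & 0xff);  /* pick up a guest TPR update */
        ASMWrMsr(MSR_K8_LSTAR, u64HostLstar);                   /* restore the host value */
        return u8GuestTpr;
    }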
diff --git a/src/VBox/VMM/testcase/tstAnimate.cpp b/src/VBox/VMM/testcase/tstAnimate.cpp
index b9b4e23ca..7b08ca471 100644
--- a/src/VBox/VMM/testcase/tstAnimate.cpp
+++ b/src/VBox/VMM/testcase/tstAnimate.cpp
@@ -837,7 +837,7 @@ int main(int argc, char **argv)
if (FileRawMem != NIL_RTFILE)
rc = VMR3ReqCall(pVM, VMCPUID_ANY, &pReq1, RT_INDEFINITE_WAIT, (PFNRT)loadMem, 3, pVM, FileRawMem, &offRawMem);
else
- rc = VMR3ReqCall(pVM, VMCPUID_ANY, &pReq1, RT_INDEFINITE_WAIT, (PFNRT)SSMR3Load, 4, pVM, pszSavedState, SSMAFTER_DEBUG_IT, NULL, NULL);
+ rc = VMR3ReqCall(pVM, VMCPUID_ANY, &pReq1, RT_INDEFINITE_WAIT, (PFNRT)SSMR3Load, 5, pVM, pszSavedState, SSMAFTER_DEBUG_IT, (uintptr_t)NULL, (uintptr_t)NULL);
AssertReleaseRC(rc);
rc = pReq1->iStatus;
VMR3ReqFree(pReq1);
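The tstAnimate change corrects the argument count passed to the VMR3ReqCall marshaller (five, not four) and casts the two trailing NULLs to uintptr_t so they travel through the varargs list at pointer width; a bare NULL may be promoted only to int. A hedged illustration with a hypothetical variadic forwarder (not VBox code):

    #include <stdarg.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Reads two pointer-sized slots, like a request marshaller forwarding to a
     * callee that expects two pointer parameters. */
    static void sketchForwardTwoPointers(int cArgs, ...)
    {
        va_list va;
        va_start(va, cArgs);
        void *pv1 = (void *)va_arg(va, uintptr_t);
        void *pv2 = (void *)va_arg(va, uintptr_t);
        va_end(va);
        (void)pv1; (void)pv2;
    }

    /* Caller side, mirroring the fix: sketchForwardTwoPointers(2, (uintptr_t)NULL, (uintptr_t)NULL); */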