/* $Id: VMMInternal.h $ */
/** @file
* VMM - Internal header file.
*/
/*
* Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
#ifndef ___VMMInternal_h
#define ___VMMInternal_h
#include <VBox/cdefs.h>
#include <VBox/sup.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/vmm.h>
#include <VBox/log.h>
#include <iprt/critsect.h>
#if !defined(IN_VMM_R3) && !defined(IN_VMM_R0) && !defined(IN_VMM_RC)
# error "Not in VMM! This is an internal header!"
#endif
/** @defgroup grp_vmm_int Internals
* @ingroup grp_vmm
* @internal
* @{
*/
/** @def VBOX_WITH_RC_RELEASE_LOGGING
* Enables RC release logging. */
#define VBOX_WITH_RC_RELEASE_LOGGING
/** @def VBOX_WITH_R0_LOGGING
* Enables Ring-0 logging (non-release).
*
* Ring-0 logging isn't 100% safe yet (thread id reuse / process exit cleanup),
* so you have to sign up here by adding your defined(DEBUG_<userid>) to the
* #if, or by adding VBOX_WITH_R0_LOGGING to your LocalConfig.kmk.
*/
#if defined(DEBUG_sandervl) || defined(DEBUG_frank) || defined(DEBUG_ramshankar) || defined(DOXYGEN_RUNNING)
# define VBOX_WITH_R0_LOGGING
#endif
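/* Note (assumption, not taken from this file): instead of editing the #if above,
 * the define can usually be enabled from the build configuration, e.g. a line like
 *
 *      VBOX_WITH_R0_LOGGING := 1
 *
 * in LocalConfig.kmk, which is the mechanism the comment above refers to.
 */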
/** @def VBOX_STRICT_VMM_STACK
* Enables VMM stack guard pages to catch stack over- and underruns. */
#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
# define VBOX_STRICT_VMM_STACK
#endif
/**
* Converts a VMM pointer into a VM pointer.
* @returns Pointer to the VM structure the VMM is part of.
* @param pVMM Pointer to VMM instance data.
*/
#define VMM2VM(pVMM) ( (PVM)((char*)pVMM - pVMM->offVM) )
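/* Usage sketch (illustrative only, not part of the original header; pVMM is a
 * hypothetical local pointing into a VM's embedded VMM data):
 *
 *      PVM pVM = VMM2VM(pVMM);
 *      Assert((uintptr_t)pVMM - (uintptr_t)pVM == (uintptr_t)pVMM->offVM);
 */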
/**
* Switcher function, HC to RC.
*
* @param pVM Pointer to the VM.
* @returns Return code indicating the action to take.
*/
typedef DECLASMTYPE(int) FNVMMSWITCHERHC(PVM pVM);
/** Pointer to switcher function. */
typedef FNVMMSWITCHERHC *PFNVMMSWITCHERHC;
/**
* Switcher function, RC to HC.
*
* @param rc VBox status code.
*/
typedef DECLASMTYPE(void) FNVMMSWITCHERRC(int rc);
/** Pointer to switcher function. */
typedef FNVMMSWITCHERRC *PFNVMMSWITCHERRC;
/**
* The ring-0 logger instance wrapper.
*
* We need to be able to find the VM handle from the logger instance, so we wrap
* it in this structure.
*/
typedef struct VMMR0LOGGER
{
/** Pointer to the VM. */
R0PTRTYPE(PVM) pVM;
/** Size of the allocated logger instance (Logger). */
uint32_t cbLogger;
/** Flag indicating whether we've created the ring-0 logger instance yet. */
bool fCreated;
/** Flag indicating whether we've disabled flushing (world switch) or not. */
bool fFlushingDisabled;
/** Flag indicating whether we've registered the instance already. */
bool fRegistered;
bool a8Alignment;
/** The CPU ID. */
VMCPUID idCpu;
#if HC_ARCH_BITS == 64
uint32_t u32Alignment;
#endif
/** The ring-0 logger instance. This extends beyond the size. */
RTLOGGER Logger;
} VMMR0LOGGER;
/** Pointer to a ring-0 logger instance wrapper. */
typedef VMMR0LOGGER *PVMMR0LOGGER;
/**
 * Jump buffer for the setjmp/longjmp-like constructs used to
* quickly 'call' back into Ring-3.
*/
typedef struct VMMR0JMPBUF
{
/** Traditional jmp_buf stuff
* @{ */
#if HC_ARCH_BITS == 32
uint32_t ebx;
uint32_t esi;
uint32_t edi;
uint32_t ebp;
uint32_t esp;
uint32_t eip;
uint32_t eflags;
#endif
#if HC_ARCH_BITS == 64
uint64_t rbx;
# ifdef RT_OS_WINDOWS
uint64_t rsi;
uint64_t rdi;
# endif
uint64_t rbp;
uint64_t r12;
uint64_t r13;
uint64_t r14;
uint64_t r15;
uint64_t rsp;
uint64_t rip;
# ifdef RT_OS_WINDOWS
uint128_t xmm6;
uint128_t xmm7;
uint128_t xmm8;
uint128_t xmm9;
uint128_t xmm10;
uint128_t xmm11;
uint128_t xmm12;
uint128_t xmm13;
uint128_t xmm14;
uint128_t xmm15;
# endif
uint64_t rflags;
#endif
/** @} */
/** Flag that indicates that we've done a ring-3 call. */
bool fInRing3Call;
/** The number of bytes we've saved. */
uint32_t cbSavedStack;
/** Pointer to the buffer used to save the stack.
* This is assumed to be 8KB. */
RTR0PTR pvSavedStack;
/** The ESP value we match against the ESP on resume to make sure the stack wasn't relocated. */
RTHCUINTREG SpCheck;
/** The esp we should resume execution with after the restore. */
RTHCUINTREG SpResume;
/** ESP/RSP at the time of the jump to ring 3. */
RTHCUINTREG SavedEsp;
/** EBP/RBP at the time of the jump to ring 3. */
RTHCUINTREG SavedEbp;
/** Stats: Max amount of stack used. */
uint32_t cbUsedMax;
/** Stats: Average stack usage. (Avg = cbUsedTotal / cUsedTotal) */
uint32_t cbUsedAvg;
/** Stats: Total amount of stack used. */
uint64_t cbUsedTotal;
/** Stats: Number of stack usages. */
uint64_t cUsedTotal;
} VMMR0JMPBUF;
/** Pointer to a ring-0 jump buffer. */
typedef VMMR0JMPBUF *PVMMR0JMPBUF;
/**
* VMM Data (part of VM)
*/
typedef struct VMM
{
/** Offset to the VM structure.
* See VMM2VM(). */
RTINT offVM;
/** @name World Switcher and Related
* @{
*/
/** Size of the core code. */
RTUINT cbCoreCode;
/** Physical address of core code. */
RTHCPHYS HCPhysCoreCode;
/** Pointer to core code ring-3 mapping - contiguous memory.
* At present this only means the context switcher code. */
RTR3PTR pvCoreCodeR3;
/** Pointer to core code ring-0 mapping - contiguous memory.
* At present this only means the context switcher code. */
RTR0PTR pvCoreCodeR0;
/** Pointer to core code guest context mapping. */
RTRCPTR pvCoreCodeRC;
RTRCPTR pRCPadding0; /**< Alignment padding. */
#ifdef VBOX_WITH_NMI
/** The guest context address of the APIC (host) mapping. */
RTRCPTR GCPtrApicBase;
RTRCPTR pRCPadding1; /**< Alignment padding. */
#endif
/** The current switcher.
* This will be set before the VMM is fully initialized. */
VMMSWITCHER enmSwitcher;
/** Array of offsets to the different switchers within the core code. */
uint32_t aoffSwitchers[VMMSWITCHER_MAX];
uint32_t u32Padding2; /**< Alignment padding. */
/** Resume Guest Execution. See CPUMGCResumeGuest(). */
RTRCPTR pfnCPUMRCResumeGuest;
/** Resume Guest Execution in V86 mode. See CPUMGCResumeGuestV86(). */
RTRCPTR pfnCPUMRCResumeGuestV86;
/** Call Trampoline. See vmmGCCallTrampoline(). */
RTRCPTR pfnCallTrampolineRC;
/** Guest to host switcher entry point. */
RCPTRTYPE(PFNVMMSWITCHERRC) pfnRCToHost;
/** Host to guest switcher entry point. */
R0PTRTYPE(PFNVMMSWITCHERHC) pfnR0ToRawMode;
/** @} */
/** @name Logging
* @{
*/
/** Size of the allocated logger instance (pRCLoggerRC/pRCLoggerR3). */
uint32_t cbRCLogger;
/** Pointer to the RC logger instance - RC Ptr.
* This is NULL if logging is disabled. */
RCPTRTYPE(PRTLOGGERRC) pRCLoggerRC;
/** Pointer to the RC logger instance - R3 Ptr.
* This is NULL if logging is disabled. */
R3PTRTYPE(PRTLOGGERRC) pRCLoggerR3;
/** Pointer to the RC release logger instance - R3 Ptr. */
R3PTRTYPE(PRTLOGGERRC) pRCRelLoggerR3;
/** Pointer to the RC release logger instance - RC Ptr. */
RCPTRTYPE(PRTLOGGERRC) pRCRelLoggerRC;
/** Size of the allocated release logger instance (pRCRelLoggerRC/pRCRelLoggerR3).
* This may differ from cbRCLogger. */
uint32_t cbRCRelLogger;
/** Whether log flushing has been disabled or not. */
bool fRCLoggerFlushingDisabled;
bool afAlignment[5]; /**< Alignment padding. */
/** @} */
/** Whether the stack guard pages have been stationed or not. */
bool fStackGuardsStationed;
/** Whether we should use the periodic preemption timers. */
bool fUsePeriodicPreemptionTimers;
/** The EMT yield timer. */
PTMTIMERR3 pYieldTimer;
/** The period to the next timeout when suspended or stopped.
* This is 0 when running. */
uint32_t cYieldResumeMillies;
/** The EMT yield timer interval (milliseconds). */
uint32_t cYieldEveryMillies;
/** The timestamp of the previous yield (in nanoseconds). */
uint64_t u64LastYield;
/** @name EMT Rendezvous
* @{ */
/** Semaphore to wait on upon entering ordered execution. */
R3PTRTYPE(PRTSEMEVENT) pahEvtRendezvousEnterOrdered;
/** Semaphore to wait on upon entering for one-by-one execution. */
RTSEMEVENT hEvtRendezvousEnterOneByOne;
/** Semaphore to wait on upon entering for all-at-once execution. */
RTSEMEVENTMULTI hEvtMulRendezvousEnterAllAtOnce;
/** Semaphore to wait on when done. */
RTSEMEVENTMULTI hEvtMulRendezvousDone;
/** Semaphore the VMMR3EmtRendezvous caller waits on at the end. */
RTSEMEVENT hEvtRendezvousDoneCaller;
/** Callback. */
R3PTRTYPE(PFNVMMEMTRENDEZVOUS) volatile pfnRendezvous;
/** The user argument for the callback. */
RTR3PTR volatile pvRendezvousUser;
/** Flags. */
volatile uint32_t fRendezvousFlags;
/** The number of EMTs that have entered. */
volatile uint32_t cRendezvousEmtsEntered;
/** The number of EMTs that have done their job. */
volatile uint32_t cRendezvousEmtsDone;
/** The number of EMTs that have returned. */
volatile uint32_t cRendezvousEmtsReturned;
/** The status code. */
volatile int32_t i32RendezvousStatus;
/** Spin lock. */
volatile uint32_t u32RendezvousLock;
/** @} */
#if HC_ARCH_BITS == 32
uint32_t u32Alignment; /**< Alignment padding. */
#endif
/** Buffer for storing the standard assertion message for a ring-0 assertion.
* Used for saving the assertion message text for the release log and guru
* meditation dump. */
char szRing0AssertMsg1[512];
/** Buffer for storing the custom message for a ring-0 assertion. */
char szRing0AssertMsg2[256];
/** Number of VMMR0_DO_RUN_GC calls. */
STAMCOUNTER StatRunRC;
/** Statistics for each of the RC/R0 return codes.
* @{ */
STAMCOUNTER StatRZRetNormal;
STAMCOUNTER StatRZRetInterrupt;
STAMCOUNTER StatRZRetInterruptHyper;
STAMCOUNTER StatRZRetGuestTrap;
STAMCOUNTER StatRZRetRingSwitch;
STAMCOUNTER StatRZRetRingSwitchInt;
STAMCOUNTER StatRZRetStaleSelector;
STAMCOUNTER StatRZRetIRETTrap;
STAMCOUNTER StatRZRetEmulate;
STAMCOUNTER StatRZRetIOBlockEmulate;
STAMCOUNTER StatRZRetPatchEmulate;
STAMCOUNTER StatRZRetIORead;
STAMCOUNTER StatRZRetIOWrite;
STAMCOUNTER StatRZRetMMIORead;
STAMCOUNTER StatRZRetMMIOWrite;
STAMCOUNTER StatRZRetMMIOPatchRead;
STAMCOUNTER StatRZRetMMIOPatchWrite;
STAMCOUNTER StatRZRetMMIOReadWrite;
STAMCOUNTER StatRZRetLDTFault;
STAMCOUNTER StatRZRetGDTFault;
STAMCOUNTER StatRZRetIDTFault;
STAMCOUNTER StatRZRetTSSFault;
STAMCOUNTER StatRZRetPDFault;
STAMCOUNTER StatRZRetCSAMTask;
STAMCOUNTER StatRZRetSyncCR3;
STAMCOUNTER StatRZRetMisc;
STAMCOUNTER StatRZRetPatchInt3;
STAMCOUNTER StatRZRetPatchPF;
STAMCOUNTER StatRZRetPatchGP;
STAMCOUNTER StatRZRetPatchIretIRQ;
STAMCOUNTER StatRZRetRescheduleREM;
STAMCOUNTER StatRZRetToR3;
STAMCOUNTER StatRZRetToR3Unknown;
STAMCOUNTER StatRZRetToR3TMVirt;
STAMCOUNTER StatRZRetToR3HandyPages;
STAMCOUNTER StatRZRetToR3PDMQueues;
STAMCOUNTER StatRZRetToR3Rendezvous;
STAMCOUNTER StatRZRetToR3Timer;
STAMCOUNTER StatRZRetToR3DMA;
STAMCOUNTER StatRZRetToR3CritSect;
STAMCOUNTER StatRZRetTimerPending;
STAMCOUNTER StatRZRetInterruptPending;
STAMCOUNTER StatRZRetCallRing3;
STAMCOUNTER StatRZRetPATMDuplicateFn;
STAMCOUNTER StatRZRetPGMChangeMode;
STAMCOUNTER StatRZRetPendingRequest;
STAMCOUNTER StatRZRetPGMFlushPending;
STAMCOUNTER StatRZRetPatchTPR;
STAMCOUNTER StatRZCallPDMCritSectEnter;
STAMCOUNTER StatRZCallPDMLock;
STAMCOUNTER StatRZCallLogFlush;
STAMCOUNTER StatRZCallPGMPoolGrow;
STAMCOUNTER StatRZCallPGMMapChunk;
STAMCOUNTER StatRZCallPGMAllocHandy;
STAMCOUNTER StatRZCallRemReplay;
STAMCOUNTER StatRZCallVMSetError;
STAMCOUNTER StatRZCallVMSetRuntimeError;
STAMCOUNTER StatRZCallPGMLock;
/** @} */
} VMM;
/** Pointer to VMM. */
typedef VMM *PVMM;
/**
* VMMCPU Data (part of VMCPU)
*/
typedef struct VMMCPU
{
/** Offset to the VMCPU structure.
* See VMM2VMCPU(). */
int32_t offVMCPU;
/** The last RC/R0 return code. */
int32_t iLastGZRc;
/** VMM stack, pointer to the top of the stack in R3.
 * The stack is allocated from the hypervisor heap, is page aligned,
 * and is always writable in RC. */
R3PTRTYPE(uint8_t *) pbEMTStackR3;
/** Pointer to the bottom of the stack - needed for doing relocations. */
RCPTRTYPE(uint8_t *) pbEMTStackRC;
/** Pointer to the bottom of the stack - needed for doing relocations. */
RCPTRTYPE(uint8_t *) pbEMTStackBottomRC;
/** Pointer to the R0 logger instance - R3 Ptr.
* This is NULL if logging is disabled. */
R3PTRTYPE(PVMMR0LOGGER) pR0LoggerR3;
/** Pointer to the R0 logger instance - R0 Ptr.
* This is NULL if logging is disabled. */
R0PTRTYPE(PVMMR0LOGGER) pR0LoggerR0;
/** @name Thread-context hooks.
* @{*/
R0PTRTYPE(RTTHREADCTX) hR0ThreadCtx; /**< The ring-0 thread-context hook handle. */
#if HC_ARCH_BITS == 32
uint32_t u32Padding;
#else
uint64_t u64Padding;
#endif
/** @} */
/** @name Rendezvous
* @{ */
/** Whether the EMT is executing a rendezvous right now. For detecting
* attempts at recursive rendezvous. */
bool volatile fInRendezvous;
bool afPadding[HC_ARCH_BITS == 32 ? 3 : 7];
/** @} */
/** @name Raw-mode context tracing data.
* @{ */
SUPDRVTRACERUSRCTX TracerCtx;
/** @} */
/** Alignment padding, making sure u64CallRing3Arg is nicely aligned. */
uint32_t au32Padding1[3];
/** @name Call Ring-3
* Formerly known as host calls.
* @{ */
/** The disable counter. */
uint32_t cCallRing3Disabled;
/** The pending operation. */
VMMCALLRING3 enmCallRing3Operation;
/** The result of the last operation. */
int32_t rcCallRing3;
/** The argument to the operation. */
uint64_t u64CallRing3Arg;
/** The Ring-0 notification callback. */
R0PTRTYPE(PFNVMMR0CALLRING3NOTIFICATION) pfnCallRing3CallbackR0;
/** The Ring-0 notification callback user argument. */
R0PTRTYPE(void *) pvCallRing3CallbackUserR0;
/** The Ring-0 jmp buffer.
* @remarks The size of this type isn't stable in assembly, so don't put
* anything that needs to be accessed from assembly after it. */
VMMR0JMPBUF CallRing3JmpBufR0;
/** @} */
} VMMCPU;
AssertCompileMemberAlignment(VMMCPU, TracerCtx, 8);
/** Pointer to VMMCPU. */
typedef VMMCPU *PVMMCPU;
/**
* The VMMGCEntry() codes.
*/
typedef enum VMMGCOPERATION
{
/** Do GC module init. */
VMMGC_DO_VMMGC_INIT = 1,
/** The first Trap testcase. */
VMMGC_DO_TESTCASE_TRAP_FIRST = 0x0dead000,
/** Trap 0 testcases, uArg selects the variation. */
VMMGC_DO_TESTCASE_TRAP_0 = VMMGC_DO_TESTCASE_TRAP_FIRST,
/** Trap 1 testcases, uArg selects the variation. */
VMMGC_DO_TESTCASE_TRAP_1,
/** Trap 2 testcases, uArg selects the variation. */
VMMGC_DO_TESTCASE_TRAP_2,
/** Trap 3 testcases, uArg selects the variation. */
VMMGC_DO_TESTCASE_TRAP_3,
/** Trap 4 testcases, uArg selects the variation. */
VMMGC_DO_TESTCASE_TRAP_4,
/** Trap 5 testcases, uArg selects the variation. */
VMMGC_DO_TESTCASE_TRAP_5,
/** Trap 6 testcases, uArg selects the variation. */
VMMGC_DO_TESTCASE_TRAP_6,
/** Trap 7 testcases, uArg selects the variation. */
VMMGC_DO_TESTCASE_TRAP_7,
/** Trap 8 testcases, uArg selects the variation. */
VMMGC_DO_TESTCASE_TRAP_8,
/** Trap 9 testcases, uArg selects the variation. */
VMMGC_DO_TESTCASE_TRAP_9,
/** Trap 0a testcases, uArg selects the variation. */
VMMGC_DO_TESTCASE_TRAP_0A,
/** Trap 0b testcases, uArg selects the variation. */
VMMGC_DO_TESTCASE_TRAP_0B,
/** Trap 0c testcases, uArg selects the variation. */
VMMGC_DO_TESTCASE_TRAP_0C,
/** Trap 0d testcases, uArg selects the variation. */
VMMGC_DO_TESTCASE_TRAP_0D,
/** Trap 0e testcases, uArg selects the variation. */
VMMGC_DO_TESTCASE_TRAP_0E,
/** The last trap testcase (exclusive). */
VMMGC_DO_TESTCASE_TRAP_LAST,
/** Testcase for checking interrupt forwarding. */
VMMGC_DO_TESTCASE_HYPER_INTERRUPT,
/** Switching testing and profiling stub. */
VMMGC_DO_TESTCASE_NOP,
/** Testcase for checking interrupt masking. */
VMMGC_DO_TESTCASE_INTERRUPT_MASKING,
/** Switching testing and profiling stub. */
VMMGC_DO_TESTCASE_HM_NOP,
/** The usual 32-bit hack. */
VMMGC_DO_32_BIT_HACK = 0x7fffffff
} VMMGCOPERATION;
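/* Illustrative sketch (hypothetical dispatcher fragment, not from the original
 * source): the trap testcase values form a contiguous range and
 * VMMGC_DO_TESTCASE_TRAP_LAST is exclusive, so they can be range checked:
 *
 *      if (   enmOperation >= VMMGC_DO_TESTCASE_TRAP_FIRST
 *          && enmOperation <  VMMGC_DO_TESTCASE_TRAP_LAST)
 *      {
 *          unsigned iTrap = enmOperation - VMMGC_DO_TESTCASE_TRAP_FIRST; // 0..0xe
 *          // run trap testcase iTrap; uArg selects the variation.
 *      }
 */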
/**
* MSR test result entry.
*/
typedef struct VMMTESTMSRENTRY
{
/** The MSR number, padded out to 64 bits.
 * Set to UINT64_MAX if the MSR is invalid. */
uint64_t uMsr;
/** The register value. */
uint64_t uValue;
} VMMTESTMSRENTRY;
/** Pointer to an MSR test result entry. */
typedef VMMTESTMSRENTRY *PVMMTESTMSRENTRY;
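/* Illustrative sketch (hypothetical consumer code, not from the original source):
 * walking an array of MSR test results and skipping entries flagged invalid.
 *
 *      PVMMTESTMSRENTRY paResults = ...;   // filled in by an MSR test run
 *      uint32_t         cResults  = ...;
 *      uint32_t         i;
 *      for (i = 0; i < cResults; i++)
 *          if (paResults[i].uMsr != UINT64_MAX)
 *              RTPrintf("MSR %#RX64 = %#RX64\n", paResults[i].uMsr, paResults[i].uValue);
 */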
RT_C_DECLS_BEGIN
int vmmInitFormatTypes(void);
void vmmTermFormatTypes(void);
uint32_t vmmGetBuildType(void);
#ifdef IN_RING3
int vmmR3SwitcherInit(PVM pVM);
void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta);
#endif /* IN_RING3 */
#ifdef IN_RING0
/**
* World switcher assembly routine.
* It will call VMMGCEntry().
*
* @returns return code from VMMGCEntry().
* @param pVM Pointer to the VM.
* @param uArg See VMMGCEntry().
* @internal
*/
DECLASM(int) vmmR0WorldSwitch(PVM pVM, unsigned uArg);
/**
* Callback function for vmmR0CallRing3SetJmp.
*
* @returns VBox status code.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
*/
typedef DECLCALLBACK(int) FNVMMR0SETJMP(PVM pVM, PVMCPU pVCpu);
/** Pointer to FNVMMR0SETJMP(). */
typedef FNVMMR0SETJMP *PFNVMMR0SETJMP;
/**
* The setjmp variant used for calling Ring-3.
*
* This differs from the normal setjmp in that it will resume VMMRZCallRing3 if we're
 * in the middle of a ring-3 call. Another difference is the function pointer and
* argument. This has to do with resuming code and the stack frame of the caller.
*
* @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
* @param pJmpBuf The jmp_buf to set.
 * @param pfn The function to be called when not resuming.
 * @param pVM The first argument of that function.
 * @param pVCpu The second argument of that function.
*/
DECLASM(int) vmmR0CallRing3SetJmp(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP pfn, PVM pVM, PVMCPU pVCpu);
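/* Usage sketch (hedged; the pVCpu->vmm.s member path assumes the usual VMCPU
 * embedding of VMMCPU and is not defined in this header): the ring-0 entry code
 * wraps the real worker in this setjmp so the worker can later long-jump back
 * here to have ring-3 service a request.
 *
 *      int rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0,
 *                                    pfnWorker, pVM, pVCpu);
 *      // rc is either pfnWorker's return code or the rc value passed to
 *      // vmmR0CallRing3LongJmp() when jumping out for a ring-3 call.
 */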
/**
* Callback function for vmmR0CallRing3SetJmpEx.
*
* @returns VBox status code.
* @param pvUser The user argument.
*/
typedef DECLCALLBACK(int) FNVMMR0SETJMPEX(void *pvUser);
/** Pointer to FNVMMR0SETJMPEX(). */
typedef FNVMMR0SETJMPEX *PFNVMMR0SETJMPEX;
/**
* Same as vmmR0CallRing3SetJmp except for the function signature.
*
* @returns VINF_SUCCESS on success or whatever is passed to vmmR0CallRing3LongJmp.
* @param pJmpBuf The jmp_buf to set.
 * @param pfn The function to be called when not resuming.
* @param pvUser The argument of that function.
*/
DECLASM(int) vmmR0CallRing3SetJmpEx(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMPEX pfn, void *pvUser);
/**
* Worker for VMMRZCallRing3.
* This will save the stack and registers.
*
* @returns rc.
* @param pJmpBuf Pointer to the jump buffer.
* @param rc The return code.
*/
DECLASM(int) vmmR0CallRing3LongJmp(PVMMR0JMPBUF pJmpBuf, int rc);
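/* Usage sketch (hedged, heavily simplified; not the actual VMMRZCallRing3 body,
 * and both the pVCpu->vmm.s member path and the VINF_VMM_CALL_HOST status are
 * assumptions): a ring-0 caller records the pending operation in VMMCPU and then
 * long-jumps back to the frame set up by vmmR0CallRing3SetJmp.
 *
 *      pVCpu->vmm.s.enmCallRing3Operation = enmOperation;
 *      pVCpu->vmm.s.u64CallRing3Arg       = u64Arg;
 *      int rc = vmmR0CallRing3LongJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, VINF_VMM_CALL_HOST);
 *      // Execution resumes here after ring-3 has serviced the request and
 *      // re-entered ring-0; rcCallRing3 then holds the operation's result.
 */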
/**
* Internal R0 logger worker: Logger wrapper.
*/
VMMR0DECL(void) vmmR0LoggerWrapper(const char *pszFormat, ...);
/**
* Internal R0 logger worker: Flush logger.
*
* @param pLogger The logger instance to flush.
* @remark This function must be exported!
*/
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger);
/**
* Internal R0 logger worker: Custom prefix.
*
* @returns Number of chars written.
*
* @param pLogger The logger instance.
* @param pchBuf The output buffer.
* @param cchBuf The size of the buffer.
* @param pvUser User argument (ignored).
*/
VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser);
# ifdef VBOX_WITH_TRIPLE_FAULT_HACK
int vmmR0TripleFaultHackInit(void);
void vmmR0TripleFaultHackTerm(void);
# endif
#endif /* IN_RING0 */
#ifdef IN_RC
/**
* Internal GC logger worker: Logger wrapper.
*/
VMMRCDECL(void) vmmGCLoggerWrapper(const char *pszFormat, ...);
/**
* Internal GC release logger worker: Logger wrapper.
*/
VMMRCDECL(void) vmmGCRelLoggerWrapper(const char *pszFormat, ...);
/**
* Internal GC logger worker: Flush logger.
*
* @returns VINF_SUCCESS.
* @param pLogger The logger instance to flush.
* @remark This function must be exported!
*/
VMMRCDECL(int) vmmGCLoggerFlush(PRTLOGGERRC pLogger);
/** @name Trap testcases and related labels.
* @{ */
DECLASM(void) vmmGCEnableWP(void);
DECLASM(void) vmmGCDisableWP(void);
DECLASM(int) vmmGCTestTrap3(void);
DECLASM(int) vmmGCTestTrap8(void);
DECLASM(int) vmmGCTestTrap0d(void);
DECLASM(int) vmmGCTestTrap0e(void);
DECLASM(int) vmmGCTestTrap0e_FaultEIP(void); /**< a label */
DECLASM(int) vmmGCTestTrap0e_ResumeEIP(void); /**< a label */
/** @} */
#endif /* IN_RC */
RT_C_DECLS_END
/** @} */
#endif