/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Debugger entry for both master and slave CPUs
 */

#if defined(__lint)
#include <sys/types.h>
#endif

#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#include <sys/uadmin.h>
#include <sys/psw.h>

#ifdef _ASM

#include <kdi_assym.h>
#include <assym.h>

/* clobbers %edx, %ecx, returns addr in %eax, cpu id in %ebx */
#define	GET_CPUSAVE_ADDR \
	movl	%gs:CPU_ID, %ebx;		\
	movl	%ebx, %eax;			\
	movl	$KRS_SIZE, %ecx;		\
	mull	%ecx;				\
	movl	$kdi_cpusave, %edx;		\
	/*CSTYLED*/				\
	addl	(%edx), %eax
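
/*
 * In rough C terms (a sketch, not generated code; KRS_SIZE and the
 * kdi_cpusave base pointer come from kdi_assym.h):
 *
 *	addr = (caddr_t)kdi_cpusave + CPU->cpu_id * KRS_SIZE;
 *
 * The mull leaves the low 32 bits of the product in %eax and clobbers
 * %edx with the high bits, hence the clobber list above.
 */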

/*
 * Save copies of the IDT and GDT descriptors.  Note that we only save the IDT
 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
 * debugger through the trap handler.  We don't want to clobber the saved IDT
 * in the process, as we'd end up resuming the world on our IDT.
 */
#define	SAVE_IDTGDT				\
	movl	%gs:CPU_IDT, %edx;		\
	cmpl	$kdi_idt, %edx;			\
	je	1f;				\
	movl	%edx, KRS_IDT(%eax);		\
	movl	%gs:CPU_GDT, %edx;		\
	movl	%edx, KRS_GDT(%eax);		\
1:
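
/*
 * Approximately, in C (the field names here are illustrative stand-ins
 * for the KRS_/CPU_ assym offsets):
 *
 *	if (CPU->cpu_idt != kdi_idt) {
 *		save->krs_idt = CPU->cpu_idt;
 *		save->krs_gdt = CPU->cpu_gdt;
 *	}
 */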

/*
 * Given the address of the current CPU's cpusave area in %edi, the following
 * macro restores the debugging state to said CPU.  Restored state includes
 * the debug registers from the global %dr variables, and debugging MSRs from
 * the CPU save area.  This code would be in a separate routine, but for the
 * fact that some of the MSRs are jump-sensitive.  As such, we need to minimize
 * the number of jumps taken subsequent to the update of said MSRs.  We can
 * remove one jump (the ret) by using a macro instead of a function for the
 * debugging state restoration code.
 *
 * Takes the cpusave area in %edi as a parameter, clobbers %eax-%edx
 */	
#define	KDI_RESTORE_DEBUGGING_STATE \
	leal	kdi_drreg, %ebx;				\
								\
	pushl	DR_CTL(%ebx);					\
	pushl	$7;						\
	call	kdi_dreg_set;					\
	addl	$8, %esp;					\
								\
	pushl	$KDIREG_DRSTAT_RESERVED;				\
	pushl	$6;						\
	call	kdi_dreg_set;					\
	addl	$8, %esp;					\
								\
	pushl	DRADDR_OFF(0)(%ebx);				\
	pushl	$0;						\
	call	kdi_dreg_set;					\
	addl	$8, %esp;					\
								\
	pushl	DRADDR_OFF(1)(%ebx);				\
	pushl	$1;						\
	call	kdi_dreg_set;			 		\
	addl	$8, %esp;					\
								\
	pushl	DRADDR_OFF(2)(%ebx);				\
	pushl	$2;						\
	call	kdi_dreg_set;					\
	addl	$8, %esp;					\
								\
	pushl	DRADDR_OFF(3)(%ebx);				\
	pushl	$3;						\
	call	kdi_dreg_set;					\
	addl	$8, %esp;					\
								\
	/*							\
	 * Write any requested MSRs.				\
	 */							\
	movl	KRS_MSR(%edi), %ebx;				\
	cmpl	$0, %ebx;					\
	je	3f;						\
1:								\
	movl	MSR_NUM(%ebx), %ecx;				\
	cmpl	$0, %ecx;					\
	je	3f;						\
								\
	movl	MSR_TYPE(%ebx), %edx;				\
	cmpl	$KDI_MSR_WRITE, %edx;				\
	jne	2f;						\
								\
	movl	MSR_VALP(%ebx), %edx;				\
	movl	0(%edx), %eax;					\
	movl	4(%edx), %edx;					\
	wrmsr;							\
2:								\
	addl	$MSR_SIZE, %ebx;				\
	jmp	1b;						\
3:								\
	/*							\
	 * We must not branch after re-enabling LBR.  If	\
	 * kdi_msr_wrexit_msr is set, it contains the number	\
	 * of the MSR that controls LBR.  kdi_msr_wrexit_valp	\
	 * contains the value that is to be written to enable	\
	 * LBR.							\
	 */							\
	movl	kdi_msr_wrexit_msr, %ecx;			\
	cmpl	$0, %ecx;					\
	je	1f;						\
								\
	movl	kdi_msr_wrexit_valp, %edx;			\
	movl	0(%edx), %eax;					\
	movl	4(%edx), %edx;					\
								\
	wrmsr;							\
1:
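
/*
 * The MSR portion above corresponds roughly to this C (a sketch; the
 * field names are stand-ins for the MSR_* assym offsets):
 *
 *	if ((msr = save->krs_msr) != NULL) {
 *		for (; msr->msr_num != 0; msr++) {
 *			if (msr->msr_type == KDI_MSR_WRITE)
 *				wrmsr(msr->msr_num, *msr->msr_valp);
 *		}
 *	}
 *	if (kdi_msr_wrexit_msr != 0)
 *		wrmsr(kdi_msr_wrexit_msr, *kdi_msr_wrexit_valp);
 *
 * with the crucial property that no branch is taken after the final
 * wrmsr re-enables LBR.
 */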

#define	KDI_RESTORE_REGS() \
	/* Discard savfp and savpc */ \
	addl	$8, %esp; \
	popl	%ss; \
	popl	%gs; \
	popl	%fs; \
	popl	%es; \
	popl	%ds; \
	popal; \
	/* Discard trapno and err */ \
	addl	$8, %esp
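
/*
 * After KDI_RESTORE_REGS(), all that remains of our frame is the
 * hardware trap frame (%eip, %cs, EFLAGS): the stack looks the way it
 * did immediately after vectoring through the IDT.
 */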

/*
 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 * The following macros manage the buffer.
 */

/* Advance the ring buffer */
#define	ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
	movl	KRS_CURCRUMBIDX(cpusave), tmp1;	\
	cmpl	$[KDI_NCRUMBS - 1], tmp1;	\
	jge	1f;				\
	/* Advance the pointer and index */	\
	addl	$1, tmp1;			\
	movl	tmp1, KRS_CURCRUMBIDX(cpusave);	\
	movl	KRS_CURCRUMB(cpusave), tmp1;	\
	addl	$KRM_SIZE, tmp1;		\
	jmp	2f;				\
1:	/* Reset the pointer and index */	\
	movl	$0, KRS_CURCRUMBIDX(cpusave);	\
	leal	KRS_CRUMBS(cpusave), tmp1;	\
2:	movl	tmp1, KRS_CURCRUMB(cpusave);	\
	/* Clear the new crumb */		\
	movl	$KDI_NCRUMBS, tmp2;		\
3:	movl	$0, -4(tmp1, tmp2, 4);		\
	decl	tmp2;				\
	jnz	3b
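
/*
 * In approximate C (a sketch; field names stand in for the KRS_/KRM_
 * assym offsets):
 *
 *	if (save->krs_curcrumbidx >= KDI_NCRUMBS - 1) {
 *		save->krs_curcrumbidx = 0;		/* wrap around *​/
 *		crumb = save->krs_crumbs;
 *	} else {
 *		save->krs_curcrumbidx++;
 *		crumb = (caddr_t)save->krs_curcrumb + KRM_SIZE;
 *	}
 *	save->krs_curcrumb = crumb;
 *	for (i = 0; i < KDI_NCRUMBS; i++)	/* zero the new crumb *​/
 *		((uint32_t *)crumb)[i] = 0;
 */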

/* Set a value in the current breadcrumb buffer */
#define	ADD_CRUMB(cpusave, offset, value, tmp) \
	movl	KRS_CURCRUMB(cpusave), tmp;	\
	movl	value, offset(tmp)

#endif	/* _ASM */

/*
 * The main entry point for master CPUs.  It also serves as the trap handler
 * for all traps and interrupts taken during single-step.
 */
#if defined(__lint)
void
kdi_cmnint(void)
{
}
#else	/* __lint */

 	/* XXX implement me */
	ENTRY_NP(kdi_nmiint)
	clr	%ecx
	movl	(%ecx), %ecx
	SET_SIZE(kdi_nmiint)

	ENTRY_NP(kdi_cmnint)
	ALTENTRY(kdi_master_entry)
	
	/* Save all registers and selectors */

	pushal
	pushl	%ds
	pushl	%es
	pushl	%fs
	pushl	%gs
	pushl	%ss

	subl	$8, %esp
	movl	%ebp, REG_OFF(KDIREG_SAVFP)(%esp)
	movl	REG_OFF(KDIREG_EIP)(%esp), %eax
	movl	%eax, REG_OFF(KDIREG_SAVPC)(%esp)
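
	/*
	 * The frame we've just built matches the KDIREG_* layout; from
	 * low to high addresses, roughly:
	 *
	 *	savfp savpc | %ss %gs %fs %es %ds |
	 *	%edi %esi %ebp %esp %ebx %edx %ecx %eax (pushal) |
	 *	trapno err %eip %cs eflags | interrupted thread's stack
	 */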

	/*
	 * If the kernel has started using its own selectors, we should too.
	 * Update our saved selectors if they haven't been updated already.
	 */
	movw	%cs, %ax
	cmpw	$KCS_SEL, %ax
	jne	1f			/* The kernel hasn't switched yet */

	movw	$KDS_SEL, %ax
	movw	%ax, %ds
	movw	kdi_cs, %ax
	cmpw	$KCS_SEL, %ax
	je	1f			/* We already switched */

	/*
	 * The kernel switched, but we haven't.  Update our saved selectors
	 * to match the kernel's copies for use below.
	 */
	movl	$KCS_SEL, kdi_cs
	movl	$KDS_SEL, kdi_ds
	movl	$KFS_SEL, kdi_fs
	movl	$KGS_SEL, kdi_gs

1:
	/*
	 * Set the selectors to a known state.  If we come in from kmdb's IDT,
	 * we'll be on boot's %cs.  This will cause GET_CPUSAVE_ADDR to return
	 * CPU 0's cpusave, regardless of which CPU we're on, and chaos will
	 * ensue.  So, if we've got $KCS_SEL in kdi_cs, switch to it.  The other
	 * selectors are restored normally.
	 */
	movw	%cs:kdi_cs, %ax
	cmpw	$KCS_SEL, %ax
	jne	1f
	ljmp	$KCS_SEL, $1f
1:
	movw	%cs:kdi_ds, %ds
	movw	kdi_ds, %es
	movw	kdi_fs, %fs
	movw	kdi_gs, %gs
	movw	kdi_ds, %ss

	/*
	 * This has to come after we set %gs to the kernel descriptor.  Since
	 * we've hijacked some IDT entries used in user-space such as the
	 * breakpoint handler, we can enter kdi_cmnint() with GDT_LWPGS used
	 * in %gs.  On the hypervisor, CLI() needs GDT_GS to access the machcpu.
	 */
	CLI(%eax)

#if defined(__xpv)
	/*
	 * Clear saved_upcall_mask in unused byte of cs slot on stack.
	 * It can only confuse things.
	 */
	movb    $0, REG_OFF(KDIREG_CS)+2(%esp)

#endif

	GET_CPUSAVE_ADDR		/* %eax = cpusave, %ebx = CPU ID */

	ADVANCE_CRUMB_POINTER(%eax, %ecx, %edx)

	ADD_CRUMB(%eax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %edx)

	movl	REG_OFF(KDIREG_EIP)(%esp), %ecx
	ADD_CRUMB(%eax, KRM_PC, %ecx, %edx)
	ADD_CRUMB(%eax, KRM_SP, %esp, %edx)
	movl	REG_OFF(KDIREG_TRAPNO)(%esp), %ecx
	ADD_CRUMB(%eax, KRM_TRAPNO, %ecx, %edx)

	movl	%esp, %ebp
	pushl	%eax

	/*
	 * Were we in the debugger when we took the trap (i.e. was %esp in one
	 * of the debugger's memory ranges)?
	 */
	leal	kdi_memranges, %ecx
	movl	kdi_nmemranges, %edx
1:	cmpl	MR_BASE(%ecx), %esp
	jl	2f		/* below this range -- try the next one */
	cmpl	MR_LIM(%ecx), %esp
	jg	2f		/* above this range -- try the next one */
	jmp	3f		/* matched within this range */

2:	decl	%edx
	jz	kdi_save_common_state	/* %esp not within debugger memory */
	addl	$MR_SIZE, %ecx
	jmp	1b

3:	/*
	 * %esp was within one of the debugger's memory ranges.  This should
	 * only happen when we take a trap while running in the debugger.
	 * kmdb_dpi_handle_fault will determine whether or not it was an
	 * expected trap, and will take the appropriate action.
	 */

	pushl	%ebx			/* cpuid */
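
	/*
	 * Reconstruct the %esp in effect when the trap was taken: the
	 * pushal-saved %esp points at the trap-order frame (trapno), so
	 * stepping over trapno, err, %eip, %cs, and EFLAGS yields the
	 * pre-trap stack pointer, which we pass to the fault handler.
	 */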

	movl	REG_OFF(KDIREG_ESP)(%ebp), %ecx
	addl	$REG_OFF(KDIREG_EFLAGS - KDIREG_EAX), %ecx
	pushl	%ecx

	pushl	REG_OFF(KDIREG_EIP)(%ebp)
	pushl	REG_OFF(KDIREG_TRAPNO)(%ebp)

	call	kdi_dvec_handle_fault
	addl	$16, %esp

	/*
	 * If we're here, we ran into a debugger problem, and the user
	 * elected to solve it by having the debugger debug itself.  The
	 * state we're about to save is that of the debugger when it took
	 * the fault.
	 */

	jmp	kdi_save_common_state

	SET_SIZE(kdi_master_entry)
	SET_SIZE(kdi_cmnint)

#endif	/* __lint */

/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time.  The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do.  This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by the
 * master.
 */

#if defined(__lint)
char kdi_slave_entry_patch;

void
kdi_slave_entry(void)
{
}
#else /* __lint */
	.globl	kdi_slave_entry_patch;

	ENTRY_NP(kdi_slave_entry)

	/* kdi_msr_add_clrentry knows where this is */
kdi_slave_entry_patch:
	KDI_MSR_PATCH;

	/*
	 * Cross calls are implemented as function calls, so our stack
	 * currently looks like one you'd get from a zero-argument function
	 * call. There's an %eip at %esp, and that's about it.  We want to
	 * make it look like the master CPU's stack.  By doing this, we can
	 * use the same resume code for both master and slave.  We need to
	 * make our stack look like a `struct regs' before we jump into the
	 * common save routine.
	 */

	pushl	%cs
	pushfl
	pushl	$-1		/* A phony trap error code */
	pushl	$-1		/* A phony trap number */
	pushal
	pushl	%ds
	pushl	%es
	pushl	%fs
	pushl	%gs
	pushl	%ss

	subl	$8, %esp
	movl	%ebp, REG_OFF(KDIREG_SAVFP)(%esp)
	movl	REG_OFF(KDIREG_EIP)(%esp), %eax
	movl	%eax, REG_OFF(KDIREG_SAVPC)(%esp)

	/*
	 * Swap our saved EFLAGS and %eip.  Each is where the other
	 * should be.
	 */
	movl	REG_OFF(KDIREG_EFLAGS)(%esp), %eax
	xchgl	REG_OFF(KDIREG_EIP)(%esp), %eax
	movl	%eax, REG_OFF(KDIREG_EFLAGS)(%esp)

	/*
	 * Our stack now matches struct regs, and is irettable.  We don't need
	 * to do anything special for the hypervisor w.r.t. PS_IE since we
	 * iret twice anyway; the second iret back to the hypervisor
	 * will re-enable interrupts.
	 */
	CLI(%eax)

	/* Load sanitized segment selectors */
	movw	kdi_ds, %ds
	movw	kdi_ds, %es
	movw	kdi_fs, %fs
	movw	kdi_gs, %gs
	movw	kdi_ds, %ss

	GET_CPUSAVE_ADDR	/* %eax = cpusave, %ebx = CPU ID */

	ADVANCE_CRUMB_POINTER(%eax, %ecx, %edx)

	ADD_CRUMB(%eax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %edx)

	movl	REG_OFF(KDIREG_EIP)(%esp), %ecx
	ADD_CRUMB(%eax, KRM_PC, %ecx, %edx)

	pushl	%eax
	jmp	kdi_save_common_state

	SET_SIZE(kdi_slave_entry)

#endif	/* __lint */

/*
 * The state of the world:
 *
 * The stack has a complete set of saved registers and segment
 * selectors, arranged in `struct regs' order (or vice-versa), up to
 * and including EFLAGS.  It also has a pointer to our cpusave area.
 *
 * We need to save a pointer to these saved registers.  We also want
 * to adjust the saved %esp - it should point just beyond the saved
 * registers to the last frame of the thread we interrupted.  Finally,
 * we want to clear out bits 16-31 of the saved selectors, as the
 * selector pushls don't automatically clear them.
 */
#if !defined(__lint)

	ENTRY_NP(kdi_save_common_state)

	popl	%eax			/* the cpusave area */

	movl	%esp, KRS_GREGS(%eax)	/* save ptr to current saved regs */

	addl	$REG_OFF(KDIREG_EFLAGS - KDIREG_EAX), REG_OFF(KDIREG_ESP)(%esp)

	andl	$0xffff, REG_OFF(KDIREG_SS)(%esp)
	andl	$0xffff, REG_OFF(KDIREG_GS)(%esp)
	andl	$0xffff, REG_OFF(KDIREG_FS)(%esp)
	andl	$0xffff, REG_OFF(KDIREG_ES)(%esp)
	andl	$0xffff, REG_OFF(KDIREG_DS)(%esp)

	pushl	%eax
	call	kdi_trap_pass
	cmpl	$1, %eax
	je	kdi_pass_to_kernel
	popl	%eax

	SAVE_IDTGDT

#if !defined(__xpv)
	/* Save off %cr0, and clear write protect */
	movl	%cr0, %ecx
	movl	%ecx, KRS_CR0(%eax)
	andl	$_BITNOT(CR0_WP), %ecx
	movl	%ecx, %cr0
#endif
	pushl	%edi
	movl	%eax, %edi

	/* Save the debug registers and disable any active watchpoints */
	pushl	$7
	call	kdi_dreg_get
	addl	$4, %esp

	movl	%eax, KRS_DRCTL(%edi)
	andl	$_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %eax

	pushl	%eax
	pushl	$7
	call	kdi_dreg_set
	addl	$8, %esp

	pushl	$6
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DRSTAT(%edi)

	pushl	$0
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(0)(%edi)

	pushl	$1
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(1)(%edi)

	pushl	$2
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(2)(%edi)

	pushl	$3
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(3)(%edi)

	movl	%edi, %eax
	popl	%edi

	/*
	 * Save any requested MSRs.
	 */
	movl	KRS_MSR(%eax), %ecx
	cmpl	$0, %ecx
	je	no_msr

	pushl	%eax		/* rdmsr clobbers %eax */
	movl	%ecx, %ebx
1:
	movl	MSR_NUM(%ebx), %ecx
	cmpl	$0, %ecx
	je	msr_done

	movl	MSR_TYPE(%ebx), %edx
	cmpl	$KDI_MSR_READ, %edx
	jne	msr_next

	rdmsr			/* addr in %ecx, value into %edx:%eax */
	movl	%eax, MSR_VAL(%ebx)
	movl	%edx, _CONST(MSR_VAL + 4)(%ebx)

msr_next:
	addl	$MSR_SIZE, %ebx
	jmp	1b

msr_done:
	popl	%eax

no_msr:
	clr	%ebp		/* stack traces should end here */

	pushl	%eax
	call	kdi_debugger_entry
	popl	%eax
 
	jmp	kdi_resume

	SET_SIZE(kdi_save_common_state)

#endif	/* !__lint */

/*
 * Resume the world.  The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */
#if defined(__lint)
void
kdi_resume(void)
{
}
#else	/* __lint */

	/* cpusave in %eax */
	ENTRY_NP(kdi_resume)

	/*
	 * Send this CPU back into the world
	 */

#if !defined(__xpv)
	movl	KRS_CR0(%eax), %edx
	movl	%edx, %cr0
#endif

	pushl	%edi
	movl	%eax, %edi

	KDI_RESTORE_DEBUGGING_STATE

	popl	%edi

#if defined(__xpv)
	/*
	 * kmdb might have set PS_T in the saved eflags, so we can't use
	 * intr_restore, since that restores all of eflags; instead, just
	 * pick up PS_IE from the saved eflags.
	 */
	movl	REG_OFF(KDIREG_EFLAGS)(%esp), %eax
	testl	$PS_IE, %eax
	jz	2f
	STI
2:
#endif

	addl	$8, %esp	/* Discard savfp and savpc */

	popl	%ss
	popl	%gs
	popl	%fs
	popl	%es
	popl	%ds
	popal

	addl	$8, %esp	/* Discard TRAPNO and ERROR */

	IRET

	SET_SIZE(kdi_resume)
#endif	/* __lint */

#if !defined(__lint)

	ENTRY_NP(kdi_pass_to_kernel)

	/* pop cpusave, leaving %esp pointing to saved regs */
	popl	%eax

	movl	$KDI_CPU_STATE_NONE, KRS_CPU_STATE(%eax)

	/*
	 * Find the trap and vector off the right kernel handler.  The trap
	 * handler will expect the stack to be in trap order, with %eip being
	 * the last entry, so we'll need to restore all our regs.
	 *
	 * We're hard-coding the three cases where KMDB has installed permanent
	 * handlers, since after we restore, we don't have registers to work
	 * with; we can't use a global since other CPUs can easily pass through
	 * here at the same time.
	 *
	 * Note that we handle T_DBGENTR since userspace might have tried it.
	 */
	movl	REG_OFF(KDIREG_TRAPNO)(%esp), %eax
	cmpl	$T_SGLSTP, %eax
	je	kpass_dbgtrap
	cmpl	$T_BPTFLT, %eax
	je	kpass_brktrap
	cmpl	$T_DBGENTR, %eax
	je	kpass_invaltrap
	/*
	 * Hmm, unknown handler.  Somebody forgot to update this when they
	 * added a new trap interposition... try to drop back into kmdb.
	 */
	int	$T_DBGENTR
	
kpass_dbgtrap:
	KDI_RESTORE_REGS()
	ljmp	$KCS_SEL, $1f
1:	jmp	%cs:dbgtrap
	/*NOTREACHED*/

kpass_brktrap:
	KDI_RESTORE_REGS()
	ljmp	$KCS_SEL, $2f
2:	jmp	%cs:brktrap
	/*NOTREACHED*/

kpass_invaltrap:
	KDI_RESTORE_REGS()
	ljmp	$KCS_SEL, $3f
3:	jmp	%cs:invaltrap
	/*NOTREACHED*/

	SET_SIZE(kdi_pass_to_kernel)

	/*
	 * A minimal version of mdboot(), to be used by the master CPU only.
	 */
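	/*
	 * In C terms this amounts to (*psm_shutdownf)(A_SHUTDOWN, AD_BOOT),
	 * followed by HYPERVISOR_shutdown(SHUTDOWN_reboot) under the
	 * hypervisor or by reset() on bare metal.
	 */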
	ENTRY_NP(kdi_reboot)

	pushl	$AD_BOOT
	pushl	$A_SHUTDOWN
	call	*psm_shutdownf
	addl	$8, %esp

#if defined(__xpv)
	pushl	$SHUTDOWN_reboot
	call	HYPERVISOR_shutdown
#else
	call	reset
#endif
	/*NOTREACHED*/

	SET_SIZE(kdi_reboot)

#endif	/* !__lint */

#if defined(__lint)
/*ARGSUSED*/
void
kdi_cpu_debug_init(kdi_cpusave_t *save)
{
}
#else	/* __lint */

	ENTRY_NP(kdi_cpu_debug_init)
	pushl	%ebp
	movl	%esp, %ebp

	pushl	%edi
	pushl	%ebx

	movl	8(%ebp), %edi

	KDI_RESTORE_DEBUGGING_STATE

	popl	%ebx
	popl	%edi
	leave
	ret

	SET_SIZE(kdi_cpu_debug_init)
#endif	/* !__lint */