/* kdi_asm.s revision ae115bc77f6fcde83175c75b4206dc2e50747966 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* Debugger entry for both master and slave CPUs
*/
#if defined(__lint)
#endif
#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#ifdef _ASM
#include <kdi_assym.h>
#include <assym.h>
/*
 * GET_CPUSAVE_ADDR: compute the address of the current CPU's cpusave
 * area. Per the original note below, it clobbers %edx and %ecx and
 * leaves the address in %eax and the CPU id in %ebx.
 *
 * NOTE(review): only the CSTYLED marker of this macro's body survives
 * in this copy — the instruction sequence appears to have been
 * stripped. Reconcile with the complete source before use.
 */
/* clobbers %edx, %ecx, returns addr in %eax, cpu id in %ebx */
#define GET_CPUSAVE_ADDR \
/*CSTYLED*/ \
/*
 * Save copies of the IDT and GDT descriptors. Note that we only save the IDT
 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
 * debugger through the trap handler. We don't want to clobber the saved IDT
 * in the process, as we'd end up resuming the world on our IDT.
 *
 * NOTE(review): the body below is reduced to a conditional branch around
 * an empty region — the IDT comparison and the descriptor-store sequence
 * appear to have been stripped from this copy; verify against the full
 * source.
 */
#define SAVE_IDTGDT \
je 1f; \
1:
/*
 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 * The following macros manage the buffer.
 *
 * NOTE(review): the lines below are continuation-line fragments of the
 * breadcrumb macros — the #define lines and most of the instructions are
 * missing from this copy. Only the branch skeleton (wrap-around test,
 * reset path, and a clear loop) remains; confirm against the full source.
 */
/* Advance the ring buffer */
jge 1f; \
/* Advance the pointer and index */ \
jmp 2f; \
1: /* Reset the pointer and index */ \
/* Clear the new crumb */ \
jnz 3b
/* Set a value in the current breadcrumb buffer */
#endif /* _ASM */
/*
 * The main entry point for master CPUs. It also serves as the trap handler
 * for all traps and interrupts taken during single-step.
 */
#if defined(__lint)
/* Empty C stub so lint can typecheck callers; the real body is assembly. */
void
kdi_cmnint(void)
{
}
#else /* __lint */
/*
 * NOTE(review): the assembly body of kdi_cmnint is heavily truncated in
 * this copy — the ENTRY directive, the register saves, and most of the
 * instructions are missing; only local labels, a few branches, and the
 * narrative comments remain. Treat the fragments below as non-functional
 * until reconciled with the full source.
 */
/* XXX implement me */
/* Save all registers and selectors */
/*
 * If the kernel has started using its own selectors, we should too.
 * Update our saved selectors if they haven't been updated already.
 */
/*
 * The kernel switched, but we haven't. Update our saved selectors
 * to match the kernel's copies for use below.
 */
1:
/*
 * Set the selectors to a known state. If we come in from kmdb's IDT,
 * we'll be on boot's %cs. This will cause GET_CPUSAVE_ADDR to return
 * CPU 0's cpusave, regardless of which CPU we're on, and chaos will
 * ensue. So, if we've got $KCSSEL in kdi_cs, switch to it. The other
 * selectors are restored normally.
 */
jne 1f
1:
/*
 * This has to come after we set %gs to the kernel descriptor. Since
 * we've hijacked some IDT entries used in user-space such as the
 * breakpoint handler, we can enter kdi_cmnint() with GDT_LWPGS used
 * in %gs. On the hypervisor, CLI() needs GDT_GS to access the machcpu.
 */
GET_CPUSAVE_ADDR /* %eax = cpusave, %ebx = CPU ID */
/*
 * Were we in the debugger when we took the trap (i.e. was %esp in one
 * of the debugger's memory ranges)?
 */
jmp 1b
3: /*
 * %esp was within one of the debugger's memory ranges. This should
 * only happen when we take a trap while running in the debugger.
 * kmdb_dpi_handle_fault will determine whether or not it was an
 * expected trap, and will take the appropriate action.
 */
/*
 * If we're here, we ran into a debugger problem, and the user
 * elected to solve it by having the debugger debug itself. The
 * state we're about to save is that of the debugger when it took
 * the fault.
 */
#endif /* __lint */
/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time. The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do. This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by the
 * master.
 */
#if defined(__lint)
/* Lint-only stubs; the real implementations are in the assembly below. */
char kdi_slave_entry_patch;
void
kdi_slave_entry(void)
{
}
#else /* __lint */
/*
 * NOTE(review): the slave-entry assembly is truncated in this copy —
 * the patch point, the stack rearrangement, and the segment loads the
 * comments describe have all been stripped; only the comments and the
 * final GET_CPUSAVE_ADDR invocation remain. Reconcile with the full
 * source before use.
 */
/* kdi_msr_add_clrentry knows where this is */
/*
 * Cross calls are implemented as function calls, so our stack
 * currently looks like one you'd get from a zero-argument function
 * call. There's an %eip at %esp, and that's about it. We want to
 * make it look like the master CPU's stack. By doing this, we can
 * use the same resume code for both master and slave. We need to
 * make our stack look like a `struct regs' before we jump into the
 * common save routine.
 */
/*
 * Swap our saved EFLAGS and %eip. Each is where the other
 * should be.
 */
/*
 * Our stack now matches struct regs, and is irettable. We don't need
 * to do anything special for the hypervisor w.r.t. PS_IE since we
 * iret twice anyway; the second iret back to the hypervisor
 * will re-enable interrupts.
 */
/* Load sanitized segment selectors */
GET_CPUSAVE_ADDR /* %eax = cpusave, %ebx = CPU ID */
#endif /* __lint */
#if !defined(__lint)
/*
 * NOTE(review): this common register-save region is truncated in this
 * copy — the entry label, the calls interleaved with the pushes below,
 * and the loop bounds around the rdmsr are missing. The surviving
 * fragments are kept verbatim; reconcile with the full source before
 * use.
 */
/*
 * The state of the world:
 *
 * The stack has a complete set of saved registers and segment
 * selectors, arranged in `struct regs' order (or vice-versa), up to
 * and including EFLAGS. It also has a pointer to our cpusave area.
 *
 * We need to save a pointer to these saved registers. We also want
 * to adjust the saved %esp - it should point just beyond the saved
 * registers to the last frame of the thread we interrupted. Finally,
 * we want to clear out bits 16-31 of the saved selectors, as the
 * selector pushls don't automatically clear them.
 */
/* Save off %cr0, and clear write protect */
/* Save the debug registers and disable any active watchpoints */
/*
 * The constants pushed below look like debug-register numbers
 * (%dr7 twice, %dr6, then %dr0-%dr3); presumably each push was
 * paired with a call that has been stripped — TODO confirm against
 * the full source.
 */
pushl $7
pushl $7
pushl $6
pushl $0
pushl $1
pushl $2
pushl $3
/*
 * Save any requested MSRs.
 */
1:
rdmsr /* addr in %ecx, value into %edx:%eax */
jmp 1b
#endif /* !__lint */
/*
 * Given the address of the current CPU's cpusave area in %edi, the following
 * macro restores the debugging state to said CPU. Restored state includes
 * the debug registers from the global %dr variables, and debugging MSRs from
 * the CPU save area. This code would be in a separate routine, but for the
 * fact that some of the MSRs are jump-sensitive. As such, we need to minimize
 * the number of jumps taken subsequent to the update of said MSRs. We can
 * remove one jump (the ret) by using a macro instead of a function for the
 * debugging state restoration code.
 *
 * Takes the cpusave area in %edi as a parameter, clobbers %eax-%edx
 *
 * NOTE(review): the macro body below is truncated in this copy — the
 * value loads feeding each kdi_dreg_set call, the stack adjustments
 * after the calls, and the MSR-list walking instructions are missing.
 * The surviving skeleton (the %dr7/%dr6/%dr0-%dr3 set calls and the
 * branch structure around the two wrmsr instructions) is kept verbatim;
 * reconcile with the full source before use.
 */
#define KDI_RESTORE_DEBUGGING_STATE \
\
pushl $7; \
call kdi_dreg_set; \
\
pushl $6; \
call kdi_dreg_set; \
\
pushl $0; \
call kdi_dreg_set; \
\
pushl $1; \
call kdi_dreg_set; \
\
pushl $2; \
call kdi_dreg_set; \
\
pushl $3; \
call kdi_dreg_set; \
\
/* \
 * Write any requested MSRs. \
 */ \
je 3f; \
1: \
je 3f; \
\
jne 2f; \
\
wrmsr; \
2: \
jmp 1b; \
3: \
/* \
 * We must not branch after re-enabling LBR. If \
 * kdi_wsr_wrexit_msr is set, it contains the number \
 * of the MSR that controls LBR. kdi_wsr_wrexit_valp \
 * contains the value that is to be written to enable \
 * LBR. \
 */ \
je 1f; \
\
\
wrmsr; \
1:
#if defined(__lint)
/*
 * NOTE(review): the function name and parameter list of this lint stub
 * are missing from this copy — only "void" and an empty body remain, so
 * this region does not compile as-is. Restore the prototype from the
 * full source.
 */
/*ARGSUSED*/
void
{
}
#else /* __lint */
#endif /* !__lint */
/*
 * Resume the world. The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */
#if defined(__lint)
/* Lint-only stub; the real body is assembly. */
void
kdi_resume(void)
{
}
#else /* __lint */
/*
 * Send this CPU back into the world
 */
/*
 * NOTE(review): the assembly implementation of kdi_resume is absent
 * from this copy; only its comments remain.
 */
#endif /* __lint */
#if !defined(__lint)
/*
 * NOTE(review): the trap-passthrough and reboot routines described by
 * the comments below have had all of their instructions stripped from
 * this copy; only the narrative comments remain. Reconcile with the
 * full source before use.
 */
/* cpusave is still in %eax */
/*
 * When we replaced the kernel's handlers in the IDT, we made note of
 * the handlers being replaced, thus allowing us to pass traps directly
 * to said handlers here. We won't have any registers available for use
 * after we start popping, and we know we're single-threaded here, so
 * we have to use a global to store the handler address.
 */
/*
 * The trap handler will expect the stack to be in trap order, with
 * %eip being the last entry. Our stack is currently in KDIREG_*
 * order, so we'll need to pop (and restore) our way back down.
 */
/*NOTREACHED*/
/*
 * Reboot the system. This routine is to be called only by the master
 * CPU.
 */
/*
 * psm_shutdown didn't work or it wasn't set, try pc_reset.
 */
/*NOTREACHED*/
#endif /* !__lint */