/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* Debugger entry for both master and slave CPUs
*/
#if defined(__lint)
#endif
#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
#ifdef _ASM
#include <kdi_assym.h>
#include <assym.h>
/* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
#define GET_CPUSAVE_ADDR \
/*CSTYLED*/ \
/*
* Save copies of the IDT and GDT descriptors. Note that we only save the IDT
* and GDT if the IDT isn't ours, as we may be legitimately re-entering the
* debugger through the trap handler. We don't want to clobber the saved IDT
* in the process, as we'd end up resuming the world on our IDT.
*/
#define SAVE_IDTGDT \
/* \
 * NOTE(review): this macro body appears truncated in this listing -- \
 * the comparison feeding the branch below and the sidt/sgdt stores \
 * described in the comment above are missing; confirm against the \
 * complete original before relying on this fragment. \
 */ \
je 1f; \
1:
#ifdef __xpv
#else
rdmsr; \
#endif /* __xpv */
/*
 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack, so
 * saving them again here is unnecessary.
 */
RESTORE_GSBASE(%rdi); \
/*
* Given the address of the current CPU's cpusave area in %rax, the following
* macro restores the debugging state to said CPU. Restored state includes
* the debug registers from the global %dr variables, and debugging MSRs from
* the CPU save area. This code would be in a separate routine, but for the
* fact that some of the MSRs are jump-sensitive. As such, we need to minimize
* the number of jumps taken subsequent to the update of said MSRs. We can
* remove one jump (the ret) by using a macro instead of a function for the
* debugging state restoration code.
*
* Takes the cpusave area in %rdi as a parameter, clobbers %rax-%rdx
*/
#define KDI_RESTORE_DEBUGGING_STATE \
/* \
 * NOTE(review): this fragment appears truncated -- the operand setup \
 * (register moves) that should precede each kdi_dreg_set call and \
 * each conditional branch below is missing; confirm against the \
 * complete original before relying on this listing. \
 */ \
/* \
 * Six kdi_dreg_set calls -- presumably restoring %dr0-%dr3, %dr6 and \
 * %dr7 from the global %dr variables (see block comment above); the \
 * argument setup is stripped here. \
 */ \
call kdi_dreg_set; \
\
call kdi_dreg_set; \
\
call kdi_dreg_set; \
call kdi_dreg_set; \
call kdi_dreg_set; \
call kdi_dreg_set; \
\
/* \
 * Write any requested MSRs. \
 */ \
je 3f; \
1: \
je 3f; \
\
jne 2f; \
\
wrmsr; \
2: \
jmp 1b; \
3: \
/* \
 * We must not branch after re-enabling LBR. If \
 * kdi_wsr_wrexit_msr is set, it contains the number \
 * of the MSR that controls LBR. kdi_wsr_wrexit_valp \
 * contains the value that is to be written to enable \
 * LBR. \
 */ \
je 1f; \
\
\
wrmsr; \
1:
/*
* Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
* The following macros manage the buffer.
*/
/* Advance the ring buffer */
jge 1f; \
/* Advance the pointer and index */ \
jmp 2f; \
1: /* Reset the pointer and index */ \
/* Clear the new crumb */ \
jnz 3b
/* Set a value in the current breadcrumb buffer */
#endif /* _ASM */
#if defined(__lint)
/*
 * Lint-visible stub for the master-CPU debugger entry point; the real
 * implementation is the assembly in the #else branch below (largely
 * stripped from this fragment).
 */
void
kdi_cmnint(void)
{
}
#else /* __lint */
/* XXX implement me */
/*
* The main entry point for master CPUs. It also serves as the trap
* handler for all traps and interrupts taken during single-step.
*/
/* Save current register state */
#ifdef __xpv
/*
* Clear saved_upcall_mask in unused byte of cs slot on stack.
* It can only confuse things.
*/
#endif
#if !defined(__xpv)
/*
* Switch to the kernel's GSBASE. Neither GSBASE nor the ill-named
* KGSBASE can be trusted, as the kernel may or may not have already
* done a swapgs. All is not lost, as the kernel can divine the correct
* value for us. Note that the previous GSBASE is saved in the
* KDI_SAVE_REGS macro to prevent a usermode process's GSBASE from being
* blown away. On the hypervisor, we don't need to do this, since it's
* ensured we're on our requested kernel GSBASE already.
*/
#endif /* __xpv */
GET_CPUSAVE_ADDR /* %rax = cpusave, %rbx = CPU ID */
/*
* Were we in the debugger when we took the trap (i.e. was %esp in one
* of the debugger's memory ranges)?
*/
jmp 1b
3: /*
* The master is still set. That should only happen if we hit a trap
* while running in the debugger. Note that it may be an intentional
* fault. kmdb_dpi_handle_fault will sort it all out.
*/
/*
* If we're here, we ran into a debugger problem, and the user
* elected to solve it by having the debugger debug itself. The
* state we're about to save is that of the debugger when it took
* the fault.
*/
#endif /* __lint */
/*
* The cross-call handler for slave CPUs.
*
* The debugger is single-threaded, so only one CPU, called the master, may be
* running it at any given time. The other CPUs, known as slaves, spin in a
* busy loop until there's something for them to do. This is the entry point
* for the slaves - they'll be sent here in response to a cross-call sent by the
* master.
*/
#if defined(__lint)
/*
 * Lint-visible declarations for the slave-CPU cross-call entry point.
 * kdi_slave_entry_patch corresponds to the patchable location that
 * kdi_msr_add_clrentry locates (per the comment in the assembly
 * branch); the real entry code lives in the #else branch.
 */
char kdi_slave_entry_patch;
void
kdi_slave_entry(void)
{
}
#else /* __lint */
/* kdi_msr_add_clrentry knows where this is */
/*
* Cross calls are implemented as function calls, so our stack currently
* looks like one you'd get from a zero-argument function call. That
* is, there's the return %rip at %rsp, and that's about it. We need
* to make it look like an interrupt stack. When we first save, we'll
* reverse the saved %ss and %rip, which we'll fix back up when we've
* freed up some general-purpose registers. We'll also need to fix up
* the saved %rsp.
*/
/*
* We've saved all of the general-purpose registers, and have a stack
* that is irettable (after we strip down to the error code)
*/
GET_CPUSAVE_ADDR /* %rax = cpusave, %rbx = CPU ID */
#endif /* __lint */
/*
* The state of the world:
*
* The stack has a complete set of saved registers and segment
* selectors, arranged in the kdi_regs.h order. It also has a pointer
* to our cpusave area.
*
* We need to save, into the cpusave area, a pointer to these saved
* registers. First we check whether we should jump straight back to
* the kernel. If not, we save a few more registers, ready the
* machine for debugger entry, and enter the debugger.
*/
#if !defined(__lint)
#if !defined(__xpv)
/* Save off %cr0, and clear write protect */
#endif
/* Save the debug registers and disable any active watchpoints */
/*
* Save any requested MSRs.
*/
1:
rdmsr /* addr in %ecx, value into %edx:%eax */
jmp 1b
/* Pass cpusave to kdi_resume */
#endif /* !__lint */
/*
* Resume the world. The code that calls kdi_resume has already
* decided whether or not to restore the IDT.
*/
#if defined(__lint)
/*
 * Lint-visible stub for kdi_resume, which sends a CPU back into the
 * world after debugger entry (see the block comment above); the real
 * implementation is the assembly in the #else branch.
 */
void
kdi_resume(void)
{
}
#else /* __lint */
/* cpusave in %rdi */
/*
* Send this CPU back into the world
*/
#if !defined(__xpv)
#endif
/*NOTREACHED*/
#endif /* __lint */
#if !defined(__lint)
/*
* Find the trap and vector off the right kernel handler. The trap
* handler will expect the stack to be in trap order, with %rip being
* the last entry, so we'll need to restore all our regs. On i86xpv
* we'll need to compensate for XPV_TRAP_POP.
*
* We're hard-coding the three cases where KMDB has installed permanent
* handlers, since after we KDI_RESTORE_REGS(), we don't have registers
* to work with; we can't use a global since other CPUs can easily pass
* through here at the same time.
*
* Note that we handle T_DBGENTR since userspace might have tried it.
*/
je 1f
je 2f
je 3f
/*
* Hmm, unknown handler. Somebody forgot to update this when they
* added a new trap interposition... try to drop back into kmdb.
*/
int $T_DBGENTR
KDI_RESTORE_REGS(%rsp); \
/* Discard state, trapno, err */ \
1:
/*NOTREACHED*/
2:
/*NOTREACHED*/
3:
/*NOTREACHED*/
/*
* A minimal version of mdboot(), to be used by the master CPU only.
*/
#if defined(__xpv)
#else
#endif
/*NOTREACHED*/
#endif /* !__lint */
#if defined(__lint)
/*ARGSUSED*/
void
{
}
#else /* __lint */
#endif /* !__lint */