/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* The debugger/"PROM" interface layer
*
* It makes more sense on SPARC. In reality, these interfaces deal with three
* things: setting break/watchpoints, stepping, and interfacing with the KDI to
* set up kmdb's IDT handlers.
*/
#include <kmdb/kmdb_dpi_impl.h>
#include <kmdb/kmdb_kdi.h>
#include <kmdb/kmdb_umemglue.h>
#include <kmdb/kaif_start.h>
#include <mdb/mdb_debug.h>
#include <mdb/mdb_isautil.h>
#include <mdb/mdb_io_impl.h>
#include <mdb/mdb_kreg_impl.h>
#include <sys/kdi_impl.h>
/*
* This is the area containing the saved state when we enter
* via kmdb's IDT entries.
*/
/* Number of per-CPU save areas in use (see kaif_cpusave referenced below). */
int kaif_ncpusave;
/*
 * NOTE(review): purpose not evident from the visible code — presumably
 * selects between kernel and debugger trap handling; confirm upstream.
 */
int kaif_trap_switch;
/*
 * NOTE(review): the enumerator list has been lost; an empty enumerator
 * list is a C constraint violation.  Restore the original members.
 */
enum {
};
/*
 * NOTE(review): both arms of this conditional are empty — the
 * architecture-specific definitions appear to be missing.
 */
#ifdef __amd64
#else
#endif
/*
* Called during normal debugger operation and during debugger faults.
*/
/*
 * Ask the user whether to reboot, looping on console input; 'y'/'Y'
 * takes the reboot path, other input returns to the debugger.
 * NOTE(review): several statements are missing from this body — the
 * call that emits the prompt (only its format-string fragment remains),
 * the condition guarding the bare return, and the actual reboot action.
 */
static void
kaif_enter_mon(void)
{
char c;
for (;;) {
/* NOTE(review): dangling format-string fragment; enclosing call lost */
"%s: Do you really want to reboot? (y/n) ",
c = kmdb_getchar();
/* NOTE(review): the if-condition for this return was lost */
return;
else if (c == 'y' || c == 'Y') {
/* NOTE(review): reboot action missing here */
}
}
}
/*
 * Map a DPI cpuid to its per-CPU save area; DPI_MASTER_CPUID resolves
 * to the master CPU's entry, and invalid ids yield NULL.
 * NOTE(review): the function name line and the range/validity checks
 * between the returns have been lost — presumably kaif_cpuid2save or
 * similar; the trailing returns belong to elided error branches.
 */
static kaif_cpusave_t *
{
if (cpuid == DPI_MASTER_CPUID)
return (&kaif_cpusave[kaif_master_cpuid]);
return (NULL);
}
return (NULL);
}
return (save);
}
/*
 * Translate a CPU's internal KAIF state into the DPI-visible state
 * (master/slave), returning -1 with errno set on lookup failure.
 * NOTE(review): the function name line, the save-area lookup preceding
 * the first return, and the default-case error return are missing.
 */
static int
{
return (-1); /* errno is set for us */
switch (save->krs_cpu_state) {
case KAIF_CPU_STATE_MASTER:
return (DPI_CPU_STATE_MASTER);
case KAIF_CPU_STATE_SLAVE:
return (DPI_CPU_STATE_SLAVE);
default:
}
}
/*
 * Report which CPU is currently acting as the debugger master.
 */
static int
kaif_get_master_cpuid(void)
{
	int master = kaif_master_cpuid;

	return (master);
}
/*
 * Return the saved register set for a CPU as an mdb_tgt_gregset_t.
 * NOTE(review): the function name line, the save-area lookup, and the
 * final cast/return that the surviving comment describes are missing.
 */
static mdb_tgt_gregset_t *
{
return (NULL); /* errno is set for us */
/*
* The saved registers are actually identical to an mdb_tgt_gregset,
* so we can directly cast here.
*/
}
/*
 * Const wrapper over the gregset lookup above.
 * NOTE(review): the function name line (with its cpuid parameter) was
 * lost; the body forwards to kaif_kdi_to_gregs(cpuid).
 */
static const mdb_tgt_gregset_t *
{
return (kaif_kdi_to_gregs(cpuid));
}
/*
 * Maps a portable register synonym ("pc", "sp", ...) to the
 * architecture's real register name.
 * NOTE(review): the closing "} kaif_reg_synonyms_t;" line of this
 * typedef appears to have been lost — the struct is unterminated.
 */
typedef struct kaif_reg_synonyms {
const char *rs_syn;	/* synonym as typed by the user */
const char *rs_name;	/* underlying machine register name */
/*
 * Resolve a register name (or synonym) to a pointer into a CPU's saved
 * register set, returning NULL on failure.
 * NOTE(review): the function name line, the synonyms array declaration
 * that these initializers belong to, and the lookup loop around the
 * returns are all missing.
 */
static kreg_t *
{
#ifdef __amd64
{ "pc", "rip" },
{ "sp", "rsp" },
{ "fp", "rbp" },
#else
{ "pc", "eip" },
{ "sp", "esp" },
{ "fp", "ebp" },
#endif
{ "tt", "trapno" }
};
int i;
return (NULL);
}
}
return (NULL);
}
/*ARGSUSED*/
/*
 * NOTE(review): presumably reads a named register into a caller-supplied
 * location, returning 0 on success and -1 on failure — the name line,
 * parameters, and the lookup/copy statements are missing; confirm
 * against the original source.
 */
static int
{
return (-1);
return (0);
}
/*
 * NOTE(review): presumably the store counterpart of the register reader
 * above (0 on success, -1 on failure); the name line, parameters, and
 * the lookup/store statements are missing.
 */
static int
{
return (-1);
return (0);
}
/*
 * NOTE(review): looks like breakpoint arming — save the original
 * instruction and write a breakpoint instruction in its place, -1 with
 * errno set on either target-I/O failure.  The name line and the two
 * mdb_tgt read/write calls these sizeof fragments belong to are missing.
 */
static int
{
sizeof (mdb_instr_t))
return (-1); /* errno is set for us */
sizeof (mdb_instr_t))
return (-1); /* errno is set for us */
return (0);
}
/*
 * NOTE(review): looks like breakpoint disarming — restore the saved
 * instruction over the breakpoint; the name line and the write call the
 * sizeof fragment belongs to are missing.
 */
static int
{
sizeof (mdb_instr_t))
return (-1); /* errno is set for us */
return (0);
}
/*
* Intel watchpoints are even more fun than SPARC ones. The Intel architecture
* manuals refer to watchpoints as breakpoints. For consistency with the
* terminology used in other portions of kmdb, we will, however, refer to them
* as watchpoints.
*
* supported by the hardware. Execute watchpoints must be one byte in length,
* and must be placed on the first byte of the instruction to be watched.
* Lengths of other watchpoints are more varied.
*
* Given that we already have a breakpoint facility, and given the restrictions
* placed on execute watchpoints, we're going to disallow the creation of
* execute watchpoints. The others will be fully supported. See the Debugging
* chapter in both the IA32 and AMD64 System Programming books for more details.
*/
/*
 * NOTE(review): both arms of this conditional are empty — the
 * architecture-specific watchpoint definitions appear to be missing.
 */
#ifdef __amd64
#else
#endif
/*
 * Validate a requested watchpoint against the hardware's size and
 * alignment restrictions, warning and returning an error for
 * unsupported combinations.
 * NOTE(review): the name line, parameters, and the conditionals that
 * select between these warn() branches are missing; only the error
 * messages and the final returns survive.
 */
static int
{
}
warn("I/O watchpoint size must be 1, 2, or 4 bytes\n");
}
warn("physical address watchpoints are not supported on this "
"platform\n");
return (set_errno(EMDB_TGTHWNOTSUP));
} else {
}
" bytes\n");
}
}
warn("%lu-byte watchpoints must be %lu-byte aligned\n",
}
return (0);
}
/*
 * Reserve one of the hardware debug-register slots for a watchpoint,
 * failing with EMDB_WPTOOMANY when all are taken.
 * NOTE(review): the name line and the loop scanning the slot map
 * (which the "found one" comment annotates) are missing.
 */
static int
{
int id;
/* found one */
return (0);
}
}
return (set_errno(EMDB_WPTOOMANY));
}
/*
 * NOTE(review): name line and body missing — presumably releases a
 * previously reserved watchpoint slot; confirm against the original.
 */
static void
{
}
/*ARGSUSED*/
/*
 * NOTE(review): name line and most of the body missing — an orphaned
 * "else" is all that remains; presumably arms a hardware watchpoint.
 */
static void
{
else
}
/*ARGSUSED*/
/*
 * NOTE(review): name line and body missing — presumably disarms a
 * hardware watchpoint; confirm against the original source.
 */
static void
{
}
/*ARGSUSED*/
/*
 * Count something across all per-CPU save areas and return the tally.
 * NOTE(review): the name line and the loop body (what is counted into
 * n) are missing — presumably matches a watchpoint against each CPU's
 * debug status; confirm against the original.
 */
static int
{
int n = 0;
int i;
for (i = 0; i < kaif_ncpusave; i++)
return (n);
}
/*
 * Single-step the master CPU over one instruction.  Instructions that
 * manipulate EFLAGS directly (cli, sti, pushfl, popfl) are emulated or
 * specially handled so that interrupts stay masked during the step and
 * the trace flag is restored afterwards; iret, int, sysret, and sysexit
 * cannot be stepped at all.
 * NOTE(review): large portions of this function were lost in extraction
 * — local declarations (instr, fl, oldfl, sp, emulated, pc), the
 * instruction fetch that the do-loop wraps, several read/write calls
 * whose format-string fragments remain, and the statements following
 * the early "}" below.  The fragments cannot compile as-is; restore
 * from the original before making behavioral changes.
 */
static int
kaif_step(void)
{
}
/*
* Stepping behavior depends on the type of instruction. It does not
* depend on the presence of a REX prefix, as the action we take for a
* given instruction doesn't currently vary for 32-bit instructions
* versus their 64-bit counterparts.
*/
do {
warn("failed to read at %p for step",
return (-1);
}
switch (instr) {
case M_IRET:
warn("iret cannot be stepped\n");
return (set_errno(EMDB_TGTNOTSUP));
case M_INT3:
case M_INTX:
case M_INTO:
warn("int cannot be stepped\n");
return (set_errno(EMDB_TGTNOTSUP));
case M_ESC:
/* NOTE(review): second-byte fetch for two-byte opcodes is missing */
warn("failed to read at %p for step",
return (-1);
}
switch (instr) {
case M_SYSRET:
warn("sysret cannot be stepped\n");
return (set_errno(EMDB_TGTNOTSUP));
case M_SYSEXIT:
warn("sysexit cannot be stepped\n");
return (set_errno(EMDB_TGTNOTSUP));
}
break;
/*
* Some instructions need to be emulated. We need to prevent direct
* manipulations of EFLAGS, so we'll emulate cli, sti. pushfl and
* popfl also receive special handling, as they manipulate both EFLAGS
* and %esp.
*/
case M_CLI:
fl &= ~KREG_EFLAGS_IF_MASK;
emulated = 1;
break;
case M_STI:
/* NOTE(review): the IF-setting statement for sti appears missing */
emulated = 1;
break;
case M_POPF:
/*
* popfl will restore a pushed EFLAGS from the stack, and could
* in so doing cause IF to be turned on, if only for a brief
* period. To avoid this, we'll secretly replace the stack's
* EFLAGS with our decaffeinated brand. We'll then manually
* load our EFLAGS copy with the real version after the step.
*/
/* NOTE(review): stack read/write calls lost; only fragments remain */
" at %p for popfl step\n", (void *)sp);
}
" at %p for popfl step\n", (void *)sp);
}
break;
}
if (emulated) {
/* NOTE(review): PC advance for emulated instructions is missing */
return (0);
}
/* Do the step with IF off, and TF (step) on */
(void) kmdb_dpi_set_register(FLAGS_REG_NAME,
kmdb_dpi_resume_master(); /* ... there and back again ... */
/* EFLAGS has now changed, and may require tuning */
switch (instr) {
case M_POPF:
/*
* Use the EFLAGS we grabbed before the pop - see the pre-step
* M_POPFL comment.
*/
return (0);
case M_PUSHF:
/*
* We pushed our modified EFLAGS (with IF and TF turned off)
* onto the stack. Replace the pushed version with our
* unmodified one.
*/
" at %p after pushfl step\n", (void *)sp);
}
/* Go back to using the EFLAGS we were using before the step */
return (0);
default:
/*
* The stepped instruction may have altered EFLAGS. We only
* really care about the value of IF, and we know the stepped
* instruction didn't alter it, so we can simply copy the
* pre-step value. We'll also need to turn TF back off.
*/
(void) kmdb_dpi_set_register(FLAGS_REG_NAME,
(oldfl & KREG_EFLAGS_IF_MASK)));
return (0);
}
}
/*
* The target has already configured the chip for branch step, leaving us to
* actually make the machine go. Due to a number of issues involving
* the potential alteration of system state via instructions like sti, cli,
* pushfl, and popfl, we're going to treat this like a normal system resume.
* All CPUs will be released, on the kernel's IDT. Our primary concern is
* the alteration/storage of our TF'd EFLAGS via pushfl and popfl. There's no
* real workaround - we don't have opcode breakpoints - so the best we can do is
* to ensure that the world won't end if someone does bad things to EFLAGS.
*
* Two things can happen:
* 1. EFLAGS.TF may be cleared, either maliciously or via a popfl from saved
* state. The CPU will continue execution beyond the branch, and will not
* 2. Someone may pushfl the TF'd EFLAGS, and may stash a copy of it somewhere.
* When the saved version is popfl'd back into place, the debugger will be
* re-entered on a single-step trap.
*/
/*
 * Resume the world with branch-step configured (see the block comment
 * above) by setting EFLAGS appropriately on the master.
 * NOTE(review): the flag arguments to kmdb_dpi_set_register and the
 * subsequent resume call appear to be missing from this body.
 */
static void
kaif_step_branch(void)
{
(void) kmdb_dpi_set_register(FLAGS_REG_NAME,
}
/*ARGSUSED*/
/*
 * NOTE(review): name line and entire body missing — a uintptr_t-returning
 * helper (possibly the DPI call/invoke hook); restore from the original.
 */
static uintptr_t
{
}
/*
 * Pretty-print one KDI "crumb" (debugger-entry breadcrumb record):
 * CPU state (Master/Slave), trap number, stack pointer, flag, and pc.
 * NOTE(review): the name line, the crumb read that the sizeof fragment
 * belongs to, the default-case output, and the mdb_printf argument
 * lists are missing.
 */
static void
{
sizeof (kdi_crumb_t)) {
return;
}
mdb_printf("state: ");
switch (krm.krm_cpu_state) {
case KAIF_CPU_STATE_MASTER:
mdb_printf("M");
break;
case KAIF_CPU_STATE_SLAVE:
mdb_printf("S");
break;
default:
}
mdb_printf(" trapno %3d sp %08x flag %d pc %p %A\n",
}
/*
 * Walk a CPU's crumb ring from newest to oldest (KDI_NCRUMBS entries).
 * NOTE(review): the name line, parameters, and the per-entry dump call
 * inside the loop are missing.
 */
static void
{
int i;
for (i = KDI_NCRUMBS; i > 0; i--) {
}
}
/*
 * Dump crumbs either for one address, one CPU (cpuid != -1), or for
 * every saved CPU.
 * NOTE(review): the name line, the leading address branch, the
 * single-CPU branch body, and the mdb_printf argument list are missing.
 */
static void
{
int i;
/* dump_crumb will protect us against bogus addresses */
} else if (cpuid != -1) {
return;
} else {
for (i = 0; i < kaif_ncpusave; i++) {
continue;
mdb_printf("%sCPU %d crumbs: (curidx %d)\n",
}
}
}
/*
 * NOTE(review): name line and body missing — adjacent to
 * kaif_modchg_cancel below, so presumably registers the module-change
 * callback (kaif_modchg_cb); confirm against the original.
 */
static void
{
}
/*
 * Cancel module-change notification.
 * NOTE(review): body is empty as extracted — presumably it cleared
 * kaif_modchg_cb; confirm against the original source.
 */
static void
kaif_modchg_cancel(void)
{
}
/*
 * Copy an MSR description list (including its terminating entry) into
 * per-CPU storage for each saved CPU.
 * NOTE(review): the name line, the counting loop that increments
 * nr_msrs, the allocation call the UM_SLEEP fragment belongs to, and
 * the per-CPU assignment in the final loop are missing.
 */
static void
{
size_t i;
nr_msrs++;
/* we want to copy the terminating kdi_msr_t too */
nr_msrs++;
UM_SLEEP);
for (i = 0; i < kaif_ncpusave; i++)
}
/*
 * Look up a saved MSR value, returning 0 when not found.
 * NOTE(review): the name line, the save-area lookup, and the search
 * loop are missing.  Also flag: returning -1 from a uint64_t function
 * yields UINT64_MAX — callers cannot distinguish it from a real MSR
 * value; verify the original's error contract.
 */
static uint64_t
{
int i;
return (-1); /* errno is set for us */
return (msr[i].kdi_msr_val);
}
return (0);
}
/*
 * Switch trap handling to the debugger's handlers.
 * NOTE(review): body is empty as extracted — the IDT-switching call(s)
 * appear to have been lost; confirm against the original source.
 */
void
kaif_trap_set_debugger(void)
{
}
/*
 * NOTE(review): name line and body missing — paired with
 * kaif_trap_set_debugger above, so presumably restores the saved
 * (kernel) trap handlers; confirm against the original.
 */
void
{
}
/*
 * VM-ready notification hook.
 * NOTE(review): body is empty as extracted; confirm whether the
 * original performed any work here.
 */
static void
kaif_vmready(void)
{
}
/*
 * Register a newly available memory range with the KDI before handing
 * it to umem, so the debugger entry code can recognize stacks
 * allocated from it (see surviving comment below).
 * NOTE(review): the name line, parameters, the KDI registration call,
 * and the umem handoff are missing.
 */
void
{
int ret;
/*
* In the unlikely event that someone is stepping through this routine,
* we need to make sure that the KDI knows about the new range before
* umem gets it. That way the entry code can recognize stacks
* allocated from the new region.
*/
}
/*
 * NOTE(review): name line missing — paired with the unload notifier
 * below, so presumably the module-loaded notifier; the guarded callback
 * invocation itself was also lost.
 */
void
{
if (kaif_modchg_cb != NULL)
}
/*
 * Notify the registered module-change callback that module modp is
 * going away (second argument 0 = unloading, by the visible call).
 * NOTE(review): the name line and parameter list are missing.
 */
void
{
if (kaif_modchg_cb != NULL)
kaif_modchg_cb(modp, 0);
}
/*
 * NOTE(review): name line and body missing — cannot determine this
 * function's purpose from the visible code; restore from the original.
 */
void
{
}
/*
 * NOTE(review): tail of a kdi debugvec-style initializer whose opening
 * declaration (and earlier members) were lost in extraction — only the
 * trailing NULL kctl hooks survive.
 */
NULL, /* dv_kctl_vmready */
NULL, /* dv_kctl_memavail */
NULL, /* dv_kctl_modavail */
NULL, /* dv_kctl_thravail */
};
/*
 * NOTE(review): name line and body missing — purpose cannot be
 * determined from the visible code; restore from the original.
 */
void
{
}
/*ARGSUSED*/
/*
 * NOTE(review): name line and body missing — purpose cannot be
 * determined from the visible code; restore from the original.
 */
void
{
}
/*
 * Initialize the kaif layer: allocate the per-CPU save areas and clear
 * the watchpoint slot map.
 * NOTE(review): the name line (likely kaif_init), the allocation call
 * the UM_SLEEP fragment belongs to, and any other setup are missing.
 * The trailing "};" is the tail of a separate initializer (probably
 * the dpi_ops vector) whose opening declaration was lost.
 */
static int
{
/* Allocate the per-CPU save areas */
UM_SLEEP);
kaif_waptmap = 0;
return (0);
}
};