/* ftrace.c revision 4df4bd6096e60a7062ad804cc29ac7cc4b03811a */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* Tunable parameters:
*
* ftrace_atboot - whether to start fast tracing at boot.
* ftrace_nent - size of the per-CPU event ring buffer.
*/
int ftrace_atboot = 0;	/* nonzero: ftrace_init() also calls ftrace_start() */
int ftrace_nent = FTRACE_NENT;	/* per-CPU ring buffer size, in records */
/*
* Global Tracing State:
*
* NOTREADY(=0)
* |
* ftrace_init()
* |
* |
* v
* +-------->READY-------+
* | |
* ftrace_stop() ftrace_start()
* | |
* +---(ENABLED|READY)<--+
*
* During boot, ftrace_init() is called and the state becomes
* READY. If ftrace_atboot is set, ftrace_start() is called at
* this time.
*
* If FTRACE_READY is set, then tracing can be enabled.
* If FTRACE_ENABLED is set, tracing is enabled on the set of CPUs
* which are currently FTRACE_READY.
*/
/* Global state: 0 (NOTREADY), FTRACE_READY, or FTRACE_READY|FTRACE_ENABLED. */
static int ftrace_state = 0;
/*
* Per-CPU Tracing State:
*
* +-----------------READY<--------------+
* | ^ | |
* | | ftrace_cpu_fini() |
* | | | |
* | ftrace_cpu_init() | |
* | | v ftrace_cpu_stop()
* | NOTREADY(=0) |
* | ^ |
* ftrace_cpu_start() | |
* | ftrace_cpu_fini() |
* | | |
* +----------->(ENABLED|READY)----------+
*
*/
/*
* Locking :
*
* Trace context code does not use any lock. There is a per-cpu circular trace
* buffer that has a head, a tail and a current pointer. Each record of this
* buffer is of equal length. Before doing anything, trace context code checks
* the per-cpu ENABLED bit. Trace buffer is allocated in non-trace context and
* it sets this bit only after allocating and setting up the buffer. So trace
* context code can't access the buffer till it is set up completely. The
* buffer is freed also in non-trace context. The code that frees the buffer is
* executed only after the corresponding cpu is powered off. So when this
* happens, no trace context code can be running on it. We only need to make
* sure that trace context code is not preempted from the cpu in the middle of
* accessing the trace buffer. This can be achieved simply by disabling
* interrupts temporarily. This approach makes the least assumption about the
* state of the callers of tracing functions.
*
* A single global lock, ftrace_lock protects assignments to all global and
* per-cpu trace variables. It does not protect reading of those in some cases.
*
* More specifically, it protects assignments to:
*
* ftrace_state
* cpu[N]->cpu_ftrace.ftd_state
* cpu[N]->cpu_ftrace.ftd_first
* cpu[N]->cpu_ftrace.ftd_last
*
* Does _not_ protect reading of cpu[N]->cpu_ftrace.ftd_state
* Does _not_ protect cpu[N]->cpu_ftrace.ftd_cur
* Does _not_ protect reading of ftrace_state
*/
/* Protects assignments to ftrace_state and per-CPU ftd_* (see Locking above). */
static kmutex_t ftrace_lock;
/*
* Check whether a CPU is installed.
*/
static void
ftrace_cpu_init(int cpuid)
{
/*
* This can be called with "cpu[cpuid]->cpu_flags & CPU_EXISTS"
* being false - e.g. when a CPU is DR'ed in.
*/
return;
/*
* We don't allocate the buffers until the first time
* ftrace_cpu_start() is called, so that they're not
* allocated if ftrace is never enabled.
*/
}
/*
* Only called from cpu_unconfigure() (and cpu_configure() on error).
* At this point, cpu[cpuid] is about to be freed and NULLed out,
* so we'd better clean up after ourselves.
*/
static void
ftrace_cpu_fini(int cpuid)
{
return;
/*
* This cpu is powered off and no code can be executing on it. So
* we can simply finish our cleanup. There is no need for a xcall
* to make sure that this cpu is out of trace context.
*
* The cpu structure will be cleared soon. But, for the sake of
* debugging, clear our pointers and state.
*/
ftrace_nent * sizeof (ftrace_record_t));
}
}
static void
ftrace_cpu_start(int cpuid)
{
sizeof (ftrace_record_t), KM_SLEEP);
/*
* Someone else beat us to it. The winner will
* set up the pointers and the state.
*/
ftrace_nent * sizeof (ftrace_record_t));
return;
}
}
}
}
static void
ftrace_cpu_stop(int cpuid)
{
}
/*
* Hook for DR.
*/
/*ARGSUSED*/
int
{
if (!(ftrace_state & FTRACE_READY))
return (0);
switch (what) {
case CPU_CONFIG:
if (ftrace_state & FTRACE_ENABLED)
break;
case CPU_UNCONFIG:
break;
default:
break;
}
return (0);
}
void
ftrace_init(void)
{
int i;
for (i = 0; i < NCPU; i++) {
if (IS_CPU(i)) {
/* should have been kmem_zalloc()'ed */
}
}
if (ftrace_nent < 1) {
return;
}
for (i = 0; i < NCPU; i++)
if (IS_CPU(i))
ftrace_cpu_init(i);
if (ftrace_atboot)
(void) ftrace_start();
}
/*
* Called from uadmin ioctl, or via mp_init_table[] during boot.
*/
int
ftrace_start(void)
{
int i, was_enabled = 0;
if (ftrace_state & FTRACE_READY) {
for (i = 0; i < NCPU; i++)
if (IS_CPU(i))
ftrace_cpu_start(i);
}
return (was_enabled);
}
/*
* Called from uadmin ioctl, to stop tracing.
*/
int
ftrace_stop(void)
{
int i, was_enabled = 0;
if (ftrace_state & FTRACE_READY) {
if (ftrace_state & FTRACE_ENABLED) {
was_enabled = 1;
for (i = 0; i < NCPU; i++)
if (IS_CPU(i))
ftrace_cpu_stop(i);
ftrace_state &= ~(FTRACE_ENABLED);
}
}
return (was_enabled);
}
/*
* ftrace_X() functions are called from trace context. All callers of ftrace_X()
* tests FTRACE_ENABLED first. Although this is not very accurate, it keeps the
* overhead very low when tracing is not enabled.
*
* gethrtime_unscaled() appears to be safe to be called in trace context. As an
* added precaution, we call these before we disable interrupts on this cpu.
*/
void
{
ftrace_record_t *r;
return;
}
r->ftr_thread = curthread;
r->ftr_caller = caller;
}
void
{
ftrace_record_t *r;
return;
}
r->ftr_thread = curthread;
r->ftr_caller = caller;
}
void
{
ftrace_record_t *r;
return;
}
r->ftr_thread = curthread;
r->ftr_caller = caller;
}
void
{
ftrace_record_t *r;
return;
}
r->ftr_thread = curthread;
r->ftr_caller = caller;
}
void
{
ftrace_record_t *r;
return;
}
r->ftr_thread = curthread;
r->ftr_tick = 0;
r->ftr_caller = caller;
}