/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
#include <sys/inttypes.h>
#include <sys/cpc_impl.h>
#include <sys/cpc_pcbe.h>
#include <sys/archsystm.h>
#include <sys/cap_util.h>
#if defined(__x86)
#include <sys/xc_levels.h>
#endif
/*
* These are set when a PCBE module is loaded.
*/
/*
* Statistics on (mis)behavior
*/
/*
* By setting 'kcpc_nullctx_panic' to 1, any overflow interrupts in a thread
* with no valid context will result in a panic.
*/
static int kcpc_nullctx_panic = 0;
static kcpc_set_t *kcpc_set_create(kcpc_request_t *reqs, int nreqs,
    int set_flags, int kmem_flags);
/*
* Macros to manipulate context flags. All flag updates should use one of these
* two macros
*
 * Flags should always be updated atomically since some of the updates are
* not protected by locks.
*/
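/*
 * As an illustrative sketch only (the macro names and the assumption that
 * kc_flags is a uint_t are not taken from this file), such macros could be
 * built on the atomic bit operations from <sys/atomic.h>:
 *
 *	#define	KCPC_CTX_FLAG_SET(ctx, flag)	\
 *		atomic_or_uint(&(ctx)->kc_flags, (flag))
 *	#define	KCPC_CTX_FLAG_CLR(ctx, flag)	\
 *		atomic_and_uint(&(ctx)->kc_flags, ~(flag))
 */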
/*
* The IS_HIPIL() macro verifies that the code is executed either from a
 * cross-call or from a high-PIL interrupt.
*/
#ifdef DEBUG
#else
#define IS_HIPIL()
#endif /* DEBUG */
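/*
 * On a DEBUG kernel, IS_HIPIL() would typically expand to a PIL check, for
 * example (illustrative only; the exact threshold symbol is an assumption):
 *
 *	#define	IS_HIPIL()	(getpil() >= XC_HI_PIL)
 */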
extern int kcpc_hw_load_pcbe(void);
/*
* Return value from kcpc_hw_load_pcbe()
*/
static int kcpc_pcbe_error = 0;
/*
* Perform one-time initialization of kcpc framework.
* This function performs the initialization only the first time it is called.
* It is safe to call it multiple times.
*/
int
kcpc_init(void)
{
long hash;
/*
* We already tried loading platform pcbe module and failed
*/
if (kcpc_pcbe_error != 0)
return (-1);
/*
* The kcpc framework should be initialized at most once
*/
return (0);
/*
* Load platform-specific pcbe module
*/
return (kcpc_pcbe_error == 0 ? 0 : -1);
}
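/*
 * A minimal sketch of the one-time guard described above (the
 * 'kcpc_initialized' flag is illustrative and not a name used elsewhere in
 * this file; the per-hash setup hinted at by the 'hash' variable is elided):
 *
 *	static int kcpc_initialized;
 *
 *	if (kcpc_pcbe_error != 0)
 *		return (-1);
 *	if (kcpc_initialized != 0)
 *		return (0);
 *	kcpc_initialized = 1;
 *	kcpc_pcbe_error = kcpc_hw_load_pcbe();
 *	return (kcpc_pcbe_error == 0 ? 0 : -1);
 */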
void
{
}
void
{
}
void
kcpc_unregister_dcpc(void)
{
}
int
{
int error;
int save_spl;
return (EINVAL);
}
return (error);
}
/*
* We must hold cpu_lock to prevent DR, offlining, or unbinding while
* we are manipulating the cpu_t and programming the hardware, else the
	 * cpu_t could go away while we're looking at it.
*/
/*
* The CPU could have been DRd out while we were getting set up.
*/
goto unbound;
/*
	 * Check to see whether the counters for this CPU are already being
	 * used by someone other than the kernel for capacity and utilization
	 * (since the kernel will let go of the counters for the user in
	 * kcpc_program() below).
*/
/*
* If this CPU already has a bound set, return an error.
*/
goto unbound;
}
goto unbound;
}
return (0);
return (EAGAIN);
}
int
{
int error;
/*
* Only one set is allowed per context, so ensure there is no
* existing context.
*/
return (EEXIST);
/*
* The context must begin life frozen until it has been properly
* programmed onto the hardware. This prevents the context ops from
* worrying about it until we're ready.
*/
return (EINVAL);
}
/*
* Permit threads to look at their own hardware counters from userland.
*/
/*
* Create the data store for this set.
*/
return (error);
}
/*
* Add a device context to the subject thread.
*/
/*
* Ask the backend to program the hardware.
*/
if (t == curthread) {
int save_spl;
} else {
/*
* Since we are the agent LWP, we know the victim LWP is stopped
* until we're done here; no need to worry about preemption or
* migration here. We still use an atomic op to clear the flag
* to ensure the flags are always self-consistent; they can
* still be accessed from, for instance, another CPU doing a
* kcpc_invalidate_all().
*/
}
return (0);
}
/*
* Walk through each request in the set and ask the PCBE to configure a
* corresponding counter.
*/
int
{
int i;
int ret;
int n;
ASSERT(n >= 0 && n < cpc_ncounters);
== 0) {
*subcode = -1;
return (ENOTSUP);
}
/*
* If any of the counters have requested overflow
* notification, we flag the context as being one that
* cares about overflow.
*/
}
switch (ret) {
case CPC_HV_NO_ACCESS:
return (EACCES);
default:
return (EINVAL);
}
}
}
return (0);
}
void
{
int i;
}
/*
* buf points to a user address and the data should be copied out to that
* address in the current process.
*/
int
{
int save_spl;
return (EINVAL);
}
/*
* Kernel preemption must be disabled while reading the hardware regs,
* and if this is a CPU-bound context, while checking the CPU binding of
* the current thread.
*/
return (EAGAIN);
}
return (EAGAIN);
}
}
}
/*
* The config may have been invalidated by
* the pcbe_sample op.
*/
return (EAGAIN);
}
}
return (EFAULT);
return (EFAULT);
return (EFAULT);
return (0);
}
/*
* Stop the counters on the CPU this context is bound to.
*/
static void
{
} else {
}
}
int
{
kthread_t *t;
/*
* We could be racing with the process's agent thread as it
* binds the set; we must wait for the set to finish binding
* before attempting to tear it down.
*/
/*
* Use kc_lock to synchronize with kcpc_restore().
*/
/*
* The context is thread-bound and therefore has a device
* context. It will be freed via removectx() calling
* freectx() calling kcpc_free().
*/
if (t == curthread) {
int save_spl;
}
#ifdef DEBUG
panic("kcpc_unbind: context %p not preset on thread %p",
(void *)ctx, (void *)t);
#else
#endif /* DEBUG */
} else {
/*
* If we are unbinding a CPU-bound set from a remote CPU, the
* native CPU's idle thread could be in the midst of programming
* this context onto the CPU. We grab the context's lock here to
* ensure that the idle thread is done with it. When we release
* the lock, the CPU no longer has a context and the idle thread
* will move on.
*
* cpu_lock must be held to prevent the CPU from being DR'd out
* while we disassociate the context from the cpu_t.
*/
/*
* The CPU may have been DR'd out of the system.
*/
}
}
}
return (0);
}
int
{
int i;
return (EINVAL);
break;
return (0);
}
int
{
int i;
int save_spl;
}
/*
* If the user is doing this on a running set, make sure the counters
* are stopped first.
*/
pcbe_ops->pcbe_allstop();
/*
* Ask the backend to program the hardware.
*/
return (0);
}
/*
* Caller must hold kcpc_cpuctx_lock.
*/
int
{
int i;
int flag;
int err;
/*
* This thread has a set but no context; it must be a
* CPU-bound set.
*/
return (EINVAL);
return (EAGAIN);
if (cmd == CPC_ENABLE) {
return (EINVAL);
} else if (cmd == CPC_DISABLE) {
return (EINVAL);
/*
* with current counter values, unbind, update requests with
* new config, then re-bind.
*/
pcbe_ops->pcbe_allstop();
if (enable)
else
}
if (kcpc_unbind(set) != 0)
return (EINVAL);
return (EINVAL);
}
} else
return (EINVAL);
return (0);
}
/*
* Provide PCBEs with a way of obtaining the configs of every counter which will
* be programmed together.
*
* If current is NULL, provide the first config.
*
* If data != NULL, caller wants to know where the data store associated with
* the config we return is located.
*/
void *
{
int i;
/*
* Client would like the first config, which may not be in
* counter 0; we need to search through the counters for the
* first config.
*/
for (i = 0; i < cpc_ncounters; i++)
break;
/*
* There are no counters configured for the given context.
*/
if (i == cpc_ncounters)
return (NULL);
} else {
/*
* There surely is a faster way to do this.
*/
for (i = 0; i < cpc_ncounters; i++) {
break;
}
/*
* We found the current config at picnum i. Now search for the
* next configured PIC.
*/
for (i++; i < cpc_ncounters; i++) {
break;
}
if (i == cpc_ncounters)
return (NULL);
}
}
}
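/*
 * A PCBE would typically iterate over every configuration being programmed
 * together along these lines (a sketch; it assumes this routine is the
 * kcpc_next_config(token, current, &data) interface used by PCBEs, and
 * my_program_one() is a hypothetical per-config helper):
 *
 *	uint64_t *data;
 *	void *cfg;
 *
 *	for (cfg = kcpc_next_config(token, NULL, &data); cfg != NULL;
 *	    cfg = kcpc_next_config(token, cfg, &data))
 *		my_program_one(cfg, data);
 */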
{
long hash;
return (NULL);
return (ctx);
}
/*
* Copy set from ctx to the child context, cctx, if it has CPC_BIND_LWP_INHERIT
* in the flags.
*/
static void
{
int i, j;
int code;
return;
sizeof (kcpc_request_t), KM_SLEEP);
KM_SLEEP);
sizeof (kcpc_attr_t), KM_SLEEP);
}
}
}
}
void
{
}
/*
* Generic interrupt handler used on hardware that generates
* overflow interrupts.
*
* Note: executed at high-level interrupt context!
*/
/*ARGSUSED*/
{
int i;
/*
* On both x86 and UltraSPARC, we may deliver the high-level
* interrupt in kernel mode, just after we've started to run an
* interrupt thread. (That's because the hardware helpfully
* delivers the overflow interrupt some random number of cycles
	 * after the instruction that caused the overflow, by which time
* we're in some part of the kernel, not necessarily running on
* the right thread).
*
* Check for this case here -- find the pinned thread
* that was running when the interrupt went off.
*/
if (t->t_flag & T_INTR_THREAD) {
/*
* Note that t_lwp is always set to point at the underlying
* thread, thus this will work in the presence of nested
* interrupts.
*/
}
} else
/*
* This can easily happen if we're using the counters in
* "shared" mode, for example, and an overflow interrupt
* occurs while we are running cpustat. In that case, the
* bound thread that has the context that belongs to this
* CPU is almost certainly sleeping (if it was running on
* the CPU we'd have found it above), and the actual
* interrupted thread has no knowledge of performance counters!
*/
/*
* Return the bound context for this CPU to
* the interrupt handler so that it can synchronously
* sample the hardware counters and restart them.
*/
return (ctx);
}
/*
* As long as the overflow interrupt really is delivered early
* enough after trapping into the kernel to avoid switching
* threads, we must always be able to find the cpc context,
	 * or something went terribly wrong, i.e., we ended up
	 * running a passivated interrupt thread, a kernel
	 * thread, or we interrupted idle, all of which are Very Bad.
*
* We also could end up here owing to an incredibly unlikely
* race condition that exists on x86 based architectures when
* the cpc provider is in use; overflow interrupts are directed
* to the cpc provider if the 'dtrace_cpc_in_use' variable is
* set when we enter the handler. This variable is unset after
* overflow interrupts have been disabled on all CPUs and all
* contexts have been torn down. To stop interrupts, the cpc
	 * provider issues an xcall to the remote CPU before it tears
	 * down that CPU's context. Since high-priority xcalls on an x86
	 * architecture execute at a higher PIL than this handler, it
	 * is possible (though extremely unlikely) that the xcall could
	 * interrupt the overflow handler before the handler has
	 * checked the 'dtrace_cpc_in_use' variable, stop the counters,
	 * and return to the cpc provider, which could then rip down
	 * contexts and unset 'dtrace_cpc_in_use' *before* the CPU's
	 * overflow handler has had a chance to check the variable. In
* that case, the handler would direct the overflow into this
* code and no valid context will be found. The default behavior
* when no valid context is found is now to shout a warning to
* the console and bump the 'kcpc_nullctx_count' variable.
*/
if (kcpc_nullctx_panic)
panic("null cpc context, thread %p", (void *)t);
#ifdef DEBUG
"null cpc context found in overflow handler!\n");
#endif
/*
* Schedule an ast to sample the counters, which will
* propagate any overflow into the virtualized performance
* counter(s), and may deliver a signal.
*/
/*
* If a counter has overflowed which was counting on behalf of
* a request which specified CPC_OVF_NOTIFY_EMT, send the
* process a signal.
*/
for (i = 0; i < cpc_ncounters; i++) {
bitmap & (1 << i) &&
/*
* A signal has been requested for this PIC, so
				 * freeze the context. The interrupt handler
* has already stopped the counter hardware.
*/
}
}
aston(t);
/*
	 * Thread context is no longer valid, but there may be a valid
* CPU context.
*/
}
return (NULL);
}
/*
* The current thread context had an overflow interrupt; we're
* executing here in high-level interrupt context.
*/
/*ARGSUSED*/
{
int save_spl;
return (DDI_INTR_UNCLAIMED);
/*
* Prevent any further interrupts.
*/
pcbe_ops->pcbe_allstop();
if (dtrace_cpc_in_use) {
/*
* Set the per-CPU state bit to indicate that we are currently
* processing an interrupt if it is currently free. Drop the
* interrupt if the state isn't free (i.e. a configuration
* event is taking place).
*/
int i;
(*dtrace_cpc_fire)(bitmap);
#ifdef DEBUG
"hardware overflow handler!\n");
#endif
return (DDI_INTR_CLAIMED);
}
/* Reset any counters that have overflowed */
(void *)ctx);
}
}
/*
* We've finished processing the interrupt so set
* the state back to free.
*/
}
return (DDI_INTR_CLAIMED);
}
/*
* DTrace isn't involved so pass on accordingly.
*
* If the interrupt has occurred in the context of an lwp owning
* the counters, then the handler posts an AST to the lwp to
* trigger the actual sampling, and optionally deliver a signal or
* restart the counters, on the way out of the kernel using
* kcpc_hw_overflow_ast() (see below).
*
* On the other hand, if the handler returns the context to us
* directly, then it means that there are no other threads in
* the middle of updating it, no AST has been posted, and so we
* should sample the counters here, and restart them with no
* further fuss.
*
* The CPU's CPC context may disappear as a result of cross-call which
* has higher PIL on x86, so protect the context by raising PIL to the
* cross-call level.
*/
}
return (DDI_INTR_CLAIMED);
}
/*
* Called from trap() when processing the ast posted by the high-level
* interrupt handler.
*/
int
{
int i;
int found = 0;
/*
* An overflow happened: sample the context to ensure that
* the overflow is propagated into the upper bits of the
* virtualized 64-bit counter(s).
*/
/*
* The interrupt handler has marked any pics with KCPC_PIC_OVERFLOWED
* if that pic generated an overflow and if the request it was counting
* on behalf of had CPC_OVERFLOW_REQUEST specified. We go through all
* pics in the context and clear the KCPC_PIC_OVERFLOWED flags. If we
* found any overflowed pics, keep the context frozen and return true
* (thus causing a signal to be sent).
*/
for (i = 0; i < cpc_ncounters; i++) {
found = 1;
}
}
if (found)
return (1);
/*
* Otherwise, re-enable the counters and continue life as before.
*/
return (0);
}
/*
* Called when switching away from current thread.
*/
static void
{
int err;
int save_spl;
return;
}
/*
* This context has been invalidated but the counters have not
* been stopped. Stop them here and mark the context stopped.
*/
return;
}
pcbe_ops->pcbe_allstop();
return;
}
/*
* Need to sample for all reqs into each req's current mpic.
*/
/*
	 * Program the counters to measure capacity and utilization since the
	 * user thread isn't using them anymore
*/
}
static void
{
int save_spl;
/*
* The context is invalidated but has not been marked stopped.
* We mark it as such here because we will not start the
* counters during this context switch.
*/
}
return;
}
/*
* Set kc_flags to show that a kcpc_restore() is in progress to avoid
* ctx & set related memory objects being freed without us knowing.
* This can happen if an agent thread is executing a kcpc_unbind(),
* with this thread as the target, whilst we're concurrently doing a
* restorectx() during, for example, a proc_exit(). Effectively, by
* doing this, we're asking kcpc_free() to cv_wait() until
* kcpc_restore() has completed.
*/
/*
* While programming the hardware, the counters should be stopped. We
* don't do an explicit pcbe_allstop() here because they should have
* been stopped already by the last consumer.
*/
/*
* Wake the agent thread if it's waiting in kcpc_free().
*/
}
/*
 * If kcpc_counts_include_idle is set to 0 by the sys admin, we add the
* following context operators to the idle thread on each CPU. They stop the
* counters when the idle thread is switched on, and they start them again when
* it is switched off.
*/
/*ARGSUSED*/
void
{
/*
* The idle thread shouldn't be run anywhere else.
*/
/*
* We must hold the CPU's context lock to ensure the context isn't freed
* while we're looking at it.
*/
return;
}
}
void
{
/*
* The idle thread shouldn't be run anywhere else.
*/
/*
* We must hold the CPU's context lock to ensure the context isn't freed
* while we're looking at it.
*/
return;
}
pcbe_ops->pcbe_allstop();
}
/*ARGSUSED*/
static void
{
int i;
return;
return;
}
/*
* Copy the parent context's kc_flags field, but don't overwrite
* the child's in case it was modified during kcpc_ctx_clone.
*/
/*
* Our contract with the user requires us to immediately send an
* overflow signal to all children if we have the LWPINHERIT
* and SIGOVF flags set. In addition, all counters should be
* set to UINT64_MAX, and their pic's overflow flag turned on
* so that our trap() processing knows to send a signal.
*/
}
}
}
}
/*
* Counter Stoppage Theory
*
 * The counters may need to be stopped properly on the following occasions:
*
* 1) An LWP exits.
* 2) A thread exits.
* 3) An LWP performs an exec().
* 4) A bound set is unbound.
*
* In addition to stopping the counters, the CPC context (a kcpc_ctx_t) may need
* to be freed as well.
*
* Case 1: kcpc_passivate(), called via lwp_exit(), stops the counters. Later on
* when the thread is freed, kcpc_free(), called by freectx(), frees the
* context.
*
* Case 2: same as case 1 except kcpc_passivate is called from thread_exit().
*
* Case 3: kcpc_free(), called via freectx() via exec(), recognizes that it has
* been called from exec. It stops the counters _and_ frees the context.
*
* Case 4: kcpc_unbind() stops the hardware _and_ frees the context.
*
* CPU-bound counters are always stopped via kcpc_unbind().
*/
/*
* We're being called to delete the context; we ensure that all associated data
* structures are freed, and that the hardware is passivated if this is an exec.
*/
/*ARGSUSED*/
void
{
int i;
/*
* Wait for kcpc_restore() to finish before we tear things down.
*/
if (isexec) {
/*
* This thread is execing, and after the exec it should not have
* any performance counter context. Stop the counters properly
* here so the system isn't surprised by an overflow interrupt
* later.
*/
/*
* CPU-bound context; stop the appropriate CPU's ctrs.
* Hold cpu_lock while examining the CPU to ensure it
* doesn't go away.
*/
/*
* The CPU could have been DR'd out, so only stop the
* CPU and clear its context pointer if the CPU still
* exists.
*/
}
} else {
int save_spl;
/*
* Thread-bound context; stop _this_ CPU's counters.
*/
}
/*
* Since we are being called from an exec and we know that
* exec is not permitted via the agent thread, we should clean
* up this thread's CPC state completely, and not leave dangling
* CPC pointers behind.
*/
}
/*
* Walk through each request in this context's set and free the PCBE's
* configuration if it exists.
*/
}
}
/*
* Free the memory associated with a request set.
*/
void
{
int i;
}
}
}
/*
* Grab every existing context and mark it as invalid.
*/
void
kcpc_invalidate_all(void)
{
long hash;
}
}
/*
* Interface for PCBEs to signal that an existing configuration has suddenly
* become invalid.
*/
void
{
}
/*
* Called from lwp_exit() and thread_exit()
*/
void
kcpc_passivate(void)
{
int save_spl;
return;
/*
* This thread has a set but no context; it must be a CPU-bound
* set. The hardware will be stopped via kcpc_unbind() when the
* process exits and closes its file descriptors with
* kcpc_close(). Our only job here is to clean up this thread's
* state; the set will be freed with the unbind().
*/
(void) kcpc_unbind(set);
/*
* Unbinding a set belonging to the current thread should clear
* its set pointer.
*/
return;
}
/*
	 * This thread/LWP is exiting, but context switches will continue to
	 * happen for a bit as the exit proceeds. Kernel preemption must be
* disabled here to prevent a race between checking or setting the
* INVALID_STOPPED flag here and kcpc_restore() setting the flag during
* a context switch.
*/
}
/*
* We're cleaning up after this thread; ensure there are no dangling
* CPC pointers left behind. The context and set will be freed by
* freectx().
*/
}
/*
* Assign the requests in the given set to the PICs in the context.
* Returns 0 if successful, -1 on failure.
*/
/*ARGSUSED*/
int
{
int i;
int *picnum_save;
/*
	 * Provide kcpc_tryassign() with scratch space to avoid doing an
	 * alloc/free with every invocation.
*/
/*
* kcpc_tryassign() blindly walks through each request in the set,
* seeing if a counter can count its event. If yes, it assigns that
* counter. However, that counter may have been the only capable counter
* for _another_ request's event. The solution is to try every possible
* request first. Note that this does not cover all solutions, as
* that would require all unique orderings of requests, an n^n operation
* which would be unacceptable for architectures with many counters.
*/
break;
return (-1);
return (0);
}
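/*
 * The retry described above boils down to a loop like the following (a
 * sketch; the kcpc_tryassign(set, starting_req, scratch) calling convention
 * is inferred from the surrounding code, not quoted from it):
 *
 *	for (i = 0; i < set->ks_nreqs; i++)
 *		if (kcpc_tryassign(set, i, picnum_save) == 0)
 *			break;
 *
 *	if (i == set->ks_nreqs)
 *		return (-1);
 *	return (0);
 */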
static int
{
int i;
int j;
/*
* We are attempting to assign the reqs to pics, but we may fail. If we
* fail, we need to restore the state of the requests to what it was
* when we found it, as some reqs may have been explicitly assigned to
* a specific PIC beforehand. We do this by snapshotting the assignments
	 * now and restoring from that snapshot later if we fail.
*
* Also we note here which counters have already been claimed by
* requests with explicit counter assignments.
*/
}
/*
* Walk through requests assigning them to the first PIC that is
* capable.
*/
i = starting_req;
do {
i = 0;
continue;
}
for (j = 0; j < cpc_ncounters; j++) {
(resmap & (1 << j)) == 0) {
/*
* We can assign this counter because:
*
* 1. It can count the event (ctrmap)
* 2. It hasn't been assigned yet (bitmap)
* 3. It wasn't reserved by a request (resmap)
*/
bitmap |= (1 << j);
break;
}
}
if (j == cpc_ncounters) {
return (-1);
}
i = 0;
} while (i != starting_req);
return (0);
}
{
int i;
int j;
KM_SLEEP);
sizeof (kcpc_attr_t), KM_SLEEP);
}
}
return (new);
}
int
{
}
void
{
}
/*
* Given a PCBE ID, attempt to load a matching PCBE module. The strings given
* are used to construct PCBE names, starting with the most specific,
* "pcbe.first.second.third.fourth" and ending with the least specific,
* "pcbe.first".
*
* Returns 0 if a PCBE was successfully loaded and -1 upon error.
*/
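/*
 * For example, if the four strings were "A", "B", "C" and "D", the names
 * tried would be "pcbe.A.B.C.D", then "pcbe.A.B.C", then "pcbe.A.B", and
 * finally "pcbe.A".
 */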
int
{
uint_t s[3];
s[0] = first;
s[1] = second;
s[2] = third;
return (modload_qualified("pcbe",
}
/*
 * Create one or more CPC contexts for a given CPU with the specified counter
 * event requests.
*
 * If the number of requested counter events is less than or equal to the
 * number of hardware counters on a CPU and they can all be assigned to the
 * counters on the CPU at the same time, then make one CPC context.
*
* Otherwise, multiple CPC contexts are created to allow multiplexing more
* counter events than existing counters onto the counters by iterating through
* all of the CPC contexts, programming the counters with each CPC context one
* at a time and measuring the resulting counter values. Each of the resulting
* CPC contexts contains some number of requested counter events less than or
 * equal to the number of counters on a CPU, depending on whether all the
 * counter
* events can be programmed on all the counters at the same time or not.
*
* Flags to kmem_{,z}alloc() are passed in as an argument to allow specifying
* whether memory allocation should be non-blocking or not. The code will try
* to allocate *whole* CPC contexts if possible. If there is any memory
* allocation failure during the allocations needed for a given CPC context, it
* will skip allocating that CPC context because it cannot allocate the whole
 * thing. Thus, the only time that it will end up allocating none (i.e. no CPC
* contexts whatsoever) is when it cannot even allocate *one* whole CPC context
* without a memory allocation failure occurring.
*/
int
{
int nctx;
int nctx_ptrs;
int nreqs;
return (-1);
/*
* Allocate number of sets assuming that each set contains one and only
* one counter event request for each counter on a CPU
*/
return (-2);
/*
* Fill in sets of requests
*/
nctx = 0;
while (nreqs > 0) {
int subcode;
/*
* Allocate CPC context and set for requested counter events
*/
break;
}
/*
* Determine assignment of requested counter events to specific
* counters
*/
/*
* May not be able to assign requested counter events
* to all counters since all counters may not be able
* to do all events, so only do one counter event in
* set of counter requests when this happens since at
* least one of the counters must be able to do the
* event.
*/
break;
}
#ifdef DEBUG
"assign counter event %s!\n",
#endif
reqs++;
nreqs--;
continue;
}
}
/*
* Allocate memory needed to hold requested counter event data
*/
break;
}
/*
* Configure requested counter events
*/
#ifdef DEBUG
"!kcpc_cpu_ctx_create: can't configure "
"set of counter event requests!\n");
#endif
continue;
}
/*
* Point set of counter event requests at this context and fill
* in CPC context
*/
/*
* Update requests and how many are left to be assigned to sets
*/
/*
* Increment number of CPC contexts and allocate bigger array
* for context pointers as needed
*/
nctx++;
int new_cnt;
/*
* Allocate more CPC contexts based on how many
* contexts allocated so far and how many counter
* requests left to assign
*/
break;
/*
* Copy contents of old sets into new ones
*/
nctx_ptrs * sizeof (kcpc_ctx_t *));
/*
* Free old array of context pointers and use newly
* allocated one instead now
*/
}
}
/*
* Return NULL if no CPC contexts filled in
*/
if (nctx == 0) {
*ctx_ptr_array = NULL;
*ctx_ptr_array_sz = 0;
return (-2);
}
return (nctx);
}
/*
* Return whether PCBE supports given counter event
*/
{
return (B_FALSE);
return (B_TRUE);
}
/*
* Program counters on current CPU with given CPC context
*
* If kernel is interposing on counters to measure hardware capacity and
* utilization, then unprogram counters for kernel *before* programming them
* with specified CPC context.
*
* kcpc_{program,unprogram}() may be called either directly by a thread running
* on the target CPU or from a cross-call from another CPU. To protect
* programming and unprogramming from being interrupted by cross-calls, callers
* who execute kcpc_{program,unprogram} should raise PIL to the level used by
* cross-calls.
*/
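/*
 * A caller already running on the target CPU might therefore bracket the
 * call roughly as follows (a sketch; spl_xcall() stands in for "raise PIL to
 * the cross-call level" and is an assumption here, as is the exact argument
 * list):
 *
 *	kpreempt_disable();
 *	save_spl = spl_xcall();
 *	kcpc_program(ctx, B_TRUE, B_TRUE);
 *	splx(save_spl);
 *	kpreempt_enable();
 */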
void
{
int error;
/*
* CPC context shouldn't be NULL, its CPU field should specify current
* CPU or be -1 to specify any CPU when the context is bound to a
* thread, and preemption should be disabled
*/
return;
/*
* Unprogram counters for kernel measuring hardware capacity and
* utilization
*/
if (cu_interpose == B_TRUE) {
} else {
int i;
/*
* Since cu_interpose is false, we are programming CU context.
* In general, PCBE can continue from the state saved in the
* set, but it is not very reliable, so we start again from the
* preset value.
*/
/*
* Reset the virtual counter value to the preset value.
*/
/*
* Reset PCBE to the preset value.
*/
}
}
/*
* Program counters with specified CPC context
*/
/*
	 * Denote that the counters have been programmed; this is recorded
	 * differently for thread and CPU CPC contexts.
*/
if (for_thread == B_TRUE)
else
}
/*
* Unprogram counters with given CPC context on current CPU
*
* If kernel is interposing on counters to measure hardware capacity and
* utilization, then program counters for the kernel capacity and utilization
* *after* unprogramming them for given CPC context.
*
* See the comment for kcpc_program regarding the synchronization with
* cross-calls.
*/
void
{
int error;
/*
* CPC context shouldn't be NULL, its CPU field should specify current
* CPU or be -1 to specify any CPU when the context is bound to a
* thread, and preemption should be disabled
*/
return;
}
/*
* Specified CPC context to be unprogrammed should be bound to current
* CPU or thread
*/
/*
* Stop counters
*/
pcbe_ops->pcbe_allstop();
/*
* Allow kernel to interpose on counters and program them for its own
* use to measure hardware capacity and utilization if cu_interpose
* argument is true
*/
if (cu_interpose == B_TRUE)
}
/*
* Read CPU Performance Counter (CPC) on current CPU and call specified update
* routine with data for each counter event currently programmed on CPU
*/
int
{
int i;
int retval;
/*
	 * Can't grab locks or block because we may be called inside the
	 * dispatcher
*/
return (0);
}
/*
* Read counter data from current CPU
*/
return (0);
}
/*
* Call update function with preset pointer and data for each CPC event
* request currently programmed on current CPU
*/
retval = 0;
int ret;
break;
if (ret < 0)
}
return (retval);
}
/*
* Initialize list of counter event requests
*/
{
if (nreqs < 1)
return (NULL);
return (NULL);
return (NULL);
}
return (req_list);
}
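/*
 * Taken together, the request-list interfaces are meant to be used in a
 * simple cycle: a consumer (for example the capacity/utilization code)
 * creates a list sized for the counter events it cares about, adds one entry
 * per event, hands the requests to kcpc_cpu_ctx_create(), and then either
 * resets the list for reuse or frees it when done.
 */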
/*
* Add counter event request to given list of counter event requests
*/
int
{
return (-1);
/*
* Allocate more space (if needed)
*/
return (-2);
}
/*
* Fill in request as much as possible now, but some fields will need
* to be set when request is assigned to a set.
*/
/*
	 * Keep the pointer given by the caller to pass to the update function
	 * when this counter event's data is read.
*/
return (0);
}
/*
* Reset list of CPC event requests so its space can be used for another set
* of requests
*/
int
{
/*
* Return when pointer to request list structure or request is NULL or
* when max requests is less than or equal to 0
*/
return (-1);
/*
* Zero out requests and number of requests used
*/
return (0);
}
/*
* Free given list of counter event requests
*/
int
{
return (0);
}
/*
* Create set of given counter event requests
*/
static kcpc_set_t *
{
int i;
/*
* Allocate set and assign number of requests in set and flags
*/
return (NULL);
if (nreqs < cpc_ncounters)
else
/*
* Allocate requests needed, copy requests into set, and set index into
* data for each request (which may change when we assign requested
* counter events to counters)
*/
return (NULL);
}
return (set);
}
/*
* Stop counters on current CPU.
*
* If preserve_context is true, the caller is interested in the CPU's CPC
* context and wants it to be preserved.
*
* If preserve_context is false, the caller does not need the CPU's CPC context
* to be preserved, so it is set to NULL.
*/
static void
{
/*
* Someone already stopped this context before us, so there is nothing
* to do.
*/
return;
}
/*
	 * If CU does not use counters, then clear the CPU's CPC context.
	 * If the caller requested to preserve the context, it should disable
	 * CU first, so there should be no CU context now.
*/
}
/*
* Stop counters on given CPU and set its CPC context to NULL unless
* preserve_context is true.
*/
void
{
preserve_context, 0);
}
/*
* Program the context on the current CPU
*/
static void
{
}
/*
* Program counters on given CPU
*/
void
{
}
char *
kcpc_list_attrs(void)
{
return (pcbe_ops->pcbe_list_attrs());
}
char *
{
}
kcpc_pcbe_capabilities(void)
{
}
int
kcpc_pcbe_loaded(void)
{
}