/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
#include <libcpc.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <strings.h>
#include <unistd.h>
#include <stropts.h>
#include <libintl.h>
#include <signal.h>
#include <sys/processor.h>
#include "libcpc_impl.h"
/*
* The library uses the cpc_lock field of the cpc_t struct to protect access to
 * the linked lists inside the cpc_t, and only the linked lists. It is NOT used
 * to protect a caller against its own misuse of the handle (such as, for
 * instance, destroying the same set at the same time from different threads).
*
* SIGEMT needs to be blocked while holding the lock, to prevent deadlock among
* an app holding the lock and a signal handler attempting to sample or bind.
*/
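/*
 * Illustrative sketch (not part of the original source) of the discipline the
 * comment above describes: block SIGEMT before taking the lock and restore the
 * previous mask after dropping it, so a SIGEMT handler that samples or binds
 * cannot interrupt a thread that already holds the lock and deadlock against
 * it.  The helper names below are hypothetical stand-ins for the library's
 * internal lock handling.
 */
static int
example_block_sigemt(sigset_t *saved)
{
	sigset_t block;

	(void) sigemptyset(&block);
	(void) sigaddset(&block, SIGEMT);
	return (sigprocmask(SIG_BLOCK, &block, saved));
}

static void
example_restore_sigmask(const sigset_t *saved)
{
	(void) sigprocmask(SIG_SETMASK, saved, NULL);
}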
cpc_t *
cpc_open(int ver)
{
	cpc_t *cpc;
	void (*sigsaved)();
	int error = 0;
	int i;
	int j;
if (ver != CPC_VER_CURRENT) {
/*
* v1 clients must stick to the v1 interface: cpc_version()
*/
return (NULL);
}
/*
* Call the syscall with invalid parameters. If we get ENOSYS this CPU
* has no CPC support. We need to block SIGSYS because the syscall code
* will send the signal if the system call fails to load.
*/
return (NULL);
}
return (NULL);
}
return (NULL);
}
return (NULL);
return (NULL);
return (NULL);
}
NULL) {
return (NULL);
}
NULL)
break;
}
for (j = 0; j < i; j++)
return (NULL);
}
return (cpc);
}
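/*
 * Illustrative usage sketch (not part of the original source): a client opens
 * the library with CPC_VER_CURRENT and treats a NULL return as "no usable CPC
 * support" (wrong interface version, no hardware support, or resource
 * exhaustion), then releases the handle with cpc_close() when done.
 */
static int
example_open_close(void)
{
	cpc_t *cpc;

	if ((cpc = cpc_open(CPC_VER_CURRENT)) == NULL) {
		(void) fprintf(stderr, "cpc_open failed: errno %d\n", errno);
		return (-1);
	}

	(void) printf("%u counters on %s\n", cpc_npic(cpc), cpc_cciname(cpc));

	(void) cpc_close(cpc);
	return (0);
}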
/*
* Ensure state is cleaned up:
*
* - Hardware is unbound
* - Sets are all destroyed
* - Bufs are all freed
*/
int
{
}
return (0);
}
/*
* Terminate everything that runs in pctx_run
*/
void
{
int sigblocked;
}
}
cpc_set_t *
cpc_set_create(cpc_t *cpc)
{
int sigblocked;
return (NULL);
}
return (set);
}
int
cpc_set_destroy(cpc_t *cpc, cpc_set_t *set)
{
int sigblocked;
/*
* Remove this set from the cpc handle's list of sets.
*/
break;
}
return (-1);
}
/*
* Detach from the process
*/
}
}
return (0);
}
/*ARGSUSED*/
int
cpc_set_add_request(cpc_t *cpc, cpc_set_t *set, const char *event,
    uint64_t preset, uint_t flags, uint_t nattrs, const cpc_attr_t *attrs)
{
int i;
return (-1);
}
for (i = 0; i < npics; i++)
break;
if (i == npics) {
return (-1);
}
return (-1);
}
if (nattrs != 0) {
for (i = 0; i < nattrs; i++) {
/*
* Verify that each attribute name is legal and valid.
*/
goto inval;
}
/*
* If the user requested a specific picnum, ensure that
* the pic can count the requested event.
*/
goto inval;
}
goto inval;
}
}
}
== NULL) {
return (-1);
}
for (i = 0; i < nattrs; i++) {
}
} else
/*
* Packing the counter set is expensive, so the idea is to do
* it during the construction phase of the counter set or
* whenever new requests are added to the set. It is still
* expensive, but doing it here takes it out of the typical
* sampling critical path. If it fails, free the request and
* return an error.
*/
}
return (-1);
}
return (-1);
}
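/*
 * Illustrative sketch (not part of the original source) of set construction as
 * described above: requests are added (and the set packed) up front, outside
 * the sampling path.  The event name is platform-specific; "PAPI_tot_ins" is
 * only an example, and the "picnum" attribute pins the request to counter 0.
 */
static int
example_build_set(cpc_t *cpc, cpc_set_t **setp)
{
	cpc_set_t *set;
	cpc_attr_t attr = { "picnum", 0 };	/* ca_name, ca_val */
	int index;

	if ((set = cpc_set_create(cpc)) == NULL)
		return (-1);

	index = cpc_set_add_request(cpc, set, "PAPI_tot_ins", 0,
	    CPC_COUNT_USER, 1, &attr);
	if (index == -1) {
		(void) cpc_set_destroy(cpc, set);
		return (-1);
	}

	*setp = set;
	return (index);
}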
cpc_buf_t *
cpc_buf_create(cpc_t *cpc, cpc_set_t *set)
{
int sigblocked;
return (NULL);
}
return (NULL);
return (NULL);
}
return (buf);
}
int
cpc_buf_destroy(cpc_t *cpc, cpc_buf_t *buf)
{
int sigblocked;
/*
* Remove this buf from the cpc handle's list of bufs.
*/
break;
}
return (-1);
}
return (0);
}
/*ARGSUSED*/
int
cpc_bind_curlwp(cpc_t *cpc, cpc_set_t *set, uint_t flags)
{
int ret;
char *packed;
/*
* We don't bother checking cpc_set_valid() here, because this is in the
* fast path of an app doing SIGEMT-based profiling as they restart the
* counters from their signal handler.
*/
return (-1);
}
	/*
	 * If flags is zero we can use the cached packed set,
	 * otherwise re-pack the set with the flags value. It is
	 * cheaper to pack a new set representation than to try
	 * to update the cached packed set in place.
	 */
if (flags == 0) {
} else {
return (-1);
}
}
if (ret != 0) {
if (subcode != -1)
return (-1);
}
return (ret);
}
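/*
 * Illustrative sketch (not part of the original source): bind a set to the
 * calling LWP, take one sample, and read back the counter identified by the
 * index that cpc_set_add_request() returned.  A flags value of zero lets the
 * library use the cached packed set, keeping this path cheap as the comment
 * above explains.
 */
static int
example_sample_curlwp(cpc_t *cpc, cpc_set_t *set, int index)
{
	cpc_buf_t *buf;
	uint64_t val;

	if ((buf = cpc_buf_create(cpc, set)) == NULL)
		return (-1);

	if (cpc_bind_curlwp(cpc, set, 0) != 0 ||
	    cpc_set_sample(cpc, set, buf) != 0 ||
	    cpc_buf_get(cpc, buf, index, &val) != 0) {
		(void) cpc_buf_destroy(cpc, buf);
		return (-1);
	}

	(void) printf("request %d: %llu\n", index, (unsigned long long)val);

	(void) cpc_unbind(cpc, set);
	(void) cpc_buf_destroy(cpc, buf);
	return (0);
}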
/*ARGSUSED*/
int
cpc_bind_pctx(cpc_t *cpc, pctx_t *pctx, id_t id, cpc_set_t *set, uint_t flags)
{
int ret;
/*
* cpc_bind_pctx() currently has no valid flags.
*/
return (-1);
}
return (-1);
}
}
if (ret == 0) {
} else if (subcode != -1)
return (ret);
}
/*ARGSUSED*/
int
cpc_bind_cpu(cpc_t *cpc, cpc_set_t *set, processorid_t id, uint_t flags)
{
int fd;
int error;
return (-1);
}
/*
* Prepare the packed representation of the set to send to
* kernel-land using flags == 0. Cache the packed set if it
* is not already present.
*/
return (-1);
}
}
if ((flags & CPC_FLAGS_NOPBIND) == 0)
return (-1);
}
/*
* To avoid leaking file descriptors, if we find an existing fd here we
* just close it. This is only a problem if a user attempts to bind the
* same set to different CPUs without first unbinding it.
*/
}
return (-1);
}
if (subcode != -1)
return (-1);
}
return (0);
}
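/*
 * Illustrative sketch (not part of the original source): bind a set to one
 * CPU and, as the comment above advises, unbind it before binding the same
 * set to a different CPU so the per-CPU file descriptor is released cleanly.
 */
static int
example_rebind_cpu(cpc_t *cpc, cpc_set_t *set, processorid_t from,
    processorid_t to)
{
	if (cpc_bind_cpu(cpc, set, from, 0) != 0)
		return (-1);

	/* ... sample on the first CPU ... */

	if (cpc_unbind(cpc, set) != 0)
		return (-1);

	return (cpc_bind_cpu(cpc, set, to, 0));
}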
/*ARGSUSED*/
int
{
}
/*ARGSUSED*/
int
{
}
/*ARGSUSED*/
int
cpc_unbind(cpc_t *cpc, cpc_set_t *set)
{
int ret = 0;
int error;
return (-1);
}
case CS_UNBOUND:
return (-1);
case CS_BOUND_CURLWP:
break;
case CS_BOUND_CPU:
NULL);
break;
case CS_BOUND_PCTX:
}
break;
}
if (ret != 0)
return (ret);
}
/*ARGSUSED*/
int
cpc_set_sample(cpc_t *cpc, cpc_set_t *set, cpc_buf_t *buf)
{
/*
* The following check ensures that only the most recently bound set
* can be sampled, as binding a set invalidates all other sets in the
* cpc_t.
*/
return (-1);
}
case CS_BOUND_CURLWP:
case CS_BOUND_CPU:
case CS_BOUND_PCTX:
}
return (-1);
}
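/*
 * Illustrative sketch (not part of the original source): take two samples of
 * the most recently bound set and compute the difference, which is the usual
 * way counts over an interval are derived from the accumulating buffers.
 */
static int
example_sample_delta(cpc_t *cpc, cpc_set_t *set, int index, uint64_t *delta)
{
	cpc_buf_t *before, *after, *diff;
	int ret = -1;

	before = cpc_buf_create(cpc, set);
	after = cpc_buf_create(cpc, set);
	diff = cpc_buf_create(cpc, set);
	if (before == NULL || after == NULL || diff == NULL)
		goto out;

	if (cpc_set_sample(cpc, set, before) != 0)
		goto out;
	/* ... the workload under measurement runs here ... */
	if (cpc_set_sample(cpc, set, after) != 0)
		goto out;

	cpc_buf_sub(cpc, diff, after, before);
	ret = cpc_buf_get(cpc, diff, index, delta);
out:
	if (before != NULL)
		(void) cpc_buf_destroy(cpc, before);
	if (after != NULL)
		(void) cpc_buf_destroy(cpc, after);
	if (diff != NULL)
		(void) cpc_buf_destroy(cpc, diff);
	return (ret);
}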
/*ARGSUSED*/
void
{
int i;
return;
}
/*ARGSUSED*/
void
{
int i;
return;
}
/*ARGSUSED*/
void
{
return;
}
/*ARGSUSED*/
void
{
}
/*
* Gets or sets the value of the request specified by index.
*/
/*ARGSUSED*/
int
{
return (0);
}
/*ARGSUSED*/
int
{
return (0);
}
/*ARGSUSED*/
{
}
/*ARGSUSED*/
void
{
}
/*ARGSUSED*/
{
}
/*ARGSUSED*/
void
{
}
static char *
{
int szcmd;
int size;
char *list;
if (which == CPC_LIST_ATTRS)
else
return (NULL);
return (NULL);
return (NULL);
}
return (list);
}
/*ARGSUSED*/
void
{
int i;
/*
* Need to reconstruct a temporary cpc_attr_t array for req.
*/
sizeof (cpc_attr_t))) == NULL)
return;
}
}
}
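/*
 * Illustrative sketch (not part of the original source) of a
 * cpc_walk_requests() action routine.  The callback parameters shown here
 * (index, event name, preset, flags, and the reconstructed attribute array
 * mentioned above) and the ca_name/ca_val field names are assumptions based
 * on the cpc_set_add_request() arguments and may not match the exact
 * prototype.
 */
/*ARGSUSED*/
static void
example_print_request(void *arg, int index, const char *event,
    uint64_t preset, uint_t flags, int nattrs, const cpc_attr_t *attrs)
{
	int i;

	(void) printf("request %d: %s flags 0x%x\n", index, event, flags);
	for (i = 0; i < nattrs; i++)
		(void) printf("\tattr %s = %llu\n", attrs[i].ca_name,
		    (unsigned long long)attrs[i].ca_val);
}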
/*ARGSUSED*/
static void
{
char **list;
char *p, *e;
int i;
int is_papi;
return;
return;
}
for (i = 0; i < ncounters; i++) {
goto err;
p = list[i];
*e = '\0';
/*
* Based on is_generic flag, skip appropriate
* event names.
*/
if (is_generic != is_papi) {
p = e + 1;
continue;
}
goto err;
p = e + 1;
}
if (is_generic == is_papi) {
goto err;
}
}
err:
for (i = 0; i < ncounters; i++)
}
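/*
 * Illustrative sketch (not part of the original source): a simple action
 * routine for cpc_walk_events_all(), which is invoked once per event name the
 * platform supports; the (void *arg, const char *event) callback shape is an
 * assumption based on the walker's documented usage.
 */
static void
example_print_event(void *arg, const char *event)
{
	int *count = arg;

	(*count)++;
	(void) printf("%s\n", event);
}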
/*ARGSUSED*/
void
{
}
/*ARGSUSED*/
void
{
}
/*ARGSUSED*/
static void
{
char *p;
char *e;
char *list;
int is_papi;
return;
}
return;
/*
* List now points to a comma-separated list of events supported by
* the designated pic.
*/
p = list;
*e = '\0';
/*
* Based on is_generic flag, skip appropriate
* event names.
*/
if (is_generic != is_papi) {
p = e + 1;
continue;
}
p = e + 1;
}
if (is_generic == is_papi)
}
/*ARGSUSED*/
void
{
}
/*ARGSUSED*/
void
{
}
/*ARGSUSED*/
void
{
char *p;
char *e;
char *list;
return;
/*
* Platforms with no attributes will return an empty string.
*/
if (*list == '\0')
return;
/*
* List now points to a comma-separated list of attributes supported by
* the underlying platform.
*/
p = list;
*e = '\0';
p = e + 1;
}
}
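/*
 * Illustrative sketch (not part of the original source): an action routine for
 * cpc_walk_attrs(), called once per attribute name supported by the platform
 * (and never called at all on platforms whose attribute list is empty); the
 * (void *arg, const char *attr) callback shape is an assumption.
 */
/*ARGSUSED*/
static void
example_print_attr(void *arg, const char *attr)
{
	(void) printf("attribute: %s\n", attr);
}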
/*ARGSUSED*/
int
{
}
/*ARGSUSED*/
int
{
}
/*ARGSUSED*/
{
}
/*ARGSUSED*/
{
}
const char *
cpc_cciname(cpc_t *cpc)
{
return (cpc->cpc_cciname);
}
const char *
cpc_cpuref(cpc_t *cpc)
{
return (cpc->cpc_cpuref);
}
int
{
return (0);
}
/*
* These strings may contain printf() conversion specifiers.
*/
static const char *errstr[] = {
"", /* zero slot filler */
"Unknown event\n", /* CPC_INVALID_EVENT */
"Invalid counter number\n", /* CPC_INVALID_PICNUM */
"Unknown attribute\n", /* CPC_INVALID_ATTRIBUTE */
"Attribute out of range\n", /* CPC_ATTRIBUTE_OUT_OF_RANGE */
"Hardware resource unavailable\n", /* CPC_RESOURCE_UNAVAIL */
"Counter cannot count requested event\n", /* CPC_PIC_NOT_CAPABLE */
"Invalid flags in a request\n", /* CPC_REQ_INVALID_FLAGS */
"Requests conflict with each other\n", /* CPC_CONFLICTING_REQS */
"Attribute requires the cpc_cpu privilege\n", /* CPC_ATTR_REQUIRES_PRIVILEGE */
"Couldn't bind LWP to requested processor\n", /* CPC_PBIND_FAILED */
"Hypervisor event access denied\n" /* CPC_HV_NO_ACCESS */
};
/*VARARGS3*/
static void
{
const char *str;
int error;
/*
* If subcode is -1, there is no specific description for this error.
*/
if (subcode == -1)
return;
/*
* We need to preserve errno across calls to this function to prevent it
* from being clobbered while here, or in the user's error handler.
*/
else {
/*
* If printf() conversion specifiers are added to the errstr[]
* table, this call needs to be changed to vfprintf().
*/
}
}
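/*
 * Illustrative sketch (not part of the original source): installing an error
 * handler with cpc_seterrhndlr() so the library reports bind/sample failures
 * through the application rather than writing directly to stderr.  The
 * cpc_errhndlr_t parameter list used here (function name, subcode, format
 * string, va_list) is an assumption based on how cpc_err() above dispatches
 * its message.
 */
static void
example_errfn(const char *fn, int subcode, const char *fmt, va_list ap)
{
	(void) fprintf(stderr, "libcpc error in %s (subcode %d): ",
	    fn, subcode);
	(void) vfprintf(stderr, fmt, ap);
}

static void
example_install_errfn(cpc_t *cpc)
{
	(void) cpc_seterrhndlr(cpc, example_errfn);
}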
/*
* Hook used by libpctx to alert libcpc when a pctx handle is going away.
* This is necessary to prevent libcpc from attempting a libpctx operation on a
* stale and invalid pctx_t handle. Since pctx_t's are cached by libcpc, we need
* to be notified when they go away.
*/
static void
{
int sigblocked;
}
/*
* Check that the set is valid; if so it will be in the cpc handle's
* list of sets. The lock protects the list of sets, but not the set
* itself.
*/
static int
{
int sigblocked;
break;
return (-1);
return (0);
}
static int
{
return (ret);
}
static int
{
return (ret);
}
static void
{
if (sigblocked == 0)
}
struct priv {
const char *name;
int found;
};
/*ARGSUSED*/
static void
{
}
static void
{
}
static int
{
char *end_ev;
int err;
return (1);
return (1);
/*
* Before assuming this is an invalid event, see if we have been given
* a raw event code.
* Check the second argument of strtol() to ensure invalid events
 * beginning with a number do not go through.
*/
errno = 0;
/*
* Success - this is a valid raw code in hex, decimal, or octal.
*/
return (1);
}
return (0);
}
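/*
 * Illustrative sketch (not part of the original source) of the raw-event-code
 * check described above: strtol()'s end pointer must consume the entire
 * string, so event names that merely begin with digits are not mistaken for
 * numeric codes.
 */
static int
example_is_raw_code(const char *ev)
{
	char *end;
	long code;

	errno = 0;
	code = strtol(ev, &end, 0);	/* base 0: hex, decimal, or octal */
	return (errno == 0 && end != ev && *end == '\0' && code >= 0);
}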
static int
{
}