/* lgrpsys.c — revision 37294019745c29c42448424cc3bce79eb1c6a785 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
* Copyright 2015 Joyent, Inc.
*/
/*
* lgroup system calls
*/
#include <sys/lgrp_user.h>
#include <sys/sysmacros.h>
/* definitions for mi_validity */
#define VALID_ADDR 1
#define VALID_REQ 2
/*
 * run through the given number of addresses and requests and return the
 * corresponding memory information for each address
 *
 * NOTE(review): this block is visibly truncated.  The function signature
 * line (name/parameters), the copyin/copyout calls, the in_array/req_array/
 * out_array/val_array allocations, and many statement bodies are missing
 * from this view.  The surviving skeleton shows the intended flow: validate
 * addr_count, copy in the request arrays, check privileges per request type,
 * look up each address, and fill out_array/val_array.  Recover the complete
 * source before editing; do not infer exact behavior from this fragment.
 */
static int
{
int i, j, out_idx, info_count;
int ret = 0;
#if defined(_SYSCALL32_IMPL)
#endif
/*
 * Make sure that there is at least one address to translate and
 * limit how many virtual addresses the kernel can do per call
 */
if (addr_count < 1)
else if (addr_count > MAX_MEMINFO_CNT)
/* NOTE(review): the bodies of both branches above are missing here */
if (get_udatamodel() == DATAMODEL_NATIVE) {
}
#if defined(_SYSCALL32_IMPL)
else {
}
#endif
/*
 * all the input parameters have been copied in:-
 * addr_count - number of input addresses
 * minfo.mi_inaddr - array of input addresses
 * minfo.mi_info_req - array of types of information requested
 * minfo.mi_info_count - no. of pieces of info requested for each addr
 * minfo.mi_outdata - array into which the results are placed
 * minfo.mi_validity - array containing bitwise result codes; 0th bit
 * evaluates validity of corresponding input
 * address, 1st bit validity of response to first
 * member of info_req, etc.
 */
/* make sure mi_info_count is within limit */
/*
 * allocate buffer in_array for the input addresses and copy them in
 */
}
/*
 * allocate buffer req_array for the input info_reqs and copy them in
 */
}
/*
 * Validate privs for each req.
 * VLGRP and VPAGESIZE are unprivileged; all other request types
 * require secpolicy_meminfo() to succeed.
 */
for (i = 0; i < info_count; i++) {
switch (req_array[i] & MEMINFO_MASK) {
case MEMINFO_VLGRP:
case MEMINFO_VPAGESIZE:
break;
default:
if (secpolicy_meminfo(CRED()) != 0) {
}
break;
}
}
/*
 * allocate buffer out_array which holds the results and will have
 * to be copied out later
 */
/*
 * allocate buffer val_array which holds the validity bits and will
 * have to be copied out later
 */
/* find the corresponding lgroup for each physical address */
for (i = 0; i < addr_count; i++) {
if (lgrp) {
} else {
val_array[i] = 0;
}
}
} else {
/* get the corresponding memory info for each virtual address */
info_count) {
val_array[i] = 0;
continue;
}
val_array[i] = VALID_ADDR;
if (pfn != PFN_INVALID) {
(addr & PAGEOFFSET));
for (j = 0; j < info_count; j++) {
switch (req_array[j] & MEMINFO_MASK) {
case MEMINFO_VPHYSICAL:
/*
 * return the physical address
 * corresponding to the input
 * virtual address
 */
break;
case MEMINFO_VLGRP:
/*
 * return the lgroup of physical
 * page corresponding to the
 * input virtual address
 */
if (lgrp) {
val_array[i] |=
VALID_REQ << j;
}
break;
case MEMINFO_VPAGESIZE:
/*
 * return the size of physical
 * page corresponding to the
 * input virtual address
 */
if (pgsz != -1) {
pgsz;
val_array[i] |=
VALID_REQ << j;
}
break;
case MEMINFO_VREPLCNT:
/*
 * for future use:-
 * return the no. replicated
 * physical pages corresponding
 * to the input virtual address,
 * so it is always 0 at the
 * moment
 */
break;
case MEMINFO_VREPL:
/*
 * for future use:-
 * return the nth physical
 * replica of the specified
 * virtual address
 */
break;
case MEMINFO_VREPL_LGRP:
/*
 * for future use:-
 * return the lgroup of nth
 * physical replica of the
 * specified virtual address
 */
break;
case MEMINFO_PLGRP:
/*
 * this is for physical address
 * only, shouldn't mix with
 * virtual address
 */
break;
default:
break;
}
}
}
}
}
}
/* copy out the results and validity bits and free the buffers */
return (ret);
}
/*
 * Initialize lgroup affinities for thread
 *
 * NOTE(review): truncated -- the signature line (name/parameters) and the
 * statement controlled by "if (bufaddr)" are missing from this view.
 */
void
{
if (bufaddr)
}
/*
 * Free lgroup affinities for thread and set to NULL
 * just in case thread gets recycled
 *
 * NOTE(review): truncated -- signature line and body statements are missing;
 * the second closing brace suggests a dropped conditional block.
 */
void
{
}
}
/*
 * Find LWP with given ID in specified process and get its affinity for
 * specified lgroup
 *
 * NOTE(review): truncated -- the signature line, the loop's while clause,
 * the LWP-ID match test, and several statement bodies are missing from this
 * view.  What survives shows a walk of p->p_tlist under thread_lock(),
 * returning LGRP_AFF_NONE when no matching LWP is found.
 */
{
int found;
kthread_t *t;
aff = LGRP_AFF_NONE;
found = 0;
t = p->p_tlist;
/*
 * The process may be executing in proc_exit() and its p->p_list may be
 * already NULL.
 */
if (t == NULL)
do {
thread_lock(t);
/*
 * Check to see whether caller has permission to set
 * affinity for LWP
 */
thread_unlock(t);
}
if (t->t_lgrp_affinity)
thread_unlock(t);
found = 1;
break;
}
if (!found)
return (aff);
}
/*
 * Get lgroup affinity for given LWP
 *
 * NOTE(review): truncated -- the signature line, the copyin of user
 * arguments, the lgroup validity checks' bodies, and several branch bodies
 * (including the P_LWPID/P_PID lookups) are missing from this view.
 */
{
proc_t *p;
kthread_t *t;
/*
 * Copyin arguments
 */
/*
 * Check for invalid lgroup
 */
/*
 * Check for existing lgroup
 */
if (lgrp > lgrp_alloc_max)
/*
 * Get lgroup affinity for given LWP or process
 */
switch (idtype) {
case P_LWPID:
/*
 * LWP in current process
 */
p = curproc;
mutex_enter(&p->p_lock);
else { /* current thread */
aff = LGRP_AFF_NONE;
t = curthread;
thread_lock(t);
if (t->t_lgrp_affinity)
thread_unlock(t);
}
mutex_exit(&p->p_lock);
break;
case P_PID:
/*
 * Process
 */
p = curproc;
else {
if (p == NULL) {
}
}
mutex_enter(&p->p_lock);
mutex_exit(&p->p_lock);
break;
default:
break;
}
return (aff);
}
/*
 * Find lgroup for which this thread has most affinity in specified partition
 * starting from home lgroup unless specified starting lgroup is preferred
 *
 * NOTE(review): truncated -- the signature line, local declarations
 * (best_lpl, best_aff, lgrpid, affs usage), the CPU-bound search body, and
 * the main loop's comparison logic are missing from this view.  Returns
 * NULL when the thread has no affinities or no lgroup in the pset has any
 * affinity.
 */
lpl_t *
{
if (t->t_lgrp_affinity == NULL)
return (NULL);
affs = t->t_lgrp_affinity;
/*
 * Thread bound to CPU
 */
if (t->t_bind_cpu != PBIND_NONE) {
/*
 * Find which lpl has most affinity among leaf lpl directly
 * containing CPU and its ancestor lpls
 */
}
}
return (best_lpl);
}
/*
 * Start searching from home lgroup unless given starting lgroup is
 * preferred or home lgroup isn't in given pset. Use root lgroup as
 * starting point if both home and starting lgroups aren't in given
 * pset.
 */
else
do {
/*
 * Skip any lgroups that don't have CPU resources
 * in this processor set.
 */
if (++lgrpid > lgrp_alloc_max)
lgrpid = 0; /* wrap the search */
continue;
}
/*
 * Find lgroup with most affinity
 */
}
if (++lgrpid > lgrp_alloc_max)
lgrpid = 0; /* wrap the search */
/*
 * No lgroup (in this pset) with any affinity
 */
if (best_aff == LGRP_AFF_NONE)
return (NULL);
return (best_lpl);
}
/*
 * Set thread's affinity for given lgroup
 *
 * NOTE(review): truncated -- the signature line, the permission-check body,
 * the affinity-store statement, and the best-lgroup/rehome logic are missing
 * from this view.  What survives shows the t_lgrp_affinity buffer being
 * lazily installed from *aff_buf when the thread has none yet, and an early
 * return (0) when clearing affinity on a thread with no affinity buffer.
 */
int
{
int retval;
retval = 0;
thread_lock(t);
/*
 * Check to see whether caller has permission to set affinity for
 * thread
 */
thread_unlock(t);
}
if (t->t_lgrp_affinity == NULL) {
if (aff == LGRP_AFF_NONE) {
thread_unlock(t);
return (0);
}
t->t_lgrp_affinity = *aff_buf;
}
affs = t->t_lgrp_affinity;
/*
 * Find lgroup for which thread has most affinity,
 * starting with lgroup for which affinity being set
 */
/*
 * Rehome if found lgroup with more affinity than home or lgroup for
 * which affinity is being set has same affinity as home
 */
}
thread_unlock(t);
return (retval);
}
/*
 * Set process' affinity for specified lgroup
 *
 * NOTE(review): truncated -- the signature line, the per-thread
 * lgrp_affinity_set_thread() call, and the do/while termination clause are
 * missing from this view.  The surviving skeleton walks p->p_tlist, handing
 * each thread the next preallocated affinity buffer from aff_buf_array.
 */
int
{
int err = 0;
int i;
int retval;
kthread_t *t;
i = 0;
t = p->p_tlist;
if (t != NULL) {
do {
/*
 * Set lgroup affinity for thread
 */
buf = aff_buf_array[i];
/*
 * Advance pointer to next buffer
 */
aff_buf_array[i] = NULL;
i++;
}
}
return (err);
}
/*
 * Set LWP's or process' affinity for specified lgroup
 *
 * When setting affinities, pidlock, process p_lock, and thread_lock()
 * need to be held in that order to protect target thread's pset, process,
 * process contents, and thread contents. thread_lock() does splhigh(),
 * so it ends up having similiar effect as kpreempt_disable(), so it will
 * protect calls to lgrp_move_thread() and lgrp_choose() from pset changes.
 *
 * NOTE(review): truncated -- the signature line, argument copyin, many
 * branch bodies (error returns, process lookup, kmem_alloc/kmem_free call
 * sites), and loop termination clauses are missing from this view.  The
 * surviving skeleton shows the P_LWPID path (preallocate one affinity
 * buffer, find the LWP, set affinity, free the buffer if unused) and the
 * P_PID retry loop (count threads, preallocate per-thread buffers without
 * holding locks, re-lookup the process, verify the thread count, then set
 * affinity for every thread and free leftovers).
 */
int
{
int nthreads;
proc_t *p;
int retval;
/*
 * Copyin arguments
 */
/*
 * Check for invalid lgroup
 */
/*
 * Check for existing lgroup
 */
if (lgrp > lgrp_alloc_max)
/*
 * Check for legal affinity
 */
aff != LGRP_AFF_STRONG)
/*
 * Must be process or LWP ID
 */
/*
 * Set given LWP's or process' affinity for specified lgroup
 */
switch (idtype) {
case P_LWPID:
/*
 * Allocate memory for thread's lgroup affinities
 * ahead of time w/o holding locks
 */
KM_SLEEP);
p = curproc;
/*
 * Set affinity for thread
 */
mutex_enter(&p->p_lock);
&aff_buf);
} else { /* other thread */
int found = 0;
kthread_t *t;
t = p->p_tlist;
do {
found = 1;
break;
}
if (!found)
}
mutex_exit(&p->p_lock);
/*
 * Free memory for lgroup affinities,
 * since thread didn't need it
 */
if (aff_buf)
nlgrpsmax * sizeof (lgrp_affinity_t));
break;
case P_PID:
do {
int i;
/*
 * Get process
 */
p = curproc;
else
if (p == NULL) {
}
/*
 * Get number of threads in process
 *
 * NOTE: Only care about user processes,
 * so p_lwpcnt should be number of threads.
 */
mutex_enter(&p->p_lock);
mutex_exit(&p->p_lock);
if (nthreads < 1)
/*
 * Preallocate memory for lgroup affinities for
 * each thread in process now to avoid holding
 * any locks. Allocate an array to hold a buffer
 * for each thread.
 */
sizeof (lgrp_affinity_t *), KM_SLEEP);
for (i = 0; i < nthreads; i++)
/*
 * Get process again since dropped locks to allocate
 * memory (except current process)
 */
/*
 * Process went away after we dropped locks and before
 * reacquiring them, so drop locks, free memory, and
 * return.
 */
if (p == NULL) {
for (i = 0; i < nthreads; i++)
nthreads * sizeof (lgrp_affinity_t *));
}
mutex_enter(&p->p_lock);
/*
 * See whether number of threads is same
 * If not, drop locks, free memory, and try again
 */
mutex_exit(&p->p_lock);
for (i = 0; i < nthreads; i++)
nthreads * sizeof (lgrp_affinity_t *));
continue;
}
/*
 * Set lgroup affinity for threads in process
 */
mutex_exit(&p->p_lock);
/*
 * Free any leftover memory, since some threads may
 * have already allocated memory and set lgroup
 * affinities before
 */
for (i = 0; i < nthreads; i++)
if (aff_buf_array[i] != NULL)
nthreads * sizeof (lgrp_affinity_t *));
break;
break;
default:
break;
}
return (retval);
}
/*
 * Return the latest generation number for the lgroup hierarchy
 * with the given view
 *
 * NOTE(review): truncated -- the signature line, the declaration and
 * assignments of "gen", and both branch bodies are missing from this view.
 */
{
/*
 * Determine generation number for given view
 */
if (view == LGRP_VIEW_OS)
/*
 * Return generation number of lgroup hierarchy for OS view
 */
else {
/*
 * For caller's view, use generation numbers for lgroup
 * hierarchy and caller's pset
 * NOTE: Caller needs to check for change in pset ID
 */
}
return (gen);
}
/*
 * NOTE(review): truncated -- this appears to be a "get home lgroup of given
 * thread" helper (it returns lgrp_home_id(t)); its signature line and the
 * body of the permission check are missing from this view.
 */
{
thread_lock(t);
/*
 * Check to see whether caller has permission to set affinity for
 * thread
 */
thread_unlock(t);
}
home = lgrp_home_id(t);
thread_unlock(t);
return (home);
}
/*
 * Get home lgroup of given process or thread
 *
 * NOTE(review): truncated -- the signature line, the LWP/process lookup
 * calls, and several branch bodies are missing from this view.  The
 * surviving skeleton dispatches on idtype (P_LWPID / P_PID) and delegates
 * to the lgrp_home_thread() helper above.
 */
{
proc_t *p;
kthread_t *t;
/*
 * Get home lgroup of given LWP or process
 */
switch (idtype) {
case P_LWPID:
p = curproc;
/*
 * Set affinity for thread
 */
mutex_enter(&p->p_lock);
} else { /* other thread */
int found = 0;
t = p->p_tlist;
do {
retval = lgrp_home_thread(t);
found = 1;
break;
}
if (!found)
}
mutex_exit(&p->p_lock);
break;
case P_PID:
/*
 * Get process
 */
p = curproc;
else
if (p == NULL) {
}
mutex_enter(&p->p_lock);
t = p->p_tlist;
if (t == NULL)
else
retval = lgrp_home_thread(t);
mutex_exit(&p->p_lock);
break;
default:
break;
}
return (retval);
}
/*
 * Return latency between "from" and "to" lgroups
 *
 * This latency number can only be used for relative comparison
 * between lgroups on the running system, cannot be used across platforms,
 * and may not reflect the actual latency. It is platform and implementation
 * specific, so platform gets to decide its value. It would be nice if the
 * number was at least proportional to make comparisons more meaningful though.
 *
 * NOTE(review): truncated -- the signature line, lgroup validity checks,
 * the same-lgroup and leaf-lgroup latency computations, and the inner
 * latency lookup in the max-latency loop are missing from this view.
 */
int
{
int i;
int latency;
int latency_max;
}
/*
 * Get latency for same lgroup
 */
return (latency);
}
/*
 * Get latency between leaf lgroups
 */
to_lgrp->lgrp_plathand));
/*
 * Determine max latency between resources in two lgroups
 */
latency_max = 0;
for (i = 0; i <= lgrp_alloc_max; i++) {
int j;
from_rsrc = lgrp_table[i];
if (!LGRP_EXISTS(from_rsrc) ||
continue;
for (j = 0; j <= lgrp_alloc_max; j++) {
to_rsrc = lgrp_table[j];
if (!LGRP_EXISTS(to_rsrc) ||
j) == 0)
continue;
if (latency > latency_max)
}
}
return (latency_max);
}
/*
 * Return lgroup interface version number
 * 0 - none
 * 1 - original
 * 2 - lgrp_latency_cookie() and lgrp_resources() added
 *
 * Fix: the range check before the first return was missing, so the
 * function unconditionally returned LGRP_VER_NONE and the remaining code
 * was unreachable.  Restore the guard so that only out-of-range requests
 * get LGRP_VER_NONE.
 */
int
lgrp_version(int version)
{
	/*
	 * Return LGRP_VER_NONE when requested version isn't supported
	 */
	if (version < LGRP_VER_NONE || version > LGRP_VER_CURRENT)
		return (LGRP_VER_NONE);

	/*
	 * Return current version when LGRP_VER_NONE passed in
	 */
	if (version == LGRP_VER_NONE)
		return (LGRP_VER_CURRENT);

	/*
	 * Otherwise, return supported version.
	 */
	return (version);
}
/*
* Snapshot of lgroup hieararchy
*
* One snapshot is kept and is based on the kernel's native data model, so
* a 32-bit snapshot is kept for the 32-bit kernel and a 64-bit one for the
* 64-bit kernel. If a 32-bit user wants a snapshot from the 64-bit kernel,
* the kernel generates a 32-bit snapshot from the data in its 64-bit snapshot.
*
* The format is defined by lgroup snapshot header and the layout of
* the snapshot in memory is as follows:
* 1) lgroup snapshot header
* - specifies format of snapshot
* - defined by lgrp_snapshot_header_t
* 2) lgroup info array
* - contains information about each lgroup
* - one element for each lgroup
* - each element is defined by lgrp_info_t
* 3) lgroup CPU ID array
* - contains list (array) of CPU IDs for each lgroup
* - lgrp_info_t points into array and specifies how many CPUs belong to
* given lgroup
* 4) lgroup parents array
* - contains lgroup bitmask of parents for each lgroup
* - bitmask is an array of unsigned longs and its size depends on nlgrpsmax
* 5) lgroup children array
* - contains lgroup bitmask of children for each lgroup
* - bitmask is an array of unsigned longs and its size depends on nlgrpsmax
* 6) lgroup resources array
* - contains lgroup bitmask of resources for each lgroup
* - bitmask is an array of unsigned longs and its size depends on nlgrpsmax
* 7) lgroup latency table
* - contains latency from each lgroup to each of other lgroups
*
* NOTE: Must use nlgrpsmax for per lgroup data structures because lgroups
* may be sparsely allocated.
*/
/*
 * Take a snapshot of lgroup hierarchy and return size of buffer
 * needed to hold snapshot
 *
 * NOTE(review): truncated -- many declarations (model, bufsize, lgrp_snap,
 * lgrp_info, bitmask_size, ...), the buffer-size arithmetic left-hand
 * sides, the kmem allocation calls, and most snapshot-fill statements are
 * missing from this view.  The surviving skeleton shows: (1) an early
 * 32-bit size-return path when an up-to-date snapshot exists, (2) a retry
 * loop that sizes and allocates the snapshot buffer until the lgroup
 * generation number stops changing, (3) per-lgroup fill of info, parent/
 * child/resource bitmasks, CPU IDs, and memory sizes, (4) the latency
 * table fill, and (5) a trailing 32-bit size computation for ILP32
 * callers.
 */
static int
lgrp_snapshot(void)
{
int cpu_index;
int i;
int j;
int **lgrp_lats;
int snap_ncpus;
int snap_nlgrps;
int snap_nlgrpsmax;
#ifdef _SYSCALL32_IMPL
/*
 * Have up-to-date snapshot, so check to see whether caller is 32-bit
 * program and need to return size of 32-bit snapshot now.
 */
model = get_udatamodel();
/*
 * Calculate size of buffer needed for 32-bit snapshot,
 * rounding up size of each object to allow for alignment
 * of next object in buffer.
 */
sizeof (caddr32_t));
sizeof (processorid_t));
sizeof (ulong_t));
/*
 * lgroup bitmasks needed for parents, children, and resources
 * for each lgroup and pset lgroup set
 */
/*
 * Size of latency table and buffer
 */
snap_nlgrpsmax * snap_nlgrpsmax * sizeof (int);
return (bufsize);
}
#endif /* _SYSCALL32_IMPL */
/*
 * Check whether snapshot is up-to-date
 * Free it and take another one if not
 */
if (lgrp_snap) {
}
/*
 * Allocate memory for snapshot
 * w/o holding cpu_lock while waiting for memory
 */
int old_generation;
/*
 * Take snapshot of lgroup generation number
 * and configuration size dependent information
 * NOTE: Only count number of online CPUs,
 * since only online CPUs appear in lgroups.
 */
/*
 * Calculate size of buffer needed for snapshot,
 * rounding up size of each object to allow for alignment
 * of next object in buffer.
 */
sizeof (void *));
sizeof (processorid_t));
sizeof (ulong_t));
/*
 * lgroup bitmasks needed for pset lgroup set and parents,
 * children, and resource sets for each lgroup
 */
/*
 * Size of latency table and buffer
 */
lats_size = snap_nlgrpsmax * sizeof (int *) +
snap_nlgrpsmax * snap_nlgrpsmax * sizeof (int);
/*
 * Allocate memory for buffer
 */
/*
 * Check whether generation number has changed
 */
if (lgrp_gen == old_generation)
break; /* hasn't change, so done. */
/*
 * Generation number changed, so free memory and try again.
 */
}
/*
 * Fill in lgroup snapshot header
 * (including pointers to tables of lgroup info, CPU IDs, and parents
 * and children)
 */
/*
 * XXX For now, liblgrp only needs to know whether the hierarchy
 * XXX only has one level or not
 */
if (snap_nlgrps == 1)
else
bitmask_size));
bitmask_size));
/*
 * Fill in lgroup information
 */
cpu_index = 0;
for (i = 0; i < snap_nlgrpsmax; i++) {
int cpu_count;
int k;
lgrp = lgrp_table[i];
if (!LGRP_EXISTS(lgrp)) {
continue;
}
lgrp_info[i].info_lgrpid = i;
/*
 * Fill in parents, children, and lgroup resources
 */
lgrp_info[i].info_parents =
if (lgrp->lgrp_parent)
lgrp_info[i].info_children =
for (j = 0; j < snap_nlgrpsmax; j++)
(i * LGRP_RSRC_COUNT * bitmask_size));
for (j = 0; j < LGRP_RSRC_COUNT; j++) {
(j * bitmask_size));
for (k = 0; k < snap_nlgrpsmax; k++)
}
/*
 * Fill in CPU IDs
 */
cpu_count = 0;
do {
cpu_index++;
cpu_count++;
}
/*
 * Fill in memory sizes for lgroups that directly contain
 * memory
 */
lgrp_info[i].info_mem_free =
}
/*
 * Fill in latency table and buffer
 */
sizeof (int *) + i * snap_nlgrpsmax * sizeof (int));
for (j = 0; j < snap_nlgrpsmax; j++) {
to = lgrp_table[j];
if (!LGRP_EXISTS(to))
continue;
}
}
#ifdef _SYSCALL32_IMPL
/*
 * Check to see whether caller is 32-bit program and need to return
 * May not have been able to do this earlier if snapshot was out of
 * date or didn't exist yet.
 */
if (model == DATAMODEL_ILP32) {
/*
 * Calculate size of buffer needed for 32-bit snapshot,
 * rounding up size of each object to allow for alignment
 * of next object in buffer.
 */
sizeof (caddr32_t));
sizeof (processorid_t));
sizeof (ulong_t));
1) * bitmask_size;
/*
 * Size of latency table and buffer
 */
(snap_nlgrpsmax * snap_nlgrpsmax * sizeof (int));
return (bufsize);
}
#endif /* _SYSCALL32_IMPL */
}
/*
 * Copy snapshot into given user buffer, fix up any pointers in buffer to point
 * into user instead of kernel address space, and return size of buffer
 * needed to hold snapshot
 *
 * NOTE(review): truncated -- the signature line, most local declarations
 * (user_* pointers, bitmask_size, snap_hdr, ...), the copyout calls'
 * left-hand sides, and the pointer-fixup assignments are missing from this
 * view.  The surviving skeleton shows the overall flow: validate the
 * caller's buffer size, refresh the pset lgroup bitmask, compute user-space
 * pointers for each snapshot section, copy out the header with fixed-up
 * pointers, then build kernel-side copies of the info array and latency
 * table with user-space pointers and copy those out.
 */
static int
{
int cpu_index;
int i;
int retval;
int snap_ncpus;
int snap_nlgrpsmax;
int **user_lats;
int **user_lats_buffer;
return (0);
/*
 * User needs to try getting size of buffer again
 * because given buffer size is too small.
 * The lgroup hierarchy may have changed after they asked for the size
 * but before the snapshot was taken.
 */
/*
 * Fill in lgrpset now because caller may have change psets
 */
for (i = 0; i < snap_nlgrpsmax; i++) {
i)) {
}
}
/*
 * Copy lgroup snapshot (snapshot header, lgroup info, and CPU IDs)
 * into user buffer all at once
 */
/*
 * Round up sizes of lgroup snapshot header and info for alignment
 */
sizeof (void *));
sizeof (processorid_t));
sizeof (ulong_t));
/*
 * Calculate pointers into user buffer for lgroup snapshot header,
 * info, and CPU IDs
 */
(snap_nlgrpsmax * bitmask_size));
(snap_nlgrpsmax * bitmask_size));
/*
 * Copyout magic number (ie. pointer to beginning of buffer)
 */
/*
 * Fix up pointers in user buffer to point into user buffer
 * not kernel snapshot
 */
sizeof (user_cpuids)) != 0)
sizeof (user_lgrpset)) != 0)
sizeof (user_parents)) != 0)
sizeof (user_children)) != 0)
sizeof (user_rsets)) != 0)
sizeof (user_lats)) != 0)
/*
 * Make copies of lgroup info and latency table, fix up pointers,
 * and then copy them into user buffer
 */
if (user_info_buffer == NULL)
if (user_lats_buffer == NULL) {
}
cpu_index = 0;
for (i = 0; i < snap_nlgrpsmax; i++) {
/*
 * Skip non-existent lgroups
 */
continue;
/*
 * Update free memory size since it changes frequently
 * Only do so for lgroups directly containing memory
 *
 * NOTE: This must be done before changing the pointers to
 * point into user space since we need to dereference
 * lgroup resource set
 */
/*
 * Fix up pointers to parents, children, resources, and
 * latencies
 */
(i * LGRP_RSRC_COUNT * bitmask_size));
(snap_nlgrpsmax * sizeof (int *)) + (i * snap_nlgrpsmax *
sizeof (int)));
/*
 * Fix up pointer to CPU IDs
 */
if (user_info_buffer[i].info_ncpus == 0) {
continue;
}
}
/*
 * Copy lgroup info and latency table with pointers fixed up to point
 * into user buffer out to user buffer now
 */
sizeof (int *)) != 0)
return (retval);
}
#ifdef _SYSCALL32_IMPL
/*
 * Make 32-bit copy of snapshot, fix up any pointers in buffer to point
 * into user instead of kernel address space, copy 32-bit snapshot into
 * given user buffer, and return size of buffer needed to hold snapshot
 *
 * NOTE(review): truncated -- the signature line, most local declarations
 * (lgrp_snap32, lgrp_info32, lgrp_set32, snap_size, bitmask_size, ...),
 * the allocation calls, the header fill, most assignment left-hand sides,
 * and the final copyout are missing from this view.  The surviving
 * skeleton mirrors lgrp_snapshot_copy() but converts each section to the
 * 32-bit (caddr32_t-based) layout before copying to the user's buffer.
 */
static int
{
int cpu_index;
int i;
int j;
int **lgrp_lats32_kernel;
int snap_ncpus;
int snap_nlgrpsmax;
return (0);
/*
 * Calculate size of buffer needed for 32-bit snapshot,
 * rounding up size of each object to allow for alignment
 * of next object in buffer.
 */
sizeof (caddr32_t));
sizeof (processorid_t));
sizeof (ulong_t));
(int)bitmask_size, sizeof (caddr32_t));
/*
 * Size of latency table and buffer
 */
(snap_nlgrpsmax * snap_nlgrpsmax * sizeof (int));
return (snap_size);
}
/*
 * User needs to try getting size of buffer again
 * because given buffer size is too small.
 * The lgroup hierarchy may have changed after they asked for the size
 * but before the snapshot was taken.
 */
/*
 * Make 32-bit copy of snapshot, fix up pointers to point into user
 * buffer not kernel, and then copy whole thing into user buffer
 */
if (lgrp_snap32 == NULL)
/*
 * Calculate pointers into 32-bit copy of snapshot
 * for lgroup info, CPU IDs, pset lgroup bitmask, parents, children,
 * resources, and latency table and buffer
 */
/*
 * Make temporary lgroup latency table of pointers for kernel to use
 * to fill in rows of table with latencies from each lgroup
 */
if (lgrp_lats32_kernel == NULL) {
}
/*
 * Fill in 32-bit lgroup snapshot header
 * (with pointers into user's buffer for lgroup info, CPU IDs,
 * bit masks, and latencies)
 */
/*
 * Fill in lgrpset now because caller may have change psets
 */
for (i = 0; i < snap_nlgrpsmax; i++) {
i)) {
BT_SET32(lgrp_set32, i);
}
}
/*
 * Fill in 32-bit copy of lgroup info and fix up pointers
 * to point into user's buffer instead of kernel's
 */
cpu_index = 0;
for (i = 0; i < snap_nlgrpsmax; i++) {
/*
 * Skip non-existent lgroups
 */
continue;
}
/*
 * Fill in parents, children, lgroup resource set, and
 * latencies from snapshot
 */
i * bitmask_size);
i * bitmask_size);
(i * LGRP_RSRC_COUNT * bitmask_size));
sizeof (int));
for (j = 0; j < snap_nlgrpsmax; j++) {
int k;
for (k = 0; k < LGRP_RSRC_COUNT; k++) {
k * bitmask_size);
}
lgrp_lats32_kernel[i][j] =
lgrp_snap->ss_latencies[i][j];
}
/*
 * Fix up pointer to latency buffer
 */
sizeof (int);
/*
 * Fix up pointers for parents, children, and resources
 */
(i * bitmask_size);
(i * bitmask_size);
(i * LGRP_RSRC_COUNT * bitmask_size);
/*
 * Fill in memory and CPU info
 * Only fill in memory for lgroups directly containing memory
 */
}
if (lgrp_info32[i].info_ncpus == 0) {
lgrp_info32[i].info_cpuids = 0;
continue;
}
/*
 * Fix up pointer for CPU IDs
 */
(cpu_index * sizeof (processorid_t));
}
/*
 * Copy lgroup CPU IDs into 32-bit snapshot
 * before copying it out into user's buffer
 */
/*
 * Copy 32-bit lgroup snapshot into user's buffer all at once
 */
}
return (snap_size);
}
#endif /* _SYSCALL32_IMPL */
/*
 * NOTE(review): truncated -- this is the lgrpsys system-call entry point,
 * but its signature line (subcode/ia/ap arguments), the declarations of
 * bufsize and the case bodies for AFFINITY_GET/SET, HOME, LATENCY, and
 * MEMINFO are missing from this view.  The surviving skeleton dispatches
 * on subcode to the handlers defined above (lgrp_generation(),
 * lgrp_version(), lgrp_snapshot(), and the snapshot copy routines).
 */
int
{
int latency;
switch (subcode) {
case LGRP_SYS_AFFINITY_GET:
case LGRP_SYS_AFFINITY_SET:
case LGRP_SYS_GENERATION:
return (lgrp_generation(ia));
case LGRP_SYS_HOME:
case LGRP_SYS_LATENCY:
return (latency);
case LGRP_SYS_MEMINFO:
case LGRP_SYS_VERSION:
return (lgrp_version(ia));
case LGRP_SYS_SNAPSHOT:
bufsize = lgrp_snapshot();
if (get_udatamodel() == DATAMODEL_NATIVE)
#ifdef _SYSCALL32_IMPL
else
#endif /* _SYSCALL32_IMPL */
}
return (bufsize);
default:
break;
}
}