idn_smr.c revision 07d06da50d310a325b457d6330165aebab1e0064
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
* Inter-Domain Network
*
* Shared Memory Region (SMR) supporting code.
*/
#include <sys/machparam.h>
#include <sys/machlock.h>
#include <vm/hat_sfmmu.h>
#include <sys/vm_machparam.h>
#ifdef DEBUG
{ \
int _dio; \
">>>>> file %s, line %d: domain %d, dio = %d", \
} \
}
#else
#endif /* DEBUG */
static int smr_slabwaiter_register(int domid);
static void smr_slab_reap_global();
/*
* Can only be called by the master. Allocate a slab from the
* local pool representing the SMR, on behalf of the given
* domain. Slab is either being requested for use by the
* local domain (i.e. domid == idn.localid), or it's being
* allocated to give to a remote domain which requested one.
* In the case of allocating on behalf of a remote domain,
* smr_slab_t structure is used simply to manage ownership.
*
* Returns: smr_slaballoc_wait
* (EINVAL, ETIMEDOUT)
* smr_slabwaiter_unregister
* (0, EINVAL, EBUSY, ENOMEM)
* ENOLCK
*/
static int
{
int serrno = 0;
int nwait;
smr_slab_t *sp;
/*
* Only the master can make local allocations.
*/
/*
* Register myself with the waiting list.
*/
if (nwait > 1) {
/*
* XXX - old comment?
* Need to drop the read lock _after_ registering
* ourselves with the potential wait list for this allocation.
* Although this allocation is not a remote one, we could
* still have multiple threads on the master trying to
* satisfy (allocate) request on behalf of a remote domain.
*/
/*
* Somebody is already in the process of satisfying
* the allocation request for this respective
* domain. All we need to do is wait and let
* it happen.
*/
return (serrno);
}
/*
* I'm the original slab requester for this domain. It's local
* so go ahead and do the job.
*/
/*
* Allocation may have failed. In either case we've
* got to do the put to at least wake potential waiters up.
*/
if (!serrno) {
if (DSLAB_LOCK_TRYUPGRADE(domid) == 0) {
}
}
/*
* If serrno is ENOLCK here, then we must have failed
* on the upgrade above, so lock already dropped.
*/
/*
* Need to drop since reaping may be recursive?
*/
}
/*
* Since we were the original requester but never went
* to sleep, we need to directly unregister ourselves
* from the waiting list.
*/
/*
* Now that we've satisfied the request, let's check if any
* reaping is necessary. Only the master does this and only
* when allocating slabs, an infrequent event :-o
*/
return (serrno);
}
/*
* Can only be called by a slave on behalf of himself. Need to
* make a request to the master to allocate a slab of SMR buffers
* for the local domain.
*
* Returns: smr_slaballoc_wait
* (0, EINVAL, EBUSY, ENOMEM)
* ENOLCK
* ECANCELED
*/
static int
{
int nwait;
int serrno = 0;
int bailout = 0;
int masterid;
/*
* Only slaves make remote allocations.
*/
/*
* Register myself with the slaballoc waiting list.
* Note that only allow one outstanding allocation
* request for the given domain. Other callers which
* detect a slab is needed simply get stuck on the
* waiting list waiting for the original caller to
* get the job done.
* The waiter_register routine will allocate the necessary
* slab structure which will ultimately be inserted in
* the domain's slab list via smr_slaballoc_put().
*/
/*
* Make sure we have a connection with the master
* before we wait around for nothing and send a
* command off to nowhere.
* First do a quick (no lock) check for global okayness.
*/
bailout = 1;
}
/*
* We need to drop our read lock _before_ acquiring the
* slaballoc waiter lock. This is necessary because the
* thread that receives the slab alloc response and fills
* in the slab structure will need to grab the domain write
* lock while holding onto the slaballoc waiter lock.
* Potentially could deadlock if we didn't drop our domain
* lock before. Plus, we've registered.
*
* 4093209 - Note also that we do this _after_ the check for
* idn.masterid where we grab the READER global
* lock. This is to prevent somebody from
* changing our state after we drop the drwlock.
* A deadlock can occur when shutting down a
* domain we're holding the
*/
if (!bailout) {
/*
* Global state is okay. Let's double check the
* state of our actual target domain.
*/
bailout = 1;
} else if (IDN_DLOCK_TRY_SHARED(masterid)) {
bailout = 1;
} else if (nwait != 1) {
}
/*
* Note that keep the drwlock(read) for
* the target (master) domain if it appears
* we're the lucky one to send the command.
* We hold onto the lock until we've actually
* sent the command out.
* We don't reach this place unless it
* appears everything is kosher with
* the target (master) domain.
*/
} else {
bailout = 1;
}
}
if (bailout) {
/*
* Gotta bail. Abort operation. Error result
* will be picked up when we attempt to wait.
*/
PR_SMR("%s: BAILING OUT on behalf domain %d "
"(err=%d, gs=%s, ms=%s)\n",
(masterid == IDN_NIL_DOMID)
} else if (nwait == 1) {
/*
* We are the original requester. Initiate the
* actual request to the master.
*/
}
/*
* Wait here for response. Once awakened func returns
* with slab structure possibly filled with gifts!
*/
return (serrno);
}
/*
* Allocate a slab from the Master on behalf
* of the given domain. Note that master uses
* this function to allocate slabs on behalf of
* remote domains also.
* Entered with drwlock held.
* Leaves with drwlock dropped.
* Returns: EDQUOT
* EINVAL
* ENOLCK
* smr_slab_alloc_local
* smr_slab_alloc_remote
* (0, EINVAL, EBUSY, ENOMEM)
*/
int
{
int serrno = 0;
switch (dp->dslab_state) {
case DSLAB_STATE_UNKNOWN:
"IDN: 300: no slab allocations without a master");
break;
case DSLAB_STATE_LOCAL:
/*
* If I'm the master, then get a slab
* from the local SMR pool, but only
* if the number of allocated slabs has
* not been exceeded.
*/
else
break;
case DSLAB_STATE_REMOTE:
/*
* Have to make a remote request.
* In order to prevent overwhelming the master
* with a bunch of requests that he won't be able
* to handle we do a check to see if we're still
* under quota. Note that the limit is known
* IDN_SLAB_MINTOTAL. Domains must have the same
* IDN_SLAB_MINTOTAL. Thus a domain could throttle
* itself however it wishes.
*/
else
break;
default:
"IDN: 301: (ALLOC) unknown slab state (%d) "
break;
}
PR_SMR("%s: failed to allocate %s slab [serrno = %d]\n",
}
if (serrno) {
}
return (serrno);
}
static void
{
int rv;
/*
* Do a slaballoc_put just in case there may have
* been waiters for slabs for this respective domain
* before we unreserve this slab.
*/
if (rv == -1) {
/*
* Put failed. Must not have been any waiters.
* Go ahead and unreserve the space.
*/
}
}
static void
{
int slab_size;
int rv;
int masterid;
masterid = IDN_GET_MASTERID();
/*
* Do a slaballoc_put just in case there may have
* been waiters for slabs for this domain before
* returning back to the master.
*/
/*
* Put failed. No waiters so free the local data
* structure ship the SMR range off to the master.
*/
0);
}
}
/*
* Free up the list of slabs passed
*/
void
{
return;
case DSLAB_STATE_UNKNOWN:
break;
case DSLAB_STATE_LOCAL:
/*
* If I'm the master then put the slabs
* back to the local SMR pool.
*/
}
break;
case DSLAB_STATE_REMOTE:
/*
* If the domid is my own then I'm freeing
* a slab back to the Master.
*/
}
break;
default:
"IDN: 301: (FREE) unknown slab state (%d) for domain %d",
break;
}
}
/*
* Free up the list of slab data structures ONLY.
* This is called during a fatal shutdown of the master
* where we need to garbage collect the locally allocated
* data structures used to manage slabs allocated to the
* local domain. Should never be called by a master since
* the master can do a regular smr_slab_free.
*/
void
{
return;
/*
* Since this is only ever called by a slave,
* the slab structure size always contains a buflist.
*/
}
}
/*
* Allocate a SMR buffer on behalf of the local domain
* which is ultimately targeted for the given domain.
*
* IMPORTANT: This routine is going to drop the domain rwlock (drwlock)
* for the domain on whose behalf the request is being
* made. This routine canNOT block on trying to
* reacquire the drwlock. If he does block then somebody
* must have the write lock on the domain which most likely
* means the domain is going south anyway, so just bail on
* this buffer. Higher levels will retry if needed.
*
* XXX - Support larger than IDN_SMR_BUFSIZE allocations?
*
* Returns: A negative return value indicates lock lost on domid.
* EINVAL, ENOLINK, ENOLCK(internal)
* smr_slaballoc_wait
* (EINVAL, ETIMEDOUT)
* smr_slabwaiter_unregister
* (0, EINVAL, EBUSY, ENOMEM)
*/
int
{
smr_slab_t *sp;
int serrno;
/*
* Local domain can only allocate on behalf of
* itself if this is a priviledged call and the
* caller is the master.
*/
if (len > IDN_DATA_SIZE) {
"IDN: 303: buffer len %d > IDN_DATA_SIZE (%lu)",
len, IDN_DATA_SIZE);
return (EINVAL);
}
/*
* Need to go to my local slab list to find
* a buffer.
*/
/*
* Now we loop trying to locate a buffer out of our
* slabs. We continue this until either we find a
* buffer or we're unable to allocate a slab. Note
* that new slabs are allocated to the front.
*/
do {
PR_SMR("%s:%d: failed to allocate "
"slab [serrno = %d]",
return (serrno);
}
/*
* Of course, the world may have changed while
* we dropped the lock. Better make sure we're
* still established.
*/
PR_SMR("%s:%d: state changed during slab "
"alloc (dstate = %s)\n",
return (ENOLINK);
}
/*
* We were able to allocate a slab. Should
* be at the front of the list, spin again.
*/
}
/*
* If we have reached here then we have a slab!
* Hopefully there are free bufs there :-o
*/
all_empty = 1;
continue;
all_empty = 0;
continue;
}
continue;
}
/*
* Found a free buffer.
*/
}
/*
* If we still haven't found a buffer, but
* there's still possibly a buffer available,
* then try again. Only if we're absolutely
* sure all slabs are empty do we attempt
* to allocate a new one.
*/
}
return (0);
}
/*
* Free a buffer allocated to the local domain back to
* its respective slab. Slabs are freed via the slab-reap command.
* XXX - Support larger than IDN_SMR_BUFSIZE allocations?
*/
int
{
register smr_slab_t *sp;
int buffreed;
/*
* We should never be free'ing a buffer on
* behalf of ourselves as we are never the
* target for allocated SMR buffers.
*/
buffreed = 0;
"IDN: 304: buffer (0x%p) from domain %d not on a "
goto bfdone;
}
"IDN: 305: buffer length (%d) from domain %d greater "
"than IDN_DATA_SIZE (%lu)",
goto bfdone;
}
break;
if (sp) {
int spl;
;
break;
}
if (bp) {
buffreed++;
}
}
if (buffreed) {
} else {
"IDN: 306: unknown buffer (0x%p) from domain %d",
}
return (sp ? 0 : -1);
}
/*
* Alternative interface to smr_buf_free, but with local drwlock
* held.
*/
/* ARGSUSED2 */
int
{
}
/*
* Free any and all buffers associated with the given domain.
* Assumption is that domain is dead and buffers are not in use.
* Returns: Number of buffers freed.
* -1 if error.
*/
int
smr_buf_free_all(int domid)
{
register smr_slab_t *sp;
int nbufsfreed = 0;
/*
* We should never be free'ing buffers on
* behalf of ourself
*/
if (!VALID_DOMAINID(domid)) {
return (-1);
}
/*
* We grab the writer lock so that we don't have any
* competition during a "free-all" call.
* No need to grab individual slab locks when holding
* dslab(writer).
*/
nbufsfreed++;
} else {
}
}
}
if (nbufsfreed > 0) {
}
return (nbufsfreed);
}
int
{
int num_reclaimed = 0;
/*
* Reclaim is already in progress, don't
* bother.
*/
return (0);
}
register smr_slab_t *sp;
int spl;
continue;
continue;
continue;
}
/*
* Buffer no longer in use,
* reclaim it.
*/
nbufs--;
} else {
}
}
}
if (num_reclaimed > 0) {
}
}
PR_SMR("%s: reclaimed %d buffers from domain %d\n",
return (num_reclaimed);
}
/*
* Returns 1 If any buffers are locked for the given slab.
* 0 If all buffers are free for the given slab.
*
* The caller is assumed to have the slab protected so that no
* new allocations are attempted from it. Also, this is only
* valid to be called with respect to slabs that were allocated
* on behalf of the local domain, i.e. the master is not expected
* to call this function with (slave) slab "representatives".
*/
int
{
}
int
{
register int i;
struct slabwaiter *wp;
return (0);
/*
* Initialize the slab waiting area for MAX_DOMAINS.
*/
for (i = 0; i < MAX_DOMAINS; wp++, i++) {
}
return (0);
}
void
{
register int i;
struct slabwaiter *wp;
return;
for (i = 0; i < MAX_DOMAINS; wp++, i++) {
}
}
void
{
int d;
struct slabwaiter *wp;
return;
for (d = 0; d < MAX_DOMAINS; wp++, d++) {
if (!DOMAIN_IN_SET(domset, d))
continue;
}
}
void
{
int d;
struct slabwaiter *wp;
return;
for (d = 0; d < MAX_DOMAINS; wp++, d++) {
if (!DOMAIN_IN_SET(domset, d))
continue;
}
}
/*
* Register the caller with the waiting list for the
* given domain.
*
* Protocol:
* 1st Local requester: register -> alloc ->
* put(wakeup|xdc) -> unregister
* Nth Local requester: register -> wait
* 1st Remote requester: register -> xdc -> wait
* Nth Remote requester: register -> wait
*
* Remote Responder: local alloc -> put(xdc)
* Local Handler: xdc -> put(wakeup)
*
* E.g. A standard slave allocation request:
* slave master
* ----- ------
* idn_slab_alloc(remote)
* - register
* - xdc -> idn_handler
* - wait ...
* idn_slab_alloc(local)
* - register
* - alloc
* - put
* . wakeup [local]
* - unregister
* idn_handler <- - xdc
* - put DONE
* . wakeup [local]
* |
* V
* - wait
* . unregister
* DONE
*/
static int
{
struct slabwaiter *wp;
int nwait;
if (nwait > 1) {
/*
* There are already waiters for slab allocations
* with respect to this domain.
*/
PR_SMR("%s: existing waiters for slabs for domain %d\n",
return (nwait);
}
/*
* We are the first requester of a slab allocation for this
* respective domain. Need to prep waiting area for
* subsequent arrival of a slab.
*/
return (nwait);
}
/*
* It is assumed that the caller had previously registered,
* but wakeup did not occur due to caller never waiting.
* Thus, slaballoc mutex is still held by caller.
*
* Returns: 0
* EINVAL
* EBUSY
* w_serrno (smr_slaballoc_put)
* (0, ENOLCK, ENOMEM, EDQUOT, EBUSY, ECANCELED)
*/
static int
{
struct slabwaiter *wp;
int serrno = 0;
if (wp->w_nwaiters <= 0) {
/*
* Hmmm...nobody is registered!
*/
return (EINVAL);
}
(wp->w_nwaiters)--;
/*
* Is our present under the tree?
*/
/*
* Bummer...no presents. Let the caller know
* via a null slab pointer.
* Note that we don't clean up immediately since
* message might still come in for other waiters.
* Thus, late sleepers may still get a chance.
*/
PR_SMR("%s: bummer no slab allocated for domain %d\n",
} else {
#ifdef DEBUG
if (serrno == 0) {
register smr_slab_t *sp;
PR_SMR("%s: allocation succeeded (domain %d)\n",
break;
"%s:%d: slab ptr = NULL",
} else {
PR_SMR("%s: allocation failed (domain %d) "
}
#endif /* DEBUG */
}
if (wp->w_nwaiters == 0) {
/*
* Last one turns out the lights.
*/
PR_SMR("%s: domain %d last waiter, turning out lights\n",
}
return (serrno);
}
/*
* Called to abort any slaballoc requests on behalf of the
* given domain.
*/
int
{
}
/*
* Put ourselves into a timedwait waiting for slab to be
* allocated.
* Returns with slaballoc mutex dropped.
*
* Returns: EINVAL
* ETIMEDOUT
* smr_slabwaiter_unregister
* (0, EINVAL, EBUSY, ENOMEM)
*/
static int
{
struct slabwaiter *wp;
int serrno = 0, serrno_unreg;
PR_SMR("%s: domain = %d, nwaiters = %d, wsp = 0x%p\n",
if (wp->w_nwaiters <= 0) {
/*
* Hmmm...no waiters registered.
*/
return (EINVAL);
}
int rv;
/*
* Only wait if data hasn't arrived yet.
*/
if (rv == -1)
PR_SMR("%s: domain %d, awakened (reason = %s)\n",
}
/*
* We've awakened or request already filled!
* Unregister ourselves.
*/
/*
* Any gifts will be entered into spp.
*/
/*
* Leave with reader lock on dslab_lock.
*/
return (serrno);
else
return (serrno_unreg);
}
/*
* A SMR slab was allocated on behalf of the given domain.
* Wakeup anybody that may have been waiting for the allocation.
* Note that if the domain is a remote one, i.e. master is allocating
* on behalf of a slave, it's up to the caller to transmit the
* allocation response to that domain.
* The force flag indicates that we want to install the slab for
* the given user regardless of whether there are waiters or not.
* This is used primarily in situations where a slave may have timed
* out before the response actually arrived. In this situation we
* don't want to send slab back to the master after we went through
* the trouble of allocating one. Master is _not_ allowed to do this
* for remote domains.
*
* Returns: -1 Non-registered waiter or waiting area garbaged.
* 0 Successfully performed operation.
*/
int
{
struct slabwaiter *wp;
if (domid == IDN_NIL_DOMID)
return (-1);
PR_SMR("%s: domain = %d, bufp = 0x%p, ebufp = 0x%p, "
if (wp->w_nwaiters <= 0) {
/*
* There are no waiters!! Must have timed out
* and left. Oh well...
*/
PR_SMR("%s: no slaballoc waiters found for domain %d\n",
/*
* No waiters and caller doesn't want to force it.
*/
return (-1);
}
/*
* Now we fall through and let it be added in the
* regular manor.
*/
}
/*
* There's at least one waiter so there has
* to be a slab structure waiting for us.
* If everything is going smoothly, there should only
* be one guy coming through the path of inserting
* an error or good slab. However, if a disconnect was
* detected, you may get several guys coming through
* trying to let everybody know.
*/
return (-1);
}
if (serrno != 0) {
/*
* Bummer...allocation failed. This call is simply
* to wake up the sleepers and let them know.
*/
return (0);
}
PR_SMR("%s: putting slab into struct (domid=%d, localid=%d)\n",
/*
* Prep the slab structure.
*/
/*
* Allocation was indeed for me.
* Slab may or may not be locked when
* we reach. Normally they will be locked
* if we're being called on behalf of a
* free, and not locked if on behalf of
* a new allocation request.
*/
#ifdef DEBUG
} else {
/*
* Slab was not allocated on my behalf. Must be
* a master request on behalf of some other domain.
* Prep appropriately. Slab should have been locked
* by smr_slab_reserve.
*/
#endif /* DEBUG */
}
/*
* Slab is ready to go. Insert it into the domain's
* slab list so once we wake everybody up they'll find it.
* You better have write lock if you're putting treasures
* there.
*/
/*
* It's possible to fall through here without waiters.
* This is a case where forceflag was set.
*/
if (wp->w_nwaiters > 0) {
} else {
}
return (0);
}
/*
* Get the slab representing [bufp,ebufp] from the respective
* domain's pool if all the buffers are free. Remove them from
* the domain's list and return it.
* If bufp == NULL, then return however many free ones you
* can find.
* List of slabs are returned locked (sl_lock).
* XXX - Need minimum limit to make sure we don't free up _all_
* of our slabs! However, during a shutdown we will need
* method to free them all up regardless of locking.
*/
{
int nslabs;
PR_SMR("%s: getting slab for domain %d [bufp=0x%p, ebufp=0x%p]\n",
return (NULL);
}
/*
* If domid is myself then I'm trying to get a slab out
* of my local pool. Otherwise, I'm the master and
* I'm trying to get the slab representative from the
* global pool.
*/
islocal = 1;
nslabs = -1;
} else {
if (nslabs == 0) {
PR_SMR("%s: requested nslabs (%d) <= 0\n",
return (NULL);
} else if (nslabs < 0) {
/*
* Caller wants them all!
*/
}
}
foundit = 0;
int isbusy;
continue;
}
ASSERT(0);
}
/*
* We found the desired slab. Make sure
* it's free.
*/
foundit++;
isbusy = 0;
if (islocal) {
int spl;
/*
* Some of the buffers in the slab
* are still in use. Unlock the
* buffers we locked and bail out.
*/
isbusy = 1;
foundit--;
isbusy = 1;
foundit--;
}
} else {
/*
* If not local, then I'm the master getting
* a slab from one of the slaves. In this case,
* their slab structs will always be locked.
*/
}
if (!isbusy) {
/*
* Delete the entry from the list and slap
* it onto our return list.
*/
} else {
}
/*
* If bufp == NULL (alternate interface) and we haven't
* found the desired number of slabs yet, keep looking.
*/
break;
}
if (foundit) {
domid);
} else {
}
/*
* If this is the alternate interface, need to return
* the number of slabs found in the ebufp parameter.
*/
return (retsp);
}
/*
* Wrapper to hide alternate interface to smr_slaballoc_get()
*/
{
smr_slab_t *sp;
return (sp);
}
/*
* Only called by master. Initialize slab pool based on local SMR.
* Returns number of slabs initialized.
* reserved_size = Length of area at the front of the NWR portion
* of the SMR to reserve and not make available for
* slab allocations. Must be a IDN_SMR_BUFSIZE multiple.
* reserved_area = Pointer to reserved area, if any.
*/
int
{
register int p, pp;
register smr_slab_t *sp;
*reserved_area = NULL;
(nwr_available < IDN_SLAB_SIZE) ||
return (-1);
}
1 : IDN_SLAB_MINPERPOOL;
/*
* npools needs to be odd for hashing algorithm.
*/
}
/*
* Calculate the number of extra slabs that will need to
* be alloted to the pools. This number will be less than
* npools. Only one extra slab is allocated to each pool
* until we have assigned all the extra slabs.
*/
else
nxslabs = 0;
for (p = nslabs = 0;
if (nxslabs > 0) {
nslabs++;
nxslabs--;
}
sp++;
}
}
/*
* We should be at the end of the SMR at this point.
*/
if (reserved_size != 0)
return (0);
}
void
{
return;
}
void
{
int n, nbufs;
return;
if (nbufs <= 0) {
return;
}
sbufp += IDN_SMR_BUFSIZE;
}
}
void
{
int nbufs;
return;
}
/*
* Returns: 0 Successfully located a slab.
* -1 Failure.
*/
static smr_slab_t *
smr_slab_reserve(int domid)
{
register smr_slab_t *spa;
int foundone = 0;
int spl;
nextp = -1;
nexts = -1;
foundone = 1;
break;
}
nexts = SMR_SLAB_HASHSTEP(p, s);
s = nexts;
}
if (foundone)
break;
nextp = SMR_SLABPOOL_HASHSTEP(p);
p = nextp;
}
if (foundone) {
/*
* Caller is actually reserving a slab for
* themself which means they'll need the full
* slab structure to represent all of the I/O
* buffers. The "spa" is just a representative
* and doesn't contain the space to manage the
* individual buffers. Need to alloc a full-size
* struct.
* Note that this results in the returning
* smr_slab_t structure being unlocked.
*/
PR_SMR("%s: allocated full slab struct for domain %d\n",
} else {
/*
* Slab structure gets returned locked.
*/
spa += s;
}
PR_SMR("%s: allocated slab 0x%p (start=0x%p, size=%lu) for "
} else {
PR_SMR("%s: FAILED to allocate for domain %d\n",
}
return (spa);
}
static void
{
register smr_slab_t *spa;
int foundit = 0;
nextp = -1;
nexts = -1;
foundit = 1;
break;
}
nexts = SMR_SLAB_HASHSTEP(p, s);
s = nexts;
}
if (foundit)
break;
nextp = SMR_SLABPOOL_HASHSTEP(p);
p = nextp;
}
if (foundit) {
PR_SMR("%s: freed (bufp=0x%p) for domain %d\n",
/*
* Caller is actually unreserving a slab of their
* own. Note that only the master calls this
* routine. Since the master's local slab
* structures do not get entered into the global
* "representative" pool, we need to free up the
* data structure that was passed in.
*/
} else {
}
} else {
/*
* Couldn't find slab entry for given buf!
*/
PR_SMR("%s: FAILED to free (bufp=0x%p) for domain %d\n",
}
}
/*
* The Reap Protocol:
* master slave
* ------ -----
* smr_slab_reap_global
* - idn_broadcast_cmd(SLABREAP) -> idn_recv_cmd(SLABREAP)
* . idn_local_cmd(SLABREAP) - idn_recv_slabreap_req
* - smr_slab_reap . smr_slab_reap
* . smr_slaballoc_get_n - smr_slaballoc_get_n
* . smr_slab_free - smr_slab_free
* - smr_slab_free_local . smr_slab_free_remote
* . smr_slab_unreserve
* <- - idn_send_cmd(SLABFREE)
* idn_recv_cmd(SLABFREE)
* - idn_recv_slabfree_req
* . smr_slaballoc_get
* . smr_slab_free
* - smr_slab_free_local
* . smr_slab_unreserve
* . idn_send_slabfree_resp -> idn_recv_cmd(SLABFREE | ack)
* - idn_recv_slabfree_resp
*
* idn_recv_cmd(SLABREAP | ack) <- . idn_send_slabreap_resp
* - idn_recv_slabreap_resp DONE
* DONE
*
* Check available slabs and if we're below the threshold, kick
* off reaping to all remote domains. There is no guarantee remote
* domains will be able to free up any.
*/
static void
{
register int p, npools;
register int total_free = 0;
register struct smr_slabtbl *tblp;
PR_SMR("%s: only allowed by master (%d)\n",
proc, IDN_GET_MASTERID());
return;
}
now = ddi_get_lbolt();
return;
if (total_free <= IDN_SLAB_THRESHOLD) {
int diff, reap_per_domain;
PR_SMR("%s: kicking off reaping "
"(total_free = %d, min = %d)\n",
}
}
void
{
register int d;
int nreclaimed;
smr_slab_t *sp;
/*
* Should only be called on behalf of local
* domain.
*/
PR_SMR("%s: called by domain %d, should only be local (%d)\n",
ASSERT(0);
return;
}
/*
* Try and reclaim some buffers so we can possibly
* free up some slabs.
*/
nreclaimed = 0;
for (d = 0; d < MAX_DOMAINS; d++) {
int nr;
if (!DOMAIN_IN_SET(reapset, d))
continue;
IDN_DLOCK_SHARED(d);
dp = &idn_domain[d];
IDN_DUNLOCK(d);
continue;
}
/*
* Clean up any dead I/O errors if possible.
*/
register int cnt;
register smr_slabbuf_t *bp;
/*
* We need to grab the writer lock to prevent
* anybody from allocating buffers while we
* traverse the slabs outstanding.
*/
cnt = 0;
cnt++;
}
nreclaimed += nr;
IDN_DUNLOCK(d);
}
if (sp) {
}
}
/*
* ---------------------------------------------------------------------
* Remap the (IDN) shared memory region to a new physical address.
* Caller is expected to have performed a ecache flush if needed.
* ---------------------------------------------------------------------
*/
void
{
PR_REMAP("%s: vaddr (0x%p) already mapped to pfn (0x%lx)\n",
return;
}
PR_REMAP("%s: va = 0x%p, pfn = 0x%lx, npgs = %ld, mb = %d MB (%ld)\n",
/*
* Unmap the SMR virtual address from it's current
* mapping.
*/
if (new_pfn == PFN_INVALID)
return;
/*
* Map the SMR to the new physical address space,
* presumably a remote pfn. Cannot use hat_devload
* because it will think pfn represents non-memory,
* i.e. space since it may beyond his physmax.
*/
for (p = 0; p < npgs; p++) {
TTE8K);
vaddr += MMU_PAGESIZE;
new_pfn++;
}
PR_REMAP("%s: remapped %ld pages (expected %ld)\n",
}