wrsm_memseg_export.c revision 7c478bd95313f5f23a4c958a745db2134aa03244
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* This file implements the RSMPI export side memory segment functions
* for the Wildcat RSM driver.
*/
#include <vm/seg_kmem.h>
#include <sys/ddimapreq.h>
#include <sys/wrsm_common.h>
#include <sys/wrsm_session.h>
#include <sys/wrsm_memseg.h>
#include <sys/wrsm_memseg_impl.h>
#include <sys/wrsm_intr.h>
#ifdef DEBUG
#define DBG_WARN 0x001
#define DBG_EXPORT 0x002
#define DBG_EXPORT_EXTRA 0x040
#else /* DEBUG */
#define DPRINTF(a, b) { }
#endif /* DEBUG */
static int wrsm_hw_protection = 0;
/*
* lock hierarchy:
* network->lock
* all_exportsegs_lock
* exportseg->lock
* node->memseg->lock
*
* Note: it is always safe to take all_exportsegs_lock.
* It is also safe to take network->lock: the network must
* unregister (unregister_controller), which it can't do
* until clients all release the network (release_controller).
* If a client accesses these functions after doing a release
* controller, all bets are off.
*/
/*
* Find the right starting cmmugrp for offset <off>. <sz> is the size of
* the region starting at <off> that falls within this cmmugrp. <ci> is
* the index of the cmmu entry within the entire cmmugrp's tuples array of
* the entry for this offset.
*/
static void
{
}
}
/*
* Get the next cmmugrp. <cc> is the index into the new cmmugrp's tuples
* array. <ci> is the cmmu entry within the tuple. (Both are set to 0.)
*/
static void
{
*cc = 0;
*ci = 0;
}
/*
* Get the starting tuple and index into this tuple within cmmugrp <grp>
* for this offset. <cc> is the index into the cmmugrp's tuples array.
* <tp> is the tuple. The index into the cmmugrp for this offset is passed
* in through <ci>. <ci> is modified to contain the cmmu entry within the tuple
* for this offset.
*/
static void
unsigned *ci)
{
(*cc) = 0;
(*cc)++;
}
}
/*
* Get next entry from this tuple. If no more entries from this tuple, use
* entries from next tuple. <cc> is the index into the tuples array. <tp>
* is the tuple. <ci> is the cmmu entry within the tuple for this offset.
*/
void
{
(*ci)++;
(*cc)++;
(*ci) = 0;
}
}
/*
* Get the number of entries in this cmmugrp needed to cover region of size
* <len>, or the maximum number of entries. <sz> is the size in bytes of
* the cmmugrp. <pgbytes> is the number of bytes covered by each entry.
* <num> returns the number of entries.
*/
static void
{
} else {
(*len) = 0;
}
}
/*
* Find exportseg structure in network exportseg hash from segment id.
*/
static exportseg_t *
{
int index;
while (exportseg) {
return (exportseg);
}
return (NULL);
}
/*
* Set segid of exportseg, add to network hash table.
*/
static int
{
int index;
/*
* release exportseg lock in order to take network lock
*/
while (expsg) {
break;
}
}
if (!found) {
return (RSMERR_BAD_SEG_HNDL);
}
/* segment is already published */
return (RSMERR_SEG_ALREADY_PUBLISHED);
}
/* segment id already in use */
return (RSMERR_SEGID_IN_USE);
}
/*
* add to hash
*/
return (RSM_SUCCESS);
}
/*
* Stop using current segment id, and remove exportseg structure from
* network hash. Note: exportseg is prevented from disappearing until
* exportseg->state is unpublished.
*/
static void
{
int index;
/*
* find and remove exportseg from hash table
*/
}
if (*exportsegp == NULL) {
/* someone else already unpublished this segment */
return;
}
/*
* Found exportseg; remove from segid hash table.
* If exportseg is in segid hash table, it cannot
* be in unpublished state.
*/
}
/*
* Find an exportseg with specified segid in network's exportseg hash and
* lock it.
*/
static exportseg_t *
{
if (exportseg)
return (exportseg);
}
/*
* Make sure this exportseg is still in all_exportsegs_hash.
*/
static int
{
int err = RSMERR_BAD_SEG_HNDL;
while (expsg) {
err = RSM_SUCCESS;
break;
}
}
/*
* make sure exportseg is not currently being removed
*/
}
#ifdef DEBUG
if (err == RSMERR_BAD_SEG_HNDL) {
"invalid memseg 0x%p\n", (void *)exportseg));
}
#endif
return (err);
}
/*
* Free all cmmu entries for this exported segment.
*/
static void
{
unsigned count;
unsigned i, j;
(void *)exportseg));
/*
* invalidate and free cmmu entries
*/
for (i = 0; i < cmmugrp->num_tuples; i++) {
"mseg_free_cmmus() freeing tuples %d - %d\n",
for (j = 0; j < count; j++) {
index++;
}
}
/*
* free cmmugrp structures
*/
}
}
/*
* In the cmmu entries in the range specified by <seg_offset, len>, set or
* unset the valid field and set or unset the writable field as specified
* by <flag>.
*/
static void
{
unsigned cmmutuples;
unsigned cmmu_index;
unsigned num_entries;
unsigned pfn_index;
CMMU_UPDATE_STR(flag)));
/* nothing to do */
return;
}
/*
* Update the valid field; also update the writable field if this
* was requested.
*/
if (flag == memseg_set_writeable) {
} else if (flag == memseg_unset_writeable) {
} else if (flag == memseg_unset_valid) {
}
/*
* Find the right cmmugrp structure, tuple, and cmmu entry within
* the tuple (as indicated by cmmu_index) for <seg_offset>.
*/
while (len > 0) {
/*
* Calculate the number of entries from this cmmugrp that
* should be reset, and subtract covered bytes from len.
*/
while (num_entries) {
/*
* If writable field is being updated, the valid
* field also set to true if there is memory
* backing the cmmu entry.
*/
if (flag == memseg_unset_valid ||
} else {
}
/* get next CMMU entry in this cmmugrp */
&cmmu_index);
num_entries--;
}
if (len == 0)
break;
&cmmugrp_size, &tuple);
}
}
/*
* The lpa fields in cmmu entries in the range specified by <seg_offset,
* len> are no longer valid. Set the valid field of all cmmu entries in
* this range to invalid, and set the affected entries in the pfn_list to
* PFN_INVALID.
*/
static int
{
unsigned cmmutuples;
unsigned cmmu_index;
unsigned num_entries;
unsigned pfn_index;
int i;
/*
* Check if any pfn fields are not valid. Fail with
* RSMERR_MEM_NOT_BOUND if it is required that they be valid.
*/
if (mapping_required) {
for (i = 0; i < (len >> MMU_PAGESHIFT); i++) {
return (RSMERR_MEM_NOT_BOUND);
}
}
}
/*
* Invalidate all affected entries in the pfn list.
*/
}
}
/*
* Set all cmmu entries in range to invalid if segment is published.
* Otherwise, they are already set to invalid.
*/
return (WRSM_SUCCESS);
}
/*
* Find the right cmmugrp structure, tuple, and cmmu entry within
* the tuple (as indicated by cmmu_index) for <seg_offset>.
*/
while (len > 0) {
/*
* Calculate the number of entries from this cmmugrp that
* should be cleared, and subtract covered bytes from len.
*/
while (num_entries) {
&cmmu,
/* get next CMMU entry in this cmmugrp */
&cmmu_index);
num_entries--;
}
if (len == 0)
break;
&cmmugrp_size, &tuple);
}
return (WRSM_SUCCESS);
}
/*
* Set up the cmmu lpa fields to point to the physical memory backing the
* region pointed to by <as, vaddr> or to the pages in the pagelist
* starting with <startpp>. Use as many entries as needed to map <len>
* bytes.
*
* For each physical page backing the region, update the lpa fields of as
* many cmmu entries as are needed to map the page -- either one cmmu entry
* if the passed in page size matches the CMMU entry page size, or multiple
* cmmu entries if a large page is passed in but small page cmmu entries
* are being used. Also record the pfn for each 8k region in the segment
* pfn_list, and set the entry to valid if it is published.
*
* the entry specified by <seg_offset>.
*/
static int
{
int err = 0;
int pgbytes;
unsigned cmmutuples;
unsigned cmmu_index;
unsigned num_entries;
unsigned pfn_index;
off_t cur_offset = 0;
int i;
/*
* If any pfn entries are already valid, fail with
* RSMERR_MEM_ALREADY_BOUND.
*/
for (i = 0; i < (len >> MMU_PAGESHIFT); i++) {
return (RSMERR_MEM_ALREADY_BOUND);
}
}
/*
* Set cmmu entries to valid if segment has been published.
*/
} else {
}
/*
* Find the right cmmugrp structure, tuple, and cmmu entry within
* the tuple (as indicated by cmmu_index) for <seg_offset>.
*/
used_in_group = 0;
while (len > 0) {
/*
* Get the pfn and size of the next page.
*/
if (startpp) {
/*
* Get the pfn for next page in pagelist. This is
* guaranteed to be real memory, as we have been
* given page structures.
*/
if (!pp) {
goto bad_memory;
}
} else {
/*
* Get the pfn for the page backing <as, vaddr +
* cur_offset>. Make sure this is real memory.
* Grab AS_LOCK to make sure as mappings don't
* change.
*/
if (pfn == PFN_INVALID) {
goto bad_memory;
}
if (!pf_is_memory(pfn)) {
goto bad_memory;
}
}
"mapping page with pfn 0x%lx size 0x%x\n",
/*
* If we've already allocated all the entries from the
* current cmmugrp, move to the next one.
*/
if (used_in_group >= cmmugrp_size) {
"set_lpa_fields used all in one group"
" used_in_group = %lx, size = %lx\n",
&cmmugrp_size, &tuple);
used_in_group = 0;
}
while (bytesleft > 0) {
/*
* Calculate the number of cmmu entries from this
* cmmugrp that will be used to map this page, and
* subtract covered bytes from bytesleft.
*/
while (num_entries) {
/*
* record lpa for this region of the page
*/
(CE_CONT, "set_lpa_fields "
"cmmu index %d pfn 0x%lx valid %ld\n",
/* get next CMMU entry */
&cmmutuples, &cmmu_index);
num_entries--;
/*
* If cmmu entries are for small pages,
* get physaddr (pfn) for next 8k page.
*/
}
if (bytesleft == 0)
break;
&cmmugrp_size, &tuple);
used_in_group = 0;
}
/*
* record the 8k-based pfns for this page in pfn_list
*/
for (i = 0; i < (pgbytes >> MMU_PAGESHIFT); i++) {
}
used_in_group += pgbytes;
cur_offset += pgbytes;
}
return (WRSM_SUCCESS);
/*
* There was a problem with the backing memory. Tear down
* previously set up stuff, and return error.
*/
for (i = 0; i < (cur_offset >> MMU_PAGESHIFT); i++) {
}
return (err);
}
/*
* Allocate <num_entries> cmmu entries of <pgbytes> page size from the cmmu
* allocator. Create a cmmugrp entry to store info about these entries.
*/
static int
{
int err = WRSM_SUCCESS;
int i, j;
"num_entries %d seg_offset 0x%lx pgbytes 0x%x\n",
KM_SLEEP);
WRSM_SUCCESS) {
if (cmmu_page_size == CMMU_PAGE_SIZE_LARGE) {
/*
* try allocating cmmu entries for small pages
*/
WRSM_SUCCESS) {
/* return RSMPI complaint error code */
return (RSMERR_INSUFFICIENT_RESOURCES);
}
} else {
/* give up */
return (err);
}
}
/*
* Update each CMMU entry to reflect how it is being used
*/
for (i = 0; i < cmmugrp->num_tuples; i++) {
"alloc_cmmu_tuples() alloced tuples %d - %d\n",
index++;
}
}
return (WRSM_SUCCESS);
}
/*
* Allocate enough cmmu entries for a segment of size <size>. Allocate
* large pages where possible. Set up mappings to any passed in memory.
*/
static int
{
/* LINTED: E_FUNC_SET_NOT_USED */
int err;
#ifdef DEBUG
pfn = 0;
#endif
return (RSMERR_NO_BACKING_MEM);
} else {
}
} else {
} else {
}
}
} else {
}
"as 0x%p vaddr 0x%p length 0x%lx size 0x%lx\n", (void *)as,
}
/* vaddr must be properly aligned */
(void *)vaddr));
return (RSMERR_BAD_MEM_ALIGNMENT);
}
/* size range can't exceed segment size */
return (RSMERR_BAD_LENGTH);
}
if (nbytes & MMU_PAGEOFFSET) {
/* size must be an aligned number of bytes */
nbytes));
return (RSMERR_BAD_MEM_ALIGNMENT);
}
num_entries = 0;
seg_offset = 0;
/*
* Use large page CMMU entries for all large physical pages if
* allowed and available. We could try seeing if the small pages
* happen to be allocated consecutively, but the caller apparently
* didn't care enough to use large pages, so don't bother.
*/
while (nbytes > 0) {
if (startpp) {
if (!pp) {
(CE_CONT, "invalid buf pp\n"));
return (RSMERR_NO_BACKING_MEM);
}
} else {
/*
* make sure the next region of the vaddr range
* points to valid physical memory. Grab AS_LOCK
* to make sure as mappings don't change.
*/
offset);
if (pfn == PFN_INVALID) {
/* not backed by anything! */
"not backed by memory\n",
return (RSMERR_NO_BACKING_MEM);
}
if (!pf_is_memory(pfn)) {
/* tear down previously set up stuff */
"pfn 0x%lx at vaddr 0x%p\n", pfn,
return (RSMERR_NOT_MEM);
}
}
/*
* large pages not allowed: translate to small pages
*/
"pages from size %d to %d (MMU_PAGESIZE)\n",
pgbytes, MMU_PAGESIZE));
} else {
need_entries = 1;
}
/*
* a different page size is being used
*/
if (num_entries != 0) {
/*
* Allocate cmmu entries for the num_entries
* previous pages.
*/
if ((alloc_cmmu_tuples(exportseg,
"couldn't alloc cmmu tuples to "
"back memory\n"));
return (RSMERR_INSUFFICIENT_RESOURCES);
}
/*
* record the physical addresses of this
* range of memory into the LPA fields in
* the cmmu entries
*/
if (startpp) {
}
num_entries = 0;
}
}
}
/*
* allocate tuples for last set of physical pages
*/
if (num_entries != 0) {
if (num_entries != 0) {
WRSM_SUCCESS) {
"couldn't alloc cmmu tuples for "
"last set of backing memory\n"));
return (RSMERR_INSUFFICIENT_RESOURCES);
}
}
}
/*
* Allocate tuples for the end of the segment if some of it
* has no memory backing it. Allocate small pages for this
* part, as we don't know what memory will eventually back it.
*/
if (size != 0) {
ASSERT(num_entries != 0);
!= WRSM_SUCCESS) {
"couldn't alloc cmmu tuples for unbacked "
" memory\n"));
return (RSMERR_INSUFFICIENT_RESOURCES);
}
}
return (WRSM_SUCCESS);
}
/*
* Parse the passed in access list, calculate new per node access
* permissions (based on the old and new permissions), store the new
* permissions, and apply the access permissions for an exported segment
* to the appropriate cmmu entries.
*/
static int
{
uint_t i;
int j;
for (i = 0; i < WRSM_MAX_CNODES; i++) {
}
/*
* If no access list, assume default of all nodes, with a
* permission of RSM_PERM_RDWR. If the access list's first entry
* specifies single hardware address of RSM_ACCESS_PUBLIC, apply
* the specified permission to all nodes. Otherwise, parse the
* access list.
*/
if (access_list == NULL ||
if ((perms & ~RSM_PERM_RDWR) != 0) {
return (RSMERR_BAD_ACL);
}
/* all nodes are allowed to import this segment */
for (i = 0; i < WRSM_MAX_CNODES; i++) {
WRSMSET_ADD(bitmask, i);
}
}
} else {
for (i = 0; i < access_list_length; i++) {
/*
* wrsm hardware addresses must be cnodeids.
* Only allowed bits in perms are RSM_PERM_READ
* and RSM_PERM_WRITE
*/
if ((addr >= WRSM_MAX_CNODES) ||
((access_list[i].ae_permission &
~RSM_PERM_RDWR) != 0)) {
/*
* invalid hardware address or perms --
* reinstate old settings, then fail
*/
for (j = 0; j < i; j++) {
if (changed[j]) {
old_import_vals[j];
old_perms_vals[j];
}
}
return (RSMERR_BAD_ACL);
}
/*
* make sure perms is set to the most permissive
* of each node's old and new permissions.
* perms starts out as RSM_PERM_NONE, and gets
* changed if the current node has greater
* permissions.
*/
case RSM_PERM_RDWR:
case RSM_PERM_WRITE:
break;
case RSM_PERM_READ:
if (perms == RSM_PERM_NONE)
break;
#ifdef DEBUG
default:
== RSM_PERM_NONE);
break;
#endif
}
case RSM_PERM_RDWR:
case RSM_PERM_WRITE:
break;
case RSM_PERM_READ:
if (perms == RSM_PERM_NONE)
break;
#ifdef DEBUG
default:
== RSM_PERM_NONE);
break;
#endif
}
}
}
/*
* Make sure the actual per node perms are the max permissions of
* previous actual perms and the newly installed perms.
*/
for (i = 0; i < WRSM_MAX_CNODES; i++) {
case RSM_PERM_RDWR:
case RSM_PERM_WRITE:
break;
case RSM_PERM_READ:
break;
}
"perms 0x%x actual perms 0x%x\n",
i,
}
if (!wrsm_hw_protection) {
/*
* Set all CMMU entries to valid (if they have a valid
* lpa). Set writeable to true if any node is allowed
* write permission, or if there were previous permissions
* that allowed writing. (This handles the case of
* republish calling this function with stricter
* permissions.)
*/
/* LINTED: E_PRECEDENCE_CONFUSION */
} else {
}
}
return (RSM_SUCCESS);
}
/*
* Enable the small page interrupt cmmu entry for this exportseg.
*/
static void
{
}
/*
* Disable the small page interrupt cmmu entry for this exportseg.
*/
static void
{
/* make sure any in-process interrupts have completed */
}
/*
* Translate an exportseg's stored cmmu entry information into the format
* needed for connection messages.
*/
static void
{
int i;
#ifdef DEBUG
#endif
/* set ncslice to the equivalent ncslice imported by this node */
for (i = 0; i < WRSM_NODE_NCSLICES; i++) {
#ifdef DEBUG
#endif
break;
}
}
#ifdef DEBUG
#endif
}
/*
* Send requestor information about a published exported segment.
*/
void
wrsm_connect_msg_evt(void *arg)
{
int connection = 0;
/* non-existent node */
return;
}
/* session must not be valid */
return;
}
/*
* does segment exist?
*/
return;
}
/*
* is segment published?
*/
return;
}
/*
* does requesting node have permission to connect to it?
*/
return;
}
/*
* add to list of segments the remote node is importing
*/
connection = 1;
#ifdef DEBUG
} else {
"unexpected connect request from node %d "
#endif
}
/*
* Transport Layer tears down the session if there is a message
* delivery failure.
*/
/* We're done, deallocate our incoming args struct and the message */
}
/*
* Send requestor small put interrupt page mapping information for a
* published exported segment.
*/
void
wrsm_smallputmap_msg_evt(void *arg)
{
/* non-existent node */
return;
}
/* session must not be valid */
return;
}
/*
* does segment exist?
*/
return;
}
/*
* is segment published?
*/
}
/*
* does requesting node have permission to connect to it?
*/
}
/*
* 0 length segment -- no small put page to report
*/
}
return;
}
#ifdef DEBUG
"unexpected smallputmap request from node %d "
}
#endif
/*
* Transport Layer tears down the session if there is a message
* delivery failure.
*/
/* We're done, deallocate our incoming args struct and the message */
}
/*
* Send requestor barrier page mapping information for a published exported
* segment.
*/
void
wrsm_barriermap_msg_evt(void *arg)
{
/* non-existent node */
return;
}
/* session must not be valid */
return;
}
/*
* does segment exist?
*/
return;
}
/*
* is segment published?
*/
}
/*
* does requesting node have permission to connect to it?
*/
}
/*
 * 0 length segment -- no barrier page to report
*/
}
return;
}
#ifdef DEBUG
"unexpected barriermap request from node %d "
}
#endif
/*
* Transport Layer tears down the session if there is a message
* delivery failure.
*/
/* We're done, deallocate our incoming args struct and the message */
}
/*
* Send segment mapping information for a published exported segment.
*/
void
wrsm_segmap_msg_evt(void *arg)
{
int i, j;
int tuple_index;
/* non-existent node */
return;
}
/* session must not be valid */
return;
}
/*
* does segment exist?
*/
return;
}
/*
* is segment published?
*/
return;
}
/*
* does requesting node have permission to connect to it?
*/
return;
}
#ifdef DEBUG
"unexpected map request from node %d "
}
#endif
/* bad message */
return;
}
/*
* find the cmmugrp containing the desired starting tuple
*/
i = 0;
i += cmmugrp->num_tuples;
}
/* calculate index within the cmmugrp */
i = tuple_index - i;
/*
* If this is not the first cmmu tuple in a cmmugrp, then
* compute its offset.
*/
tuple_offset = 0;
if (i > 0) {
for (j = 0; j < i; j++) {
}
}
j = 0;
/*
* copy as many tuples as possible into the response message
*/
i++;
j++;
tuple_index++;
break;
if (i == cmmugrp->num_tuples) {
i = 0;
tuple_offset = 0;
}
}
respargs.num_tuples = j;
/*
* Transport Layer tears down the session if there is a message
* delivery failure.
*/
/* We're done, deallocate our incoming args struct and the message */
}
/*
* Mark this exportseg as no longer imported by node sending this message.
*/
void
wrsm_disconnect_msg_evt(void *arg)
{
/* non-existent node */
return;
}
/* session must not be valid */
return;
}
/*
* does segment exist?
*/
/* ignore */
"from node %d for non-existent segment %d\n",
return;
}
"from disconnected node %d for segment %d\n",
return;
}
/*
* remove from list of segments the remote node is importing
*/
break;
}
}
if (exportseg->wait_for_disconnects > 0) {
"wait_for_disconnects %d\n",
}
/* We're done, deallocate our incoming args struct and the message */
}
/*
 * Send specified node a message indicating that the exported segment
* is no longer published. Record based on the response message whether
* the node has released all connections to the segment. Function
* returns 1 if it received a disconnect response from the remote
* node, otherwise it returns 0.
*/
static int
{
int disconnect = 0;
/* LINTED */
!= WRSM_SUCCESS) {
/*
* This node is not responding (message not
* delivered or response not received). (Transport
* Layer tears down the session if there is a
* message delivery failure).
*
* Assume session teardown will remove all accesses
* to this segment.
*/
return (0);
}
#ifdef DEBUG
#endif
"send_unpublish_msg got invalid response\n"));
return (0);
}
disconnect = 1;
/*
* remove from list of segments the remote node is
* importing
*/
break;
}
}
}
return (disconnect);
}
/*
* Send the specified node a message indicating new access permissions
* for the exported segment.
*/
static void
{
/* LINTED */
!= WRSM_SUCCESS) {
/*
* This node is not responding (message not
* delivered or response not received). (Transport
* Layer tears down the session if there is a
* message delivery failure).
*
* Assume session teardown will remove all accesses
* to this segment.
*/
return;
}
#ifdef DEBUG
#endif
}
/*
* The session to the specified node has been torn down. Clean up
* references by this node to any exported segments.
*/
{
int disconnects = 0;
/*
* it is presumed that at this point the node was removed from the
* cluster_members_bits registers in all wcis
*/
/*
* clean up exports to the remote node
*/
/*
* Must release node->memseg->lock in order to take
* exportseg lock; meanwhile, exportseg could disappear, so
* use find_and_lock_exportseg to verify it's still around.
*/
if (exportseg) {
disconnects++;
if (exportseg->wait_for_disconnects > 0) {
}
}
}
disconnects = 0;
}
return (B_TRUE);
}
/*
* Allocate and set up cmmu entries for the segment.
* The exportseg lock is not needed because segment is not yet visible to
* other threads.
*/
static int
{
int i;
int err;
}
/*
* Allocate CMMU entries for this segment. We can't use 4 Meg
* entries if we don't export a 4 meg ncslice, if REBIND is
* permitted, or if there is no memory backing the segment.
*
* If backing memory was provided, calculate the physical address
* for each CMMU entry, and store it in the CMMU's LPA field. Note:
* there is no guarantee that a buf struct will hang around, so can't
* just save a pointer to it. Similarly, there is no guarantee
* that the particular address space mapping will remain the same
* (although it must be mapped somewhere and locked down).
*
* If we need the physical addresses for any reason (such as to
* create HW based per node entries), the LPA can be read from the
* CMMU entry or found in the pfn_list. (The LPA field is
* RW.)
*/
if (!network->have_lg_page_ncslice ||
(flags & RSM_ALLOW_UNBIND_REBIND)) {
}
/* use small page CMMU entries */
WRSM_SUCCESS) {
return (RSMERR_INSUFFICIENT_RESOURCES);
}
return (err);
}
} else {
return (RSMERR_BAD_MSTYPE);
}
return (RSM_SUCCESS);
}
/*
* Invalidate and remove cmmu entries for the segment.
*/
static void
{
/*
* Unbind all pages, free CMMU entries.
*/
}
/*
* Allocate and set up cmmu entry for the smallput interrupt page.
* The exportseg lock is not needed because segment is not yet visible to
* other threads.
*/
static int
{
unsigned num_tuples;
int err;
int flags;
/*
* Set up an interrupt page for small puts. Allocate a CMMU entry,
* then create a receive queue.
*/
WRSM_SUCCESS) {
return (RSMERR_INSUFFICIENT_RESOURCES);
}
/*
* wrsm_intr_create_recvq() sets up the cmmu entry - identified by
* the passed in cmmu index. Create the recvq with the invalid
* flag set, and sleep waiting for resources if the caller set
* the sleep flag.
*/
if (sleep) {
}
0, /* from_node - N/A for memsegs */
flags);
if (err != WRSM_SUCCESS) {
"freeing index %d\n",
return (RSMERR_INSUFFICIENT_RESOURCES);
}
return (RSM_SUCCESS);
}
/*
* Invalidate and remove cmmu entry for the smallput interrupt page.
*/
static void
{
/*
* Release the small put interrupt page recvq
* and free the cmmu entry.
*/
"freeing index %d\n",
}
/*
* Allocate and set up cmmu entry for the barrier page.
* The exportseg lock is not needed because segment is not yet visible to
* other threads.
*/
static int
{
unsigned num_tuples;
/* LINTED: E_FUNC_SET_NOT_USED */
int err;
/*
* Set up a barrier page: allocate a page of memory, allocate a
* cmmu entry, and point the cmmu entry at the memory page.
*/
/*
* Need an aligned page, so allocate 2 pages.
*/
WRSM_SUCCESS) {
return (RSMERR_INSUFFICIENT_RESOURCES);
}
"index %d\n",
aligned_vaddr = (caddr_t)
pfn));
return (RSM_SUCCESS);
}
/*
* Invalidate and remove cmmu entry for the barrier page.
*/
static void
{
/*
* Invalidate and free barrier page cmmu entry, and free the
* barrier page memory.
*/
"freeing index %d\n",
}
/*
* Free exportsegs when network is being removed. Will only happen
* if client does a release_controller without first releasing
* exported segments.
*/
void
{
int i;
network->rsm_ctlr_id));
return;
}
for (i = 0; i < WRSM_SEGID_HASH_SIZE; i++) {
exportsegp = &(all_exportsegs_hash[i]);
while (*exportsegp != NULL) {
exportseg = *exportsegp;
/*
* remove exportseg from all_exportsegs_hash
* and destroy it
*/
}
} else {
}
}
}
#ifdef DEBUG
"exportseg count %d after exportseg cleanup\n",
}
#endif
}
/*
*
* RSMPI entry points
*
*/
/* ARGSUSED */
int
{
int err;
int i;
int index;
network->rsm_ctlr_id));
if (callback != RSM_RESOURCE_SLEEP &&
/* we don't support callbacks */
return (RSMERR_CALLBACKS_NOT_SUPPORTED);
}
if (callback == RSM_RESOURCE_SLEEP)
if ((size & MMU_PAGEOFFSET) != 0) {
/* size must be full pages */
size));
return (RSMERR_BAD_MEM_ALIGNMENT);
}
/*
* ddi_map() in sun4u's rootnex.c limits us to 4GB of total
* mappable space per segment.
*/
size));
return (RSMERR_INSUFFICIENT_RESOURCES);
}
sleep)) != RSM_SUCCESS) {
return (err);
}
!= RSM_SUCCESS) {
return (err);
}
!= RSM_SUCCESS) {
return (err);
}
}
/* save flags */
if (flags & RSM_ALLOW_UNBIND_REBIND) {
}
for (i = 0; i < WRSM_MAX_CNODES; i++) {
}
/*
* add exportseg to all_exportsegs_hash
*/
return (RSM_SUCCESS);
}
int
{
int err;
int index;
(void *)exportseg));
return (err);
}
/*
* make sure segment is not published
*/
(CE_CONT, "seg_destroy - memseg 0x%p is published "
return (RSMERR_SEG_PUBLISHED);
}
/*
* Remove exportseg from all_exportsegs_hash. exportseg->lock
* can't be held prior to taking all_exportsegs_lock, so mark
* exportseg as invalid until it is actually removed from the hash.
* Searching for exportseg in the hash fails when exportseg->valid
* is B_FALSE.
*/
*exportsegp != NULL;
/* make sure no one else got here first */
if ((*exportsegp == exportseg) &&
break;
}
}
if (found_exportseg) {
}
}
return (RSM_SUCCESS);
}
/* ARGSUSED */
int
{
int err;
(void *)exportseg));
if (callback != RSM_RESOURCE_SLEEP &&
/* we don't support callbacks */
return (RSMERR_CALLBACKS_NOT_SUPPORTED);
}
/* can only bind starting at page boundaries */
return (RSMERR_BAD_MEM_ALIGNMENT);
}
return (err);
}
return (RSMERR_NO_BACKING_MEM);
} else {
}
} else {
} else {
}
}
} else {
return (RSMERR_BAD_MSTYPE);
}
return (RSMERR_BAD_LENGTH);
}
if (nbytes & MMU_PAGEOFFSET) {
return (RSMERR_BAD_MEM_ALIGNMENT);
}
return (RSMERR_BAD_MEM_ALIGNMENT);
}
/* don't touch cmmu entries if a 0 length segment */
return (RSM_SUCCESS);
}
/*
* set up cmmu entries to point at the specified memory
*/
startpp);
return (err);
}
int
{
int err;
(void *)exportseg));
if (offset & MMU_PAGEOFFSET) {
/* can only unbind starting at page boundaries */
return (RSMERR_BAD_MEM_ALIGNMENT);
}
if (length & MMU_PAGEOFFSET) {
/* can only unbind page aligned regions */
return (RSMERR_BAD_MEM_ALIGNMENT);
}
return (err);
}
if (!exportseg->allow_rebind) {
return (RSMERR_UNBIND_REBIND_NOT_ALLOWED);
}
return (RSMERR_BAD_LENGTH);
}
/* don't touch cmmu entries if a 0 length segment */
return (RSM_SUCCESS);
}
/*
* modify cmmu entries to no longer point to this memory
*/
return (err);
}
/* ARGSUSED */
int
{
int err;
(void *)exportseg));
if (callback != RSM_RESOURCE_SLEEP &&
/* we don't support callbacks */
return (RSMERR_CALLBACKS_NOT_SUPPORTED);
}
if (offset & MMU_PAGEOFFSET) {
/* can only rebind starting at page boundaries */
return (RSMERR_BAD_MEM_ALIGNMENT);
}
return (err);
}
if (!exportseg->allow_rebind) {
return (RSMERR_UNBIND_REBIND_NOT_ALLOWED);
}
return (RSMERR_NO_BACKING_MEM);
} else {
}
} else {
} else {
}
}
} else {
return (RSMERR_BAD_MSTYPE);
}
return (RSMERR_BAD_LENGTH);
}
if (nbytes & MMU_PAGEOFFSET) {
return (RSMERR_BAD_MEM_ALIGNMENT);
}
return (RSMERR_BAD_MEM_ALIGNMENT);
}
/* don't touch cmmu entries if a 0 length segment */
return (RSM_SUCCESS);
}
/*
* modify cmmu entries to remove old mappings
*/
WRSM_SUCCESS) {
return (err);
}
/*
* modify cmmu entries to point to new memory
*/
startpp);
return (err);
}
/* ARGSUSED */
int
{
int err;
(void *)exportseg));
if (callback != RSM_RESOURCE_SLEEP &&
/* we don't support callbacks */
return (RSMERR_CALLBACKS_NOT_SUPPORTED);
}
return (err);
}
if (err != RSMERR_BAD_SEG_HNDL) {
}
return (err);
}
access_list_length)) != RSM_SUCCESS) {
return (err);
}
}
return (RSM_SUCCESS);
}
int
{
int err;
int i;
int disconnects = 0;
int rcv_disconnect;
int num_waiting;
(void *)exportseg));
return (err);
}
/*
* segment was already unpublished, but wasn't able
* to complete cleanup. Check whether cleanup has
* now completed.
*/
if (exportseg->wait_for_disconnects) {
return (RSMERR_SEG_IN_USE);
} else {
return (RSM_SUCCESS);
}
}
/* segment is not published */
return (RSMERR_SEG_NOT_PUBLISHED);
}
/*
* Set state to reflect we're doing an unpublish.
*
* update state prior to releasing lock, so subsequent publish or
* republish calls fail.
*
* exportseg->wait_for_disconnects is used as a reference count
* for the export_seg. The segment can't be freed until
* the count goes to zero.
*
* Note that the export segment lock is released prior
* to sending the RPC and thus the export seg state can change.
*/
/*
* Notify all importers that segment is being unpublished.
*/
for (i = 0; i < WRSM_MAX_CNODES; i++) {
/*
* If a session teardown occurs while we are waiting
 * for the response to the rpc,
* exportseg_sess_teardown(), while holding exportseg
* lock, will decrement wait_for_disconnects,
* decrement network->memseg->export_connected,
* and clear inuse.
* So, if inuse is cleared, we don't want to
* do those actions again here.
*/
disconnects++;
}
}
}
/*
* disable ability to write to segment
*/
/* only need to update cmmu entries if size > 0 */
}
/*
* Kernel agent on importer doesn't always release mappings
* (doesn't call rsm_unmap) in a timely fashion. So instead of
* waiting to complete the disconnect or tearing down the session,
* return RSMERR_SEG_IN_USE.
*/
if (num_waiting) {
return (RSMERR_SEG_IN_USE);
}
return (RSM_SUCCESS);
}
/* ARGSUSED */
int
{
int err;
int i;
(void *)exportseg));
if (callback != RSM_RESOURCE_SLEEP &&
/* we don't support callbacks */
return (RSMERR_CALLBACKS_NOT_SUPPORTED);
}
return (err);
}
/* segment is not published */
return (RSMERR_SEG_NOT_PUBLISHED);
}
/*
* apply new permissions
*/
access_list_length)) != RSM_SUCCESS) {
return (err);
}
/*
* Notify current importers of permission changes.
*/
for (i = 0; i < WRSM_MAX_CNODES; i++) {
}
}
return (RSM_SUCCESS);
}