/* ldc_shm.c revision 5b7cb889d5dcadfe96f6a0188f0648131d49d3b3 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* sun4v LDC Link Layer Shared Memory Routines
*/
#include <sys/machsystm.h>
#include <sys/machcpuvar.h>
#include <vm/hat_sfmmu.h>
#include <sys/vm_machparam.h>
#include <vm/seg_kmem.h>
#include <sys/hypervisor_api.h>
#include <sys/ldc_impl.h>
/* LDC variables used by shared memory routines */
extern ldc_soft_state_t *ldcssp;
extern int ldc_max_retries;
#ifdef DEBUG
extern int ldcdbg;
#endif
/* LDC internal functions used by shared memory routines */
extern int i_ldc_h2v_error(int h_error);
#ifdef DEBUG
#endif
/* Memory synchronization internal functions */
/*
* LDC framework supports mapping remote domain's memory
* either directly or via shadow memory pages. Default
* support is currently implemented via shadow copy.
* Direct map can be enabled by setting 'ldc_shmem_enabled'
*/
int ldc_shmem_enabled = 0;
/*
* Use of directly mapped shared memory for LDC descriptor
* rings is permitted if this variable is non-zero.
*/
int ldc_dring_shmem_enabled = 1;
/*
* The major and minor versions required to use directly
* mapped shared memory for LDC descriptor rings. The
* ldc_dring_shmem_hv_force variable, if set to a non-zero
* value, overrides the hypervisor API version check.
*/
static int ldc_dring_shmem_hv_major = 1;
static int ldc_dring_shmem_hv_minor = 1;
static int ldc_dring_shmem_hv_force = 0;
/*
* The results of the hypervisor service group API check.
* A non-zero value indicates the HV includes support for
* descriptor ring shared memory.
*/
static int ldc_dring_shmem_hv_ok = 0;
/*
* Pages exported for remote access over each channel is
* maintained in a table registered with the Hypervisor.
* The default number of entries in the table is set to
* 'ldc_mtbl_entries'.
*/
/*
* Sets ldc_dring_shmem_hv_ok to a non-zero value if the HV LDC
* API version supports directly mapped shared memory or if it has
* been explicitly enabled via ldc_dring_shmem_hv_force.
*/
/*
 * NOTE(review): this revision looks truncated — the function name and
 * parameter line for this routine, and the statement(s) in the if-body,
 * are missing. Per the comment above, the then-branch presumably sets
 * ldc_dring_shmem_hv_ok to a non-zero value; confirm against the
 * complete gate source.
 */
void
{
/*
 * Accept the HV API version if the major matches and the minor is at
 * least the required one, if the major is newer, or if the override
 * tunable ldc_dring_shmem_hv_force is set.
 */
if ((major == ldc_dring_shmem_hv_major &&
minor >= ldc_dring_shmem_hv_minor) ||
(major > ldc_dring_shmem_hv_major) ||
(ldc_dring_shmem_hv_force != 0)) {
}
}
/*
* Allocate a memory handle for the channel and link it into the list
* Also choose which memory table to use if this is the first handle
* being assigned to this channel
*/
/*
 * NOTE(review): signature, local declarations and most statements of
 * this function are missing from this revision; verify against the
 * complete gate source before editing. Debug strings indicate this is
 * ldc_mem_alloc_handle().
 */
int
{
"ldc_mem_alloc_handle: invalid channel handle\n");
return (EINVAL);
}
/* check to see if channel is initialized */
"ldc_mem_alloc_handle: (0x%llx) channel not initialized\n",
return (EINVAL);
}
/* allocate handle for channel */
/* initialize the lock */
/* insert memory handle (@ head) into list */
} else {
/* insert @ head */
}
/* return the handle */
return (0);
}
/*
* Free memory handle for the channel and unlink it from the list
*/
/*
 * NOTE(review): signature and many body lines are missing from this
 * revision; confirm against the complete gate source. Visible logic:
 * reject an invalid or still-bound handle with EINVAL, otherwise unlink
 * the handle from the channel's list (head case vs. list walk) and
 * return 0; EINVAL if the handle is not found on the list.
 */
int
{
"ldc_mem_free_handle: invalid memory handle\n");
return (EINVAL);
}
"ldc_mem_free_handle: cannot free, 0x%llx hdl bound\n",
mhdl);
return (EINVAL);
}
/* first handle */
"ldc_mem_free_handle: (0x%llx) freed handle 0x%llx\n",
} else {
/* walk the list - unlink and free */
"ldc_mem_free_handle: (0x%llx) freed "
break;
}
}
}
"ldc_mem_free_handle: invalid handle 0x%llx\n", mhdl);
return (EINVAL);
}
return (0);
}
/*
* Bind a memory handle to a virtual address.
* The virtual address is converted to the corresponding real addresses.
* Returns pointer to the first ldc_mem_cookie and the total number
* of cookies for this virtual address. Other cookies can be obtained
* using the ldc_mem_nextcookie() call. If the pages are stored in
* consecutive locations in the table, a single cookie corresponding to
* the first location is returned. The cookie size spans all the entries.
*
* If the VA corresponds to a page that is already being exported, reuse
* the page and do not export it again. Bump the page's use count.
*/
/*
 * NOTE(review): signature and trailing statements are missing from this
 * revision. This appears to be the public ldc_mem_bind_handle() wrapper,
 * which presumably downgrades the mapping type to LDC_SHADOW_MAP when
 * ldc_shmem_enabled is 0 and then calls the internal bind routine —
 * confirm against the complete gate source.
 */
int
{
/*
 * Check if direct shared memory map is enabled, if not change
 * the mapping type to SHADOW_MAP.
 */
if (ldc_shmem_enabled == 0)
}
/*
 * Internal bind routine (debug strings identify it as
 * ldc_mem_bind_handle's worker). Exports the pages backing a VA range
 * over the channel by filling memory-table entries and building
 * cookies.
 *
 * NOTE(review): the signature, local declarations and most statements
 * are missing from this revision — all behavioral notes below are
 * inferred from the surviving comments/strings; verify against the
 * complete gate source.
 */
static int
{
int i, rv;
"ldc_mem_bind_handle: invalid memory handle\n");
return (EINVAL);
}
/* clear count */
*ccount = 0;
"ldc_mem_bind_handle: (0x%x) handle already bound\n",
mhandle);
return (EINVAL);
}
/* Force address and size to be 8-byte aligned */
return (EINVAL);
}
/*
 * If this channel is binding a memory handle for the
 * first time allocate it a memory map table and initialize it
 */
/* Allocate and initialize the map table structure */
/* Allocate the table itself */
/* allocate a page of memory using kmem_alloc */
"ldc_mem_bind_handle: (0x%llx) reduced tbl size "
}
/* zero out the memory */
/* initialize the lock */
/* register table for this channel */
if (rv != 0) {
"ldc_mem_bind_handle: (0x%lx) err %d mapping tbl",
else
return (EIO);
}
"ldc_mem_bind_handle: (0x%llx) alloc'd map table 0x%llx\n",
}
/* FUTURE: get the page size, pgsz code, and shift */
"va 0x%llx pgsz=0x%llx, pgszc=0x%llx, pg_shift=0x%llx\n",
/* aligned VA and its offset */
"(0x%llx) v=0x%llx,val=0x%llx,off=0x%x,pgs=0x%x\n",
/* lock the memory table - exclusive access to channel */
return (ENOMEM);
}
/* Allocate a memseg structure */
/* Allocate memory to store all pages and cookies */
/*
 * Table slots are used in a round-robin manner. The algorithm permits
 * inserting duplicate entries. Slots allocated earlier will typically
 * get freed before we get back to reusing the slot. Inserting duplicate
 * entries should be OK as we only lookup entries using the cookie addr
 * i.e. tbl index, during export, unexport and copy operation.
 *
 * One implementation that was tried was to search for a duplicate
 * page entry first and reuse it. The search overhead is very high and
 * in the vnet case dropped the perf by almost half, 50 to 24 mbps.
 * So it does make sense to avoid searching for duplicates.
 *
 * But during the process of searching for a free slot, if we find a
 * duplicate entry we will go ahead and use it, and bump its use count.
 */
/* index to start searching from */
cookie_idx = -1;
if (mtype & LDC_DIRECT_MAP) {
}
if (mtype & LDC_SHADOW_MAP) {
}
if (mtype & LDC_IO_MAP) {
}
/* initialize each mem table entry */
for (i = 0; i < npages; i++) {
/* check if slot is available in the table */
/* we have looped around */
"ldc_mem_bind_handle: (0x%llx) cannot find "
*ccount = 0;
/* NOTE: free memory, remove previous entries */
/* this shouldn't happen as num_avail was ok */
return (ENOMEM);
}
}
/* get the real address */
/* build the mte */
/* update entry in table */
/* calculate the size and offset for this export range */
if (i == 0) {
/* first page */
} else if (i == (npages - 1)) {
/* last page */
if (psize == 0)
poffset = 0;
} else {
/* middle pages */
poffset = 0;
}
/* store entry for this page */
/* create the cookie */
cookie_idx++;
} else {
}
"(0x%llx) va=0x%llx, idx=0x%llx, "
"ra=0x%llx(sz=0x%x,off=0x%x)\n",
/* decrement number of available entries */
/* increment va by page size */
/* increment index */
prev_index = index;
/* save the next slot */
}
/* memory handle = bound */
/* update memseg_t */
/* return count and first cookie */
"ldc_mem_bind_handle: (0x%llx) bound 0x%llx, va=0x%llx, "
"pgs=0x%llx cookies=0x%llx\n",
return (0);
}
/*
* Return the next cookie associated with the specified memory handle
*/
/*
 * Return the next cookie for a bound memory handle. Visible logic:
 * EINVAL for an invalid handle or NULL cookie argument; advance
 * memseg->next_cookie and wrap it back to 0, or fail with EINVAL when
 * no further cookies remain.
 *
 * NOTE(review): signature and several statements are missing from this
 * revision; verify against the complete gate source.
 */
int
{
"ldc_mem_nextcookie: invalid memory handle\n");
return (EINVAL);
}
if (cookie == 0) {
"ldc_mem_nextcookie:(0x%llx) invalid cookie arg\n",
return (EINVAL);
}
if (memseg->next_cookie != 0) {
memseg->next_cookie++;
memseg->next_cookie = 0;
} else {
return (EINVAL);
}
"ldc_mem_nextcookie: (0x%llx) cookie addr=0x%llx,sz=0x%llx\n",
return (0);
}
/*
* Unbind the virtual memory region associated with the specified
* memory handle. All associated cookies are freed and the corresponding
* RA space is no longer exported.
*/
/*
 * Unbind a bound memory handle: revoke exported pages (retrying the
 * hypervisor revoke up to ldc_max_retries while it returns
 * H_EWOULDBLOCK), free the memseg/page structures and reset the handle.
 *
 * NOTE(review): signature and many statements are missing from this
 * revision; verify against the complete gate source.
 */
int
{
"ldc_mem_unbind_handle: invalid memory handle\n");
return (EINVAL);
}
"ldc_mem_unbind_handle: (0x%x) handle is not bound\n",
mhandle);
return (EINVAL);
}
/* lock the memory table - exclusive access to channel */
/* undo the pages exported */
/* clear the entry from the table */
/* check for mapped pages, revocation cookie != 0 */
retries = 0;
do {
/* retry while the HV reports the revoke would block */
if (rv != H_EWOULDBLOCK)
break;
} while (retries++ < ldc_max_retries);
if (rv) {
"ldc_mem_unbind_handle: (0x%llx) cannot "
}
}
}
/* free the allocated memseg and page structures */
/* uninitialize the memory handle */
return (0);
}
/*
* Get information about the dring. The base address of the descriptor
* ring along with the type and permission are returned back.
*/
/*
 * NOTE(review): essentially the whole body of this function is missing
 * from this revision — only the error returns survive. Per the comment
 * above, it reports the base address, type and permissions of the
 * memory segment; verify against the complete gate source.
 */
int
{
return (EINVAL);
}
return (EINVAL);
}
}
return (0);
}
/*
* Copy data either from or to the client specified virtual address
* space to or from the exported memory associated with the cookies.
* The direction argument determines whether the data is read from or
* written to exported memory.
*/
/*
 * Copy data between a local VA range and exported memory described by
 * cookies, in the direction given by the caller. Visible logic:
 * validate the channel (ECONNRESET when not UP), require 8-byte
 * alignment, bound-check the requested offset/size against the total
 * exported size, locate the starting cookie, then loop copying
 * page-sized chunks until total_bal reaches 0; a copy failure maps to
 * ECONNRESET when it was caused by a channel reset.
 *
 * NOTE(review): signature, local declarations and most statements are
 * missing from this revision; verify all details against the complete
 * gate source.
 */
int
{
int i, rv = 0;
return (EINVAL);
}
/* check to see if channel is UP */
chid);
return (ECONNRESET);
}
/* Force address and size to be 8-byte aligned */
return (EINVAL);
}
/* Find the size of the exported memory */
export_size = 0;
for (i = 0; i < ccount; i++)
/* check to see if offset is valid */
if (off > export_size) {
"ldc_mem_copy: (0x%llx) start offset > export mem size\n",
chid);
return (EINVAL);
}
/*
 * Check to see if the export size is smaller than the size we
 * are requesting to copy - if so flag an error
 */
"ldc_mem_copy: (0x%llx) copy size > export mem size\n",
chid);
return (EINVAL);
}
/* FUTURE: get the page size, pgsz code, and shift */
"(0x%llx) va 0x%llx pgsz=0x%llx, pgszc=0x%llx, pg_shift=0x%llx\n",
/* aligned VA and its offset */
"ldc_mem_copy: (0x%llx) v=0x%llx,val=0x%llx,off=0x%x,pgs=0x%x\n",
len -= local_psize;
/*
 * find the first cookie in the list of cookies
 * if the offset passed in is not zero
 */
if (off < cookie_size)
break;
off -= cookie_size;
}
for (;;) {
"ldc_mem_copy:(0x%llx) dir=0x%x, caddr=0x%llx,"
" loc_ra=0x%llx, exp_poff=0x%llx, loc_poff=0x%llx,"
" exp_psz=0x%llx, loc_psz=0x%llx, copy_sz=0x%llx,"
" total_bal=0x%llx\n",
copy_size, &copied_len);
if (rv != 0) {
"ldc_mem_copy: (0x%llx) err %d during copy\n",
"ldc_mem_copy: (0x%llx) dir=0x%x, caddr=0x%lx, "
"loc_ra=0x%lx, exp_poff=0x%lx, loc_poff=0x%lx,"
" exp_psz=0x%lx, loc_psz=0x%lx, copy_sz=0x%lx,"
" copied_len=0x%lx, total_bal=0x%lx\n",
/*
 * check if reason for copy error was due to
 * a channel reset. we need to grab the lock
 * just in case we have to do a reset.
 */
error = ECONNRESET;
}
return (error);
}
local_poff += copied_len;
total_bal -= copied_len;
/* partial copy - retry the remainder of this chunk */
if (copy_size != copied_len)
continue;
if (export_psize == 0 && total_bal != 0) {
if (cookie_size == 0) {
/* exhausted this cookie - move to the next one */
idx++;
} else {
export_caddr += pg_size;
export_poff = 0;
}
}
if (local_psize == 0 && total_bal != 0) {
local_valign += pg_size;
local_poff = 0;
len -= local_psize;
}
/* check if we are all done */
if (total_bal == 0)
break;
}
"ldc_mem_copy: (0x%llx) done copying sz=0x%llx\n",
return (0);
}
/*
* Copy data either from or to the client specified virtual address
* space to or from HV physical memory.
*
* The direction argument determines whether the data is read from or
* written to HV memory. direction values are LDC_COPY_IN/OUT similar
* to the ldc_mem_copy interface
*/
/*
 * Copy between a local VA range and HV physical memory named by a
 * cookie (read or write per the direction argument, LDC_COPY_IN/OUT).
 * Visible logic mirrors ldc_mem_copy: validate the channel, require
 * 8-byte alignment, then loop copying page-sized chunks until
 * target_size reaches 0; a copy failure is translated via
 * i_ldc_h2v_error().
 *
 * NOTE(review): signature, local declarations and most statements are
 * missing from this revision; verify against the complete gate source.
 */
int
{
int rv = 0;
"ldc_mem_rdwr_cookie: invalid channel handle\n");
return (EINVAL);
}
/* check to see if channel is UP */
"ldc_mem_rdwr_cookie: (0x%llx) channel is not UP\n",
return (ECONNRESET);
}
/* Force address and size to be 8-byte aligned */
return (EINVAL);
}
target_size = *size;
/* FUTURE: get the page size, pgsz code, and shift */
"(0x%llx) va 0x%llx pgsz=0x%llx, pgszc=0x%llx, pg_shift=0x%llx\n",
/* aligned VA and its offset */
"val=0x%llx,off=0x%x,pgs=0x%x\n",
len -= local_psize;
for (;;) {
"ldc_mem_rdwr_cookie: (0x%llx) dir=0x%x, tar_pa=0x%llx,"
" loc_ra=0x%llx, tar_poff=0x%llx, loc_poff=0x%llx,"
" tar_psz=0x%llx, loc_psz=0x%llx, copy_sz=0x%llx,"
" total_bal=0x%llx\n",
copy_size, &copied_len);
if (rv != 0) {
"ldc_mem_rdwr_cookie: (0x%lx) err %d during copy\n",
"ldc_mem_rdwr_cookie: (0x%llx) dir=%lld, "
"tar_pa=0x%llx, loc_ra=0x%llx, tar_poff=0x%llx, "
"loc_poff=0x%llx, tar_psz=0x%llx, loc_psz=0x%llx, "
"copy_sz=0x%llx, total_bal=0x%llx\n",
/* translate the hypervisor error to an errno */
return (i_ldc_h2v_error(rv));
}
local_poff += copied_len;
/* partial copy - retry the remainder of this chunk */
if (copy_size != copied_len)
continue;
if (target_psize == 0 && target_size != 0) {
target_poff = 0;
}
if (local_psize == 0 && target_size != 0) {
local_valign += pg_size;
local_poff = 0;
len -= local_psize;
}
/* check if we are all done */
if (target_size == 0)
break;
}
return (0);
}
/*
* Map an exported memory segment into the local address space. If the
* memory range was exported for direct map access, a HV call is made
* to allocate a RA range. If the map is done via a shadow copy, local
* shadow memory is allocated and the base VA is returned in 'vaddr'. If
* the mapping is a direct map then the RA is returned in 'raddr'.
*/
/*
 * NOTE(review): signature and trailing statements are missing from this
 * revision. This appears to be the public ldc_mem_map() wrapper, which
 * presumably downgrades the mapping type to LDC_SHADOW_MAP when
 * ldc_shmem_enabled is 0 and then calls the internal map routine —
 * confirm against the complete gate source.
 */
int
{
/*
 * Check if direct map over shared memory is enabled, if not change
 * the mapping type to SHADOW_MAP.
 */
if (ldc_shmem_enabled == 0)
}
/*
 * Internal map routine (debug strings identify it as ldc_mem_map's
 * worker). Maps an exported segment into the local address space: for
 * LDC_DIRECT_MAP it maps each cookie page in via the hypervisor
 * (retrying, and reverting to shadow map on failure, unmapping any
 * pages already mapped); for LDC_SHADOW_MAP it uses local shadow or
 * client-supplied memory.
 *
 * NOTE(review): the signature, local declarations and most statements
 * are missing from this revision — verify every detail against the
 * complete gate source.
 */
static int
{
return (EINVAL);
}
return (EINVAL);
}
"ldc_mem_dring_map: (0x%llx) channel is not UP\n",
return (ECONNRESET);
}
return (EINVAL);
}
/* FUTURE: get the page size, pgsz code, and shift */
/* calculate the number of pages in the exported cookie */
/* Allocate memseg structure */
/* Allocate memory to store all pages and cookies */
/*
 * Check to see if the client is requesting direct or shadow map
 * If direct map is requested, try to map remote memory first,
 * and if that fails, revert to shadow map
 */
if (mtype == LDC_DIRECT_MAP) {
/* Allocate kernel virtual space for mapping */
"ldc_mem_map: (0x%lx) memory map failed\n",
(sizeof (ldc_mem_cookie_t) * ccount));
(sizeof (ldc_page_t) * npages));
return (ENOMEM);
}
/* Unload previous mapping */
/* for each cookie passed in - map into address space */
idx = 0;
cookie_size = 0;
for (i = 0; i < npages; i++) {
if (cookie_size == 0) {
pg_size);
idx++;
}
/* map the cookie into address space */
retries++) {
break;
}
"ldc_mem_map: (0x%llx) hv mapin err %d\n",
/* remove previous mapins */
for (j = 0; j < i; j++) {
rv = hv_ldc_unmap(
if (rv) {
"ldc_mem_map: (0x%llx) "
"cannot unmap ra=0x%llx\n",
}
}
/* free kernel virtual space */
map_size);
/* direct map failed - revert to shadow map */
break;
} else {
"ldc_mem_map: (0x%llx) vtop map 0x%llx -> "
"0x%llx, cookie=0x%llx, perm=0x%llx\n",
cookie_addr, perm);
/*
 * NOTE: Calling hat_devload directly, causes it
 * to look for page_t using the pfn. Since this
 * addr is greater than the memlist, it treats
 * it as non-memory
 */
"ldc_mem_map: (0x%llx) ra 0x%llx -> "
cookie_size -= pg_size;
cookie_addr += pg_size;
}
}
}
if (mtype == LDC_SHADOW_MAP) {
} else {
/*
 * Use client supplied memory for memseg->vaddr
 * WARNING: assuming that client mem is >= exp_size
 */
}
/* Save all page and cookie information */
}
}
/* save all cookies */
/* update memseg_t */
memseg->next_cookie = 0;
/* memory handle = mapped */
"va=0x%llx, pgs=0x%llx cookies=0x%llx\n",
if (mtype == LDC_SHADOW_MAP)
base_off = 0;
if (raddr)
if (vaddr)
return (0);
}
/*
* Unmap a memory segment. Free shadow memory (if any).
*/
/*
 * Unmap a mapped memory segment: free shadow memory if allocated, undo
 * the HV mapping for LDC_DIRECT_MAP, free memseg/page structures and
 * reset the handle.
 *
 * NOTE(review): signature and many statements are missing from this
 * revision; verify against the complete gate source.
 */
int
{
int i, rv;
"ldc_mem_unmap: (0x%llx) handle is not mapped\n",
mhandle);
return (EINVAL);
}
/* if we allocated shadow memory - free it */
/* unmap in the case of DIRECT_MAP */
if (rv) {
"ldc_mem_map: (0x%lx) hv unmap err %d\n",
}
}
}
/* free the allocated memseg and page structures */
/* uninitialize the memory handle */
return (0);
}
/*
* Internal entry point for LDC mapped memory entry consistency
* semantics. Acquire copies the contents of the remote memory
* into the local shadow copy. The release operation copies the local
* contents into the remote memory. The offset and size specify the
* bounds for the memory range being synchronized.
*/
/*
 * Internal consistency helper for mapped memory (see comment above):
 * acquire copies remote contents into the local shadow, release copies
 * the local shadow out. Visible logic: EINVAL for an invalid or
 * unmapped handle or an out-of-range offset/size; no-op (return 0) for
 * direct map and for the COPY_IN+MEM_W / COPY_OUT+MEM_R combinations;
 * otherwise perform the copy and propagate its error.
 *
 * NOTE(review): signature and several statements are missing from this
 * revision; verify against the complete gate source.
 */
static int
{
int err;
"i_ldc_mem_acquire_release: invalid memory handle\n");
return (EINVAL);
}
"i_ldc_mem_acquire_release: not mapped memory\n");
return (EINVAL);
}
/* do nothing for direct map */
return (0);
}
/* do nothing if COPY_IN+MEM_W and COPY_OUT+MEM_R */
return (0);
}
"i_ldc_mem_acquire_release: memory out of range\n");
return (EINVAL);
}
/* get the channel handle and memory segment */
"i_ldc_mem_acquire_release: copy failed\n");
return (err);
}
}
return (0);
}
/*
* Ensure that the contents in the remote memory seg are consistent
* with the contents of the local segment
*/
/*
 * NOTE(review): the signature and body of this function are missing
 * from this revision. Per the comment above it presumably forwards to
 * i_ldc_mem_acquire_release() with the acquire direction — confirm
 * against the complete gate source.
 */
int
{
}
/*
* Ensure that the contents in the local memory seg are consistent
* with the contents of the remote segment
*/
/*
 * NOTE(review): the signature and body of this function are missing
 * from this revision. Per the comment above it presumably forwards to
 * i_ldc_mem_acquire_release() with the release direction — confirm
 * against the complete gate source.
 */
int
{
}
/*
* Allocate a descriptor ring. The size of each descriptor
* must be 8-byte aligned and the entire ring should be a multiple
* of MMU_PAGESIZE.
*/
/*
 * Allocate a descriptor ring. Visible logic: reject a zero length or a
 * descriptor size that is not 8-byte aligned with EINVAL, round the
 * total size up to a multiple of MMU_PAGESIZE, allocate and initialize
 * the ring, and add it to the head of the global dring list.
 *
 * NOTE(review): signature and many statements are missing from this
 * revision; verify against the complete gate source.
 */
int
{
return (EINVAL);
}
if (len == 0) {
return (EINVAL);
}
/* descriptor size should be 8-byte aligned */
return (EINVAL);
}
*dhandle = 0;
/* Allocate a desc ring structure */
/* Initialize dring */
/* round off to multiple of pagesize */
if (size & MMU_PAGEOFFSET)
/* allocate descriptor ring memory */
/* initialize the desc ring lock */
/* Add descriptor ring to the head of global list */
return (0);
}
/*
* Destroy a descriptor ring.
*/
/*
 * Destroy a descriptor ring. Visible logic: EINVAL for an invalid
 * handle, EACCES if the ring is still bound; otherwise unlink the ring
 * from the global list (head case vs. list walk, EINVAL if not found),
 * then free the ring memory, destroy its lock and free the object.
 *
 * NOTE(review): signature and many statements are missing from this
 * revision; verify against the complete gate source.
 */
int
{
"ldc_mem_dring_destroy: invalid desc ring handle\n");
return (EINVAL);
}
"ldc_mem_dring_destroy: desc ring is bound\n");
return (EACCES);
}
/* remove from linked list - if not bound */
if (tmp_dringp == dringp) {
} else {
while (tmp_dringp != NULL) {
break;
}
}
if (tmp_dringp == NULL) {
"ldc_mem_dring_destroy: invalid descriptor\n");
return (EINVAL);
}
}
/* free the descriptor ring */
/* destroy dring lock */
/* free desc ring object */
return (0);
}
/*
* Bind a previously allocated dring to a channel. The channel should
* be OPEN in order to bind the ring to the channel. Returns back a
* descriptor ring cookie. The descriptor ring is exported for remote
* access by the client at the other end of the channel. An entry for
* dring pages is stored in map table (via call to ldc_mem_bind_handle).
*/
/*
 * Bind an allocated dring to a channel (see comment above). Visible
 * logic: validate the channel, dring handle, cookie argument, mtype and
 * permissions; force shadow map when HV direct-map support is absent or
 * disabled; allocate a memory handle and bind it; fail with EAGAIN
 * (after unbinding/freeing) if more than one cookie results, since only
 * a single cookie is currently supported; finally add the ring to the
 * channel's exported-dring list.
 *
 * NOTE(review): signature and many statements are missing from this
 * revision; verify against the complete gate source.
 */
int
{
int err;
/* check to see if channel is initialized */
"ldc_mem_dring_bind: invalid channel handle\n");
return (EINVAL);
}
"ldc_mem_dring_bind: invalid desc ring handle\n");
return (EINVAL);
}
"ldc_mem_dring_bind: invalid cookie arg\n");
return (EINVAL);
}
/* ensure the mtype is valid */
return (EINVAL);
}
/* no need to bind as direct map if it's not HV supported or enabled */
if (!ldc_dring_shmem_hv_ok || !ldc_dring_shmem_enabled) {
}
"ldc_mem_dring_bind: (0x%llx) descriptor ring is bound\n",
return (EINVAL);
}
if ((perm & LDC_MEM_RW) == 0) {
"ldc_mem_dring_bind: invalid permissions\n");
return (EINVAL);
}
return (EINVAL);
}
/* create a memory handle */
"ldc_mem_dring_bind: (0x%llx) error allocating mhandle\n",
return (err);
}
/* bind the descriptor ring to channel */
if (err) {
"ldc_mem_dring_bind: (0x%llx) error binding mhandle\n",
return (err);
}
/*
 * For now return error if we get more than one cookie
 * FUTURE: Return multiple cookies ..
 */
if (*ccount > 1) {
(void) ldc_mem_unbind_handle(mhandle);
(void) ldc_mem_free_handle(mhandle);
*ccount = 0;
return (EAGAIN);
}
/* Add descriptor ring to channel's exported dring list */
return (0);
}
/*
* Return the next cookie associated with the specified dring handle
*/
/*
 * Return the next cookie for a bound dring handle. Visible logic:
 * EINVAL for an invalid handle, an unbound ring, or an invalid cookie
 * argument; otherwise the result of the underlying next-cookie lookup
 * is returned in rv.
 *
 * NOTE(review): signature and several statements are missing from this
 * revision; verify against the complete gate source.
 */
int
{
int rv = 0;
"ldc_mem_dring_nextcookie: invalid desc ring handle\n");
return (EINVAL);
}
"ldc_mem_dring_nextcookie: descriptor ring 0x%llx "
"is not bound\n", dringp);
return (EINVAL);
}
"ldc_mem_dring_nextcookie:(0x%llx) invalid cookie arg\n",
return (EINVAL);
}
return (rv);
}
/*
* Unbind a previously bound dring from a channel.
*/
/*
 * Unbind a previously bound dring from its channel. Visible logic:
 * EINVAL for an invalid handle or an already-unbound ring; unlink the
 * ring from the channel's exported list (head case vs. list walk,
 * EINVAL if not found).
 *
 * NOTE(review): signature and many statements are missing from this
 * revision (including the actual unbind/free of the backing memory
 * handle); verify against the complete gate source.
 */
int
{
"ldc_mem_dring_unbind: invalid desc ring handle\n");
return (EINVAL);
}
"ldc_mem_dring_bind: descriptor ring 0x%llx is unbound\n",
dringp);
return (EINVAL);
}
if (tmp_dringp == dringp) {
} else {
while (tmp_dringp != NULL) {
break;
}
}
if (tmp_dringp == NULL) {
"ldc_mem_dring_unbind: invalid descriptor\n");
return (EINVAL);
}
}
return (0);
}
#ifdef DEBUG
/*
 * DEBUG-only fault-injection helper (error strings identify it as
 * i_ldc_mem_inject_dring_clear). Visible logic: bail out if no map
 * table exists, then walk the exported dring entries, clearing map
 * table entries and revoking pages, retrying the HV revoke up to
 * ldc_max_retries while it returns H_EWOULDBLOCK.
 *
 * NOTE(review): signature and most statements are missing from this
 * revision; verify against the complete gate source.
 */
void
{
/* has a map table been allocated? */
return;
/* lock the memory table - exclusive access to channel */
/* lock the exported dring list */
continue;
continue;
/* undo the pages exported */
/* clear the entry from the table */
retries = 0;
do {
/* retry while the HV reports the revoke would block */
if (rv != H_EWOULDBLOCK)
break;
} while (retries++ < ldc_max_retries);
if (rv != 0) {
"i_ldc_mem_inject_dring_clear(): "
"hv_ldc_revoke failed: "
"channel: 0x%lx, cookie addr: 0x%p,"
"cookie: 0x%lx, rv: %d",
}
}
}
}
#endif
/*
* Get information about the dring. The base address of the descriptor
* ring along with the type and permission are returned back.
*/
/*
 * Report dring information (base address, type, permissions per the
 * comment above). Visible logic: EINVAL for an invalid handle; a failed
 * underlying mem-info read is logged and its error propagated.
 *
 * NOTE(review): signature and several statements are missing from this
 * revision; verify against the complete gate source.
 */
int
{
int rv;
"ldc_mem_dring_info: invalid desc ring handle\n");
return (EINVAL);
}
if (rv) {
"ldc_mem_dring_info: error reading mem info\n");
return (rv);
}
} else {
}
return (0);
}
/*
* Map an exported descriptor ring into the local address space. If the
* descriptor ring was exported for direct map access, a HV call is made
* to allocate a RA range. If the map is done via a shadow copy, local
* shadow memory is allocated.
*/
/*
 * Map an exported descriptor ring into the local address space (see
 * comment above). Visible logic: validate the dhandle, channel and
 * cookie (only one cookie supported for now) and the mtype; force
 * shadow map when HV direct-map support is absent or disabled; round
 * the ring size up to a page multiple; allocate a memory handle
 * (ENOMEM on failure) and map the ring through it (freeing the handle
 * and returning ENOMEM on failure); finally add the ring to the
 * channel's imported-dring list.
 *
 * NOTE(review): signature and many statements are missing from this
 * revision; verify against the complete gate source.
 */
int
{
int err;
"ldc_mem_dring_map: invalid dhandle\n");
return (EINVAL);
}
/* check to see if channel is initialized */
"ldc_mem_dring_map: invalid channel handle\n");
return (EINVAL);
}
"ldc_mem_dring_map: (0x%llx) invalid cookie\n",
return (EINVAL);
}
/* FUTURE: For now we support only one cookie per dring */
return (EINVAL);
}
/* ensure the mtype is valid */
return (EINVAL);
}
/* do not attempt direct map if it's not HV supported or enabled */
if (!ldc_dring_shmem_hv_ok || !ldc_dring_shmem_enabled) {
}
*dhandle = 0;
/* Allocate a dring structure */
"ldc_mem_dring_map: 0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
/* Initialize dring */
/* round off to multiple of page size */
if (dring_size & MMU_PAGEOFFSET)
/* create a memory handle */
"ldc_mem_dring_map: cannot alloc hdl err=%d\n",
err);
return (ENOMEM);
}
/* map the dring into local memory */
"ldc_mem_dring_map: cannot map desc ring err=%d\n", err);
(void) ldc_mem_free_handle(mhandle);
return (ENOMEM);
}
/* initialize the desc ring lock */
/* Add descriptor ring to channel's imported dring list */
return (0);
}
/*
* Unmap a descriptor ring. Free shadow memory (if any).
*/
/*
 * Unmap a mapped descriptor ring. Visible logic: EINVAL for an invalid
 * handle or a ring that is not mapped; unlink the ring from the
 * channel's imported list (head case vs. list walk, EINVAL if not
 * found), then unmap/free the backing memory handle, destroy the ring
 * lock and free the ring object.
 *
 * NOTE(review): signature and many statements are missing from this
 * revision; verify against the complete gate source.
 */
int
{
"ldc_mem_dring_unmap: invalid desc ring handle\n");
return (EINVAL);
}
"ldc_mem_dring_unmap: not a mapped desc ring\n");
return (EINVAL);
}
/* find and unlink the desc ring from channel import list */
if (tmp_dringp == dringp) {
} else {
while (tmp_dringp != NULL) {
break;
}
}
if (tmp_dringp == NULL) {
"ldc_mem_dring_unmap: invalid descriptor\n");
return (EINVAL);
}
}
/* do a LDC memory handle unmap and free */
/* destroy dring lock */
/* free desc ring object */
return (0);
}
/*
* Internal entry point for descriptor ring access entry consistency
* semantics. Acquire copies the contents of the remote descriptor ring
* into the local shadow copy. The release operation copies the local
* contents into the remote dring. The start and end locations specify
* bounds for the entries being synchronized.
*/
/*
 * Internal consistency helper for mapped drings (see comment above):
 * synchronizes the range of entries [start, end] between local shadow
 * and remote dring. Visible logic: EINVAL for an invalid/unmapped ring,
 * an out-of-range index, an invalid memory handle or an invalid mtype;
 * then perform the copy for the first contiguous span and, if the
 * requested range wraps, a second copy from offset 0 ("the balance"),
 * propagating any copy error.
 *
 * NOTE(review): signature and many statements are missing from this
 * revision; verify against the complete gate source.
 */
static int
{
int err;
"i_ldc_dring_acquire_release: invalid desc ring handle\n");
return (EINVAL);
}
"i_ldc_dring_acquire_release: not a mapped desc ring\n");
return (EINVAL);
}
"i_ldc_dring_acquire_release: index out of range\n");
return (EINVAL);
}
"i_ldc_dring_acquire_release: invalid memory handle\n");
return (EINVAL);
}
"i_ldc_dring_acquire_release: invalid mtype: %d\n",
return (EINVAL);
}
/* get the channel handle */
/* Calculate the relative offset for the first desc */
if (err) {
"i_ldc_dring_acquire_release: copy failed\n");
return (err);
}
/* do the balance */
soff = 0;
if (err) {
"i_ldc_dring_acquire_release: copy failed\n");
return (err);
}
}
return (0);
}
/*
* Ensure that the contents in the local dring are consistent
* with the contents of the remote dring
*/
/*
 * NOTE(review): the signature and body of this function are missing
 * from this revision. Per the comment above it presumably forwards to
 * i_ldc_dring_acquire_release() with the acquire direction — confirm
 * against the complete gate source.
 */
int
{
}
/*
* Ensure that the contents in the remote dring are consistent
* with the contents of the local dring
*/
/*
 * NOTE(review): the signature and body of this function are missing
 * from this revision. Per the comment above it presumably forwards to
 * i_ldc_dring_acquire_release() with the release direction — confirm
 * against the complete gate source.
 */
int
{
}