/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 QLogic Corporation. All rights reserved.
* Use is subject to license terms.
*/
/*
*/
#include <sys/stmf_defines.h>
#include <sys/fct_defines.h>
#include "qlt.h"
#include "qlt_dma.h"
/*
* Local Function Prototypes.
*/
/*
 * NOTE(review): extraction damage — the declaration line for this
 * initializer is missing.  The layout matches a ddi_dma_attr(9S)
 * structure (8 KB alignment, single s/g entry), presumably the DMA
 * attributes for the dmem buffer pools; the stray "static void" line
 * below belongs to a lost function prototype.  Confirm against the
 * original file before editing.
 */
static void
DMA_ATTR_V0, /* dma_attr_version */
0, /* low DMA address range */
0xffffffffffffffff, /* high DMA address range */
0xffffffff, /* DMA counter register */
8192, /* DMA address alignment */
0xff, /* DMA burstsizes */
1, /* min effective DMA size */
0xffffffff, /* max DMA xfer size */
0xffffffff, /* segment boundary */
1, /* s/g list length */
1, /* granularity of device */
0 /* DMA transfer flags */
};
/*
 * NOTE(review): body of the dmem bucket pool initializer (returns
 * QLT_SUCCESS/QLT_FAILURE) — its signature and many interior lines are
 * missing from this copy.  The surviving lines show the intended
 * shape: copy the dmem_buckets template, allocate a bctl array and DDI
 * DMA handle/memory per bucket, bind each pool buffer, thread the
 * bctls onto the bucket's free list, and unwind through the goto
 * labels on any failure.  Do not edit without the complete original.
 */
{
int ndx, i;
if (qlt->qlt_bucketcnt[0] != 0) {
}
}
}
}
}
bsize = sizeof (dmem_buckets);
/*
 * The reason it is ndx - 1 everywhere is because the last bucket
 * pointer is NULL.
 */
for (i = 0; i < (ndx - 1); i++) {
(i * (int)sizeof (qlt_dmem_bucket_t)));
sizeof (qlt_dmem_bucket_t));
}
sizeof (qlt_dmem_bctl_t), KM_NOSLEEP);
goto alloc_bctl_failed;
}
p->dmem_bctls_mem = bctl;
goto alloc_handle_failed;
}
goto mem_alloc_failed;
}
goto addr_bind_handle_failed;
}
/* The whole pool buffer must map to a single DMA cookie. */
if (ncookie != 1) {
goto dmem_init_failed;
}
bsize = p->dmem_buf_size;
p->dmem_bctl_free_list = bctl;
p->dmem_nbufs_free = p->dmem_nbufs;
/* Carve the bound region into dmem_nbufs fixed-size buffers. */
for (i = 0; i < p->dmem_nbufs; i++) {
bctl->bctl_bucket = p;
0, 0);
bctl++;
}
}
return (QLT_SUCCESS);
/* Failure unwind: release whatever was set up for completed buckets. */
while (bc) {
}
(void) ddi_dma_unbind_handle(p->dmem_dma_handle);
mutex_destroy(&p->dmem_lock);
if (--ndx >= 0) {
bctl = p->dmem_bctl_free_list;
goto dmem_failure_loop;
}
((sizeof (dmem_buckets)/sizeof (void *))
*sizeof (qlt_dmem_bucket_t)));
return (QLT_FAILURE);
}
/*
 * NOTE(review): empty remnant of a void function — the name, parameter
 * list, and entire body were lost in extraction.
 */
void
{
}
/*
 * NOTE(review): remnant of a shutdown-path routine.  The orphaned
 * "num_free %d != num_total %d" format string suggests it warns when
 * in-use buffers remain, then walks a DMA handle list; the signature
 * and most statements are missing.
 */
void
{
/*
 * XXX Need to wait for free == total elements
 * XXX Not sure how other driver shutdown stuff is done.
 */
"num_free %d != num_total %d\n",
while (handle) {
}
}
/*
 * NOTE(review): remnant of the dmem bucket teardown routine.  What
 * survives walks a bucket's bctl free list, unbinds the bucket's DMA
 * handle, frees the per-bucket bctl array, destroys the bucket lock,
 * and finally releases the bucket array itself (sized from the
 * dmem_buckets template minus the NULL terminator).  Signature and
 * loop framing are missing.
 */
void
{
int ndx;
bctl = p->dmem_bctl_free_list;
while (bctl) {
}
bctl = p->dmem_bctl_free_list;
(void) ddi_dma_unbind_handle(p->dmem_dma_handle);
p->dmem_nbufs * sizeof (qlt_dmem_bctl_t));
mutex_destroy(&p->dmem_lock);
}
(((sizeof (dmem_buckets)/sizeof (void *))-1)*
sizeof (qlt_dmem_bucket_t)));
}
/*
 * NOTE(review): truncated public wrapper — recovers the qlt_state_t
 * from (missing) port/handle arguments and forwards to
 * qlt_i_dmem_alloc().  Return type and parameter list were lost.
 */
{
return (qlt_i_dmem_alloc((qlt_state_t *)
flags));
}
/*
 * NOTE(review): remnant of the bucket-based allocator.  Strategy
 * visible in the surviving lines: first try a bucket whose buffer size
 * covers the full request; failing that, scan buckets from largest to
 * smallest honoring *pminsize, recording the best size that could be
 * offered.  Several lock/list manipulation lines are missing, so the
 * control flow shown here is incomplete — do not edit without the
 * original.
 */
/* ARGSUSED */
{
int i;
if (size > QLT_DMEM_MAX_BUF_SIZE) {
goto qlt_try_partial_alloc;
}
/* 1st try to do a full allocation */
if (p->dmem_buf_size >= size) {
if (p->dmem_nbufs_free) {
mutex_enter(&p->dmem_lock);
bctl = p->dmem_bctl_free_list;
/* NOTE(review): a NULL re-check of bctl likely followed (lines missing). */
mutex_exit(&p->dmem_lock);
continue;
}
p->dmem_bctl_free_list =
p->dmem_nbufs_free--;
qlt->qlt_bufref[i]++;
mutex_exit(&p->dmem_lock);
} else {
qlt->qlt_bumpbucket++;
}
}
}
qlt->qlt_pmintry++;
/* Now go from high to low */
for (i = QLT_DMEM_NBUCKETS - 1; i >= 0; i--) {
p = qlt->dmem_buckets[i];
if (p->dmem_nbufs_free == 0)
continue;
if (!size_possible) {
/* Remember the largest size we could have offered. */
size_possible = p->dmem_buf_size;
}
if (*pminsize > p->dmem_buf_size) {
/* At this point we know the request is failing. */
if (size_possible) {
/*
 * This caller is asking too much. We already
 * know what we can give, so get out.
 */
break;
} else {
/*
 * Lets continue to find out and tell what
 * we can give.
 */
continue;
}
}
mutex_enter(&p->dmem_lock);
if (*pminsize <= p->dmem_buf_size) {
bctl = p->dmem_bctl_free_list;
/* Someone took it. */
size_possible = 0;
mutex_exit(&p->dmem_lock);
continue;
}
p->dmem_nbufs_free--;
mutex_exit(&p->dmem_lock);
qlt->qlt_pmin_ok++;
}
}
return (NULL);
}
/*
 * NOTE(review): truncated release callback — delegates to
 * qlt_dmem_free() with an ignored first argument.  The signature
 * (presumably taking the dbuf) was lost.
 */
/* ARGSUSED */
void
{
qlt_dmem_free(0, dbuf);
}
/*
 * NOTE(review): core of the dmem free path — pushes the buffer's bctl
 * back onto its bucket's free list under dmem_lock and bumps the free
 * count.  The signature and the line deriving bctl from the dbuf are
 * missing (as is the free-list link assignment preceding the head
 * update).
 */
/* ARGSUSED */
void
{
p = bctl->bctl_bucket;
mutex_enter(&p->dmem_lock);
p->dmem_bctl_free_list = bctl;
p->dmem_nbufs_free++;
mutex_exit(&p->dmem_lock);
}
/*
 * NOTE(review): remnant of the dbuf DMA sync routine.  It iterates a
 * per-dbuf handle list calling ddi_dma_sync (the "th" walker setup and
 * the error reporting inside the rv != DDI_SUCCESS branch are
 * missing), with an else-path syncing a single bucket-backed buffer.
 */
void
{
int rv;
/*
 * go through ddi handle list
 */
while (th) {
0, 0, sync_type);
if (rv != DDI_SUCCESS) {
}
}
} else {
p = bctl->bctl_bucket;
}
}
/*
 * A very lite version of ddi_dma_addr_bind_handle()
 */
/*
 * NOTE(review): the function advertised by the comment above survives
 * only as an empty brace pair; its signature and body are missing.
 */
{
}
/*
 * NOTE(review): headerless ddi_dma_attr(9S) initializer — 64-byte
 * alignment, QLT_DMA_SG_LIST_LENGTH s/g entries; presumably the
 * scatter/gather attributes used for dbuf mapping.  The declaration
 * line was lost.
 */
DMA_ATTR_V0, /* dma_attr_version */
0, /* low DMA address range */
0xffffffffffffffff, /* high DMA address range */
0xffffffff, /* DMA counter register */
64, /* DMA address alignment */
0xff, /* DMA burstsizes */
1, /* min effective DMA size */
0xffffffff, /* max DMA xfer size */
0xffffffff, /* segment boundary */
QLT_DMA_SG_LIST_LENGTH, /* s/g list length */
1, /* granularity of device */
0 /* DMA transfer flags */
};
/*
 * Allocate a qlt_dma_handle container and fill it with a ddi_dma_handle
 */
static qlt_dma_handle_t *
/*
 * NOTE(review): the function name, parameter list, the
 * ddi_dma_alloc_handle() call (only its trailing arguments survive),
 * and the qlt_handle setup are missing.  Returns NULL on DDI
 * allocation failure.
 */
{
int rv;
DDI_DMA_SLEEP, 0, &ddi_handle);
if (rv != DDI_SUCCESS) {
return (NULL);
}
return (qlt_handle);
}
/*
 * Allocate a list of qlt_dma_handle containers from the free list
 */
static qlt_dma_handle_t *
/*
 * NOTE(review): name and parameters missing.  Checks that the free
 * list can cover handle_count (returning NULL otherwise), then runs
 * down the free list detaching that many elements and returns the head
 * of the snipped sublist.
 */
{
int i;
/*
 * Make sure the free list can satisfy the request.
 * Once the free list is primed, it should satisfy most requests.
 * XXX Should there be a limit on pool size?
 */
return (NULL);
}
/*
 * The free list lock is held and the list is large enough to
 * satisfy this request. Run down the freelist and snip off
 * the number of elements needed for this request.
 */
for (i = 0; i < handle_count; i++) {
}
return (first_handle);
}
/*
 * Return a list of qlt_dma_handle containers to the free list.
 */
static void
/*
 * NOTE(review): name and parameters missing.  Walks the passed list,
 * unbinding any handle that still holds cookies so it can be reused,
 * counts the elements, then splices the whole list back into the
 * shared free list (the splice statements are missing).
 */
{
/*
 * Traverse the list and unbind the handles
 */
handle_count = 0;
while (tmp_handle != NULL) {
/*
 * If the handle is bound, unbind the handle so it can be
 * reused. It may not be bound if there was a bind failure.
 */
if (tmp_handle->num_cookies != 0) {
tmp_handle->num_cookies = 0;
}
handle_count++;
}
/*
 * Insert this list into the free list
 */
}
/*
 * cookies produced by mapping this dbuf
 */
/*
 * NOTE(review): accessor remnant — signature missing; returns the
 * dbuf's total DMA cookie count from its qlt_dma_sgl state.
 */
{
return (qsgl->cookie_count);
}
/*
 * NOTE(review): iterator-start remnant.  Returns NULL when cookies
 * were not prefetched; the prefetched-path return statement between
 * the if and else was lost.
 */
{
if (qsgl->cookie_prefetched)
else
return (NULL);
}
/*
 * Wrapper around ddi_dma_nextcookie that hides the ddi_dma_handle usage.
 */
void
/*
 * NOTE(review): signature and most branch bodies are missing; the
 * surviving skeleton distinguishes the prefetched-cookie path from
 * fetching out of the live DDI handles (first cookie on the initial
 * call, ddi_dma_nextcookie thereafter).
 */
{
if (qsgl->cookie_prefetched) {
} else {
if (fetch->num_cookies_fetched == 0) {
} else {
}
else
}
}
}
/*
 * Set this flag to fetch the DDI dma cookies from the handles here and
 * store them in the port private area of the dbuf. This will allow
 * faster access to the cookies in qlt_xfer_scsi_data() at the expense of
 * an extra copy. If the qlt->req_lock is hot, this may help.
 * Off by default; being a non-static global, it can be patched at
 * boot or via a debugger without rebuilding.
 */
int qlt_sgl_prefetch = 0;
/*ARGSUSED*/
/*
 * NOTE(review): remnant of the dbuf scatter/gather setup routine
 * (returns STMF_SUCCESS/STMF_FAILURE).  The signature and many
 * statements are missing; the surviving pseudo-code comment below
 * documents the intended flow.
 */
{
int i, rv;
int prefetch;
/*
 * pseudo code:
 * get dma handle list from cache - one per sglist entry
 * foreach sglist entry
 *     bind dma handle to sglist vaddr
 * allocate space for DMA state to store in db_port_private
 * fill in port private object
 * if prefetching
 *     move all dma cookies into db_port_private
 */
if (handle_list == NULL) {
return (STMF_FAILURE);
}
/*
 * Loop through sglist and bind each entry to a handle
 */
th = handle_list;
cookie_count = 0;
/*
 * Bind this sgl entry to a DDI dma handle
 */
if ((rv = ddi_dma_addr_bind_handle(
th->dma_handle,
NULL,
NULL,
&th->first_cookie,
/* NOTE(review): remaining bind arguments and the error check were lost. */
return (STMF_FAILURE);
}
/*
 * Add to total cookie count
 */
if (cookie_count > QLT_DMA_SG_LIST_LENGTH) {
/*
 * Request exceeds HBA limit
 */
return (STMF_FAILURE);
}
/* move to next ddi_dma_handle */
}
/*
 * Allocate our port private object for DMA mapping state.
 */
qsize = sizeof (qlt_dma_sgl_t);
if (prefetch) {
/* one extra ddi_dma_cookie allocated for alignment padding */
}
/*
 * Fill in the sgl
 */
qsgl->cookie_next_fetch = 0;
if (prefetch) {
/*
 * traverse handle list and move cookies to db_port_private
 */
th = handle_list;
for (i = 0; i < numbufs; i++) {
while (--cc > 0) {
}
}
}
return (STMF_SUCCESS);
}
void
/*
 * NOTE(review): teardown remnant — appears to release the dbuf's
 * qlt_dma_sgl state and return its DMA handles to the free list; the
 * signature and the freeing statements inside the if-branch are
 * missing.
 */
{
/*
 * unbind and free the dma handles
 */
if (qsgl->handle_list) {
/* go through ddi handle list */
}
}
/*
 * NOTE(review): remnant of the IOCB-count calculator.  Starts at one
 * IOCB and adds continuation entries while the cookie count exceeds
 * the per-IOCB cookie capacity; the signature, loop header, and
 * capacity arithmetic are missing.
 */
{
iocb_count = 1;
if (cookie_count > cnt) {
iocb_count++;
}
}
return (iocb_count);
}