emlxs_mem.c revision fcf3ce441efd61da9bb2884968af01cb7c1452cc
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Emulex. All rights reserved.
* Use is subject to License terms.
*/
#include "emlxs.h"
#ifdef SLI3_SUPPORT
#endif /* SLI3_SUPPORT */
/*
* emlxs_mem_alloc_buffer
*
* This routine allocates iocb/data buffer
* space and sets up the buffers for all rings on
* the specified board to use. The data buffers
* can be posted to the ring with the
* fc_post_buffer routine. The iocb buffers
* are used to make a temp copy of the response
* ring iocbs. Returns 0 if not enough memory,
* Returns 1 if successful.
*/
extern int32_t
{
int32_t i;
#ifdef EMLXS_SPARC
int32_t j;
#endif /* EMLXS_SPARC */
/*
* Allocate and Initialize MEM_NLP (0)
*/
mp->fc_memput_cnt = 0;
mp->fc_memstart_phys = 0;
mp->fc_memflag = 0;
mp->fc_mem_dma_handle = 0;
mp->fc_mem_dat_handle = 0;
mp->fc_memget_ptr = 0;
mp->fc_memget_end = 0;
mp->fc_memput_ptr = 0;
mp->fc_memput_end = 0;
(void) emlxs_mem_free_buffer(hba);
"NLP memory pool.");
return (0);
}
/*
* Link buffer into beginning of list. The first pointer in each
* buffer is a forward pointer to the next buffer.
*/
}
}
/*
* Allocate and Initialize MEM_IOCB (1)
*/
mp->fc_memput_cnt = 0;
mp->fc_memflag = 0;
mp->fc_memstart_phys = 0;
mp->fc_mem_dma_handle = 0;
mp->fc_mem_dat_handle = 0;
mp->fc_memget_ptr = 0;
mp->fc_memget_end = 0;
mp->fc_memput_ptr = 0;
mp->fc_memput_end = 0;
(void) emlxs_mem_free_buffer(hba);
"IOCB memory pool.");
return (0);
}
/*
* Link buffer into beginning of list. The first pointer in each
* buffer is a forward pointer to the next buffer.
*/
}
}
/*
* Allocate and Initialize MEM_MBOX (2)
*/
mp->fc_memput_cnt = 0;
mp->fc_memflag = 0;
mp->fc_memstart_phys = 0;
mp->fc_mem_dma_handle = 0;
mp->fc_mem_dat_handle = 0;
mp->fc_memget_ptr = 0;
mp->fc_memget_end = 0;
mp->fc_memput_ptr = 0;
mp->fc_memput_end = 0;
(void) emlxs_mem_free_buffer(hba);
"MBOX memory pool.");
return (0);
}
/*
* Link buffer into beginning of list. The first pointer in each
* buffer is a forward pointer to the next buffer.
*/
}
}
/*
* Initialize fc_table
*/
/* Allocate the fc_table */
(void) emlxs_mem_free_buffer(hba);
"fc_table buffer.");
return (0);
}
#ifdef EMLXS_SPARC
/*
* Allocate and Initialize FCP MEM_BPL's. This is for increased
* performance on sparc
*/
(void) emlxs_mem_free_buffer(hba);
"FCP BPL table buffer.");
return (0);
}
(void) emlxs_mem_free_buffer(hba);
"FCP BPL DMA buffers.");
return (0);
}
}
#endif /* EMLXS_SPARC */
/*
* Allocate and Initialize MEM_BPL (3)
*/
mp->fc_memstart_virt = 0;
mp->fc_memstart_phys = 0;
mp->fc_mem_dma_handle = 0;
mp->fc_mem_dat_handle = 0;
mp->fc_memget_ptr = 0;
mp->fc_memget_end = 0;
mp->fc_memput_ptr = 0;
mp->fc_memput_end = 0;
mp->fc_total_memsize = 0;
mp->fc_memput_cnt = 0;
/* Allocate buffer pools for above buffer structures */
for (i = 0; i < mp->fc_numblks; i++) {
/*
* If this is a DMA buffer we need alignment on a page so we
* don't want to worry about buffers spanning page boundaries
* when mapping memory for the adapter.
*/
(void) emlxs_mem_free_buffer(hba);
"BPL segment buffer.");
return (0);
}
(void) emlxs_mem_free_buffer(hba);
"BPL DMA buffer.");
return (0);
}
/*
* Link buffer into beginning of list. The first pointer in
* each buffer is a forward pointer to the next buffer.
*/
if (oldbp == 0) {
}
}
/*
* These represent the unsolicited ELS buffers we preallocate.
*/
mp->fc_memstart_virt = 0;
mp->fc_memstart_phys = 0;
mp->fc_mem_dma_handle = 0;
mp->fc_mem_dat_handle = 0;
mp->fc_memget_ptr = 0;
mp->fc_memget_end = 0;
mp->fc_memput_ptr = 0;
mp->fc_memput_end = 0;
mp->fc_total_memsize = 0;
mp->fc_memput_cnt = 0;
/* Allocate buffer pools for above buffer structures */
for (i = 0; i < mp->fc_numblks; i++) {
/*
* If this is a DMA buffer we need alignment on a page so we
* don't want to worry about buffers spanning page boundaries
* when mapping memory for the adapter.
*/
(void) emlxs_mem_free_buffer(hba);
"MEM_BUF Segment buffer.");
return (0);
}
(void) emlxs_mem_free_buffer(hba);
"MEM_BUF DMA buffer.");
return (0);
}
/*
* Link buffer into beginning of list. The first pointer in
* each buffer is a forward pointer to the next buffer.
*/
if (oldbp == 0) {
}
}
/*
* These represent the unsolicited IP buffers we preallocate.
*/
mp->fc_memstart_virt = 0;
mp->fc_memstart_phys = 0;
mp->fc_mem_dma_handle = 0;
mp->fc_mem_dat_handle = 0;
mp->fc_memget_ptr = 0;
mp->fc_memget_end = 0;
mp->fc_memput_ptr = 0;
mp->fc_memput_end = 0;
mp->fc_total_memsize = 0;
mp->fc_memput_cnt = 0;
/* Allocate buffer pools for above buffer structures */
for (i = 0; i < mp->fc_numblks; i++) {
/*
* If this is a DMA buffer we need alignment on a page so we
* don't want to worry about buffers spanning page boundaries
* when mapping memory for the adapter.
*/
(void) emlxs_mem_free_buffer(hba);
"IP_BUF Segment buffer.");
return (0);
}
(void) emlxs_mem_free_buffer(hba);
"IP_BUF DMA buffer.");
return (0);
}
/*
* Link buffer into beginning of list. The first pointer in
* each buffer is a forward pointer to the next buffer.
*/
if (oldbp == 0) {
}
}
/*
* These represent the unsolicited CT buffers we preallocate.
*/
mp->fc_memstart_virt = 0;
mp->fc_memstart_phys = 0;
mp->fc_mem_dma_handle = 0;
mp->fc_mem_dat_handle = 0;
mp->fc_memget_ptr = 0;
mp->fc_memget_end = 0;
mp->fc_memput_ptr = 0;
mp->fc_memput_end = 0;
mp->fc_total_memsize = 0;
mp->fc_memput_cnt = 0;
/* Allocate buffer pools for above buffer structures */
for (i = 0; i < mp->fc_numblks; i++) {
/*
* If this is a DMA buffer we need alignment on a page so we
* don't want to worry about buffers spanning page boundaries
* when mapping memory for the adapter.
*/
(void) emlxs_mem_free_buffer(hba);
"CT_BUF Segment buffer.");
return (0);
}
(void) emlxs_mem_free_buffer(hba);
"CT_BUF DMA buffer.");
return (0);
}
/*
* Link buffer into beginning of list. The first pointer in
* each buffer is a forward pointer to the next buffer.
*/
if (oldbp == 0) {
}
}
#ifdef SFCT_SUPPORT
/*
* These represent the unsolicited FCT buffers we preallocate.
*/
mp->fc_memstart_virt = 0;
mp->fc_memstart_phys = 0;
mp->fc_mem_dma_handle = 0;
mp->fc_mem_dat_handle = 0;
mp->fc_memget_ptr = 0;
mp->fc_memget_end = 0;
mp->fc_memput_ptr = 0;
mp->fc_memput_end = 0;
mp->fc_total_memsize = 0;
mp->fc_memput_cnt = 0;
/* Allocate buffer pools for above buffer structures */
for (i = 0; i < mp->fc_numblks; i++) {
/*
* If this is a DMA buffer we need alignment on a page so we
* don't want to worry about buffers spanning page boundaries
* when mapping memory for the adapter.
*/
(void) emlxs_mem_free_buffer(hba);
"FCT_BUF Segment buffer.");
return (0);
}
(void) emlxs_mem_free_buffer(hba);
"FCT_BUF DMA buffer.");
return (0);
}
/*
* Link buffer into beginning of list. The first pointer in
* each buffer is a forward pointer to the next buffer.
*/
if (oldbp == 0) {
}
}
#endif /* SFCT_SUPPORT */
for (i = 0; i < FC_MAX_SEG; i++) {
char *seg;
switch (i) {
case MEM_NLP:
seg = "MEM_NLP";
break;
case MEM_IOCB:
seg = "MEM_IOCB";
break;
case MEM_MBOX:
seg = "MEM_MBOX";
break;
case MEM_BPL:
seg = "MEM_BPL";
break;
case MEM_BUF:
seg = "MEM_BUF";
break;
case MEM_IPBUF:
seg = "MEM_IPBUF";
break;
case MEM_CTBUF:
seg = "MEM_CTBUF";
break;
#ifdef SFCT_SUPPORT
case MEM_FCTBUF:
seg = "MEM_FCTBUF";
break;
#endif /* SFCT_SUPPORT */
default:
break;
}
"Segment: %s mp=%p size=%x count=%d flags=%x base=%p",
mp->fc_memget_ptr);
}
return (1);
} /* emlxs_mem_alloc_buffer() */
/*
* emlxs_mem_free_buffer
*
* This routine frees the iocb/data buffer space
* and TGTM resource.
*/
extern int
{
int32_t j;
/* Check for deferred pkt completion */
}
/* Check for deferred ub completion */
}
/* Check for deferred iocb tx */
hba->mbox_iocbq = 0;
/* Set the error status of the iocb */
switch (iocb->ulpCommand) {
case CMD_FCP_ICMND_CR:
case CMD_FCP_ICMND_CX:
case CMD_FCP_IREAD_CR:
case CMD_FCP_IREAD_CX:
case CMD_FCP_IWRITE_CR:
case CMD_FCP_IWRITE_CX:
case CMD_FCP_ICMND64_CR:
case CMD_FCP_ICMND64_CX:
case CMD_FCP_IREAD64_CR:
case CMD_FCP_IREAD64_CX:
case CMD_FCP_IWRITE64_CR:
case CMD_FCP_IWRITE64_CX:
break;
case CMD_ELS_REQUEST_CR:
case CMD_ELS_REQUEST_CX:
case CMD_XMIT_ELS_RSP_CX:
case CMD_ELS_REQUEST64_CR: /* This is the only one used */
/* currently for deferred */
/* iocb tx */
case CMD_ELS_REQUEST64_CX:
case CMD_XMIT_ELS_RSP64_CX:
break;
case CMD_GEN_REQUEST64_CR:
case CMD_GEN_REQUEST64_CX:
break;
default:
if (rp) {
}
#ifdef SFCT_SUPPORT
}
#endif /* SFCT_SUPPORT */
}
}
}
}
/* free the mapped address match area for each ring */
for (j = 0; j < hba->ring_count; j++) {
/* Flush the ring */
addr = 0;
if ((j == FC_ELS_RING) ||
(j == FC_CT_RING) ||
#ifdef SFCT_SUPPORT
(j == FC_FCT_RING) ||
#endif /* SFCT_SUPPORT */
(j == FC_IP_RING)) {
}
if (j == FC_ELS_RING) {
} else if (j == FC_CT_RING) {
} else if (j == FC_IP_RING) {
}
#ifdef SFCT_SUPPORT
else if (j == FC_FCT_RING) {
}
#endif /* SFCT_SUPPORT */
}
}
}
#ifdef SLI3_SUPPORT
#ifdef SFCT_SUPPORT
}
#endif /* SFCT_SUPPORT */
}
#endif /* SLI3_SUPPORT */
/* Free everything on mbox queue */
while (mbox) {
}
hba->mbox_queue_flag = 0;
/* Free the nodes */
for (j = 0; j < MAX_VPORTS; j++) {
if (vport->node_count) {
}
}
/* Free memory associated with all buffers on get buffer pool */
if (hba->iotag_table) {
hba->iotag_table = 0;
}
#ifdef EMLXS_SPARC
if (hba->fcp_bpl_table) {
hba->fcp_bpl_table = 0;
}
}
#endif /* EMLXS_SPARC */
/* Free the memory segments */
for (j = 0; j < FC_MAX_SEG; j++) {
/* MEM_NLP, MEM_IOCB, MEM_MBOX */
if (j < MEM_BPL) {
if (mp->fc_memstart_virt) {
}
continue;
}
/*
* MEM_BPL, MEM_BUF, MEM_ELSBUF, MEM_IPBUF, MEM_CTBUF,
* MEM_FCTBUF
*/
/* Free memory associated with all buffers on get buffer pool */
}
/* Free memory associated with all buffers on put buffer pool */
}
}
return (0);
} /* emlxs_mem_free_buffer() */
extern uint8_t *
{
"MEM_BUF_ALLOC buffer.");
return (0);
}
"MEM_BUF_ALLOC DMA buffer.");
return (0);
}
} /* emlxs_mem_buf_alloc() */
extern uint8_t *
{
return (NULL);
}
return (bp);
} /* emlxs_mem_buf_free() */
/*
* emlxs_mem_get
*
* This routine will get a free memory buffer.
* seg identifies which buffer pool to use.
* Returns the free buffer ptr or 0 for no buf
*/
extern uint8_t *
{
/* range check on seg argument */
if (seg >= FC_MAX_SEG) {
return (NULL);
}
/* Check if memory segment destroyed! */
if (mp->fc_memsize == 0) {
return (NULL);
}
/*
* EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg, "mem_get[%d]:
* memget=%p,%d memput=%p,%d", seg, mp->fc_memget_ptr,
* mp->fc_memget_cnt, mp->fc_memput_ptr, mp->fc_memput_cnt);
*/
top:
if (mp->fc_memget_ptr) {
/*
* Checking (seg == MEM_MBOX || seg == MEM_IOCB || seg ==
* MEM_NLP)
*/
/* Verify buffer is in this memory region */
/* Invalidate the get list */
mp->fc_memget_cnt = 0;
"Corruption detected: seg=%x bp=%p "
(void) thread_create(NULL, 0,
v.v_maxsyspri - 2);
return (NULL);
}
}
/*
* If a memory block exists, take it off freelist and return
* it to the user.
*/
mp->fc_memget_cnt = 0;
} else {
/*
* Pointer to the next free buffer
*/
mp->fc_memget_cnt--;
}
switch (seg) {
case MEM_MBOX:
break;
case MEM_IOCB:
break;
case MEM_NLP:
break;
case MEM_BPL:
case MEM_BUF: /* MEM_ELSBUF */
case MEM_IPBUF:
case MEM_CTBUF:
#ifdef SFCT_SUPPORT
case MEM_FCTBUF:
#endif /* SFCT_SUPPORT */
default:
break;
}
} else {
if (mp->fc_memput_ptr) {
/*
* Move buffer from memput to memget
*/
mp->fc_memput_cnt = 0;
goto top;
}
"Pool empty: seg=%x lowmem=%x free=%x",
/* HBASTATS.memAllocErr++; */
}
/*
* bp2 = mp->fc_memget_ptr;
*
* EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg, "mem_get[%d]-:
* memget=%p,%d memput=%p,%d >%x", seg, mp->fc_memget_ptr,
* mp->fc_memget_cnt, mp->fc_memput_ptr, mp->fc_memput_cnt, ((bp2)?
* *((uint8_t **) bp2):0));
*/
return (bp);
} /* emlxs_mem_get() */
extern uint8_t *
{
if (!bp) {
return (NULL);
}
/* Check on seg argument */
if (seg >= FC_MAX_SEG) {
return (NULL);
}
switch (seg) {
case MEM_MBOX:
return (bp);
}
break;
case MEM_IOCB:
/* Check to make sure the IOCB is pool allocated */
return (bp);
}
/*
* Any IOCBQ with a packet attached did not come from our
* pool
*/
return (bp);
}
break;
case MEM_NLP:
/* Check to make sure the NODE is pool allocated */
return (bp);
}
break;
case MEM_BPL:
case MEM_BUF: /* MEM_ELSBUF */
case MEM_IPBUF:
case MEM_CTBUF:
#ifdef SFCT_SUPPORT
case MEM_FCTBUF:
#endif /* SFCT_SUPPORT */
default:
}
return (bp);
}
/* Check to make sure the MATCHMAP is pool allocated */
return (bp);
}
break;
}
/* Free the pool object */
/*
* EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg, "mem_put[%d]:
* memget=%p,%d memput=%p,%d", seg, mp->fc_memget_ptr,
* mp->fc_memget_cnt, mp->fc_memput_ptr, mp->fc_memput_cnt);
*/
/* Check if memory segment destroyed! */
if (mp->fc_memsize == 0) {
return (NULL);
}
/* Check if buffer was just freed */
return (NULL);
}
/* Validate the buffer */
/*
* Checking (seg == MEM_BUF) || (seg == MEM_BPL) || (seg ==
* MEM_CTBUF) || (seg == MEM_IPBUF) || (seg == MEM_FCTBUF)
*/
"Corruption detected: seg=%x tag=%x bp=%p",
return (NULL);
}
}
/* Checking (seg == MEM_MBOX || seg == MEM_IOCB || seg == MEM_NLP) */
"Corruption detected: seg=%x bp=%p base=%p end=%p",
return (NULL);
}
}
/* Release to the first place of the freelist */
} else {
mp->fc_memput_cnt++;
}
/*
* EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg, "mem_put[%d]-:
* memget=%p,%d memput=%p,%d", seg, mp->fc_memget_ptr,
* mp->fc_memget_cnt, mp->fc_memput_ptr, mp->fc_memput_cnt);
*/
return (bp);
} /* emlxs_mem_put() */
/*
* Look up the virtual address given a mapped address
*/
extern MATCHMAP *
{
case FC_ELS_RING:
prev = 0;
while (mp) {
if (prev == 0) {
} else {
}
}
return (mp);
}
}
"ELS Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
break;
case FC_CT_RING:
prev = 0;
while (mp) {
if (prev == 0) {
} else {
}
}
return (mp);
}
}
"CT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
break;
case FC_IP_RING:
prev = 0;
while (mp) {
if (prev == 0) {
} else {
}
}
return (mp);
}
}
"IP Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
break;
#ifdef SFCT_SUPPORT
case FC_FCT_RING:
prev = 0;
while (mp) {
if (prev == 0) {
} else {
}
}
return (mp);
}
}
"FCT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
break;
#endif /* SFCT_SUPPORT */
}
return (0);
} /* emlxs_mem_get_vaddr() */
/*
* Given a virtual address, bp, generate the physical mapped address and place
* it where addr points to. Save the address pair for lookup later.
*/
extern void
{
case FC_ELS_RING:
/*
* Update slot fc_mpon points to then bump it fc_mpoff is
* pointer head of the list. fc_mpon is pointer tail of the
* list.
*/
} else {
}
/* return mapped address */
} else {
/* return mapped address */
}
break;
case FC_CT_RING:
/*
* Update slot fc_mpon points to then bump it fc_mpoff is
* pointer head of the list. fc_mpon is pointer tail of the
* list.
*/
} else {
}
/* return mapped address */
} else {
/* return mapped address */
}
break;
case FC_IP_RING:
/*
* Update slot fc_mpon points to then bump it fc_mpoff is
* pointer head of the list. fc_mpon is pointer tail of the
* list.
*/
} else {
}
/* return mapped address */
} else {
/* return mapped address */
}
break;
#ifdef SFCT_SUPPORT
case FC_FCT_RING:
/*
* Update slot fc_mpon points to then bump it fc_mpoff is
* pointer head of the list. fc_mpon is pointer tail of the
* list.
*/
} else {
}
/* return mapped address */
} else {
/* return mapped address */
}
break;
#endif /* SFCT_SUPPORT */
}
} /* emlxs_mem_map_vaddr() */
#ifdef SLI3_SUPPORT
static uint32_t
{
/* Get the system's page size in a DDI-compliant way. */
"Unable to alloc HBQ.");
return (ENOMEM);
}
}
return (0);
} /* emlxs_hbq_alloc() */
extern uint32_t
{
void *ioa2;
uint32_t j;
switch (hbq_id) {
case EMLXS_ELS_HBQ_ID:
seg = MEM_ELSBUF;
break;
case EMLXS_IP_HBQ_ID:
ringno = FC_IP_RING;
break;
case EMLXS_CT_HBQ_ID:
ringno = FC_CT_RING;
break;
#ifdef SFCT_SUPPORT
case EMLXS_FCT_HBQ_ID:
seg = MEM_FCTBUF;
break;
#endif /* SFCT_SUPPORT */
default:
"emlxs_hbq_setup: Invalid HBQ id. (%x)", hbq_id);
return (1);
}
/* Configure HBQ */
/* Get a Mailbox buffer to setup mailbox commands for CONFIG_HBQ */
"emlxs_hbq_setup: Unable to get mailbox.");
return (1);
}
/* Allocate HBQ Host buffer and Initialize the HBQEs */
"emlxs_hbq_setup: Unable to allocate HBQ.");
return (1);
}
/* HBA to a ring e.g. */
/* Ring0=b0001, Ring1=b0010, Ring2=b0100 */
hbq->HBQ_PutIdx_next = 0;
hbq->HBQ_GetIdx = 0;
/* Fill in POST BUFFERs in HBQE */
/* Allocate buffer to post */
0) {
"emlxs_hbq_setup: Unable to allocate HBQ buffer. "
"cnt=%d", j);
return (1);
}
}
/* Issue CONFIG_HBQ */
"emlxs_hbq_setup: Unable to config HBQ. cmd=%x status=%x",
return (1);
}
return (0);
} /* emlxs_hbq_setup */
static void
{
uint32_t j;
switch (hbq_id) {
case EMLXS_ELS_HBQ_ID:
seg = MEM_ELSBUF;
HBASTATS.ElsUbPosted = 0;
break;
case EMLXS_IP_HBQ_ID:
HBASTATS.IpUbPosted = 0;
break;
case EMLXS_CT_HBQ_ID:
HBASTATS.CtUbPosted = 0;
break;
#ifdef SFCT_SUPPORT
case EMLXS_FCT_HBQ_ID:
seg = MEM_FCTBUF;
HBASTATS.FctUbPosted = 0;
break;
#endif /* SFCT_SUPPORT */
default:
return;
}
for (j = 0; j < hbq->HBQ_PostBufCnt; j++) {
}
hbq->HBQ_PostBufCnt = 0;
}
return;
} /* emlxs_hbq_free_all() */
extern void
{
void *ioa2;
switch (hbq_id) {
case EMLXS_ELS_HBQ_ID:
break;
case EMLXS_IP_HBQ_ID:
break;
case EMLXS_CT_HBQ_ID:
break;
#ifdef SFCT_SUPPORT
case EMLXS_FCT_HBQ_ID:
break;
#endif /* SFCT_SUPPORT */
default:
return;
}
return;
}
}
return;
} /* emlxs_update_HBQ_index() */
#endif /* SLI3_SUPPORT */