/*
* CDDL HEADER START
*
* Copyright(c) 2007-2009 Intel Corporation. All rights reserved.
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at:
*      http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When using or redistributing this file, you may do so under the
* License only. No other modification of this header is permitted.
*
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include "ixgbe_sw.h"
/*
 * Forward declarations for the per-ring descriptor-ring and
 * control-block allocation/free helpers defined below.
 */
static int ixgbe_alloc_tbd_ring(ixgbe_tx_ring_t *);
static void ixgbe_free_tbd_ring(ixgbe_tx_ring_t *);
static int ixgbe_alloc_rbd_ring(ixgbe_rx_data_t *);
static void ixgbe_free_rbd_ring(ixgbe_rx_data_t *);
static int ixgbe_alloc_tcb_lists(ixgbe_tx_ring_t *);
static void ixgbe_free_tcb_lists(ixgbe_tx_ring_t *);
static int ixgbe_alloc_rcb_lists(ixgbe_rx_data_t *);
static void ixgbe_free_rcb_lists(ixgbe_rx_data_t *);
/*
 * NOTE(review): this conditional block is empty in this chunk; it
 * presumably selected a sparc-specific definition (e.g. the DMA
 * alignment macro used below) — confirm against the full source.
 */
#ifdef __sparc
#else
#endif
/*
 * DMA attributes — presumably for the descriptor rings.
 * NOTE(review): the declaration line (e.g. "static ddi_dma_attr_t
 * ... = {") is missing from this chunk, and the scatter/gather list
 * length entry that belongs between "maximum segment size" and
 * "granularity" in a ddi_dma_attr_t is also absent — the initializer
 * below is incomplete as shown. Confirm against the full source.
 */
DMA_ATTR_V0, /* version number */
0x0000000000000000ull, /* low address */
0xFFFFFFFFFFFFFFFFull, /* high address */
0x00000000FFFFFFFFull, /* dma counter max */
IXGBE_DMA_ALIGNMENT, /* alignment */
0x00000FFF, /* burst sizes */
0x00000001, /* minimum transfer size */
0x00000000FFFFFFFFull, /* maximum transfer size */
0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
0x00000001, /* granularity */
DDI_DMA_FLAGERR /* DMA flags */
};
/*
 * DMA attributes — presumably for the packet buffers.
 * NOTE(review): same truncation as the initializer above — the
 * declaration line and the scatter/gather list length entry are
 * missing from this chunk. Confirm against the full source.
 */
DMA_ATTR_V0, /* version number */
0x0000000000000000ull, /* low address */
0xFFFFFFFFFFFFFFFFull, /* high address */
0x00000000FFFFFFFFull, /* dma counter max */
IXGBE_DMA_ALIGNMENT, /* alignment */
0x00000FFF, /* burst sizes */
0x00000001, /* minimum transfer size */
0x00000000FFFFFFFFull, /* maximum transfer size */
0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
0x00000001, /* granularity */
DDI_DMA_FLAGERR /* DMA flags */
};
/*
 * DMA attributes for transmit.
 * NOTE(review): the declaration line ("static ddi_dma_attr_t ... = {")
 * is missing from this chunk. Unlike the two initializers above, this
 * one does carry a scatter/gather list length (MAX_COOKIE) and uses
 * byte alignment (1), consistent with binding arbitrary mblk payloads.
 */
DMA_ATTR_V0, /* version number */
0x0000000000000000ull, /* low address */
0xFFFFFFFFFFFFFFFFull, /* high address */
0x00000000FFFFFFFFull, /* dma counter max */
1, /* alignment */
0x00000FFF, /* burst sizes */
0x00000001, /* minimum transfer size */
0x00000000FFFFFFFFull, /* maximum transfer size */
0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
MAX_COOKIE, /* scatter/gather list length */
0x00000001, /* granularity */
DDI_DMA_FLAGERR /* DMA flags */
};
/*
 * DMA access attributes for descriptors.
 * NOTE(review): only the closing brace survives in this chunk; the
 * ddi_device_acc_attr_t declaration and field initializers are
 * missing. Confirm against the full source.
 */
};
/*
 * DMA access attributes for buffers.
 * NOTE(review): only the closing brace survives in this chunk; the
 * ddi_device_acc_attr_t declaration and field initializers are
 * missing. Confirm against the full source.
 */
};
/*
 * Allocate DMA resources for all rx/tx rings.
 * NOTE(review): the function name/signature line is missing from this
 * chunk (only "int" remains); from the loops over ixgbe->num_rx_rings
 * and ixgbe->num_tx_rings this is presumably ixgbe_alloc_dma(ixgbe_t *).
 * The if-conditions guarding each "goto alloc_dma_failure;", the calls
 * to the per-ring alloc helpers, and the alloc_dma_failure cleanup
 * label are all absent as shown — confirm against the full source.
 */
int
{
int i;
for (i = 0; i < ixgbe->num_rx_rings; i++) {
/*
 * Allocate receive descriptor ring and control block lists
 */
goto alloc_dma_failure;
goto alloc_dma_failure;
}
for (i = 0; i < ixgbe->num_tx_rings; i++) {
/*
 * Allocate transmit descriptor ring and control block lists
 */
goto alloc_dma_failure;
goto alloc_dma_failure;
}
return (IXGBE_SUCCESS);
return (IXGBE_FAILURE);
}
/*
 * Free DMA resources of all rx/tx rings.
 * NOTE(review): the function name/signature line is missing from this
 * chunk (only "void" remains); presumably ixgbe_free_dma(ixgbe_t *).
 * The per-ring free calls inside both loop bodies are absent as
 * shown — confirm against the full source.
 */
void
{
int i;
/*
 * Free DMA resources of rx rings
 */
for (i = 0; i < ixgbe->num_rx_rings; i++) {
}
/*
 * Free DMA resources of tx rings
 */
for (i = 0; i < ixgbe->num_tx_rings; i++) {
}
}
/*
 * Allocate the software receive-ring data: work list, free list, and
 * the rx control blocks backing both.
 * NOTE(review): the function name/signature line is missing from this
 * chunk, as are the kmem_zalloc calls and the if-conditions guarding
 * each failure branch, and the alloc_rx_data_failure cleanup label.
 * Also note the log strings below misspell "Could" as "Cound"; fixing
 * a runtime string is a behavior change and is left for a full edit
 * against the complete source.
 */
int
{
/*
 * Allocate memory for software receive rings
 */
return (IXGBE_FAILURE);
}
/*
 * Allocate memory for the work list.
 */
"Could not allocate memory for rx work list");
goto alloc_rx_data_failure;
}
/*
 * Allocate memory for the free list.
 */
"Cound not allocate memory for rx free list");
goto alloc_rx_data_failure;
}
/*
 * Allocate memory for the rx control blocks for work list and
 * free list.
 */
"Cound not allocate memory for rx control blocks");
goto alloc_rx_data_failure;
}
return (IXGBE_SUCCESS);
return (IXGBE_FAILURE);
}
/*
 * Free the software receive-ring data (rx control blocks, free list,
 * work list).
 * NOTE(review): the function name/signature line, the NULL guard
 * before "return;", and the kmem_free calls (one of which the orphaned
 * "sizeof (rx_control_block_t) * rcb_count);" argument belongs to) are
 * missing from this chunk — confirm against the full source.
 */
void
{
return;
sizeof (rx_control_block_t) * rcb_count);
}
}
}
}
/*
 * ixgbe_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 *
 * NOTE(review): the signature line, the size computation feeding
 * "size +=", the ddi_dma_alloc_handle / ddi_dma_mem_alloc /
 * ddi_dma_addr_bind_handle calls whose results the "ret" checks test,
 * the bzero of the descriptor area, and the error-path cleanup inside
 * the failure branches are all missing from this chunk — confirm
 * against the full source.
 */
static int
{
int ret;
/*
 * If tx head write-back is enabled, an extra tbd is allocated
 * to save the head write-back value
 */
if (ixgbe->tx_head_wb_enable) {
size += sizeof (union ixgbe_adv_tx_desc);
}
/*
 * Allocate a DMA handle for the transmit descriptor
 * memory area.
 */
if (ret != DDI_SUCCESS) {
"Could not allocate tbd dma handle: %x", ret);
return (IXGBE_FAILURE);
}
/*
 * Allocate memory to DMA data to and from the transmit
 * descriptors.
 */
if (ret != DDI_SUCCESS) {
"Could not allocate tbd dma memory: %x", ret);
}
return (IXGBE_FAILURE);
}
/*
 * Initialize the entire transmit buffer descriptor area to zero
 */
/*
 * Allocates DMA resources for the memory that was allocated by
 * the ddi_dma_mem_alloc call. The DMA resources then get bound
 * to the memory address.
 */
if (ret != DDI_DMA_MAPPED) {
"Could not bind tbd dma resource: %x", ret);
}
}
return (IXGBE_FAILURE);
}
return (IXGBE_SUCCESS);
}
/*
 * ixgbe_free_tbd_ring - Free the tx descriptors of one ring.
 *
 * NOTE(review): the signature line and the unbind/free/NULL-out
 * statements inside the nested guards are missing from this chunk;
 * only the closing braces remain — confirm against the full source.
 */
static void
{
}
}
}
}
/*
 * ixgbe_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 *
 * NOTE(review): mirrors ixgbe_alloc_tbd_ring above and is truncated
 * the same way — the signature line, the DDI DMA allocation/binding
 * calls tested by "ret", the bzero, and the failure-path cleanup are
 * missing from this chunk. Confirm against the full source.
 */
static int
{
int ret;
/*
 * Allocate a new DMA handle for the receive descriptor
 * memory area.
 */
if (ret != DDI_SUCCESS) {
"Could not allocate rbd dma handle: %x", ret);
return (IXGBE_FAILURE);
}
/*
 * Allocate memory to DMA data to and from the receive
 * descriptors.
 */
if (ret != DDI_SUCCESS) {
"Could not allocate rbd dma memory: %x", ret);
}
return (IXGBE_FAILURE);
}
/*
 * Initialize the entire receive buffer descriptor area to zero
 */
/*
 * Allocates DMA resources for the memory that was allocated by
 * the ddi_dma_mem_alloc call.
 */
if (ret != DDI_DMA_MAPPED) {
"Could not bind rbd dma resource: %x", ret);
}
}
return (IXGBE_FAILURE);
}
return (IXGBE_SUCCESS);
}
/*
 * ixgbe_free_rbd_ring - Free the rx descriptors of one ring.
 *
 * NOTE(review): the signature line and the unbind/free/NULL-out
 * statements inside the nested guards are missing from this chunk;
 * only the closing braces remain — confirm against the full source.
 */
static void
{
}
}
}
}
/*
 * ixgbe_alloc_dma_buffer - Allocate DMA resources for a DMA buffer.
 *
 * NOTE(review): the signature line and the ddi_dma_alloc_handle /
 * ddi_dma_mem_alloc / ddi_dma_addr_bind_handle calls whose results
 * the "ret" checks test are missing from this chunk, along with the
 * cleanup statements inside each failure branch (the stray "}" lines
 * before each error message are their remnants). Confirm against the
 * full source.
 */
static int
{
int ret;
if (ret != DDI_SUCCESS) {
"Could not allocate dma buffer handle: %x", ret);
return (IXGBE_FAILURE);
}
if (ret != DDI_SUCCESS) {
}
"Could not allocate dma buffer memory: %x", ret);
return (IXGBE_FAILURE);
}
if (ret != DDI_DMA_MAPPED) {
}
}
"Could not bind dma buffer handle: %x", ret);
return (IXGBE_FAILURE);
}
return (IXGBE_SUCCESS);
}
/*
 * ixgbe_free_dma_buffer - Free one allocated area of dma memory and handle.
 *
 * NOTE(review): the signature line, the guard whose else-branch is the
 * early "return;", and the unbind/free statements are missing from
 * this chunk — confirm against the full source.
 */
void
{
} else {
return;
}
}
}
}
/*
 * ixgbe_alloc_tcb_lists - Memory allocation for the transmit control blocks
 * of one ring.
 *
 * NOTE(review): the signature line, the kmem_zalloc calls and their
 * guarding if-conditions, the per-tcb loop header, the tx-buffer
 * allocation call tested by the second "ret" check, and the
 * alloc_tcb_lists_fail cleanup label are missing from this chunk —
 * confirm against the full source. The log strings below also
 * misspell "Could" as "Cound"; fixing a runtime string is a behavior
 * change and is left for a full edit against the complete source.
 */
static int
{
int i;
int ret;
/*
 * Allocate memory for the work list.
 */
"Cound not allocate memory for tx work list");
return (IXGBE_FAILURE);
}
/*
 * Allocate memory for the free list.
 */
"Cound not allocate memory for tx free list");
return (IXGBE_FAILURE);
}
/*
 * Allocate memory for the tx control blocks of free list.
 */
kmem_zalloc(sizeof (tx_control_block_t) *
"Cound not allocate memory for tx control blocks");
return (IXGBE_FAILURE);
}
/*
 * Allocate dma memory for the tx control block of free list.
 */
/*
 * Pre-allocate dma handles for transmit. These dma handles
 * will be dynamically bound to the data buffers passed down
 * from the upper layers at the time of transmitting.
 */
&tcb->tx_dma_handle);
if (ret != DDI_SUCCESS) {
"Could not allocate tx dma handle: %x", ret);
goto alloc_tcb_lists_fail;
}
/*
 * Pre-allocate transmit buffers for packets that the
 * size is less than bcopy_thresh.
 */
if (ret != IXGBE_SUCCESS) {
goto alloc_tcb_lists_fail;
}
}
return (IXGBE_SUCCESS);
return (IXGBE_FAILURE);
}
/*
 * ixgbe_free_tcb_lists - Release the memory allocated for
 * the transmit control blocks of one ring.
 *
 * NOTE(review): the signature line, the NULL guard before "return;",
 * the per-tcb loop header, and the handle-free / buffer-free /
 * kmem_free statements are missing from this chunk; only the early
 * break for a NULL dma handle survives — confirm against the full
 * source.
 */
static void
{
int i;
return;
/* Free the tx dma handle for dynamical binding */
} else {
/*
 * If the dma handle is NULL, then we don't
 * have to check the remaining.
 */
break;
}
}
}
}
}
}
/*
 * ixgbe_alloc_rcb_lists - Memory allocation for the receive control blocks
 * of one ring.
 *
 * NOTE(review): the signature line, the per-rcb loop header, the
 * condition splitting work-list vs. free-list attachment, the rx
 * buffer allocation call tested by "ret", and the
 * alloc_rcb_lists_fail cleanup label are missing from this chunk —
 * confirm against the full source.
 */
static int
{
int i;
int ret;
/*
 * Allocate memory for the rx control blocks for work list and
 * free list.
 */
/* Attach the rx control block to the work list */
} else {
/* Attach the rx control block to the free list */
}
if (ret != IXGBE_SUCCESS) {
goto alloc_rcb_lists_fail;
}
}
return (IXGBE_SUCCESS);
return (IXGBE_FAILURE);
}
/*
 * ixgbe_free_rcb_lists - Free the receive control blocks of one ring.
 *
 * NOTE(review): the signature line, the per-rcb loop, the computation
 * of "ref_cnt", and the free/defer statements in both branches of the
 * ref_cnt test are missing from this chunk — confirm against the full
 * source.
 */
static void
{
int i;
if (ref_cnt == 0) {
}
} else {
}
}
}
/*
 * ixgbe_set_fma_flags - Set the attribute for fma support.
 *
 * NOTE(review): the signature line (presumably taking the dma_flag
 * tested below) and the assignments in both branches — which would
 * toggle DDI_DMA_FLAGERR/access-error attributes on the DMA attribute
 * structures defined above — are missing from this chunk. Confirm
 * against the full source.
 */
void
{
if (dma_flag) {
} else {
}
}