e1000g_alloc.c revision fe62dec3a38f1f79ffe68417df75dbbb58135bb7
/*
* This file is provided under a CDDLv1 license. When using or
* redistributing this file, you may do so under this license.
* In redistributing this file this license must be included
* and no other modification of this header file is permitted.
*
* CDDL LICENSE SUMMARY
*
* Copyright(c) 1999 - 2008 Intel Corporation. All rights reserved.
*
* The contents of this file are subject to the terms of Version
* 1.0 of the Common Development and Distribution License (the "License").
*
* You should have received a copy of the License with this software.
* You can obtain a copy of the License at
* See the License for the specific language governing permissions
* and limitations under the License.
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms of the CDDLv1.
*/
/*
* **********************************************************************
* Module Name: *
* e1000g_alloc.c *
* *
* Abstract: *
* This file contains some routines that take care of *
* memory allocation for descriptors and buffers. *
* *
* **********************************************************************
*/
#include "e1000g_sw.h"
#include "e1000g_debug.h"
#define TX_SW_PKT_AREA_SZ \
static int e1000g_alloc_tx_descriptors(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_descriptors(e1000g_rx_ring_t *);
static void e1000g_free_tx_descriptors(e1000g_tx_ring_t *);
static void e1000g_free_rx_descriptors(e1000g_rx_ring_t *);
static int e1000g_alloc_tx_packets(e1000g_tx_ring_t *);
static int e1000g_alloc_rx_packets(e1000g_rx_ring_t *);
static void e1000g_free_tx_packets(e1000g_tx_ring_t *);
static void e1000g_free_rx_packets(e1000g_rx_ring_t *);
static int e1000g_alloc_dma_buffer(struct e1000g *,
static void e1000g_free_dma_buffer(dma_buffer_t *);
#ifdef __sparc
static void e1000g_free_dvma_buffer(dma_buffer_t *);
#endif
/* DMA access attributes for descriptors <Little Endian> */
static ddi_device_acc_attr_t e1000g_desc_acc_attr = {
};
/* DMA access attributes for DMA buffers */
#ifdef __sparc
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
};
#else
static ddi_device_acc_attr_t e1000g_buf_acc_attr = {
};
#endif
/* DMA attributes for tx mblk buffers */
static ddi_dma_attr_t e1000g_tx_dma_attr = {
DMA_ATTR_V0, /* dma_attr_version: version of this structure */
0, /* dma_attr_addr_lo: lowest usable address */
0xffffffffffffffffULL, /* dma_attr_addr_hi: highest usable address */
0x7fffffff, /* dma_attr_count_max: maximum DMAable byte count */
1, /* dma_attr_align: alignment in bytes (none required) */
0x7ff, /* dma_attr_burstsizes: burst sizes (any?) */
1, /* dma_attr_minxfer: minimum transfer */
0xffffffffU, /* dma_attr_maxxfer: maximum transfer */
0xffffffffffffffffULL, /* dma_attr_seg: maximum segment length */
18, /* dma_attr_sgllen: up to 18 cookies (scatter/gather for tx) */
1, /* dma_attr_granular: granularity */
DDI_DMA_FLAGERR, /* dma_attr_flags */
};
static ddi_dma_attr_t e1000g_buf_dma_attr = {
DMA_ATTR_V0, /* dma_attr_version: version of this structure */
0, /* dma_attr_addr_lo: lowest usable address */
0xffffffffffffffffULL, /* dma_attr_addr_hi: highest usable address */
0x7fffffff, /* dma_attr_count_max: maximum DMAable byte count */
1, /* dma_attr_align: alignment in bytes (none required) */
0x7ff, /* dma_attr_burstsizes: burst sizes (any?) */
1, /* dma_attr_minxfer: minimum transfer */
0xffffffffU, /* dma_attr_maxxfer: maximum transfer */
0xffffffffffffffffULL, /* dma_attr_seg: maximum segment length */
1, /* dma_attr_sgllen: single cookie — buffer must be contiguous */
1, /* dma_attr_granular: granularity */
DDI_DMA_FLAGERR, /* dma_attr_flags */
};
static ddi_dma_attr_t e1000g_desc_dma_attr = {
DMA_ATTR_V0, /* dma_attr_version: version of this structure */
0, /* dma_attr_addr_lo: lowest usable address */
0xffffffffffffffffULL, /* dma_attr_addr_hi: highest usable address */
0x7fffffff, /* dma_attr_count_max: maximum DMAable byte count */
E1000_MDALIGN, /* dma_attr_align: alignment in bytes (4K!) */
0x7ff, /* dma_attr_burstsizes: burst sizes (any?) */
1, /* dma_attr_minxfer: minimum transfer */
0xffffffffU, /* dma_attr_maxxfer: maximum transfer */
0xffffffffffffffffULL, /* dma_attr_seg: maximum segment length */
1, /* dma_attr_sgllen: single cookie — descriptor ring is contiguous */
1, /* dma_attr_granular: granularity */
DDI_DMA_FLAGERR, /* dma_attr_flags */
};
#ifdef __sparc
static ddi_dma_lim_t e1000g_dma_limits = {
(uint_t)0, /* dlim_addr_lo */
0x1, /* dlim_minxfer */
1024 /* dlim_speed */
};
#endif
#ifdef __sparc
#else
#endif
extern krwlock_t e1000g_dma_type_lock;
int
{
int result;
while ((result != DDI_SUCCESS) &&
if (result == DDI_SUCCESS) {
if (result != DDI_SUCCESS)
}
/*
* If the allocation fails due to resource shortage,
* we'll reduce the numbers of descriptors/buffers by
* half, and try the allocation again.
*/
if (result != DDI_SUCCESS) {
/*
* We must ensure the number of descriptors
* is always a multiple of 8.
*/
}
}
return (result);
}
/*
* e1000g_alloc_descriptors - allocate DMA buffers for descriptors
*
* This routine allocates necessary DMA buffers for
* Transmit Descriptor Area
* Receive Descriptor Area
*/
static int
{
int result;
if (result != DDI_SUCCESS)
return (DDI_FAILURE);
if (result != DDI_SUCCESS) {
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static void
{
}
static int
{
int mystat;
/*
* Solaris 7 has a problem with allocating physically contiguous memory
* that is aligned on a 4K boundary. The transmit and rx descriptors
* need to be aligned on a 4kbyte boundary. We first try to allocate the
* memory with DMA attributes set to 4K alignment and also no scatter/
* gather mechanism specified. In most cases, this does not allocate
* memory aligned at a 4Kbyte boundary. We then try asking for memory
* where the amount of memory is less than 4k, i.e. a page size. If neither
* of these options work or if the number of descriptors is greater than
* 4K, ie more than 256 descriptors, we allocate 4k extra memory
* and then align the memory at a 4k boundary.
*/
/*
* Memory allocation for the transmit buffer descriptors.
*/
/*
* Allocate a new DMA handle for the transmit descriptor
* memory area.
*/
DDI_DMA_DONTWAIT, 0,
if (mystat != DDI_SUCCESS) {
"Could not allocate tbd dma handle: %d", mystat);
return (DDI_FAILURE);
}
/*
* Allocate memory to DMA data to and from the transmit
* descriptors.
*/
size,
DDI_DMA_DONTWAIT, 0,
if ((mystat != DDI_SUCCESS) ||
if (mystat == DDI_SUCCESS) {
}
}
} else
alloc_flag = B_TRUE;
/*
* Initialize the entire transmit buffer descriptor area to zero
*/
if (alloc_flag)
/*
* If the previous DMA attributes setting could not give us contiguous
* memory or the number of descriptors is greater than the page size,
* we allocate 4K extra memory and then align it at a 4k boundary.
*/
if (!alloc_flag) {
/*
*/
/*
* Allocate a new DMA handle for the transmit descriptor memory
* area.
*/
DDI_DMA_DONTWAIT, 0,
if (mystat != DDI_SUCCESS) {
"Could not re-allocate tbd dma handle: %d", mystat);
return (DDI_FAILURE);
}
/*
* Allocate memory to DMA data to and from the transmit
* descriptors.
*/
size,
DDI_DMA_DONTWAIT, 0,
if (mystat != DDI_SUCCESS) {
"Could not allocate tbd dma memory: %d", mystat);
}
return (DDI_FAILURE);
} else
alloc_flag = B_TRUE;
/*
* Initialize the entire transmit buffer descriptor area to zero
*/
/*
* Memory has been allocated with the ddi_dma_mem_alloc call,
* but has not been aligned. We now align it on a 4k boundary.
*/
} /* alignment workaround */
/*
* Transmit buffer descriptor memory allocation succeeded
*/
/*
* Allocates DMA resources for the memory that was allocated by
* the ddi_dma_mem_alloc call. The DMA resources then get bound to
* the memory address
*/
if (mystat != DDI_SUCCESS) {
"Could not bind tbd dma resource: %d", mystat);
}
}
return (DDI_FAILURE);
}
if (cookie_count != 1) {
"Could not bind tbd dma resource in a single frag. "
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static int
{
int mystat;
/*
* Memory allocation for the receive buffer descriptors.
*/
/*
* Asking for aligned memory with DMA attributes set for 4k alignment
*/
/*
* Allocate a new DMA handle for the receive descriptors
*/
DDI_DMA_DONTWAIT, 0,
if (mystat != DDI_SUCCESS) {
"Could not allocate rbd dma handle: %d", mystat);
return (DDI_FAILURE);
}
/*
* Allocate memory to DMA data to and from the receive
* descriptors.
*/
size,
DDI_DMA_DONTWAIT, 0,
/*
* Check if memory allocation succeeded and also if the
* allocated memory is aligned correctly.
*/
if ((mystat != DDI_SUCCESS) ||
if (mystat == DDI_SUCCESS) {
}
}
} else
alloc_flag = B_TRUE;
/*
* Initialize the allocated receive descriptor memory to zero.
*/
if (alloc_flag)
/*
* If memory allocation did not succeed, do the alignment ourselves
*/
if (!alloc_flag) {
/*
* Allocate a new DMA handle for the receive descriptor.
*/
DDI_DMA_DONTWAIT, 0,
if (mystat != DDI_SUCCESS) {
"Could not re-allocate rbd dma handle: %d", mystat);
return (DDI_FAILURE);
}
/*
* Allocate memory to DMA data to and from the receive
* descriptors.
*/
size,
DDI_DMA_DONTWAIT, 0,
if (mystat != DDI_SUCCESS) {
"Could not allocate rbd dma memory: %d", mystat);
}
return (DDI_FAILURE);
} else
alloc_flag = B_TRUE;
/*
* Initialize the allocated receive descriptor memory to zero.
*/
} /* alignment workaround */
/*
* The memory allocation of the receive descriptors succeeded
*/
/*
* Allocates DMA resources for the memory that was allocated by
* the ddi_dma_mem_alloc call.
*/
if (mystat != DDI_SUCCESS) {
"Could not bind rbd dma resource: %d", mystat);
}
}
return (DDI_FAILURE);
}
if (cookie_count != 1) {
"Could not bind rbd dma resource in a single frag. "
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static void
{
}
}
}
}
static void
{
}
}
}
}
/*
*
* This routine allocates necessary buffers for
* Transmit sw packet structure
* DMA handle for Transmit
* DMA buffer for Transmit
* Receive sw packet structure
* DMA buffer for Receive
*/
static int
{
int result;
if (result != DDI_SUCCESS) {
if (e1000g_dma_type == USE_DVMA) {
"No enough dvma resource for Tx packets, "
"trying to allocate dma buffers...\n");
goto again;
}
"Failed to allocate dma buffers for Tx packets\n");
return (DDI_FAILURE);
}
if (result != DDI_SUCCESS) {
if (e1000g_dma_type == USE_DVMA) {
"No enough dvma resource for Rx packets, "
"trying to allocate dma buffers...\n");
goto again;
}
"Failed to allocate dma buffers for Rx packets\n");
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static void
{
}
#ifdef __sparc
static int
{
int mystat;
if (e1000g_force_detach)
else
&buf->dma_handle);
if (mystat != DDI_SUCCESS) {
"Could not allocate dvma buffer handle: %d\n", mystat);
return (DDI_FAILURE);
}
}
"Could not allocate dvma buffer memory\n");
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static void
{
} else {
return;
}
}
}
}
#endif
static int
{
int mystat;
if (e1000g_force_detach)
else
DDI_DMA_DONTWAIT, 0,
&buf->dma_handle);
if (mystat != DDI_SUCCESS) {
"Could not allocate dma buffer handle: %d\n", mystat);
return (DDI_FAILURE);
}
DDI_DMA_DONTWAIT, 0,
if (mystat != DDI_SUCCESS) {
}
"Could not allocate dma buffer memory: %d\n", mystat);
return (DDI_FAILURE);
}
if (mystat != DDI_SUCCESS) {
}
}
"Could not bind buffer dma handle: %d\n", mystat);
return (DDI_FAILURE);
}
if (count != 1) {
}
}
}
"Could not bind buffer as a single frag. "
"Count = %d\n", count);
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static void
{
} else {
return;
}
}
}
}
static int
{
int j;
int mystat;
/*
* Memory allocation for the Transmit software structure, the transmit
* software packet. This structure stores all the relevant information
* for transmitting a single packet.
*/
return (DDI_FAILURE);
/*
* Pre-allocate dma handles for transmit. These dma handles
* will be dynamically bound to the data buffers passed down
* from the upper layers at the time of transmitting. The
* dynamic binding only applies to packets that are larger
* than the tx_bcopy_thresh.
*/
switch (e1000g_dma_type) {
#ifdef __sparc
case USE_DVMA:
&packet->tx_dma_handle);
break;
#endif
case USE_DMA:
DDI_DMA_DONTWAIT, 0,
&packet->tx_dma_handle);
break;
default:
break;
}
if (mystat != DDI_SUCCESS) {
"Could not allocate tx dma handle: %d\n", mystat);
goto tx_pkt_fail;
}
/*
* Pre-allocate transmit buffers for small packets that the
* size is less than tx_bcopy_thresh. The data of those small
* packets will be bcopy() to the transmit buffers instead of
* using dynamical DMA binding. For small packets, bcopy will
* bring better performance than DMA binding.
*/
switch (e1000g_dma_type) {
#ifdef __sparc
case USE_DVMA:
break;
#endif
case USE_DMA:
break;
default:
break;
}
if (mystat != DDI_SUCCESS) {
switch (e1000g_dma_type) {
#ifdef __sparc
case USE_DVMA:
break;
#endif
case USE_DMA:
break;
default:
break;
}
"Allocate Tx buffer fail\n");
goto tx_pkt_fail;
}
} /* for */
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
static int
{
int i;
/*
* Allocate memory for the rx_sw_packet structures. Each one of these
* structures will contain a virtual and physical address to an actual
* receive buffer in host memory. Since we use one rx_sw_packet per
* received packet, the maximum number of rx_sw_packet that we'll
* need is equal to the number of receive descriptors that we've
* allocated.
*/
for (i = 0; i < packet_num; i++) {
goto rx_pkt_fail;
}
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
static p_rx_sw_packet_t
{
int mystat;
"Cound not allocate memory for Rx SwPacket\n");
return (NULL);
}
switch (e1000g_dma_type) {
#ifdef __sparc
case USE_DVMA:
break;
#endif
case USE_DMA:
break;
default:
break;
}
if (mystat != DDI_SUCCESS) {
"Failed to allocate Rx buffer\n");
return (NULL);
}
/*
* esballoc is changed to desballoc, which
* is an undocumented call but, per Sun,
* we can use it. It gives better efficiency.
*/
}
return (packet);
}
void
{
}
#ifdef __sparc
case USE_DVMA:
break;
#endif
case USE_DMA:
break;
default:
break;
}
}
static void
{
rx_ring->pending_count++;
} else {
}
}
}
}
static void
{
int j;
break;
/* Free the Tx DMA handle for dynamical binding */
#ifdef __sparc
case USE_DVMA:
break;
#endif
case USE_DMA:
break;
default:
break;
}
} else {
/*
* If the dma handle is NULL, then we don't
* need to check the packets left. For they
* have not been initialized or have been freed.
*/
break;
}
#ifdef __sparc
case USE_DVMA:
break;
#endif
case USE_DMA:
break;
default:
break;
}
}
}
}
/*
* e1000g_release_dma_resources - release allocated DMA resources
*
* This function releases any pending buffers that have been
* previously allocated
*/
void
{
}
/* ARGSUSED */
void
{
if (acc_flag) {
} else {
}
if (dma_flag) {
} else {
}
}