ioat_chan.c revision c6c65e5445ba6bc005f3da488bddd36494d26e65
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/ddi_impldefs.h>
#include <sys/sysmacros.h>
#include <sys/mach_mmu.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
extern ddi_device_acc_attr_t ioat_acc_attr;
/* dma attr for the descriptor rings */
/*
 * NOTE(review): only the initializer body is visible in this chunk -- the
 * declaring line (presumably "static ddi_dma_attr_t ..._dma_attr = {") is
 * missing; confirm against the full source. 0x1000 alignment = 4 KB, i.e.
 * the ring must be page-aligned.
 */
DMA_ATTR_V0, /* dma_attr_version */
0x0, /* dma_attr_addr_lo */
0xffffffffffffffff, /* dma_attr_addr_hi */
0xffffffff, /* dma_attr_count_max */
0x1000, /* dma_attr_align */
0x1, /* dma_attr_burstsizes */
0x1, /* dma_attr_minxfer */
0xffffffff, /* dma_attr_maxxfer */
0xffffffff, /* dma_attr_seg */
0x1, /* dma_attr_sgllen */
0x1, /* dma_attr_granular */
0x0, /* dma_attr_flags */
};
/* dma attr for the completion buffers */
/*
 * NOTE(review): only the initializer body is visible in this chunk -- the
 * declaring line is missing; confirm against the full source. 0x40
 * alignment = 64 bytes (the completion buffer is allocated as a
 * physically contiguous cache line; see ioat_completion_alloc()).
 */
DMA_ATTR_V0, /* dma_attr_version */
0x0, /* dma_attr_addr_lo */
0xffffffffffffffff, /* dma_attr_addr_hi */
0xffffffff, /* dma_attr_count_max */
0x40, /* dma_attr_align */
0x1, /* dma_attr_burstsizes */
0x1, /* dma_attr_minxfer */
0xffffffff, /* dma_attr_maxxfer */
0xffffffff, /* dma_attr_seg */
0x1, /* dma_attr_sgllen */
0x1, /* dma_attr_granular */
0x0, /* dma_attr_flags */
};
/*
* ioat_channel_init()
*    Set up per-device channel state at attach time; returns DDI_SUCCESS.
*/
/*
 * NOTE(review): this chunk is heavily truncated -- the function name and
 * parameter line, and most statements inside the loop, are missing from
 * this view. Only the visible skeleton is annotated below.
 */
int
{
int i;
/*
* initialize each dma channel's state which doesn't change across
* channel allocation (NOTE(review): comment tail truncated in this view)
*/
for (i = 0; i < state->is_num_channels; i++) {
}
/* initialize the allocator (from 0 to state->is_num_channels) */
return (DDI_SUCCESS);
}
/*
* ioat_channel_fini()
*    Tear down state built in ioat_channel_init().
*/
/* NOTE(review): signature line and body are missing from this view. */
void
{
}
/*
* ioat_channel_alloc()
* NOTE: We intentionally don't handle DCOPY_SLEEP (if no channels are
* available)
*/
/*
 * NOTE(review): heavily truncated in this view -- the signature line and
 * most statements are missing (brace balance at the DEBUG section below
 * is broken as a result). Error paths return DCOPY_NORESOURCES when no
 * H/W channel is free, DCOPY_FAILURE on setup failure.
 */
/*ARGSUSED*/
int
{
#define CHANSTRSIZE 20
struct ioat_channel_s *channel;
char chanstr[CHANSTRSIZE];
int e;
/* allocate a H/W channel */
if (e != DDI_SUCCESS) {
return (DCOPY_NORESOURCES);
}
#ifdef DEBUG
{
/* if we're cbv2, verify that the V2 compatibility bit is set */
}
}
#endif
/*
* Configure DMA channel
* Channel In Use
* Error Interrupt Enable
* Any Error Abort Enable
* Error Completion Enable
*/
/* check channel error register, clear any errors */
if (estat != 0) {
#ifdef DEBUG
#endif
}
/* allocate and initialize the descriptor buf */
if (e != DDI_SUCCESS) {
goto chinitfail_desc_alloc;
}
/* allocate and initialize the completion space */
e = ioat_completion_alloc(channel);
if (e != DDI_SUCCESS) {
}
/* setup kmem_cache for commands */
cmd_size = sizeof (struct dcopy_cmd_s) +
sizeof (struct dcopy_cmd_priv_s) +
sizeof (struct ioat_cmd_private_s);
goto chinitfail_kmem_cache;
}
/* start-up the channel */
/* fill in the channel info returned to dcopy */
} else {
} else {
}
}
return (DCOPY_SUCCESS);
return (DCOPY_FAILURE);
}
/*
* ioat_channel_suspend()
*    Deliberate no-op; rationale below.
*/
/* NOTE(review): signature line is missing from this view. */
/*ARGSUSED*/
void
{
/*
* normally you would disable interrupts and reset the H/W here. But
* since the suspend framework doesn't know who is using us, it may
* not suspend their I/O before us. Since we won't actively be doing
* any DMA or interrupts unless someone asks us to, it's safe to not
* do anything here.
*/
}
/*
* ioat_channel_resume()
*    Re-program every channel's H/W state after a suspend/resume cycle;
*    returns DDI_SUCCESS.
*/
/*
 * NOTE(review): heavily truncated in this view -- the signature line and
 * most statements inside the per-channel loop are missing.
 */
int
{
int i;
for (i = 0; i < state->is_num_channels; i++) {
continue;
}
/*
* Configure DMA channel
* Channel In Use
* Error Interrupt Enable
* Any Error Abort Enable
* Error Completion Enable
*/
/* check channel error register, clear any errors */
if (estat != 0) {
#ifdef DEBUG
#endif
estat);
}
/* Re-initialize the ring */
/* write the physical address into the chain address register */
} else {
}
/* re-initialize the completion buffer */
/* write the phys addr into the completion address register */
/* start-up the channel */
}
return (DDI_SUCCESS);
}
/*
* ioat_channel_free()
*    Release a channel previously handed out by ioat_channel_alloc():
*    quiesce interrupts, destroy the command cache, return the H/W engine.
*/
/* NOTE(review): the statements for each step below are missing from this view. */
void
ioat_channel_free(void *channel_private)
{
struct ioat_channel_s *channel;
/* disable the interrupts */
/* cleanup command cache */
/* free the H/W DMA engine */
}
/*
* ioat_channel_intr()
*    Per-channel interrupt handler: check channel status, bail out on a
*    fatal H/W failure, otherwise ack the interrupt and notify dcopy.
*/
/*
 * NOTE(review): truncated in this view -- the signature line and most
 * statements are missing.
 */
void
{
} else {
}
/* if the status isn't ACTIVE or IDLE, the channel has failed */
if (status & IOAT_CHAN_STS_FAIL_MASK) {
"chanstat_lo=0x%X; chanerr=0x%X\n",
return;
}
/*
* clear interrupt disable bit if set (it's a RW1C). Read it back to
* ensure the write completes.
*/
/* tell dcopy we have seen a completion on this channel */
}
/*
* ioat_channel_start()
*    Prime the ring with its first (NULL) descriptor so the engine can run.
*/
/* NOTE(review): signature line and statements are missing from this view. */
void
{
/* set the first descriptor up as a NULL descriptor */
/* setup the very first descriptor */
}
/*
* ioat_channel_reset()
*    Reset the channel H/W via its reset bit.
*/
/* NOTE(review): signature line and statements are missing from this view. */
void
{
/* hit the reset bit */
} else {
}
}
/*
* ioat_completion_alloc()
*    Allocate, zero, and DMA-bind the per-channel completion status buffer,
*    then program its physical address into the H/W. Returns DDI_SUCCESS,
*    or DDI_FAILURE with partial allocations unwound via the goto labels.
*/
/*
 * NOTE(review): truncated in this view -- the signature, the actual
 * allocation/bind calls, and the cleanup labels are missing.
 */
int
{
int e;
/*
* allocate memory for the completion status, zero it out, and get
* the paddr. We'll allocate a physically contiguous cache line.
*/
if (e != DDI_SUCCESS) {
}
if (e != DDI_SUCCESS) {
goto cmplallocfail_mem_alloc;
}
if (e != DDI_SUCCESS) {
goto cmplallocfail_addr_bind;
}
/* write the physical address into the completion address register */
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
/*
* ioat_completion_free()
*    Undo ioat_completion_alloc(): clear the H/W register, then unbind and
*    free the buffer and DMA handle.
*/
/* NOTE(review): signature line and statements are missing from this view. */
void
{
/* reset the completion address register */
/* unbind, then free up the memory, dma handle */
}
/*
* ioat_ring_alloc()
*    Allocate and DMA-bind the descriptor ring, program its physical
*    address into the chain address register. Returns DCOPY_SUCCESS or
*    DCOPY_FAILURE (partial allocations unwound via the goto labels).
*/
/*
 * NOTE(review): truncated in this view -- the signature, allocation/bind
 * calls, size computation, and cleanup labels are missing.
 */
int
{
int e;
ring->cr_post_cnt = 0;
/*
* allocate memory for the ring, zero it out, and get the paddr.
* We'll allocate a physically contiguous chunk of memory which
* simplifies the completion logic.
*/
if (e != DDI_SUCCESS) {
}
/*
* logic. Then round that number up to a whole multiple of 4.
* (NOTE(review): the first part of this comment is truncated in this view)
*/
sizeof (ioat_chan_desc_t);
if (e != DDI_SUCCESS) {
goto ringallocfail_mem_alloc;
}
if (e != DDI_SUCCESS) {
goto ringallocfail_addr_bind;
}
/* write the physical address into the chain address register */
} else {
}
return (DCOPY_SUCCESS);
return (DCOPY_FAILURE);
}
/*
* ioat_ring_free()
*    Undo ioat_ring_alloc(): clear the chain address register, then unbind
*    and free the ring memory and DMA handle.
*/
/* NOTE(review): signature line and statements are missing from this view. */
void
{
/* reset the chain address register */
} else {
}
/* unbind, then free up the memory, dma handle */
}
/*
* ioat_ring_seed()
* write the first descriptor in the ring.
*/
/*
 * NOTE(review): truncated in this view -- the signature line and most
 * statements (completion-state init, descriptor write, start bit) are
 * missing; only the CBv2 linking fragment remains.
 */
void
{
/* init the completion state */
/* write in the descriptor and init the descriptor state */
ring->cr_post_cnt++;
ring->cr_desc_gen = 0;
ring->cr_desc_prev = 0;
/* hit the start bit */
} else {
/*
* if this is CBv2, link the descriptor to an empty
* descriptor
*/
desc = (ioat_chan_dma_desc_t *)
prev = (ioat_chan_dma_desc_t *)
(uint16_t)1);
}
}
/*
* ioat_cmd_alloc()
*    Allocate a dcopy command from the channel's kmem cache. Honors
*    DCOPY_NOSLEEP; returns DCOPY_NORESOURCES on allocation failure,
*    DCOPY_SUCCESS otherwise.
*/
/*
 * NOTE(review): truncated in this view -- the signature line, the cache
 * allocation call, and the pointer-setup statements are missing.
 */
int
{
int kmflag;
if (flags & DCOPY_NOSLEEP) {
kmflag = KM_NOSLEEP;
} else {
}
/* save the command passed in case DCOPY_ALLOC_LINK is set */
return (DCOPY_NORESOURCES);
}
/* setup the dcopy and ioat private state pointers */
sizeof (struct dcopy_cmd_priv_s));
/*
* if DCOPY_ALLOC_LINK is set, link the old command to the new one
* just allocated.
*/
if (flags & DCOPY_ALLOC_LINK) {
} else {
}
return (DCOPY_SUCCESS);
}
/*
* ioat_cmd_free()
*    Return a command (and any DCOPY_ALLOC_LINK chain hanging off it) to
*    the kmem cache.
*/
/*
 * NOTE(review): truncated in this view -- the signature line and the loop
 * walking the chain are missing; the brace balance below is broken as a
 * result (two closing braces, no visible opening loop brace).
 */
void
{
/*
* free all the commands in the chain (see DCOPY_ALLOC_LINK in
* ioat_cmd_alloc() for more info).
*/
}
}
/*
* ioat_cmd_post()
*    Post a dcopy command to the channel: reserve ring space, optionally
*    emit a DCA descriptor, then emit one copy descriptor per page-bounded
*    fragment and notify the H/W. Returns DCOPY_SUCCESS,
*    DCOPY_NORESOURCES if the ring is full, or DCOPY_FAILURE if the
*    channel previously failed.
*/
/*
 * NOTE(review): heavily truncated in this view -- the signature line,
 * locking, fragment-size computation, and descriptor-post calls are
 * missing; only the control-flow skeleton remains.
 */
int
{
int e;
/* if the channel has had a fatal failure, return failure */
return (DCOPY_FAILURE);
}
/* make sure we have space for the descriptors */
if (e != DCOPY_SUCCESS) {
return (DCOPY_NORESOURCES);
}
/* if we support DCA, and the DCA flag is set, post a DCA desc */
}
/*
* the dma copy may have to be broken up into multiple descriptors
* since we can't cross a page boundary.
*/
while (size > 0) {
/* adjust for any offset into the page */
if ((src_addr & PAGEOFFSET) == 0) {
} else {
}
if ((dest_addr & PAGEOFFSET) == 0) {
} else {
}
/* take the smallest of the three */
/*
* if this is the last descriptor, and we are supposed to
* generate a completion, generate a completion. same logic
* for interrupt.
*/
ctrl = 0;
}
}
}
ctrl);
/* go to the next page */
}
/*
* if we are going to create a completion, save away the state so we
* can poll on it.
*/
}
/* if queue not defined, tell the DMA engine about it */
0x2);
} else {
}
}
return (DCOPY_SUCCESS);
}
/*
* ioat_cmd_post_dca()
*    Post a DCA (Direct Cache Access) context-change descriptor into the
*    ring, link it to the previous descriptor, and advance the ring state.
*/
/*
 * NOTE(review): truncated in this view -- the signature line, descriptor
 * field writes, and sync calls are missing.
 */
static void
{
/* keep track of the number of descs posted for cbv2 */
ring->cr_post_cnt++;
/*
* post a context change descriptor. If dca has never been used on
* this channel, or if the id doesn't match the last id used on this
* channel, set CONTEXT_CHANGE bit and dca id, set dca state to active,
* and save away the id we're using.
*/
}
/* Put the descriptors physical address in the previous descriptor */
/*LINTED:E_TRUE_LOGICAL_EXPR*/
/* sync the current desc */
/* update the previous desc and sync it too */
/* save the current desc_next and desc_last for the completion */
ring->cr_desc_next++;
ring->cr_desc_next = 0;
ring->cr_desc_gen++;
}
/*
* if this is CBv2, link the descriptor to an empty descriptor. Since
* we always leave one desc empty to detect full, this works out.
*/
desc = (ioat_chan_dca_desc_t *)
prev = (ioat_chan_dca_desc_t *)
}
}
/*
* ioat_cmd_post_copy()
*    Post one DMA copy descriptor into the ring, link it to the previous
*    descriptor, and advance the ring state (wrapping and bumping the
*    generation when the end of the ring is reached).
*/
/*
 * NOTE(review): truncated in this view -- the signature line, descriptor
 * field writes, and sync calls are missing.
 */
static void
{
/* keep track of the number of descs posted for cbv2 */
ring->cr_post_cnt++;
/* write in the DMA desc */
/* Put the descriptors physical address in the previous descriptor */
/*LINTED:E_TRUE_LOGICAL_EXPR*/
/* sync the current desc */
/* update the previous desc and sync it too */
ring->cr_desc_next++;
ring->cr_desc_next = 0;
ring->cr_desc_gen++;
}
/*
* if this is CBv2, link the descriptor to an empty descriptor. Since
* we always leave one desc empty to detect full, this works out.
*/
desc = (ioat_chan_dma_desc_t *)
prev = (ioat_chan_dma_desc_t *)
}
}
/*
* ioat_cmd_poll()
*    Poll for completion of a previously posted command. Returns
*    DCOPY_COMPLETED, DCOPY_PENDING, or DCOPY_FAILURE if the channel has
*    had a fatal error.
*/
/*
 * NOTE(review): truncated in this view -- the signature line, the
 * completion-register read, and the generation/index comparisons are
 * missing; only the control-flow skeleton remains.
 */
int
{
/* if the channel had a fatal failure, fail all polls */
return (DCOPY_FAILURE);
}
/*
* if the current completion is the same as the last time we read one,
* post is still pending, nothing further to do. We track completions
* as indexes into the ring since post uses VAs and the H/W returns
* PAs. We grab a snapshot of generation and last_cmpl in the mutex.
*/
/*
* if we wrapped the ring, increment the generation. Store
* the last cmpl. This logic assumes a physically contiguous
* ring.
*/
ring->cr_cmpl_gen++;
}
} else {
}
/*
* if cmd isn't passed in, we'll return. Useful for updating the
* consumer pointer (ring->cr_cmpl_last).
*/
return (DCOPY_PENDING);
}
/*
* if the post's generation is old, this post has completed. No reason
* to go check the last completion. if the generation is the same
* and if the post is before or = to the last completion processed,
* the post has completed.
*/
return (DCOPY_COMPLETED);
return (DCOPY_COMPLETED);
}
return (DCOPY_PENDING);
}
/*
* ioat_ring_reserve()
*    Compute how many descriptors a command needs (optional DCA desc plus
*    one copy desc per page-bounded fragment) and verify the ring has room,
*    refreshing the H/W completion index before declaring the ring full.
*    Returns DCOPY_SUCCESS or DCOPY_NORESOURCES.
*/
/*
 * NOTE(review): truncated in this view -- the signature line, fragment
 * size computation, and the ring-full comparisons are missing; only the
 * control-flow skeleton remains.
 */
int
{
int num_desc;
int i;
/*
* figure out how many descriptors we need. This can include a dca
* desc and multiple desc for a dma copy.
*/
num_desc = 0;
num_desc++;
}
while (size > 0) {
num_desc++;
/* adjust for any offset into the page */
if ((src_addr & PAGEOFFSET) == 0) {
} else {
}
if ((dest_addr & PAGEOFFSET) == 0) {
} else {
}
/* take the smallest of the three */
/* go to the next page */
}
/* Make sure we have space for these descriptors */
for (i = 0; i < num_desc; i++) {
/*
* if this is the last descriptor in the ring, see if the
* last completed descriptor is #0.
*/
if (ring->cr_cmpl_last == 0) {
/*
* if we think the ring is full, update where
* the H/W really is and check for full again.
*/
if (ring->cr_cmpl_last == 0) {
return (DCOPY_NORESOURCES);
}
}
/*
* go to the next descriptor which is zero in this
* case.
*/
desc = 0;
/*
* if this is not the last descriptor in the ring, see if
* the last completion we saw was the next descriptor.
*/
} else {
/*
* if we think the ring is full, update where
* the H/W really is and check for full again.
*/
return (DCOPY_NORESOURCES);
}
}
/* go to the next descriptor */
desc++;
}
}
return (DCOPY_SUCCESS);
}