/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
*/
#include <sys/ddi_impldefs.h>
#include <sys/sysmacros.h>
#include <sys/ddidmareq.h>
#include <sys/sysiosbus.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/iommutsb.h>
/* Useful debugging Stuff */
#include <sys/nexusdebug.h>
/* Bitfield debugging definitions for this file */
/* Turn on if you need to keep track of outstanding IOMMU usage */
/* #define IO_MEMUSAGE */
/* Turn on to debug IOMMU unmapping code */
/* #define IO_MEMDEBUG */
extern void *sbusp; /* sbus soft state hook */
/*
* This is the number of pages that a mapping request needs before we force
* the TLB flush code to use diagnostic registers. This value was determined
 * through a series of test runs measuring DMA mapping setup performance.
*/
int tlb_flush_using_diag = 16;

int sysio_iommu_tsb_sizes[] = {
	IOMMU_TSB_SIZE_8M,
	IOMMU_TSB_SIZE_16M,
	IOMMU_TSB_SIZE_32M,
	IOMMU_TSB_SIZE_64M,
	IOMMU_TSB_SIZE_128M,
	IOMMU_TSB_SIZE_256M,
	IOMMU_TSB_SIZE_512M,
	IOMMU_TSB_SIZE_1G
};
int
iommu_init(struct sbus_soft_state *softsp, caddr_t address)
{
int i;
#ifdef DEBUG
debug_info = 1;
#endif
/*
 * Simply add each register's offset to the base address
* to calculate the already mapped virtual address of
* the device register...
*
* define a macro for the pointer arithmetic; all registers
* are 64 bits wide and are defined as uint64_t's.
*/
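	/*
	 * A sketch of such a macro, under the assumption that the OFF_*
	 * register offsets come from sysiosbus.h (the macro name REG_ADDR
	 * and the example offset name are illustrative, not taken from
	 * this file):
	 *
	 *	#define	REG_ADDR(b, o)	((uint64_t *)((caddr_t)(b) + (o)))
	 *
	 *	softsp->iommu_ctrl_reg = REG_ADDR(address, OFF_IOMMU_CTRL_REG);
	 */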
/* Set up the DVMA resource sizes */
	if ((softsp->iommu_tsb_cookie = iommu_tsb_alloc(softsp->upa_id)) ==
	    IOMMU_TSB_COOKIE_NONE) {
		return (DDI_FAILURE);
}
/*
* Initialize the DVMA vmem arena.
*/
/* Set the limit for dvma_reserve() to 1/2 of the total dvma space */
#if defined(DEBUG) && defined(IO_MEMUSAGE)
#endif /* DEBUG && IO_MEMUSAGE */
/*
* Get the base address of the TSB table and store it in the hardware
*/
/*
* We plan on the PROM flushing all TLB entries. If this is not the
* case, this is where we should flush the hardware TLB.
*/
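	/*
	 * A sketch of the TSB base programming described above: the
	 * physical address of the software TSB is written into the
	 * hardware TSB base register (va_to_pa is the standard sun4u
	 * helper; this mirrors the consistency check just below):
	 *
	 *	*softsp->tsb_base_addr =
	 *	    (uint64_t)va_to_pa((caddr_t)softsp->soft_tsb_base_addr);
	 */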
/* Set the IOMMU registers */
(void) iommu_resume_init(softsp);
/* check the convenient copy of TSB base, and flush write buffers */
	if (*softsp->tsb_base_addr !=
	    (uint64_t)va_to_pa((caddr_t)softsp->soft_tsb_base_addr)) {
		return (DDI_FAILURE);
}
softsp->sbus_io_hi_pfn = 0;
}
"base reg: %p IOMMU flush reg: %p TSB base addr %p\n",
(void *)softsp->iommu_flush_reg,
(void *)softsp->soft_tsb_base_addr));
return (DDI_SUCCESS);
}
/*
* function to uninitialize the iommu and release the tsb back to
* the spare pool. See startup.c for tsb spare management.
*/
int
iommu_uninit(struct sbus_soft_state *softsp)
{
	/* flip off the IOMMU enable switch */
	*softsp->iommu_ctrl_reg &=
	    ~IOMMU_ENABLE;

	iommu_tsb_free(softsp->iommu_tsb_cookie);
return (DDI_SUCCESS);
}
/*
* Initialize iommu hardware registers when the system is being resumed.
* (Subset of iommu_init())
*/
int
iommu_resume_init(struct sbus_soft_state *softsp)
{
int i;
/*
* Reset the base address of the TSB table in the hardware
*/
/*
* Figure out the correct size of the IOMMU TSB entries. If we
* end up with a size smaller than that needed for 8M of IOMMU
* space, default the size to 8M. XXX We could probably panic here
*/
i = sizeof (sysio_iommu_tsb_sizes) / sizeof (sysio_iommu_tsb_sizes[0])
- 1;
while (i > 0) {
if (tsb_bytes >= sysio_iommu_tsb_sizes[i])
break;
i--;
}
tsb_size = i;
	/* OK, let's flip the "on" switch of the IOMMU */
	*softsp->iommu_ctrl_reg = (uint64_t)(tsb_size << TSB_SIZE_SHIFT
	    | IOMMU_ENABLE | IOMMU_DIAG_ENABLE);

	return (DDI_SUCCESS);
}
void
iommu_tlb_flush(struct sbus_soft_state *softsp, ioaddr_t addr, pgcnt_t npages)
{
int i, do_flush = 0;
	if (npages == 1) {
		*softsp->iommu_flush_reg = (uint64_t)addr;
		return;
	}
"TLB vaddr reg %lx, IO addr 0x%x "
"Base addr 0x%x, Hi addr 0x%x\n",
tmpreg = *valid_bit_reg;
"TLB valid reg %lx\n",
(void *)valid_bit_reg, tmpreg));
if (tmpreg & IOMMU_TLB_VALID) {
do_flush = 1;
}
}
}
	if (do_flush)
		tmpreg = *softsp->sbus_ctrl_reg;
}
/*
* Shorthand defines
*/
/*
* If DDI_DMA_PARTIAL flag is set and the request is for
* less than MIN_DVMA_WIN_SIZE, it's not worth the hassle so
* we turn off the DDI_DMA_PARTIAL flag
*/
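/*
 * MIN_DVMA_WIN_SIZE is referenced below; the value used here is an
 * illustrative assumption, as is the sketch of the demotion check it
 * supports:
 *
 *	if ((dmareq->dmar_flags & DDI_DMA_PARTIAL) &&
 *	    (npages < MIN_DVMA_WIN_SIZE))
 *		dmareq->dmar_flags &= ~DDI_DMA_PARTIAL;
 */
#define	MIN_DVMA_WIN_SIZE	(128)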
/* ARGSUSED */
static void
iommu_remove_mappings(ddi_dma_impl_t *mp)
{
#if defined(DEBUG) && defined(IO_MEMDEBUG)
#if defined(DEBUG) && defined(IO_MEMUSAGE)
#endif /* DEBUG && IO_MEMUSAGE */
/*
* Run thru the mapped entries and free 'em
*/
#if defined(IO_MEMUSAGE)
while (walk) {
break;
}
}
#endif /* IO_MEMUSAGE */
while (npages) {
("dma_mctl: freeing ioaddr %x iotte %p\n",
npages--;
ioaddr += IOMMU_PAGESIZE;
iotte_ptr++;
}
#endif /* DEBUG && IO_MEMDEBUG */
}
static int
iommu_create_vaddr_mappings(ddi_dma_impl_t *mp, uintptr_t addr)
{
int diag_tlb_flush;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
#endif /* DEBUG && IO_MEMUSAGE */
	/* Set Valid and Cache for mem xfer */
	iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;
/*
* Set the per object bits of the TTE here. We optimize this for
* the memory case so that the while loop overhead is minimal.
*/
	/* Turn on NOSYNC if we need consistent mem */
	if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
		mp->dmai_rflags |= DMP_NOSYNC;
		iotte_flag ^= IOTTE_STREAM;
	/* Set streaming mode if not consistent mem */
	} else if (softsp->stream_buf_off) {
		iotte_flag ^= IOTTE_STREAM;
	}
#if defined(DEBUG) && defined(IO_MEMUSAGE)
KM_SLEEP);
#endif /* DEBUG && IO_MEMUSAGE */
/*
* Grab the mappings from the dmmu and stick 'em into the
* iommu.
*/
	/* If we're going to flush the TLB using diag mode, do it now. */
	if (diag_tlb_flush)
		iommu_tlb_flush(softsp, ioaddr, npages);
do {
/*
* Fetch the pfn for the DMA object
*/
		if (!pf_is_memory(pfn)) {
			/* DVMA'ing to IO space */

			/* Turn off cache bit if set */
			if (iotte_flag & IOTTE_CACHE)
				iotte_flag ^= IOTTE_CACHE;

			/* Turn off stream bit if set */
			if (iotte_flag & IOTTE_STREAM)
				iotte_flag ^= IOTTE_STREAM;

			if (IS_INTRA_SBUS(softsp, pfn)) {
				/* Intra sbus transfer */

				/* Turn on intra flag */
				iotte_flag |= IOTTE_INTRA;

				DPRINTF(IOMMU_INTER_INTRA_XFER, (
				    "Intra xfer pfnum %lx TTE %lx\n",
				    pfn, iotte_flag));
			} else {
				if (pf_is_dmacapable(pfn) == 1) {
					/*EMPTY*/
					DPRINTF(IOMMU_INTER_INTRA_XFER,
					    ("Inter xfer pfnum %lx "
					    "tte hi %lx\n",
					    pfn, iotte_flag));
				} else {
					rval = DDI_DMA_NOMAPPING;
#if defined(DEBUG) && defined(IO_MEMDEBUG)
					goto bad;
#endif /* DEBUG && IO_MEMDEBUG */
				}
			}
		}
addr += IOMMU_PAGESIZE;
"tte flag %lx addr %lx ioaddr %x\n",
		/* Flush the IOMMU TLB before loading a new mapping */
		if (!diag_tlb_flush)
			*softsp->iommu_flush_reg = (uint64_t)ioaddr;

		/* Set the hardware IO TTE */
		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
ioaddr += IOMMU_PAGESIZE;
npages--;
iotte_ptr++;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
pfnp++;
#endif /* DEBUG && IO_MEMUSAGE */
} while (npages != 0);
#if defined(DEBUG) && defined(IO_MEMUSAGE)
#endif /* DEBUG && IO_MEMUSAGE */
return (rval);
#if defined(DEBUG) && defined(IO_MEMDEBUG)
bad:
	/* If we fail a mapping, free up any mapping resources used */
	iommu_remove_mappings(mp);
	return (rval);
#endif /* DEBUG && IO_MEMDEBUG */
}
static int
iommu_create_pp_mappings(ddi_dma_impl_t *mp, page_t *pp, page_t **pplist)
{
int diag_tlb_flush;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
#endif /* DEBUG && IO_MEMUSAGE */
	/* Set Valid and Cache for mem xfer */
	iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE | IOTTE_STREAM;
/*
* Set the per object bits of the TTE here. We optimize this for
* the memory case so that the while loop overhead is minimal.
*/
	/* Turn on NOSYNC if we need consistent mem */
	if (mp->dmai_rflags & DDI_DMA_CONSISTENT) {
		mp->dmai_rflags |= DMP_NOSYNC;
		iotte_flag ^= IOTTE_STREAM;
	} else if (softsp->stream_buf_off) {
		/* Set streaming mode if not consistent mem */
		iotte_flag ^= IOTTE_STREAM;
	}
#if defined(DEBUG) && defined(IO_MEMUSAGE)
KM_SLEEP);
#endif /* DEBUG && IO_MEMUSAGE */
/*
* Grab the mappings from the dmmu and stick 'em into the
* iommu.
*/
	/* If we're going to flush the TLB using diag mode, do it now. */
	if (diag_tlb_flush)
		iommu_tlb_flush(softsp, ioaddr, npages);
	do {
		if (pp) {
			pfn = pp->p_pagenum;
			pp = pp->p_next;
		} else {
			pfn = (*pplist)->p_pagenum;
			pplist++;
		}
"tte flag %lx ioaddr %x\n", (void *)iotte_ptr,
		/* Flush the IOMMU TLB before loading a new mapping */
		if (!diag_tlb_flush)
			*softsp->iommu_flush_reg = (uint64_t)ioaddr;

		/* Set the hardware IO TTE */
		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
ioaddr += IOMMU_PAGESIZE;
npages--;
iotte_ptr++;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
pfnp++;
#endif /* DEBUG && IO_MEMUSAGE */
} while (npages != 0);
#if defined(DEBUG) && defined(IO_MEMUSAGE)
#endif /* DEBUG && IO_MEMUSAGE */
return (rval);
}
int
iommu_dma_lim_setup(dev_info_t *dip, dev_info_t *rdip,
    struct sbus_soft_state *softsp, uint_t *burstsizep, uint_t burstsize64,
    uint_t *minxferp, uint_t dma_flags)
{
/* Take care of 64 bit limits. */
if (!(dma_flags & DDI_DMA_SBUS_64BIT)) {
		/*
		 * return burst size for 32-bit mode
		 */
		*burstsizep &= softsp->sbus_burst_sizes;
		return (DDI_FAILURE);
}
	/*
	 * Check if the SBus supports 64-bit transfers and if the caller
	 * is a child of the SBus; there is no support through bridges.
	 */
	if (!softsp->sbus64_burst_sizes || (ddi_get_parent(rdip) != dip)) {
		/*
		 * The SBus doesn't support it, or the caller sits behind
		 * a bridge.  Do 32-bit xfers.
		 */
		*burstsizep &= softsp->sbus_burst_sizes;
		return (DDI_FAILURE);
	}
return (DDI_FAILURE);
}
/* Check for old-style 64 bit burstsizes */
if (burstsize64 & SYSIO64_BURST_MASK) {
		/* Scale back burstsizes if necessary */
} else {
/* Get the 64 bit burstsizes. */
		/* Scale back burstsizes if necessary */
}
/*
* Set the largest value of the smallest burstsize that the
* device or the bus can manage.
*/
return (DDI_SUCCESS);
}
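/*
 * A sketch of the mask arithmetic the comment above describes, under
 * the assumption that the device's 64-bit burst mask is simply
 * intersected with what the bus supports (sbus64_burst_sizes is the
 * soft-state field used earlier in this function; the exact fold
 * elided above may differ):
 *
 *	*burstsizep = burstsize64 & softsp->sbus64_burst_sizes;
 */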
int
iommu_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *dma_attr,
    int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
/*
* Setup dma burstsizes and min-xfer counts.
*/
if (dma_attr->dma_attr_burstsizes == 0)
return (DDI_DMA_BADATTR);
/*
* Check sanity for hi and lo address limits
*/
return (DDI_DMA_BADATTR);
}
return (DDI_DMA_BADATTR);
		if (waitfp != DDI_DMA_DONTWAIT) {
			ddi_set_callback(waitfp, arg,
			    &softsp->dvma_call_list_id);
		}
		return (DDI_DMA_NORESOURCES);
}
"hi %x lo %x min %x burst %x\n",
/* See if the DMA engine has any limit restrictions. */
}
return (DDI_SUCCESS);
}
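/*
 * A sketch of the hi/lo sanity check referred to above, using the
 * standard ddi_dma_attr_t fields (this mirrors the usual DDI check;
 * the exact test elided above may differ):
 *
 *	if (dma_attr->dma_attr_addr_hi <= dma_attr->dma_attr_addr_lo)
 *		return (DDI_DMA_BADATTR);
 */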
/*ARGSUSED*/
int
iommu_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	if (softsp->dvma_call_list_id != 0) {
		ddi_run_callback(&softsp->dvma_call_list_id);
	}
return (DDI_SUCCESS);
}
static int
check_dma_attr(struct ddi_dma_req *dmareq, ddi_dma_attr_t *dma_attr,
    uint32_t *size)
{
return (DDI_DMA_TOOBIG);
}
return (DDI_DMA_TOOBIG);
}
}
return (DDI_DMA_MAPOK);
}
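/*
 * A sketch of the kind of test that produces DDI_DMA_TOOBIG above,
 * assuming the usual comparison of the object size against the
 * device's maximum transfer count (dma_attr_count_max is a standard
 * ddi_dma_attr_t field; the exact tests elided above may differ):
 *
 *	if (*size > dma_attr->dma_attr_count_max)
 *		return (DDI_DMA_TOOBIG);
 */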
int
iommu_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    struct ddi_dma_req *dmareq, ddi_dma_cookie_t *cp, uint_t *ccountp)
{
int rval;
#ifdef lint
#endif
if (mp->dmai_inuse)
return (DDI_DMA_INUSE);
if (rval != DDI_DMA_MAPOK)
return (rval);
}
mp->dmai_offset = 0;
case DMA_OTYP_VADDR:
case DMA_OTYP_BUFVADDR:
"req addr %lx off %x OBJSIZE %x\n",
/* We don't need the addr anymore if we have a shadow list */
break;
case DMA_OTYP_PAGES:
break;
case DMA_OTYP_PADDR:
default:
/*
* Not a supported type for this implementation
*/
goto bad;
}
/* Get our soft state once we know we're mapping an object. */
/*
* If the request is for partial mapping arrangement,
* the device has to be able to address at least the
* size of the window we are establishing.
*/
goto bad;
}
}
/*
	 * If the size requested is less than a moderate amount,
	 * skip the partial mapping stuff - it's not worth the
	 * effort.
*/
if (npages > MIN_DVMA_WIN_SIZE) {
}
} else {
}
} else {
goto bad;
}
}
/*
* save dmareq-object, size and npages into mp
*/
	if (ioaddr == 0) {
		rval = DDI_DMA_NORESOURCES;
		goto bad;
	}
/*
* If we have a 1 page request and we're working with a page
* list, we're going to speed load an IOMMU entry.
*/
#if defined(DEBUG) && defined(IO_MEMUSAGE)
#endif /* DEBUG && IO_MEMUSAGE */
		} else if (softsp->stream_buf_off)
			iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE;
		else
			iotte_flag = IOTTE_VALID | IOTTE_CACHE | IOTTE_WRITE |
			    IOTTE_STREAM;

		*iotte_ptr =
		    ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
cp->dmac_notused = 0;
*ccountp = 1;
}
"pfn %lx tte flag %lx addr %lx ioaddr %x\n",
#if defined(DEBUG) && defined(IO_MEMUSAGE)
KM_SLEEP);
#endif /* DEBUG && IO_MEMUSAGE */
return (DDI_DMA_MAPPED);
}
} else {
(void *)(uintptr_t)
}
	if (ioaddr == 0) {
		if (dmareq->dmar_fp == DDI_DMA_SLEEP)
			rval = DDI_DMA_NOMAPPING;
		else
			rval = DDI_DMA_NORESOURCES;
		goto bad;
	}
/*
* At this point we have a range of virtual address allocated
* with which we now have to map to the requested object.
*/
	if (addr) {
		rval = iommu_create_vaddr_mappings(mp,
		    addr & ~IOMMU_PAGEOFFSET);
		if (rval == DDI_DMA_NOMAPPING)
			goto bad_nomap;
	} else {
		rval = iommu_create_pp_mappings(mp, pp, pplist);
		if (rval == DDI_DMA_NOMAPPING)
			goto bad_nomap;
	}
if (cp) {
cp->dmac_notused = 0;
*ccountp = 1;
}
return (DDI_DMA_PARTIAL_MAP);
} else {
return (DDI_DMA_MAPPED);
}
	/*
	 * Could not create mmu mappings.
	 */
bad_nomap:
	if (mp->dmai_rflags & DMP_NOLIMIT) {
		vmem_free(softsp->dvma_arena, (void *)(uintptr_t)ioaddr,
		    iommu_ptob(npages));
	} else {
		vmem_xfree(softsp->dvma_arena, (void *)(uintptr_t)ioaddr,
		    iommu_ptob(npages));
	}
bad:
	if (rval == DDI_DMA_NORESOURCES &&
	    dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
		ddi_set_callback(dmareq->dmar_fp, dmareq->dmar_arg,
		    &softsp->dvma_call_list_id);
	}
mp->dmai_inuse = 0;
return (rval);
}
/* ARGSUSED */
int
iommu_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
/* sync the entire object */
/* flush stream write buffers */
}
#if defined(DEBUG) && defined(IO_MEMDEBUG)
/*
* 'Free' the dma mappings.
*/
#endif /* DEBUG && IO_MEMDEBUG */
else
mp->dmai_ndvmapages = 0;
mp->dmai_inuse = 0;
	if (softsp->dvma_call_list_id != 0)
		ddi_run_callback(&softsp->dvma_call_list_id);

	return (DDI_SUCCESS);
}
/*ARGSUSED*/
int
iommu_dma_flush(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    off_t off, size_t len, uint_t cache_flags)
{
}
return (DDI_SUCCESS);
}
/*ARGSUSED*/
int
iommu_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp)
{
int rval;
	DPRINTF(IOMMU_DMA_WIN_DEBUG, ("dma_win: win %d winsize %x\n", win,
	    winsize));
/*
* win is in the range [0 .. dmai_nwin-1]
*/
return (DDI_FAILURE);
return (DDI_FAILURE);
cookiep->dmac_notused = 0;
*ccountp = 1;
/*
* Nothing to do...
*/
return (DDI_SUCCESS);
}
return (rval);
/*
* Set this again in case iommu_map_window() has changed it
*/
return (DDI_SUCCESS);
}
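/*
 * A sketch of the window arithmetic implied by the range comment
 * above: the new offset is the window index scaled by the window
 * size, rejected when the index runs past the last window (names
 * follow the surrounding code; the exact computation elided above
 * may differ):
 *
 *	if (win >= mp->dmai_nwin)
 *		return (DDI_FAILURE);
 *	newoff = win * winsize;
 */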
static int
iommu_map_window(ddi_dma_impl_t *mp, off_t newoff, size_t winsize)
{
#if defined(DEBUG) && defined(IO_MEMDEBUG)
/* Free mappings for current window */
#endif /* DEBUG && IO_MEMDEBUG */
} else {
}
} else {
flags = 0;
flags += MMU_PAGESIZE;
}
}
	/* Set up mappings for next window */
	if (addr) {
		if (iommu_create_vaddr_mappings(mp, addr + newoff) ==
		    DDI_DMA_NOMAPPING)
			return (DDI_FAILURE);
	} else {
		if (iommu_create_pp_mappings(mp, pp, pplist) ==
		    DDI_DMA_NOMAPPING)
			return (DDI_FAILURE);
	}
/*
* also invalidate read stream buffer
*/
}
return (DDI_SUCCESS);
}
/*ARGSUSED*/
int
iommu_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objp,
    uint_t cache_flags)
{
switch (request) {
case DDI_DMA_SET_SBUS64:
{
}
case DDI_DMA_RESERVE:
{
int i;
		/* Some simple sanity checks */
		if (dma_lim->dlim_burstsizes == 0) {
			DPRINTF(IOMMU_DMA_RESERVE_DEBUG,
			    ("Reserve: bad burstsizes\n"));
			return (DDI_DMA_BADLIMITS);
		}
		if (dma_lim->dlim_addr_hi <= dma_lim->dlim_addr_lo) {
			DPRINTF(IOMMU_DMA_RESERVE_DEBUG,
			    ("Reserve: bad limits\n"));
			return (DDI_DMA_BADLIMITS);
		}
		if (np > softsp->dma_reserve) {
			DPRINTF(IOMMU_DMA_RESERVE_DEBUG,
			    ("Reserve: dma_reserve is exhausted\n"));
			return (DDI_DMA_NORESOURCES);
		}
		if (ioaddr == 0) {
			DPRINTF(IOMMU_DMA_RESERVE_DEBUG,
			    ("Reserve: no dvma resources available\n"));
			return (DDI_DMA_NOMAPPING);
		}
		/* create a per request structure */
		iommu_fast_dvma = kmem_alloc(sizeof (struct fast_dvma),
		    KM_SLEEP);
/*
* We need to remember the size of the transfer so that
* we can figure the virtual pages to sync when the transfer
* is complete.
*/
		/* Allocate a streaming cache sync flag for each index */
		iommu_fast_dvma->sync_flag = kmem_zalloc(np *
		    sizeof (int), KM_SLEEP);

		/* Allocate a physical sync flag for each index */
		iommu_fast_dvma->phys_sync_flag = kmem_zalloc(np *
		    sizeof (uint64_t), KM_SLEEP);

		for (i = 0; i < np; i++)
			iommu_fast_dvma->phys_sync_flag[i] =
			    va_to_pa((caddr_t)&iommu_fast_dvma->sync_flag[i]);
("Reserve: mapping object %p base addr %lx size %x\n",
break;
}
case DDI_DMA_RELEASE:
{
/* Unload stale mappings and flush stale tlb's */
npages--;
iotte_ptr++;
ioaddr += IOMMU_PAGESIZE;
}
		if (mp->dmai_rflags & DMP_NOLIMIT)
			vmem_free(softsp->dvma_arena,
			    (void *)(uintptr_t)ioaddr, iommu_ptob(npages));
		else
			vmem_xfree(softsp->dvma_arena,
			    (void *)(uintptr_t)ioaddr, iommu_ptob(npages));

		kmem_free(iommu_fast_dvma->sync_flag, np * sizeof (int));
		kmem_free(iommu_fast_dvma->phys_sync_flag, np *
		    sizeof (uint64_t));
/*
* Now that we've freed some resource,
* if there is anybody waiting for it
* try and get them going.
*/
		if (softsp->dvma_call_list_id != 0)
			ddi_run_callback(&softsp->dvma_call_list_id);
		break;
}
	default:
		DPRINTF(IOMMU_DMAMCTL_DEBUG, ("dma_mctl: unknown option "
		    "0%x\n", request));
		return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
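/*
 * For reference, a leaf driver exercises the DDI_DMA_RESERVE /
 * DDI_DMA_RELEASE paths above through the standard fast-DVMA DDI
 * entry points.  A sketch of typical usage (the handle, buffer, and
 * page-count names are illustrative):
 *
 *	ddi_dma_handle_t h;
 *	ddi_dma_cookie_t cookie;
 *
 *	if (dvma_reserve(dip, &dma_lim, 8, &h) == DDI_SUCCESS) {
 *		dvma_kaddr_load(h, kaddr, len, 0, &cookie);
 *		dvma_sync(h, 0, DDI_DMA_SYNC_FORDEV);
 *		... start the transfer and wait for completion ...
 *		dvma_unload(h, 0, DDI_DMA_SYNC_FORCPU);
 *		dvma_release(h);
 *	}
 */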
/*ARGSUSED*/
void
iommu_dvma_kaddr_load(ddi_dma_handle_t h, caddr_t a, uint_t len, uint_t index,
    ddi_dma_cookie_t *cp)
{
int npages;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
#endif /* DEBUG && IO_MEMUSAGE */
addr &= ~IOMMU_PAGEOFFSET;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
KM_SLEEP);
#endif /* DEBUG && IO_MEMUSAGE */
	else if (!softsp->stream_buf_off)
		iotte_flag |= IOTTE_STREAM;

	DPRINTF(IOMMU_FASTDMA_DEBUG, ("kaddr_load: "
	    "size %x offset %x index %x kaddr %lx\n",
	    len, offset, index, (uintptr_t)a));
	do {
		pfn = hat_getpfnum(kas.a_hat, (caddr_t)addr);
		if (pfn == PFN_INVALID) {
			DPRINTF(IOMMU_FASTDMA_DEBUG, ("kaddr_load: invalid pfn "
			    "from hat_getpfnum()\n"));
		}

		/* load tte */
		*iotte_ptr = ((uint64_t)pfn << IOMMU_PAGESHIFT) | iotte_flag;
npages--;
iotte_ptr++;
addr += IOMMU_PAGESIZE;
ioaddr += IOMMU_PAGESIZE;
#if defined(DEBUG) && defined(IO_MEMUSAGE)
pfnp++;
#endif /* DEBUG && IO_MEMUSAGE */
} while (npages > 0);
#if defined(DEBUG) && defined(IO_MEMUSAGE)
#endif /* DEBUG && IO_MEMUSAGE */
}
/*ARGSUSED*/
void
iommu_dvma_unload(ddi_dma_handle_t h, uint_t index, uint_t view)
{
#if defined(DEBUG) && defined(IO_MEMUSAGE)
#endif /* DEBUG && IO_MEMUSAGE */
#if defined(DEBUG) && defined(IO_MEMUSAGE)
break;
}
}
#endif /* DEBUG && IO_MEMUSAGE */
"addr %p sync flag pfn %llx index %x page count %lx\n", (void *)mp,
}
}
/*ARGSUSED*/
void
iommu_dvma_sync(ddi_dma_handle_t h, uint_t index, uint_t view)
{
	if (mp->dmai_rflags & DMP_NOSYNC)
		return;

	DPRINTF(IOMMU_FASTDMA_SYNC, ("sync: "
	    "sync flag addr %p, sync flag pfn %llx\n", (void *)mp,
	    iommu_fast_dvma->phys_sync_flag[index]));
}