immu_dvma.c revision 9e986f0e5fb5e5ac09af90cd3b63f7836d983f9d
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* All rights reserved.
*/
/*
* Copyright (c) 2009, Intel Corporation.
* All rights reserved.
*/
/*
* DVMA code
* This file contains Intel IOMMU code that deals with DVMA
* i.e. DMA remapping.
*/
#include <sys/sysmacros.h>
#include <sys/pci_cfgspace.h>
/*
* Macros based on PCI spec
*/
/* is paddr (p) contiguous with the end of dcookie (d)? */
#define IMMU_CONTIG_PADDR(d, p) \
((d).dck_paddr && \
((d).dck_paddr + (d).dck_npages * IMMU_PAGESIZE) == (p))
typedef struct dvma_arg {
dev_info_t *dva_rdip; /* requesting dip; updated during ancestor walk */
dev_info_t *dva_ddip; /* domain-dip; set when we reach top of the tree */
domain_t *dva_domain; /* domain found (if any) during the walk */
int dva_level;
int dva_error;
} dvma_arg_t;
/* Extern globals */
extern struct memlist *phys_install;
/* static Globals */
/*
* Used to setup DMA objects (memory regions)
* for DMA reads by IOMMU units
*/
static ddi_dma_attr_t immu_dma_attr = {
DMA_ATTR_V0,
0U,
0xffffffffU,
0xffffffffU,
MMU_PAGESIZE, /* MMU page aligned */
0x1,
0x1,
0xffffffffU,
0xffffffffU,
1,
4,
0
};
static ddi_device_acc_attr_t immu_acc_attr = {
DDI_DEVICE_ATTR_V0,
DDI_NEVERSWAP_ACC,
DDI_STRICTORDER_ACC
};
/* globals private to this file */
static kmutex_t immu_domain_lock;
static list_t immu_unity_domain_list;
static list_t immu_xlate_domain_list;
/* structure used to store idx into each level of the page tables */
typedef struct xlate {
int xlt_level;
uint_t xlt_idx;
pgtable_t *xlt_pgtable;
} xlate_t;
/* 0 is reserved by Vt-d spec. Solaris reserves 1 */
#define IMMU_UNITY_DID 1
static mod_hash_t *bdf_domain_hash;
static domain_t *
{
return (NULL);
}
return (domain);
} else {
return (NULL);
}
}
static void
{
int r;
return;
}
ASSERT(r != MH_ERR_DUPLICATE);
ASSERT(r == 0);
}
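/*
 * Illustrative sketch (not part of the driver): how a BDF-keyed lookup
 * in bdf_domain_hash works. The key packing
 * (seg << 16 | bus << 8 | devfunc) and the example_* name are
 * assumptions for illustration only.
 */
#ifdef IMMU_EXAMPLE_SKETCHES
static domain_t *
example_bdf_domain_lookup(int seg, int bus, int devfunc)
{
	mod_hash_val_t val;
	uintptr_t bdf;

	/* devices without a BDF (bus == -1) are never in the hash */
	if (seg < 0 || bus < 0 || devfunc < 0)
		return (NULL);

	bdf = (seg << 16) | (bus << 8) | devfunc;
	if (mod_hash_find(bdf_domain_hash, (mod_hash_key_t)bdf, &val) == 0)
		return ((domain_t *)val);

	return (NULL);
}
#endif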
static int
{
return (DDI_WALK_TERMINATE);
}
immu_devi)) {
return (DDI_WALK_TERMINATE);
}
}
return (DDI_WALK_CONTINUE);
}
static void
{
}
if (spclist) {
}
}
/*
* Set the immu_devi struct in the immu_devi field of a devinfo node
*/
int
{
return (DDI_SUCCESS);
}
/*
* Assume a new immu_devi struct is needed
*/
/*
* No BDF. Set bus = -1 to indicate this.
* We still need to create an immu_devi struct
* though
*/
bus = -1;
dev = 0;
func = 0;
}
"structure");
return (DDI_FAILURE);
}
/*
* Check if some other thread allocated an immu_devi while we
* didn't own the lock.
*/
} else {
}
return (DDI_SUCCESS);
}
static dev_info_t *
{
dvma_arg_t dvarg = {0};
"find lpc_devinfo for ISA device");
return (NULL);
}
"ISA device");
return (NULL);
}
}
static dev_info_t *
{
/*
* The GFX device may not be on the same IMMU unit as "agpgart"
* so search globally
*/
if (!list_is_empty(list_gfx)) {
break;
}
}
"Cannot redirect agpgart");
return (NULL);
}
/* list is not empty; we checked above */
}
static immu_flags_t
{
immu_flags_t flags = 0;
} else {
}
#ifdef BUGGY_DRIVERS
#else
/*
* Read and write flags need to be reversed.
* DMA_READ means read from device and write
* to memory. So DMA read means DVMA write.
*/
flags |= IMMU_FLAGS_READ;
/*
* Some buggy drivers specify neither READ nor WRITE.
* For such drivers set both read and write permissions
*/
}
#endif
return (flags);
}
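/*
 * Illustrative sketch of the READ/WRITE reversal described above:
 * DDI_DMA_WRITE means the device reads from memory (a DVMA read) and
 * DDI_DMA_READ means the device writes to memory (a DVMA write). A
 * minimal sketch; the example_* name is an assumption.
 */
#ifdef IMMU_EXAMPLE_SKETCHES
static immu_flags_t
example_dma_to_dvma_flags(uint_t dmareq_flags)
{
	immu_flags_t flags = 0;

	/* DMA write: device reads memory, so grant DVMA read */
	if (dmareq_flags & DDI_DMA_WRITE)
		flags |= IMMU_FLAGS_READ;
	/* DMA read: device writes memory, so grant DVMA write */
	if (dmareq_flags & DDI_DMA_READ)
		flags |= IMMU_FLAGS_WRITE;
	/* buggy drivers that specify neither get both permissions */
	if ((dmareq_flags & (DDI_DMA_READ | DDI_DMA_WRITE)) == 0)
		flags |= (IMMU_FLAGS_READ | IMMU_FLAGS_WRITE);

	return (flags);
}
#endif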
int
{
size_t actual_size = 0;
void *next;
return (-1);
}
return (-1);
}
return (-1);
}
/*
* Memory allocation failure. Maybe a temporary condition,
* so return an error rather than panic, so that we can try again
*/
if (actual_size < IMMU_PAGESIZE) {
return (-1);
}
return (0);
}
void
{
/* destroy will panic if lock is held. */
/* don't zero out hwpg_vaddr and swpg_next_array for debugging */
}
/*
* pgtable_alloc()
* alloc an IOMMU pgtable structure.
* This same struct is used for root and context tables as well.
* This routine allocates:
* - a pgtable_t struct
* - a HW page used as the pagetable itself; it is read by the IOMMU unit,
*   so we set up DMA for this page
* - a SW page which is only for our bookkeeping
*   (for example to hold pointers to the next level pgtable).
*   So a simple kmem_alloc suffices
*/
static pgtable_t *
{
int kmflags;
return (NULL);
}
return (pgtable);
}
static void
{
/* Don't need to flush the write; we will flush when we use the entry */
}
static void
{
}
/*
* Function to identify a display device from the PCI class code
*/
static boolean_t
{
static uint_t disp_classes[] = {
0x000100,
0x030000,
0x030001
};
for (i = 0; i < nclasses; i++) {
if (classcode == disp_classes[i])
return (B_TRUE);
}
return (B_FALSE);
}
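/*
 * Illustrative sketch: the class code tested above is the 24-bit value
 * in bits 31:8 of the config-space dword at PCI_CONF_REVID (the low
 * byte is the revision id). A sketch only; the example_* name is an
 * assumption.
 */
#ifdef IMMU_EXAMPLE_SKETCHES
static uint_t
example_classcode(int bus, int dev, int func)
{
	/* dword at 0x08: class code (31:8) + revision id (7:0) */
	return (pci_getl_func(bus, dev, func, PCI_CONF_REVID) >> 8);
}
#endif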
/*
* Function that determines if a device is PCIEX and/or a PCIEX bridge
*/
static boolean_t
{
if (!(status & PCI_STAT_CAP))
return (B_FALSE);
if (cap == PCI_CAP_ID_PCI_E) {
/*
* See section 7.8.2 of PCI-Express Base Spec v1.0a
* PCIE_PCIECAP_DEV_TYPE_PCIE2PCI implies that the
* device is a PCIE2PCI bridge
*/
*is_pcib =
((status & PCIE_PCIECAP_DEV_TYPE_MASK) ==
}
}
return (is_pciex);
}
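/*
 * Illustrative sketch of the capability-list walk the function above
 * performs: find the PCI Express capability and check the device/port
 * type for a PCIE2PCI bridge. A sketch under the assumption that the
 * pci_getb_func/pci_getw_func config-space accessors (pci_cfgspace.h)
 * are usable here; the example_* name and the loop bound are
 * assumptions.
 */
#ifdef IMMU_EXAMPLE_SKETCHES
static boolean_t
example_device_is_pciex(int bus, int dev, int func, boolean_t *is_pcib)
{
	ushort_t status, pcie_caps;
	uint8_t cap, capsp;
	int ncaps = 48;		/* guard against malformed cap lists */

	*is_pcib = B_FALSE;

	status = pci_getw_func(bus, dev, func, PCI_CONF_STAT);
	if (!(status & PCI_STAT_CAP))
		return (B_FALSE);

	capsp = pci_getb_func(bus, dev, func, PCI_CONF_CAP_PTR);
	while (capsp != 0 && capsp != 0xff && ncaps-- > 0) {
		capsp &= 0xfc;	/* low 2 bits of the pointer are reserved */
		cap = pci_getb_func(bus, dev, func, capsp);
		if (cap == PCI_CAP_ID_PCI_E) {
			/* PCIe capability register is at offset 2 */
			pcie_caps = pci_getw_func(bus, dev, func, capsp + 2);
			*is_pcib = ((pcie_caps & PCIE_PCIECAP_DEV_TYPE_MASK)
			    == PCIE_PCIECAP_DEV_TYPE_PCIE2PCI);
			return (B_TRUE);
		}
		capsp = pci_getb_func(bus, dev, func, capsp + 1);
	}

	return (B_FALSE);
}
#endif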
/*
* immu_dvma_get_immu()
* get the immu unit structure for a dev_info node
*/
immu_t *
{
/*
* check if immu unit was already found earlier.
* If yes, then it will be stashed in immu_devi struct.
*/
/*
* May fail because of low memory. Return error rather
* than panic, as we want the driver to try again later
*/
"No immu_devi structure");
/*NOTREACHED*/
}
}
return (immu);
}
"Cannot find immu_t for device");
/*NOTREACHED*/
}
/*
* Check if some other thread found immu
* while lock was not held
*/
/* immu_devi should be present as we found it earlier */
"immu_dvma_get_immu: No immu_devi structure");
/*NOTREACHED*/
}
/* nobody else set it, so we should do it */
} else {
/*
* if some other thread got immu before
* us, it should get the same results
*/
"immu units found for device. Expected (%p), "
"actual (%p)", (void *)immu,
/*NOTREACHED*/
}
}
return (immu);
}
/* ############################# IMMU_DEVI code ############################ */
/*
* Allocate an immu_devi structure and initialize it
*/
static immu_devi_t *
{
int kmflags;
/* bus == -1 indicates a non-PCI device (no BDF) */
"Intel IOMMU immu_devi structure");
return (NULL);
}
if (bus == -1) {
return (immu_devi);
}
} else {
}
} else {
}
/* check for certain special devices */
return (immu_devi);
}
static void
{
}
static domain_t *
{
return (NULL);
}
if (domain) {
}
return (domain);
}
/* ############################# END IMMU_DEVI code ######################## */
/* ############################# DOMAIN code ############################### */
/*
* This routine always succeeds
*/
static int
{
int did;
if (did == 0) {
" domain-device: %s%d. immu unit is %s. Using "
"unity domain with domain-id (%d)",
}
return (did);
}
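/*
 * Illustrative sketch of the "always succeeds" domain-id allocation
 * above: try the per-unit domain-id vmem arena and fall back to the
 * unity domain-id when the arena is exhausted. immu_did_arena is an
 * assumed field name (the arena created by the domainid-arena code
 * below); the example_* name is an assumption.
 */
#ifdef IMMU_EXAMPLE_SKETCHES
static int
example_did_alloc(immu_t *immu)
{
	int did;

	/* NOSLEEP alloc: returns NULL (did == 0) if we ran out of ids */
	did = (int)(uintptr_t)vmem_alloc(immu->immu_did_arena, 1, VM_NOSLEEP);
	if (did == 0) {
		/* domain overflow: share the unity domain's id */
		did = IMMU_UNITY_DID;
	}

	return (did);
}
#endif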
static int
{
/*
* The field dvp->dva_rdip is a work-in-progress
* and gets updated as we walk up the ancestor
* tree. The final ddip is set only when we reach
* the top of the tree. So the dvp->dva_ddip field cannot
* be relied on until we reach the top of the tree.
*/
/* immu_devi may not be set. */
return (DDI_WALK_TERMINATE);
}
}
}
/*
* If we encounter a PCIE_PCIE bridge *ANCESTOR* we need to
* terminate the walk (since the device under the PCIE bridge
* is a PCIE device and has an independent entry in the
* context table)
*/
return (DDI_WALK_TERMINATE);
}
/*
* In order to be a domain-dip, it must be a PCI device i.e.
* must have valid BDF. This also eliminates the root complex.
*/
}
/* continue walking to find ddip */
return (DDI_WALK_CONTINUE);
}
/* if domain is set, it must be the same */
if (dvp->dva_domain) {
}
return (DDI_WALK_TERMINATE);
}
/* immu_devi either has both set or both clear */
/* Domain may already be set, continue walking so that ddip gets set */
if (dvp->dva_domain) {
return (DDI_WALK_CONTINUE);
}
/* domain is not set in either immu_devi or dvp */
return (DDI_WALK_CONTINUE);
}
/* ok, the BDF hash had a domain for this BDF. */
/* Grab lock again to check if something else set immu_devi fields */
} else {
}
/*
* walk upwards until the topmost PCI bridge is found
*/
return (DDI_WALK_CONTINUE);
}
static void
{
int dcount = 0;
/*
* We call into routines that grab the lock so we should
* not be called with the lock held. This does not matter
* much since no one else has a reference to this domain
*/
/*
* UNITY arenas are a mirror of the physical memory
* installed on the system.
*/
#ifdef BUGGY_DRIVERS
/*
* Don't skip page0. Some broken HW/FW access it.
*/
dcount = 1;
#endif
mp = phys_install;
if (mp->ml_address == 0) {
/* since we already mapped page1 above */
} else {
}
dcount = 1;
while (mp) {
dcount = 1;
}
while (mp) {
dcount = 1;
}
}
/*
* create_xlate_arena()
* Create the dvma arena for a domain with translation
* mapping
*/
static void
{
char *arena_name;
int vmem_flags;
void *vmem_ret;
/* Note, don't do sizeof (arena_name) - it is just a pointer */
(void) snprintf(arena_name,
sizeof (domain->dom_dvma_arena_name),
/*
* No one else has access to this domain.
* So no domain locks needed
*/
/* Restrict mgaddr (max guest addr) to MGAW */
/*
* To ensure we avoid ioapic and PCI MMIO ranges, we just
* use the physical memory address range of the system as the
* arena range
*/
mp = phys_install;
if (mp->ml_address == 0)
else
else
"%s: Creating dvma vmem arena [0x%" PRIx64
/*
* We always allocate in quanta of IMMU_PAGESIZE
*/
size, /* size */
IMMU_PAGESIZE, /* quantum */
NULL, /* afunc */
NULL, /* ffunc */
NULL, /* source */
0, /* qcache_max */
"Failed to allocate DVMA arena(%s) "
/*NOTREACHED*/
}
while (mp) {
if (mp->ml_address == 0)
else
else
"%s: Adding dvma vmem span [0x%" PRIx64
"Failed to allocate DVMA arena(%s) "
"for domain ID (%d)",
/*NOTREACHED*/
}
}
}
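/*
 * Illustrative sketch of the arena setup above: create a vmem arena
 * over the first physical-memory span and then vmem_add() the
 * remaining spans from the phys_install memlist, so the arena never
 * hands out DVMA that overlaps ioapic/PCI MMIO ranges. A minimal
 * sketch: the MGAW clamp and the page-0/page-1 special-casing are
 * omitted, and the example_* name is an assumption.
 */
#ifdef IMMU_EXAMPLE_SKETCHES
static vmem_t *
example_create_dvma_arena(const char *name)
{
	struct memlist *mp = phys_install;
	uint64_t start, size;
	vmem_t *arena;

	/* never hand out page 0 as DVMA */
	start = (mp->ml_address == 0) ? IMMU_PAGESIZE : mp->ml_address;
	size = mp->ml_address + mp->ml_size - start;

	/* all allocations are done in quanta of IMMU_PAGESIZE */
	arena = vmem_create(name, (void *)(uintptr_t)start, (size_t)size,
	    IMMU_PAGESIZE, NULL, NULL, NULL, 0, VM_NOSLEEP);

	/* add the remaining physical-memory spans to the arena */
	for (mp = mp->ml_next; arena != NULL && mp != NULL;
	    mp = mp->ml_next) {
		(void) vmem_add(arena, (void *)(uintptr_t)mp->ml_address,
		    (size_t)mp->ml_size, VM_NOSLEEP);
	}

	return (arena);
}
#endif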
/* ################################### DOMAIN CODE ######################### */
/*
* Set the domain and domain-dip for a dip
*/
static void
{
if (fddip) {
} else {
}
if (fdomain) {
} else {
}
}
/*
* device_domain()
* Get domain for a device. The domain may be global, in which case it
* is shared between all IOMMU units. Due to potential AGAW differences
* between IOMMU units, such global domains *have to be* UNITY mapping
* domains. Alternatively, the domain may be local to an IOMMU unit.
* Local domains may be shared or device-specific, although the scope of
* sharing is restricted to devices controlled by the IOMMU unit to which
* the domain belongs. If shared, they (currently) have to be UNITY
* domains. If device-specific, a domain may be either a UNITY or a
* translation (XLATE) domain.
*/
static domain_t *
{
dvma_arg_t dvarg = {0};
int level;
/*
* Check if the domain is already set. This is usually true
* if this is not the first DVMA transaction.
*/
if (domain) {
return (domain);
}
/*
* possible that there is no IOMMU unit for this device
* - BIOS bugs are one example.
*/
return (NULL);
}
level = 0;
/*
* maybe low memory. return error,
* so driver tries again later
*/
return (NULL);
}
/* should have walked at least 1 dip (i.e. edip) */
/*
* We may find the domain during our ancestor walk on any one of our
* ancestor dips. If the domain is found then the domain-dip
* (i.e. ddip) will also be found in the same immu_devi struct.
* The domain-dip is the highest ancestor dip which shares the
* same domain with edip.
* The domain may or may not be found, but the domain dip must
* be found.
*/
return (NULL);
}
/*
* Did we find a domain ?
*/
if (domain) {
goto found;
}
/* nope, so allocate */
return (NULL);
}
/*FALLTHROUGH*/
/*
* We know *domain *is* the right domain, so panic if
* another domain is set for either the request-dip or
* effective dip.
*/
return (domain);
}
static void
{
/* 0 is reserved by Vt-d */
/*LINTED*/
ASSERT(IMMU_UNITY_DID > 0);
/* domain created during boot and always use sleep flag */
/*
* Setup the domain's initial page table
* should never fail.
*/
/*
* put it on the system-wide UNITY domain list
*/
}
/*
* ddip is the domain-dip - the topmost dip in a domain.
* rdip is the requesting-dip - the device which is
* requesting DVMA setup.
* If the domain is a non-shared domain, rdip == ddip.
*/
static domain_t *
{
int kmflags;
char mod_hash_name[128];
int did;
int dcount = 0;
/*
* First allocate a domainid.
* This routine will never fail, since if we run out
* of domains the unity domain will be allocated.
*/
if (did == IMMU_UNITY_DID) {
/* domain overflow */
return (immu->immu_unity_domain);
}
/*NOTREACHED*/
}
/*
* Create xlate DVMA arena for this domain.
*/
/*
* Setup the domain's initial page table
*/
"pgtable for domain (%d). IOMMU unit: %s",
/*NOTREACHED*/
}
/*
* Since this is an immu unit-specific domain, put it on
* the per-immu domain list.
*/
/*
* Also put it on the system-wide xlate domain list
*/
#ifdef BUGGY_DRIVERS
/*
* Don't skip page0. Some broken HW/FW access it.
*/
dcount = 1;
#endif
return (domain);
}
/*
* Create domainid arena.
* Domainid 0 is reserved by Vt-d spec and cannot be used by
* system software.
* Domainid 1 is reserved by Solaris and used for *all* of the following:
* as the "uninitialized" domain - For devices not yet controlled
* by Solaris
* as the "unity" domain - For devices that will always belong
* to the unity domain
* as the "overflow" domain - Used for any new device after we
* run out of domains
* All of the above domains map into a single domain with
* domainid 1 and UNITY DVMA mapping
*/
static void
{
sizeof (immu->immu_did_arena_name),
1, /* quantum */
NULL, /* afunc */
NULL, /* ffunc */
NULL, /* source */
0, /* qcache_max */
VM_SLEEP);
/* Even with SLEEP flag, vmem_create() can fail */
}
}
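/*
 * Illustrative sketch of the domain-id numbering described above: the
 * arena base starts at IMMU_UNITY_DID + 1 so that id 0 (reserved by
 * the Vt-d spec) and id 1 (the Solaris unity/uninitialized/overflow
 * domain) are never handed out. immu_max_domains is an assumed field
 * name for the per-unit domain-id limit; the example_* name is an
 * assumption.
 */
#ifdef IMMU_EXAMPLE_SKETCHES
static vmem_t *
example_did_arena_create(immu_t *immu, const char *name)
{
	/* hand out ids starting at IMMU_UNITY_DID + 1 */
	return (vmem_create(name,
	    (void *)(uintptr_t)(IMMU_UNITY_DID + 1),
	    immu->immu_max_domains - IMMU_UNITY_DID,
	    1,			/* quantum */
	    NULL, NULL, NULL,	/* afunc, ffunc, source */
	    0,			/* qcache_max */
	    VM_SLEEP));
}
#endif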
/* ######################### CONTEXT CODE ################################# */
static void
{
int sid;
/* Check the most common case first with reader lock */
if (ROOT_GET_P(hw_rent)) {
return;
} else {
}
} else {
}
goto again;
}
}
/* need to disable context entry before reprogramming it */
/* flush caches */
/*LINTED*/
}
}
static pgtable_t *
{
int bus;
int devfunc;
/* Allocate a zeroed root table (4K: 256 x 128-bit entries) */
/*
* Setup context tables for all possible root table entries.
* Start out with unity domains for all entries.
*/
/*LINTED*/
}
}
return (root_table);
}
/*
* Called during rootnex attach, so no locks needed
*/
static void
{
}
/*
* Find top pcib
*/
static int
{
}
return (DDI_WALK_CONTINUE);
}
static int
{
int r_bus;
int d_bus;
int r_devfunc;
int d_devfunc;
"request-dip are NULL or are root devinfo");
return (DDI_FAILURE);
}
/*
* We need to set the context fields
* based on what type of device rdip and ddip are.
* To do that we need the immu_devi field.
* Set the immu_devi field (if not already set)
*/
"immu_context_update: failed to set immu_devi for ddip");
return (DDI_FAILURE);
}
"immu_context_update: failed to set immu_devi for rdip");
return (DDI_FAILURE);
}
/* rdip is a PCIE device. set context for it only */
#ifdef BUGGY_DRIVERS
} else if (r_immu_devi == d_immu_devi) {
#ifdef TEST
#endif
/* rdip is a PCIE device. set context for it only */
#endif
} else if (d_pcib_type == IMMU_PCIB_PCIE_PCI) {
/*
* ddip is a PCIE_PCI bridge. Set context for ddip's
* secondary bus. If rdip is on ddip's secondary
* bus, set context for rdip. Else, set context
* for rdip's PCI bridge on ddip's secondary bus.
*/
d_immu_devi->imd_sec, 0);
} else {
} else {
" bridge for PCI device");
/*NOTREACHED*/
}
}
} else if (d_pcib_type == IMMU_PCIB_PCI_PCI) {
} else if (d_pcib_type == IMMU_PCIB_ENDPOINT) {
/*
* ddip is a PCIE device which has a non-PCI device under it
* i.e. it is a PCI-nonPCI bridge. Example: pci-ide/ata
*/
} else {
"set IMMU context.");
/*NOTREACHED*/
}
/* XXX do we need a membar_producer() here */
return (DDI_SUCCESS);
}
/* ##################### END CONTEXT CODE ################################## */
/* ##################### MAPPING CODE ################################## */
static boolean_t
{
if (immu_flags & IMMU_FLAGS_PAGE1) {
} else {
}
/* The PDTE must be set i.e. present bit is set */
return (B_FALSE);
}
/*
* Just assert to check the most significant system software field
* (PDTE_SW4) as it is the same as the present bit, which we
* checked above
*/
/*
* TM field should be clear if not reserved.
* non-leaf is always reserved
*/
return (B_FALSE);
}
}
/*
* The SW3 field is not used and must be clear
*/
return (B_FALSE);
}
/*
* PFN (for PTE) or next level pgtable-paddr (for PDE) must be set
*/
"PTE paddr mismatch: %lx != %lx",
return (B_FALSE);
}
} else {
"PDE paddr mismatch: %lx != %lx",
return (B_FALSE);
}
}
/*
* SNP field should be clear if not reserved.
* non-leaf is always reserved
*/
return (B_FALSE);
}
}
/* second field available for system software should be clear */
return (B_FALSE);
}
/* Super pages field should be clear */
return (B_FALSE);
}
/*
* least significant field available for
* system software should be clear
*/
return (B_FALSE);
}
return (B_FALSE);
}
return (B_FALSE);
}
return (B_TRUE);
}
/*ARGSUSED*/
static void
{
int idx;
npages = *npages_ptr;
/*
* since a caller gets a unique dvma for a physical address,
* no other concurrent thread will be writing to the same
* PTE even if it has the same paddr. So no locks needed.
*/
/* Cannot clear a HW PTE that is already clear */
dvma += IMMU_PAGESIZE;
npages--;
}
#ifdef TEST
/* don't need to flush writes during unmap */
#endif
*npages_ptr = npages;
}
/*ARGSUSED*/
static void
{
int level;
/* level 0 is never used. Sanity check */
/*
* Skip the first 12 bits, which are the offset into the
* 4K PFN (phys page frame based on IMMU_PAGESIZE)
*/
/* skip to level 1 i.e. leaf PTE */
}
}
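/*
 * Illustrative sketch of the index computation described above: each
 * pgtable level indexes 9 bits of the DVMA above the 12-bit page
 * offset (the standard VT-d 4K/512-entry layout). The IMMU_EX_*
 * constants and the example_* name are assumptions for illustration.
 */
#ifdef IMMU_EXAMPLE_SKETCHES
#define	IMMU_EX_PGSHIFT		12	/* 4K pages */
#define	IMMU_EX_IDX_WIDTH	9	/* 512 entries per pgtable */
#define	IMMU_EX_IDX_MASK	((1 << IMMU_EX_IDX_WIDTH) - 1)

static void
example_xlate_setup(uint64_t dvma, xlate_t *xlate, int nlevels)
{
	int level;

	/* skip the first 12 bits: the offset into the 4K page frame */
	for (level = 1; level <= nlevels; level++) {
		xlate[level].xlt_level = level;
		xlate[level].xlt_idx = (dvma >> (IMMU_EX_PGSHIFT +
		    (level - 1) * IMMU_EX_IDX_WIDTH)) & IMMU_EX_IDX_MASK;
	}
}
#endif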
/*
* Read the pgtables
*/
static void
{
/* xlate should be at level 0 */
/* start with highest level pgtable i.e. root */
}
continue;
}
/* xlate's leafier level is not set, set it now */
/* Lock the pgtable in read mode */
/*
* since we are unmapping, the pgtable should
* already point to a leafier pgtable.
*/
}
}
/*ARGSUSED*/
static void
{
#ifndef DEBUG
/* Set paddr */
pte = 0;
#else
}
#ifdef BUGGY_DRIVERS
return;
#else
goto out;
#endif
}
/* Don't touch SW4. It is the present field */
/* clear TM field if not reserved */
}
#ifdef DEBUG
/* Clear 3rd field for system software - not used */
#endif
/* Set paddr */
/* clear SNP field if not reserved. */
}
#ifdef DEBUG
/* Clear SW2 field available for software */
#endif
#ifdef DEBUG
/* SP is don't care for PTEs. Clear it for cleanliness */
#endif
#ifdef DEBUG
/* Clear SW1 field available for software */
#endif
/*
* Now that we are done writing the PTE
* set the "present" flag. Note this present
* flag is a bit in the PTE that the
* spec says is available for system software.
* This is an implementation detail of Solaris
* bare-metal Intel IOMMU.
* The present field in a PTE is not defined
* by the Vt-d spec
*/
out:
#ifdef BUGGY_DRIVERS
#else
if (immu_flags & IMMU_FLAGS_READ)
if (immu_flags & IMMU_FLAGS_WRITE)
#endif
#endif
}
/*ARGSUSED*/
static void
{
int idx;
int j;
nvpages = *nvpages_ptr;
/*
* since a caller gets a unique dvma for a physical address,
* no other concurrent thread will be writing to the same
* PTE even if it has the same paddr. So no locks needed.
*/
for (j = dcount - 1; j >= 0; j--) {
break;
}
ASSERT(j >= 0);
nvpages = *nvpages_ptr;
== B_TRUE);
nppages--;
nvpages--;
paddr += IMMU_PAGESIZE;
dvma += IMMU_PAGESIZE;
if (nppages == 0) {
j++;
}
if (j == dcount) {
break;
}
if (nppages == 0) {
}
}
/* flush writes to HW PTE table */
sizeof (hw_pdte_t));
if (nvpages) {
*nvpages_ptr = nvpages;
} else {
*dvma_ptr = 0;
*nvpages_ptr = 0;
}
}
/*ARGSUSED*/
static void
{
/* if PDE is already set, make sure it is correct */
#ifdef BUGGY_DRIVERS
return;
#else
goto out;
#endif
}
/* Don't touch SW4; it is the present bit */
/* don't touch TM field; it is reserved for PDEs */
/* 3rd field available for system software is not used */
/* Set next level pgtable-paddr for PDE */
/* don't touch SNP field it is reserved for PDEs */
/* Clear second field available for system software */
/* No super pages for PDEs */
/* Clear SW1 for software */
/*
* Now that we are done writing the PDE
* set the "present" flag. Note this present
* flag is a bit in the PDE that the
* spec says is available for system software.
* This is an implementation detail of Solaris
* bare-metal Intel IOMMU.
* The present field in a PDE is not defined
* by the Vt-d spec
*/
out:
#ifdef BUGGY_DRIVERS
#else
if (immu_flags & IMMU_FLAGS_READ)
if (immu_flags & IMMU_FLAGS_WRITE)
#endif
}
/*
* Used to set PDEs
*/
static boolean_t
{
int level;
/* xlate should be at level 0 */
/* start with highest level pgtable i.e. root */
/* speculative alloc */
}
}
/* Lock the pgtable in READ mode first */
/*
* check if leafier level already has a pgtable
* if yes, verify
*/
/* Change to a write lock */
goto again;
}
} else {
#ifndef BUGGY_DRIVERS
/*
* If buggy driver, we already set permission
* READ+WRITE so nothing to do for that case.
* XXX Check that read/write perms change before
* actually setting perms. Also need to hold lock.
*/
if (immu_flags & IMMU_FLAGS_READ)
if (immu_flags & IMMU_FLAGS_WRITE)
#endif
}
== B_TRUE);
}
if (new) {
}
return (set);
}
/*
* dvma_map()
* map a contiguous range of DVMA pages
*
* immu: IOMMU unit for which we are generating DVMA cookies
* domain: domain
* sdvma: Starting dvma
* spaddr: Starting paddr
* npages: Number of pages
* rdip: requesting device
* immu_flags: flags
*/
static boolean_t
{
uint64_t n;
n = snvpages;
while (n > 0) {
/* Lookup or allocate PGDIRs and PGTABLEs if necessary */
== B_TRUE) {
}
/* set all matching ptes that fit into this leaf pgtable */
}
return (pde_set);
}
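/*
 * Illustrative sketch of the dvma_map() contract above: the return
 * value reports whether any PDE (non-leaf entry) was newly set, which
 * is what decides how much of the IOTLB must be invalidated after the
 * map. The example_* names are hypothetical stand-ins, not driver
 * functions.
 */
#ifdef IMMU_EXAMPLE_SKETCHES
static boolean_t example_map(uint64_t sdvma, uint64_t npages);
static void example_flush_leaf(uint64_t sdvma, uint64_t npages);
static void example_flush_whole(uint64_t sdvma, uint64_t npages);

static void
example_map_and_flush(uint64_t sdvma, uint64_t npages)
{
	/* map [sdvma, sdvma + npages * IMMU_PAGESIZE) */
	if (example_map(sdvma, npages) == B_TRUE) {
		/* a non-leaf entry changed: flush non-leaf caches too */
		example_flush_whole(sdvma, npages);
	} else {
		/* only leaf PTEs changed: a leaf-only flush suffices */
		example_flush_leaf(sdvma, npages);
	}
}
#endif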
/*
* dvma_unmap()
* unmap a range of DVMAs
*
* immu: IOMMU unit state
* domain: domain for requesting device
* ddip: domain-dip
* dvma: starting DVMA
* npages: Number of IMMU pages to be unmapped
* rdip: requesting device
*/
static void
{
uint64_t n;
n = snpages;
while (n > 0) {
/* setup the xlate array */
/* just lookup existing pgtables. Should never fail */
/* clear all matching ptes that fit into this leaf pgtable */
}
/* No need to flush IOTLB after unmap */
}
static uint64_t
{
/* shortcuts */
/* parameters */
/* nocross is checked in cookie_update() */
/* handle the rollover cases */
}
/*
* allocate from vmem arena.
*/
return (dvma);
}
static void
{
return;
}
}
/*ARGSUSED*/
static void
{
int i;
/*
* we allocated DVMA in a single chunk. Calculate total number
* of pages
*/
}
#ifdef DEBUG
/* Unmap only in DEBUG mode */
#endif
}
dma->dp_max_cookies = 0;
dma->dp_max_dcookies = 0;
dma->dp_cookie_size = 0;
}
/*
* cookie_alloc()
*/
static int
{
int kmflag;
/* we need to allocate new array */
} else {
kmflag = KM_NOSLEEP;
}
/*
* XXX make sure cookie size doesn't exceed sinfo->si_max_cookie_size;
*/
/*
* figure out the rough estimate of array size
* At a minimum, each cookie must hold 1 page.
* At a maximum, it cannot exceed dma_attr_sgllen
*/
max_dcookies++;
/* allocate the dvma cookie array */
return (DDI_FAILURE);
}
/* allocate the "phys" cookie array */
return (DDI_FAILURE);
}
/* allocate the "real" cookie array - the one given to users */
if (max_cookies > prealloc) {
goto fail;
}
} else {
/* the preallocated buffer fits this size */
}
return (DDI_SUCCESS);
fail:
dma->dp_cookie_size = 0;
dma->dp_max_cookies = 0;
dma->dp_max_dcookies = 0;
sinfo->si_max_pages = 0;
return (DDI_FAILURE);
}
/*ARGSUSED*/
static void
{
paddr &= IMMU_PAGEMASK;
/*
* check to see if this page would put us
* over the max cookie size.
*/
dvmax++; /* use the next dvcookie */
dmax++; /* also means we use the next dcookie */
}
/*
* check to see if this page would make us larger than
* the nocross boundary. If yes, create a new cookie
* otherwise we will fail later with vmem_xalloc()
* due to overconstrained alloc requests
* nocross == 0 implies no nocross constraint.
*/
if (nocross > 0) {
<= nocross);
> nocross) {
dvmax++; /* use the next dvcookie */
dmax++; /* also means we use the next dcookie */
}
<= nocross);
}
/*
* If the cookie is empty
*/
== 0);
} else {
/* Cookie not empty. Add to it */
/* Check if this paddr is contiguous */
} else {
/* No, we need a new dcookie */
dmax++;
}
}
}
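/*
 * Illustrative sketch of the nocross check above: a cookie may not
 * grow across a dma_attr_nocross boundary, so a page that would make
 * the cookie straddle a boundary starts a new cookie instead. Pure
 * arithmetic; the example_* name is an assumption.
 */
#ifdef IMMU_EXAMPLE_SKETCHES
static boolean_t
example_crosses_boundary(uint64_t cookie_dvma, uint64_t cookie_size,
    uint64_t nocross)
{
	/* nocross == 0 implies no nocross constraint */
	if (nocross == 0)
		return (B_FALSE);

	/* does the cookie plus one more page straddle a boundary? */
	return ((cookie_dvma / nocross) !=
	    ((cookie_dvma + cookie_size + IMMU_PAGESIZE - 1) / nocross));
}
#endif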
static void
{
int i;
/* First calculate the total number of pages required */
}
/* Now allocate dvma */
/* Now map the dvma */
/* Invalidate the IOTLB */
/* Now setup dvcookies and real cookie addresses */
}
#ifdef TEST
#endif
}
/*
* cookie_create()
*/
static int
{
/*
* Allocate cookie, dvcookie and dcookie
*/
return (DDI_FAILURE);
}
pcnt = 0;
/* retrieve paddr, psize, offset from dmareq */
if (buftype == DMA_OTYP_PAGES) {
} else {
(buftype == DMA_OTYP_BUFVADDR));
}
pcnt++;
} else {
}
}
/* save the iommu page offset */
/*
* setup dvcookie and dcookie for [paddr, paddr+psize)
*/
while (size > 0) {
/* get the size for this page (i.e. partial or full page) */
if (buftype == DMA_OTYP_PAGES) {
/* get the paddr from the page_t */
/* index into the array of page_t's to get the paddr */
pcnt++;
} else {
/* call into the VM to get the paddr */
}
/*
* set dvcookie and dcookie for [paddr, paddr+psize)
*/
}
/* take into account the offset into the first page */
/* save away how many cookies we have */
return (DDI_SUCCESS);
}
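/*
 * Illustrative sketch of the paddr retrieval above for the two buffer
 * types: for DMA_OTYP_PAGES the pfn comes straight from the page_t;
 * for vaddr-based objects the HAT is asked for the backing pfn.
 * pfn_to_pa()/page_pptonum()/hat_getpfnum() are the standard VM
 * interfaces; the example_* name is an assumption.
 */
#ifdef IMMU_EXAMPLE_SKETCHES
static uint64_t
example_buf_paddr(page_t *pp, struct hat *hat, caddr_t vaddr)
{
	if (pp != NULL) {
		/* DMA_OTYP_PAGES: paddr comes from the page_t */
		return (pfn_to_pa(page_pptonum(pp)));
	}

	/* DMA_OTYP_VADDR/BUFVADDR: call into the VM for the paddr */
	return (pfn_to_pa(hat_getpfnum(hat, vaddr)));
}
#endif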
/* ############################# Functions exported ######################## */
/*
* setup the DVMA subsystem
* this code runs only for the first IOMMU unit
*/
void
{
/* locks */
/* Create lists */
/* Setup BDF domain hash */
nchains = 0xff;
}
}
/*
* Start up one DVMA unit
*/
void
{
if (immu_gfxdvma_enable == B_FALSE &&
return;
}
/*
* DVMA will start once IOMMU is "running"
*/
}
/*
* immu_dvma_physmem_update()
* called when the installed memory on a
* system increases, to expand domain DVMA
* for domains with UNITY mapping
*/
void
{
int dcount;
/*
* Just walk the system-wide list of domains with
* UNITY mapping. Both the list of *all* domains
* and *UNITY* domains is protected by the same
* single lock
*/
/* There is no vmem_arena for unity domains. Just map it */
dcount = 1;
}
}
int
{
int dcount = 0;
int r = DDI_FAILURE;
/*
* Intel IOMMU will only be turned on if IOMMU
* page size is same as MMU page size
*/
/*LINTED*/
/* Can only do DVMA if dip is attached */
/*NOTREACHED*/
}
/*
* possible that there is no IOMMU unit for this device
* - BIOS bugs are one example.
*/
return (DDI_DMA_NORESOURCES);
}
/*
* redirect isa devices attached under lpc to lpc dip
*/
/*NOTREACHED*/
}
}
/* Reset immu, as redirection can change IMMU */
/*
* for gart, redirect to the real graphics devinfo
*/
/*NOTREACHED*/
}
}
/*
* Setup DVMA domain for the device. This does
* real work only the first time we do DVMA for a
* device.
*/
return (DDI_DMA_NORESOURCES);
}
/*
* If a domain is found, we must also have a domain dip
* which is the topmost ancestor dip of rdip that shares
* the same domain with rdip.
*/
return (DDI_DMA_NORESOURCES);
}
/* mapping already done. Let rootnex create cookies */
r = DDI_DMA_USE_PHYSICAL;
} else if (immu_flags & IMMU_FLAGS_DMAHDL) {
/* if we have a DMA handle, the IOMMUs must be running */
"DMA handle (%p): NULL attr", hp);
/*NOTREACHED*/
}
return (DDI_DMA_NORESOURCES);
}
r = DDI_DMA_MAPPED;
} else if (immu_flags & IMMU_FLAGS_MEMRNG) {
dcount = 1;
r = DDI_DMA_MAPPED;
} else {
/*NOTREACHED*/
}
/*
* Update the root and context entries
*/
!= DDI_SUCCESS) {
return (DDI_DMA_NORESOURCES);
}
return (r);
}
int
{
/*
* Intel IOMMU will only be turned on if IOMMU
* page size is same as MMU page size
*/
/*LINTED*/
/* rdip need not be attached */
return (DDI_DMA_NORESOURCES);
}
/*
* Get the device domain; this should always
* succeed, since there had to be a domain to
* setup DVMA.
*/
/*NOTREACHED*/
}
/*
* possible that there is no IOMMU unit for this device
* - BIOS bugs are one example.
*/
return (DDI_DMA_NORESOURCES);
}
/*
* redirect isa devices attached under lpc to lpc dip
*/
/*NOTREACHED*/
}
}
/* Reset immu, as redirection can change IMMU */
/*
* for gart, redirect to the real graphics devinfo
*/
/*NOTREACHED*/
}
}
"a device without domain or with an uninitialized "
"domain");
return (DDI_DMA_NORESOURCES);
}
/*
* immu must be set in the domain.
*/
/*
* domain is unity; nothing to do here. Let the rootnex
* code free the cookies.
*/
return (DDI_DMA_USE_PHYSICAL);
}
"no private dma structure", hp);
/*NOTREACHED*/
}
/* No invalidation needed for unmap */
return (DDI_SUCCESS);
}
immu_devi_t *
immu_devi_get(dev_info_t *rdip)
{
immu_devi_t *immu_devi;

/* Just want atomic reads. No need for lock */
immu_devi = (immu_devi_t *)atomic_cas_ptr(&(DEVI(rdip)->devi_iommu),
NULL, NULL);
return (immu_devi);
}