intel_iommu.c revision 408a1f8e8d62a79cfb7c54491234b73c736aee40
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Portions Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2008, Intel Corporation.
* All rights reserved.
*/
/*
 * Intel IOMMU implementation
*/
#include <sys/pci_impl.h>
#include <sys/sysmacros.h>
#include <sys/ddidmareq.h>
#include <sys/ddi_impldefs.h>
#include <sys/smp_impldefs.h>
#include <sys/archsystm.h>
#include <sys/x86_archext.h>
#include <sys/bootconf.h>
#include <sys/bootinfo.h>
#include <sys/intel_iommu.h>
/*
 * internal variables
 * iommu_states - the list of iommu structures
 * reserve_memory - the list of reserved regions
 * page_num - the count of pages allocated for iommu page tables
 */
static list_t iommu_states;
static list_t reserve_memory;
static uint_t page_num;
/*
* record some frequently used dips
*/
/*
* dvma cache related variables
*/
/*
* this is used when there is a dedicated drhd for the
* gfx
*/
int gfx_drhd_disable = 0;
/*
 * switch to disable the dmar remapping unit, even after the
 * initialization work has finished
*/
int dmar_drhd_disable = 0;
static char *dmar_fault_reason[] = {
"Reserved",
"The present field in root-entry is Clear",
"The present field in context-entry is Clear",
"Hardware detected invalid programming of a context-entry",
"The DMA request attempted to access an address beyond max support",
"The Write field in a page-table entry is Clear when DMA write",
"The Read field in a page-table entry is Clear when DMA read",
"Access the next level page table resulted in error",
"Access the root-entry table resulted in error",
"Access the context-entry table resulted in error",
"Reserved field not initialized to zero in a present root-entry",
"Reserved field not initialized to zero in a present context-entry",
"Reserved field not initialized to zero in a present page-table entry",
"DMA blocked due to the Translation Type field in context-entry",
"Incorrect fault event reason number"
};
#define	DMAR_MAX_REASON_NUMBER	(14)
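
/*
 * Illustrative helper, not part of the original code: a fault reason
 * number reported by the hardware indexes the table above; values past
 * DMAR_MAX_REASON_NUMBER are clamped to the final "Incorrect fault
 * event reason number" entry.  The helper name is hypothetical.
 */
static const char *
dmar_fault_reason_str(int reason)
{
	if (reason < 0 || reason > DMAR_MAX_REASON_NUMBER)
		reason = DMAR_MAX_REASON_NUMBER;

	return (dmar_fault_reason[reason]);
}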
/*
* cpu_clflush()
* flush the cpu cache line
*/
static void
cpu_clflush(caddr_t addr, uint_t size)
{
uint_t i;
for (i = 0; i < size; i += x86_clflush_size) {
clflush_insn(addr+i);
}
mfence_insn();
}
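
/*
 * Usage sketch (illustrative only): on an IOMMU whose page-table walk
 * is not cache coherent, a freshly written translation structure must
 * be flushed out of the CPU cache before the hardware can observe it,
 * for example:
 *
 *	cpu_clflush((caddr_t)pte, sizeof (*pte));
 */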
/*
* iommu_page_init()
* do some init work for the iommu page allocator
*/
static void
iommu_page_init(void)
{
page_num = 0;
}
/*
* iommu_get_page()
 * get a 4k iommu page, and zero it out
*/
static paddr_t
{
page_num++;
return (paddr);
}
/*
* iommu_free_page()
* free the iommu page allocated with iommu_get_page
*/
static void
{
page_num--;
}
((iommu)->iu_reg_handle, \
((iommu)->iu_reg_handle, \
/*
* calculate_agaw()
* calculate agaw from gaw
*/
static int
calculate_agaw(int gaw)
{
	int r, agaw;

	/* round gaw up so the bits above the page offset form 9-bit levels */
	r = (gaw - 12) % 9;

	if (r == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - r;

	if (agaw > 64)
		agaw = 64;

	return (agaw);
}
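
/*
 * Worked examples (informational, not from the original source): the
 * adjusted GAW rounds the GAW up so that the bits above the 12-bit
 * page offset form whole 9-bit page-table levels, capped at 64:
 *
 *	gaw = 36:  r = (36 - 12) % 9 = 6,  agaw = 36 + 9 - 6 = 39
 *	gaw = 48:  r = (48 - 12) % 9 = 0,  agaw = 48
 *	gaw = 66:  r = 0, agaw = 66, clamped to 64
 */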
/*
* destroy_iommu_state()
 * destroy an iommu state
*/
static void
{
}
/*
* iommu_update_stats - update iommu private kstat counters
*
* This routine will dump and reset the iommu's internal
* statistics counters. The current stats dump values will
* be sent to the kernel status area.
*/
static int
{
const char *state;
if (rw == KSTAT_WRITE)
return (EACCES);
return (0);
}
/*
* iommu_init_stats - initialize kstat data structures
*
* This routine will create and initialize the iommu private
* statistics counters.
*/
int
{
/*
* Create and init kstat
*/
"misc", KSTAT_TYPE_NAMED,
sizeof (iommu_kstat_t) / sizeof (kstat_named_t), 0);
"Could not create kernel statistics for %s",
return (DDI_FAILURE);
}
/*
* Initialize all the statistics
*/
/*
* Function to provide kernel stat update on demand
*/
/*
* Pointer into provider's raw statistics
*/
/*
* Add kstat to systems kstat chain
*/
return (DDI_SUCCESS);
}
/*
* iommu_intr_handler()
* the fault event handler for a single drhd
*/
static int
{
int index, fault_reg_offset;
int sindex, max_fault_index;
/* read the fault status */
/* check if we have a pending fault for this IOMMU */
if (!(status & IOMMU_FAULT_STS_PPF)) {
return (0);
}
/*
* handle all primary pending faults
*/
/*
* don't loop forever for a misbehaving IOMMU. Return after 1 loop
	 * so that we make some progress.
*/
do {
if (index > max_fault_index)
index = 0;
/* read the higher 64bits */
/* check if pending fault */
if (!IOMMU_FRR_GET_F(val))
break;
/* get the fault reason, fault type and sid */
/* read the first 64bits */
/* clear the fault */
/* report the fault info */
"%s generated a fault event when translating DMA %s\n"
"the reason is:\n\t %s",
/*
* At this point we have cleared the overflow if any
*/
	/* clear overflow */
if (status & IOMMU_FAULT_STS_PFO) {
#ifdef DEBUG
"Clearing fault overflow");
#endif
}
return (1);
}
/*
* intel_iommu_intr_handler()
* call iommu_intr_handler for each iommu
*/
static uint_t
{
int claimed = 0;
}
}
/*
* intel_iommu_add_intr()
* the interface to hook dmar interrupt handler
*/
static void
intel_iommu_add_intr(void)
{
msi_addr = (MSI_ADDR_HDR |
apic_cpus[0].aci_local_id);
(void) iommu_intr_handler(iommu);
}
}
/*
* wait max 60s for the hardware completion
*/
#define IOMMU_WAIT_TIME 60000000
{ \
while (1) { \
ntick = ddi_get_lbolt(); \
if (completion) {\
break; \
} \
"iommu wait completion time out\n"); \
} else { \
iommu_cpu_nop();\
}\
}\
}
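
/*
 * A minimal sketch, not the driver's actual code, of the bounded
 * polling pattern the macro above implements: spin on a completion
 * predicate and panic if IOMMU_WAIT_TIME microseconds elapse first.
 * The function and parameter names here are hypothetical.
 */
static void
sample_wait_for_completion(boolean_t (*done)(void *), void *arg)
{
	clock_t start = ddi_get_lbolt();

	while (!done(arg)) {
		if (ddi_get_lbolt() - start >=
		    drv_usectohz(IOMMU_WAIT_TIME)) {
			cmn_err(CE_PANIC,
			    "iommu wait completion time out\n");
		}
		iommu_cpu_nop();
	}
}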
/*
* dmar_flush_write_buffer()
* flush the write buffer
*/
static void
{
/* record the statistics */
}
/*
* dmar_flush_iotlb_common()
* flush the iotlb cache
*/
static void
{
/*
*/
}
}
/*
	 * if the hardware doesn't support page selective invalidation, we
* will use domain type. Otherwise, use global type
*/
switch (type) {
case TLB_INV_G_PAGE:
addr & IOMMU_PAGE_OFFSET) {
goto ignore_psi;
}
break;
case TLB_INV_G_DOMAIN:
break;
case TLB_INV_G_GLOBAL:
break;
default:
return;
}
/*
* do the actual flush
*/
/* verify there is no pending command */
if (iva)
/*
* check the result and record the statistics
*/
switch (TLB_INV_GET_IAIG(status)) {
/* global */
case 1:
break;
/* domain */
case 2:
break;
/* psi */
case 3:
break;
default:
break;
}
}
/*
* dmar_flush_iotlb_psi()
* register based iotlb psi invalidation
*/
static void
{
uint_t used_count = 0;
	/* choose page selective invalidation */
/* MAMV is valid only if PSI is set */
while (count != 0) {
/* First calculate alignment of DVMA */
;
/* truncate count to the nearest power of 2 */
;
}
if (align >= used_count) {
} else {
/* align < used_count */
used_count = align;
;
}
count -= used_count;
}
/* choose domain invalidation */
} else {
0, 0, TLB_INV_G_DOMAIN);
}
}
/*
* dmar_flush_iotlb_dsi()
* flush dsi iotlb
*/
static void
{
}
/*
* dmar_flush_iotlb_glb()
 * flush the global iotlb
*/
static void
{
}
/*
* dmar_flush_context_cache()
* flush the context cache
*/
static void
{
/*
* define the command
*/
switch (type) {
case CTT_INV_G_DEVICE:
break;
case CTT_INV_G_DOMAIN:
break;
case CTT_INV_G_GLOBAL:
break;
default:
return;
}
/* verify there is no pending command */
/* record the context cache statistics */
}
/*
* dmar_flush_context_fsi()
* function based context cache flush
*/
static void
{
}
/*
* dmar_flush_context_dsi()
* domain based context cache flush
*/
static void
{
}
/*
* dmar_flush_context_gbl()
* flush global context cache
*/
static void
{
}
/*
* dmar_set_root_entry_table()
* set root entry table
*/
static void
{
}
/*
* dmar_enable_unit()
* enable the dmar unit
*/
static void
{
}
/*
* iommu_bringup_unit()
 * the process of bringing up a dmar unit
*/
static void
{
/*
* flush the iommu write buffer
*/
/*
* set root entry table
*/
/*
* flush the context cache
*/
/*
* flush the iotlb cache
*/
/*
* at last enable the unit
*/
}
/*
* iommu_dvma_cache_get()
* get a dvma from the cache
*/
static uint64_t
{
if (index >= DVMA_CACHE_HEAD_CNT)
return (0);
((nocross == 0) ||
< (nocross - 1)))) {
break;
}
}
if (cache_node) {
return (ioaddr);
}
return (0);
}
/*
* iommu_dvma_cache_put()
* put a dvma to the cache after use
*/
static void
{
/* out of cache range */
if (index >= DVMA_CACHE_HEAD_CNT) {
return;
}
/* get a node block */
if (cache_node) {
}
/* no cache, alloc one */
if (cache_node == NULL) {
}
/* initialize this node */
/* insert into the free list */
/* shrink the cache list */
}
if (shrink) {
}
}
/*
* iommu_dvma_cache_flush()
 * flush the dvma caches when vmem_xalloc() fails
*/
static void
{
ddi_node_name(dip));
while (cache_node) {
}
}
}
/*
* get_dvma_cookie_array()
* get a dvma cookie array from the cache or allocate
*/
static iommu_dvma_cookie_t *
{
if (array_size > MAX_COOKIE_CACHE_SIZE) {
KM_SLEEP));
}
/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
cache_head->dch_count--;
}
if (cookie) {
return (cookie);
} else {
KM_SLEEP));
}
}
/*
* put_dvma_cookie_array()
* put a dvma cookie array to the cache or free
*/
static void
{
if (array_size > MAX_COOKIE_CACHE_SIZE) {
return;
}
cache_head->dch_count++;
}
/*
* dmar_reg_plant_wait()
* the plant wait operation for register based cache invalidation
*/
static void
{
/* get a node */
if (node) {
}
/* no cache, alloc one */
}
/* initialize this node */
/* insert into the pend list */
head->ich_pend_count++;
}
/*
* dmar_release_dvma_cookie()
* release the dvma cookie
*/
static void
{
uint_t i;
/* free dvma */
for (i = 0; i < count; i++) {
}
/* free the cookie array */
}
/*
* dmar_reg_reap_wait()
* the reap wait operation for register based cache invalidation
*/
static void
{
if (node) {
head->ich_pend_count--;
}
if (node) {
/* put the node into the node cache */
}
}
/*
* dmar_init_ops()
* init dmar ops
*/
static void
{
/* initialize the dmar operations */
/* cpu clflush */
if (iommu->iu_coherency) {
} else {
}
/* write buffer */
} else {
}
/* cache related functions */
}
/*
* create_iommu_state()
* alloc and setup the iommu state
*/
static int
{
int bitnum;
int ret;
static ddi_device_acc_attr_t ioattr = {
};
/*
* map the register address space
*/
&(iommu->iu_reg_handle));
if (ret != DDI_SUCCESS) {
return (DDI_FAILURE);
}
(void *)ipltospl(IOMMU_INTR_IPL));
/*
* get the register value
*/
/*
* if the hardware access is non-coherent, we need clflush
*/
} else {
if (!(x86_feature & X86_CLFSH)) {
"missing clflush functionality");
return (DDI_FAILURE);
}
}
/*
* retrieve the maximum number of domains
*/
/*
* setup the domain id allocator
* domain id 0 is reserved by the architecture
*/
/*
* calculate the agaw
*/
while (bitnum < 5) {
break;
else
bitnum++;
}
if (bitnum >= 5) {
/*NOTREACHED*/
return (DDI_FAILURE);
} else {
}
/*
	 * the iommu is initially disabled
*/
iommu->iu_global_cmd_reg = 0;
/*
* init kstat
*/
(void) iommu_init_stats(iommu);
/*
* init dmar ops
*/
/*
	 * alloc the root entry table; this must be done after init ops
*/
/*
* initialize the iotlb pending list and cache
*/
MUTEX_DRIVER, NULL);
sizeof (iotlb_pend_node_t),
MUTEX_DRIVER, NULL);
sizeof (iotlb_pend_node_t),
/*
* insert this iommu into the list
*/
/*
* report this unit
*/
return (DDI_SUCCESS);
}
/*
* memory_region_overlap()
* handle the pci mmio pages overlap condition
*/
static boolean_t
{
rmem->rm_pfn_start);
rmem->rm_pfn_end);
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* collect_pci_mmio_walk
* reserve a single dev mmio resources
*/
static int
{
/*
	 * ignore the devices which have no assigned-address
* properties
*/
&length) != DDI_PROP_SUCCESS)
return (DDI_WALK_CONTINUE);
for (i = 0; i < account; i++) {
/*
* check the memory io assigned-addresses
		 * refer to pci.h for the bit definitions of
* pci_phys_hi
*/
== PCI_ADDR_MEM32) ||
== PCI_ADDR_MEM64)) {
KM_SLEEP);
if (!memory_region_overlap(rmem)) {
}
}
}
return (DDI_WALK_CONTINUE);
}
/*
* collect_pci_mmio()
* walk through the pci device tree, and collect the mmio resources
*/
static int
{
int count;
/*
* walk through the device tree under pdip
* normally, pdip should be the pci root nexus
*/
return (DDI_SUCCESS);
}
/*
* iommu_collect_reserve_memory()
 * collect the reserved memory regions
*/
static void
{
/*
* reserve pages for pci memory mapped io
*/
(void) collect_pci_mmio(pci_top_devinfo);
/*
* reserve pages for ioapic
*/
}
/*
* match_dip_sbdf()
* walk function for get_dip_from_info()
*/
static int
{
if (private &&
return (DDI_WALK_TERMINATE);
}
return (DDI_WALK_CONTINUE);
}
/*
* get_dip_from_info()
*/
static int
{
int count;
return (DDI_SUCCESS);
else
return (DDI_FAILURE);
}
/*
* get_pci_top_bridge()
* get the top level bridge for a pci device
*/
static dev_info_t *
{
while (pdip != pci_top_devinfo) {
}
return (tmp);
}
/*
* domain_vmem_init_reserve()
* dish out the reserved pages
*/
static void
{
size, /* size */
IOMMU_PAGE_SIZE, /* align/quantum */
0, /* phase */
0, /* nocross */
VM_NOSLEEP) == NULL) {
}
}
}
/*
* domain_vmem_init()
* initiate the domain vmem
*/
static void
{
char vmem_name[64];
static uint_t vmem_instance = 0;
/*
* create the whole available virtual address and
* dish out the reserved memory regions with xalloc
*/
"domain_vmem_%d", vmem_instance++);
size, /* size */
IOMMU_PAGE_SIZE, /* quantum */
NULL, /* afunc */
NULL, /* ffunc */
NULL, /* source */
0, /* qcache_max */
VM_SLEEP);
/*
* dish out the reserved pages
*/
}
/*
* iommu_domain_init()
 * initialize a domain
*/
static int
{
uint_t i;
/*
* allocate the domain id
*/
}
/*
* record the domain statistics
*/
/*
* create vmem map
*/
/*
* create the first level page table
*/
/*
* init the CPU available page tables
*/
/*
* init the dvma cache
*/
for (i = 0; i < DVMA_CACHE_HEAD_CNT; i++) {
/* init the free list */
sizeof (dvma_cache_node_t),
/* init the memory cache list */
sizeof (dvma_cache_node_t),
}
return (DDI_SUCCESS);
}
/*
* dmar_check_sub()
 * check to see if the device is under the scope of a p2p bridge
*/
static boolean_t
{
return (B_TRUE);
}
return (B_FALSE);
}
/*
* iommu_get_dmar()
* get the iommu structure for a device
*/
static intel_iommu_state_t *
{
/*
* walk the drhd list for a match
*/
/*
* match the include all
*/
if (drhd->di_include_all)
return ((intel_iommu_state_t *)
/*
* try to match the device scope
*/
/*
* get a perfect match
*/
return ((intel_iommu_state_t *)
}
/*
		 * maybe under the scope of a p2p bridge
*/
return ((intel_iommu_state_t *)
}
}
/*
* shouldn't get here
*/
ddi_node_name(dip));
return (NULL);
}
/*
* domain_set_root_context
* set root context for a single device
*/
static void
{
/*
* set root entry
*/
if (!ROOT_ENTRY_GET_P(rce)) {
} else {
}
/* set context entry */
if (!CONT_ENTRY_GET_P(rce)) {
} else if (CONT_ENTRY_GET_ASR(rce) !=
" %d, %d, %d has been set", bus,
}
/* cache mode set, flush context cache */
/* cache mode not set, flush write buffer */
} else {
}
}
/*
* setup_single_context()
* setup the root context entry
*/
static void
{
}
/*
* setup_context_walk()
* the walk function to set up the possible context entries
*/
static int
{
return (DDI_WALK_PRUNECHILD);
}
/*
* setup_possible_contexts()
* set up all the possible context entries for a device under ppb
*/
static void
{
int count;
/* for pci-pci bridge */
return;
}
/* for pcie-pci bridge */
/* for functions under pcie-pci bridge */
}
/*
* iommu_alloc_domain()
 * allocate a domain for the device; the result is returned in the
 * domain parameter
*/
static int
{
need_to_set_parent = 0;
ddi_node_name(dip));
}
/*
	 * check if the domain has already been allocated
*/
if (private->idp_domain) {
return (DDI_SUCCESS);
}
/*
* we have to assign a domain for this device,
*/
}
/*
* hold the parent for modifying its children
*/
/*
* check to see if it is under a pci bridge
*/
if (b_private->idp_domain) {
goto get_domain_finish;
} else {
need_to_set_parent = 1;
}
}
/*
* OK, we have to allocate a new domain
*/
/*
* setup the domain
*/
if (need_to_set_parent)
return (DDI_FAILURE);
}
/*
* add the device to the domain's device list
*/
if (need_to_set_parent) {
} else {
}
/*
* return new domain
*/
return (DDI_SUCCESS);
}
/*
* iommu_get_domain()
 * get an iommu domain for dip; the result is returned in domain
*/
static int
{
/*
* for isa devices attached under lpc
*/
if (lpc_devinfo) {
} else {
return (DDI_FAILURE);
}
}
/*
* for gart, use the real graphic devinfo
*/
if (gfx_devinfo) {
} else {
return (DDI_FAILURE);
}
}
/*
* if iommu private is NULL, we share
* the domain with the parent
*/
}
/*
	 * check if the domain has already been allocated
*/
if (private->idp_domain) {
return (DDI_SUCCESS);
}
/*
* allocate a domain for this device
*/
}
/*
* helper functions to manipulate iommu pte
*/
static inline void
{
}
static inline paddr_t
{
return (*pte & IOMMU_PAGE_MASK);
}
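
/*
 * Illustrative sketch, not the driver's actual helper: a VT-d
 * second-level leaf entry carries the page frame address in the bits
 * covered by IOMMU_PAGE_MASK and the Read/Write permissions in bits 0
 * and 1.  The macro and function names below are hypothetical.
 */
#define	SAMPLE_PTE_READ		(1ULL << 0)	/* VT-d R bit */
#define	SAMPLE_PTE_WRITE	(1ULL << 1)	/* VT-d W bit */

static inline uint64_t
sample_make_leaf_pte(paddr_t paddr)
{
	return ((paddr & IOMMU_PAGE_MASK) |
	    SAMPLE_PTE_READ | SAMPLE_PTE_WRITE);
}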
/*
* dvma_level_offset()
* get the page table offset by specifying a dvma and level
*/
static inline uint_t
{
return (offset);
}
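
/*
 * A worked sketch of the usual VT-d walk arithmetic (assuming 4K pages
 * and 9 index bits per level; not the driver's actual macros): level 1
 * selects dvma bits 20:12, level 2 selects bits 29:21, and so on.
 */
static inline uint_t
sample_level_offset(uint64_t dvma, uint_t level)
{
	/* skip the 12-bit page offset plus 9 bits per lower level */
	return ((uint_t)(dvma >> (12 + 9 * (level - 1))) & 0x1ff);
}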
/*
* iommu_setup_level_table()
* setup the page table for a level
*/
static iovpte_t
{
/*
	 * the pte is not present, alloc a new page
*/
sizeof (*pte));
}
return (vpte);
}
/*
* iommu_setup_page_table()
* setup the page table for a dvma
*/
static caddr_t
{
int i;
for (i = level; i > 1; i--) {
}
}
/*
* iommu_map_page_range()
* map a range of pages for iommu translation
*
* domain: the device domain
* dvma: the start dvma for mapping
 * start: the start physical address
* end: the end physical address
* flags: misc flag
*/
static int
{
count = 0;
"non-NULL pte");
}
} else {
}
paddr += IOMMU_PAGE_SIZE;
offset++;
count++;
}
/* flush cpu and iotlb cache */
if (!(flags & IOMMU_PAGE_PROP_NOSYNC)) {
/* cache mode set, flush iotlb */
/* cache mode not set, flush write buffer */
} else {
}
}
}
return (DDI_SUCCESS);
}
/*
* build_single_rmrr_identity_map()
* build identity map for a single rmrr unit
*/
static void
{
continue;
}
continue;
}
/*
* setup the page tables
*/
size, /* size */
IOMMU_PAGE_SIZE, /* align/quantum */
0, /* phase */
0, /* nocross */
VM_NOSLEEP) != NULL)) {
(void) iommu_map_page_range(domain,
} else {
}
}
}
/*
* build_rmrr_identity_map()
* build identity mapping for devices under rmrr scopes
*/
static void
build_rmrr_identity_map(void)
{
int i;
for (i = 0; i < DMAR_MAX_SEGMENT; i++) {
break;
}
}
}
/*
* drhd_only_for_gfx()
 * return TRUE if the drhd is only for gfx
*/
static boolean_t
{
int dev_num;
if (drhd->di_include_all)
return (B_FALSE);
/* get the device number attached to this drhd */
dev_num = 0;
dev_num++;
}
if (dev_num == 1) {
return (B_FALSE);
}
if (private->idp_is_display)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* build_dev_identity_map()
* build identity map for a device
*/
static void
{
"this device may not be functional",
ddi_node_name(dip));
return;
}
while (mp != 0) {
(void) iommu_map_page_range(domain,
}
/*
	 * record the identity map for the domain; any device
	 * which uses this domain will not need any further
	 * mapping
*/
}
/*
* build_isa_gfx_identity_walk()
* the walk function for build_isa_gfx_identity_map()
*/
static int
{
/* ignore the NULL private device */
if (!private)
return (DDI_WALK_CONTINUE);
/* fix the gfx and fd */
if (private->idp_is_display) {
gfx_devinfo = dip;
} else if (private->idp_is_lpc) {
lpc_devinfo = dip;
}
/* workaround for pci8086,10bc pci8086,11bc */
" Port LP Server Adapter applied\n");
}
return (DDI_WALK_CONTINUE);
}
/*
* build_isa_gfx_identity_map()
* build identity map for isa and gfx devices
*/
static void
{
int count;
/*
* walk through the device tree from pdip
* normally, pdip should be the pci root
*/
}
/*
* dmar_check_boot_option()
* check the intel iommu boot option
*/
static void
{
int len;
char *boot_option;
opt);
*var = 1;
opt);
*var = 0;
}
}
}
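
/*
 * A minimal sketch, assuming the option is a boolean "true"/"false"
 * string property on the root node; this is not the original parsing
 * code, and the helper name and property handling are illustrative
 * only.
 */
static void
sample_check_bool_option(char *opt, int *var)
{
	char *val;

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
	    DDI_PROP_DONTPASS, opt, &val) != DDI_PROP_SUCCESS)
		return;

	if (strcmp(val, "true") == 0)
		*var = 1;
	else if (strcmp(val, "false") == 0)
		*var = 0;

	ddi_prop_free(val);
}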
extern void (*rootnex_iommu_add_intr)(void);
/*
* intel_iommu_attach_dmar_nodes()
* attach intel iommu nodes
*/
int
{
int i;
/*
* retrieve the dmar boot options
*/
/*
* init the lists
*/
if (pci_top_devinfo == NULL) {
return (DDI_FAILURE);
}
/*
	 * initialize each iommu unit
*/
for (i = 0; i < DMAR_MAX_SEGMENT; i++) {
goto iommu_init_fail;
}
}
/*
* collect the reserved memory pages
*/
/*
* build identity map for devices in the rmrr scope
*/
/*
* build identity map for isa and gfx devices
*/
/*
* initialize the dvma cookie cache
*/
for (i = 0; i < MAX_COOKIE_CACHE_SIZE; i++) {
MUTEX_DRIVER, NULL);
cookie_cache[i].dch_count = 0;
}
/*
	 * register the interrupt add function
*/
/*
* enable dma remapping
*/
if (!dmar_drhd_disable) {
if (gfx_drhd_disable &&
continue;
}
}
return (DDI_SUCCESS);
/*
* free the reserve memory list
*/
}
/*
* free iommu state structure
*/
}
return (DDI_FAILURE);
}
/*
* get_level_table()
 * get the level n page table; NULL is returned if a
 * failure is encountered
*/
static caddr_t
{
/* walk to the level n page table */
for (i = level; i > n; i--) {
}
}
/*
* iommu_alloc_cookie_array()
* allocate the cookie array which is needed by map sgl
*/
static int
{
int kmflag;
/* figure out the rough estimate of array size */
	/* the preallocated buffer fits this size */
/* we need to allocate new array */
} else {
/* convert the sleep flags */
} else {
kmflag = KM_NOSLEEP;
}
sizeof (ddi_dma_cookie_t);
return (IOMMU_SGL_NORESOURCES);
}
}
/* allocate the dvma cookie array */
return (IOMMU_SGL_SUCCESS);
}
/*
* iommu_alloc_dvma()
* alloc a dvma range for the caller
*/
static int
{
	/* shortcuts */
/* parameters */
/* handle the rollover cases */
}
/* get from cache first */
/* allocate from vmem arena */
/* if xalloc failed, we have to flush the cache and retry */
}
}
/*
* save the dvma range in the device dvma cookie
*/
return (DDI_SUCCESS);
}
/*
* iommu_map_dvma()
 * map dvma to the physical addresses; the number of dvma
 * pages actually mapped is returned
*/
static int
{
int flags;
/* map each physical address */
}
/*
* intel_iommu_map_sgl()
* called from rootnex_dma_bindhdl(), to build dma
* cookies when iommu is enabled
*/
int
{
int e;
/* get domain for the dma request */
return (IOMMU_SGL_NORESOURCES);
}
/* direct return if drhd is disabled */
return (IOMMU_SGL_DISABLE);
/*
	 * allocate the cookie arrays; if the pre-allocated
	 * space is not enough, we reallocate
*/
return (IOMMU_SGL_NORESOURCES);
pcnt = 0;
cnt = 0;
/* retrieve paddr, psize, offset from dmareq */
if (buftype == DMA_OTYP_PAGES) {
} else {
(buftype == DMA_OTYP_BUFVADDR));
}
pcnt++;
} else {
}
}
/* save the iommu page offset */
/*
* allocate the dvma and map [paddr, paddr+psize)
*/
if (e != DDI_SUCCESS)
return (IOMMU_SGL_NORESOURCES);
/*
* setup the first cookie with the dvma of the page
	 * and its size; we don't account for the offset into
	 * the first page yet
*/
dvma += IOMMU_PTOB(e);
while (size > 0) {
/* get the size for this page (i.e. partial or full page) */
if (buftype == DMA_OTYP_PAGES) {
/* get the paddr from the page_t */
/* index into the array of page_t's to get the paddr */
pcnt++;
} else {
/* call into the VM to get the paddr */
}
/*
* check to see if this page would put us
* over the max cookie size
*/
/* use the next cookie */
cnt++;
/* allocate the dvma and map [paddr, paddr+psize) */
if (e != DDI_SUCCESS)
return (IOMMU_SGL_NORESOURCES);
/* save the cookie information */
dvma += IOMMU_PTOB(e);
/*
* we can add this page in the current cookie
*/
} else {
dvma += IOMMU_PTOB(e);
}
}
	/* account for the offset into the first page */
/* save away how many cookies we have */
return (IOMMU_SGL_SUCCESS);
}
/*
* iommu_clear_leaf_pte()
* clear a single leaf pte
*/
static void
{
int count;
/* retrieve the leaf page table */
if (!leaf_table) {
return;
}
/* map the leaf page and walk to the pte */
/* clear the ptes */
count = 0;
(offset < IOMMU_PTE_MAX)) {
if (!*pte) {
} else {
*pte = 0;
}
csize += IOMMU_PAGE_SIZE;
offset++;
count++;
}
/* flush cpu and iotlb cache */
/* unmap the leaf page */
}
}
/*
* intel_iommu_unmap_sgl()
* called from rootnex_dma_unbindhdl(), to unbind dma
* cookies when iommu is enabled
*/
void
{
uint64_t i;
/* get the device domain, no return check needed here */
/* if the drhd is disabled, nothing will be done */
return;
/* the drhd is enabled */
for (i = 0; i < sinfo->si_sgl_size; i++) {
/* clear leaf ptes */
}
}