/* amd_iommu_impl.c revision 7866414c22c54c30309a9b69942372196e667821 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/iommulib.h>
#include <sys/amd_iommu.h>
#include <sys/bootconf.h>
#include <sys/ddidmareq.h>
#include "amd_iommu_impl.h"
#include "amd_iommu_acpi.h"
#include "amd_iommu_page_tables.h"
/*
 * NOTE(review): the lines below look like the tail of a ddi_dma_attr_t
 * initializer (DMA attributes for the IOMMU's table/buffer allocations)
 * whose declaration and leading members are missing from this extract.
 * The doubled "};" suggests at least one more truncated definition here.
 * Confirm against the complete source before building.
 */
0U, /* dma_attr_addr_lo */
0xffffffffffffffffULL, /* dma_attr_addr_hi */
0xffffffffU, /* dma_attr_count_max */
1, /* dma_attr_burstsizes */
64, /* dma_attr_minxfer */
0xffffffffU, /* dma_attr_maxxfer */
0xffffffffU, /* dma_attr_seg */
1, /* dma_attr_sgllen, variable */
64, /* dma_attr_granular */
0 /* dma_attr_flags */
};
};
/*
 * IOMMULIB ops vector registered with the framework. Only the version
 * string and one NULL member are visible here; the remaining function
 * pointers (probe/allochdl/bindhdl/etc.) are missing from this extract.
 */
struct iommulib_ops amd_iommulib_ops = {
"AMD IOMMU Vers. 1",
NULL,
};
/* Serializes page-table manipulation -- presumably across all units; verify. */
static kmutex_t amd_iommu_pgtable_lock;
/*
 * NOTE(review): the three routines below are fragments -- signatures and
 * most statements are missing from this extract. From the "f" tags and
 * comments they appear to be: iommulib registration, unregistration, and
 * pass-thru setup for special devices. Do not treat as compilable.
 */
static int
{
const char *f = "amd_iommu_register";
/* registration call missing from extract; only the failure check remains */
!= DDI_SUCCESS) {
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static int
{
/* we never registered */
return (DDI_SUCCESS);
}
/* unregistration call missing from extract */
!= DDI_SUCCESS) {
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static int
{
/*
 * Setup passthru mapping for "special" devices
 */
}
return (DDI_SUCCESS);
}
/*
 * NOTE(review): amd_iommu_start/amd_iommu_stop fragments. The register
 * write targets (control register accessors) are missing from this
 * extract; only the trailing argument lines remain.
 */
static int
{
const char *f = "amd_iommu_start";
/*
 * Disable HT tunnel translation.
 * XXX use ACPI
 */
if (hinfop) {
if (amd_iommu_debug) {
"amd_iommu: using ACPI for CTRL registers");
}
}
AMD_IOMMU_INVTO, 5);
/*
 * The Device table entry bit 0 (V) controls whether the device
 * table entry is valid for address translation and Device table
 * entry bit 128 (IV) controls whether interrupt remapping is valid.
 * By setting both to zero we are essentially doing pass-thru. Since
 * this table is zeroed on allocation, essentially we will have
 * pass-thru when IOMMU is enabled.
 */
/* Finally enable the IOMMU ... */
AMD_IOMMU_ENABLE, 1);
if (amd_iommu_debug) {
}
return (DDI_SUCCESS);
}
static void
{
const char *f = "amd_iommu_stop";
/* clear the enable bit to halt translation */
AMD_IOMMU_ENABLE, 0);
/*
 * Disable translation on HT tunnel traffic
 */
}
/*
 * NOTE(review): fragment of amd_iommu_setup_tables_and_buffers.
 * Allocates one contiguous DMA region holding the Device Table, Command
 * Buffer and Event Log, binds it, and programs the corresponding base
 * registers. Most of the DDI DMA calls themselves are missing from this
 * extract -- only error checks, asserts, and cleanup lines remain.
 */
static int
{
int i;
int err;
const char *f = "amd_iommu_setup_tables_and_buffers";
/*
 * We will put the Device Table, Command Buffer and
 * Event Log in contiguous memory. Allocate the maximum
 * size allowed for such structures
 * Device Table: 256b * 64K = 32B * 64K
 * Command Buffer: 128b * 32K = 16B * 32K
 * Event Log: 128b * 32K = 16B * 32K
 */
/*
 * Alloc a DMA handle.
 */
if (err != DDI_SUCCESS) {
return (DDI_FAILURE);
}
/*
 * Alloc memory for tables and buffers
 * XXX remove cast to size_t
 */
if (err != DDI_SUCCESS) {
iommu->aiomt_dma_mem_realsz = 0;
return (DDI_FAILURE);
}
/*
 * The VA must be 4K aligned and >= table size
 */
AMD_IOMMU_TABLE_ALIGN) == 0);
/*
 * Now bind the handle
 */
if (err != DDI_DMA_MAPPED) {
"to AMD IOMMU tables and buffers. bufrealsz=%p",
iommu->aiomt_buf_dma_ncookie = 0;
iommu->aiomt_dma_mem_realsz = 0;
return (DDI_FAILURE);
}
/*
 * We assume the DMA engine on the IOMMU is capable of handling the
 * whole table buffer in a single cookie. If not and multiple cookies
 * are needed we fail.
 */
"cookies for DMA to AMD IOMMU tables and buffers. "
iommu->aiomt_buf_dma_ncookie = 0;
iommu->aiomt_dma_mem_realsz = 0;
return (DDI_FAILURE);
}
/*
 * The address in the cookie must be 4K aligned and >= table size
 */
& AMD_IOMMU_TABLE_ALIGN) == 0);
<= iommu->aiomt_dma_mem_realsz);
/*
 * Setup the device table pointers in the iommu struct as
 * well as the IOMMU device table register
 */
/*
 * Set V=1 and TV = 0, so any inadvertant pass-thrus cause
 * page faults. Also set SE bit so we aren't swamped with
 * page fault messages
 */
for (i = 0; i <= AMD_IOMMU_MAX_DEVICEID; i++) {
/*LINTED*/
[i * AMD_IOMMU_DEVTBL_ENTRY_SZ];
}
/*
 * Setup the command buffer pointers
 */
/*LINTED*/
AMD_IOMMU_CMDHEADPTR, 0);
AMD_IOMMU_CMDTAILPTR, 0);
/*
 * Setup the event log pointers
 */
/*LINTED*/
/* dma sync so device sees this init */
if (amd_iommu_debug & AMD_IOMMU_DEBUG_TABLES) {
}
return (DDI_SUCCESS);
}
/*
 * NOTE(review): teardown fragment -- zeroes the event log, command
 * buffer, and device table base/size registers, then unbinds and frees
 * the DMA resources. The register-write and free calls themselves are
 * missing from this extract.
 */
static void
{
const char *f = "amd_iommu_teardown_tables_and_buffers";
AMD_IOMMU_EVENTBASE, 0);
AMD_IOMMU_EVENTLEN, 0);
AMD_IOMMU_COMBASE, 0);
AMD_IOMMU_COMLEN, 0);
AMD_IOMMU_DEVTABBASE, 0);
AMD_IOMMU_DEVTABSIZE, 0);
return;
/* Unbind the handle */
}
iommu->aiomt_buf_dma_ncookie = 0;
/* Free the table memory allocated for DMA */
iommu->aiomt_dma_mem_realsz = 0;
/* Free the DMA handle */
}
/*
 * NOTE(review): fragment -- asserts the command buffer and event log are
 * not yet running before enabling them; completion waits are polled.
 */
static void
{
AMD_IOMMU_CMDBUF_RUN) == 0);
AMD_IOMMU_EVENT_LOG_RUN) == 0);
/* Must be set prior to enabling command buffer */
/* Must be set prior to enabling event logging */
/* No interrupts for completion wait - too heavy weight. use polling */
}
/*
 * NOTE(review): exclusion-range setup/teardown and the MSI interrupt
 * handler, all fragmentary. The handler claims the interrupt for event
 * log and event overflow conditions; the status-register reads are
 * missing from this extract.
 */
static int
{
} else {
if (amd_iommu_debug) {
}
AMD_IOMMU_EXCL_LIM, 0);
}
return (DDI_SUCCESS);
}
static void
{
/* teardown reuses the setup path -- presumably to clear the range; verify */
(void) amd_iommu_setup_exclusion(iommu);
}
static uint_t
{
/*LINTED*/
const char *f = "amd_iommu_intr_handler";
if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
}
AMD_IOMMU_EVENT_LOG_INT) == 1) {
if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
}
WAIT_SEC(1);
return (DDI_INTR_CLAIMED);
}
AMD_IOMMU_EVENT_OVERFLOW_INT) == 1) {
return (DDI_INTR_CLAIMED);
}
return (DDI_INTR_UNCLAIMED);
}
/*
 * NOTE(review): amd_iommu_setup_interrupts fragment. Standard DDI MSI
 * bring-up sequence: query supported types (MSI only), get nintrs/navail,
 * round the request up to a power of two for ddi_intr_alloc, add
 * handlers, then block-enable or individually enable depending on
 * capability flags. Most DDI calls are missing from this extract; only
 * the error paths and debug scaffolding remain.
 */
static int
{
int intrcap0;
int intrcapN;
int type;
int err;
int req;
int avail;
int p2req;
int actual;
int i;
int j;
const char *f = "amd_iommu_setup_interrupts";
return (DDI_FAILURE);
}
if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
}
/*
 * for now we only support MSI
 */
if ((type & DDI_INTR_TYPE_MSI) == 0) {
"MSI interrupts not supported. Failing init.",
return (DDI_FAILURE);
}
if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
}
if (err != DDI_SUCCESS) {
"ddi_intr_get_nintrs failed err = %d",
return (DDI_FAILURE);
}
if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
"MSI number of interrupts requested: %d",
}
if (req == 0) {
"interrupts requested. Failing init", f,
return (DDI_FAILURE);
}
if (err != DDI_SUCCESS) {
"ddi_intr_get_navail failed err = %d", f,
return (DDI_FAILURE);
}
if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
"MSI number of interrupts available: %d",
}
if (avail == 0) {
"interrupts available. Failing init", f,
return (DDI_FAILURE);
}
"interrupts: requested (%d) > available (%d). "
return (DDI_FAILURE);
}
/* Allocate memory for DDI interrupt handles */
KM_SLEEP);
/* Convert req to a power of two as required by ddi_intr_alloc */
p2req = 0;
p2req++;
p2req--;
if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
"MSI power of 2 number of interrupts: %d,%d",
}
if (err != DDI_SUCCESS) {
"ddi_intr_alloc failed: err = %d",
return (DDI_FAILURE);
}
if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
"number of interrupts actually allocated %d",
}
"ddi_intr_alloc failed: actual (%d) < req (%d)",
return (DDI_FAILURE);
}
for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
!= DDI_SUCCESS) {
"ddi_intr_add_handler failed: intr = %d, err = %d",
/* roll back handlers added so far */
for (j = 0; j < i; j++) {
(void) ddi_intr_remove_handler(
iommu->aiomt_intr_htable[j]);
}
return (DDI_FAILURE);
}
}
!= DDI_SUCCESS ||
"ddi_intr_get_cap failed or inconsistent cap among "
"interrupts: intrcap0 (%d) < intrcapN (%d)",
return (DDI_FAILURE);
}
if (intrcap0 & DDI_INTR_FLAG_BLOCK) {
/* Need to call block enable */
if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
"Need to call block enable",
}
"ddi_intr_block enable failed ", f, driver,
return (DDI_FAILURE);
}
} else {
if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
"Need to call individual enable",
}
for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
!= DDI_SUCCESS) {
"ddi_intr_enable failed: intr = %d", f,
/* roll back interrupts enabled so far */
for (j = 0; j < i; j++) {
(void) ddi_intr_disable(
iommu->aiomt_intr_htable[j]);
}
return (DDI_FAILURE);
}
}
}
if (amd_iommu_debug & AMD_IOMMU_DEBUG_INTR) {
"Interrupts successfully %s enabled. # of interrupts = %d",
}
return (DDI_SUCCESS);
}
/*
 * NOTE(review): interrupt teardown fragment -- disables (block or
 * individually), removes handlers, frees each handle, then frees the
 * handle table. The guarding conditionals and free calls are missing
 * from this extract, leaving unbalanced braces.
 */
static void
{
int i;
} else {
for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
(void) ddi_intr_disable(
iommu->aiomt_intr_htable[i]);
}
}
}
for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
(void) ddi_intr_remove_handler(
iommu->aiomt_intr_htable[i]);
}
}
for (i = 0; i < iommu->aiomt_actual_intrs; i++) {
}
}
}
iommu->aiomt_intr_htable_sz = 0;
}
/*
 * NOTE(review): amd_iommu_init fragment. Reads the (locked, read-only)
 * PCI capability block into the amd_iommu_t, maps the control registers,
 * sets up tables/buffers, exclusion, interrupts, and passthru, failing
 * back through amd_iommu_fini on any error. Nearly all assignments and
 * calls are missing from this extract.
 */
static amd_iommu_t *
{
const char *f = "amd_iommu_init";
if (!(low_addr32 & AMD_IOMMU_REG_ADDR_LOCKED)) {
"Unable to use IOMMU unit idx=%d - skipping ...", f, driver,
return (NULL);
}
/*
 * Since everything in the capability block is locked and RO at this
 * point, copy everything into the IOMMU struct
 */
/* Get cap header */
if (hinfop)
else
iommu->aiomt_iotlb =
/*
 * Get address of IOMMU control registers
 */
if (hinfop) {
} else {
}
/*
 * Get cap range reg
 */
if (iommu->aiomt_rng_valid) {
} else {
iommu->aiomt_rng_bus = 0;
iommu->aiomt_first_devfn = 0;
iommu->aiomt_last_devfn = 0;
}
if (hinfop)
else
/*
 * Get cap misc reg
 */
if (global) {
} else {
}
if (hinfop) {
} else {
}
/*
 * Set up mapping between control registers PA and VA
 */
"control regs. Skipping IOMMU idx=%d", f, driver,
(void) amd_iommu_fini(iommu);
return (NULL);
}
/*
 * Setup the various control register's VA
 */
/*
 * Setup the DEVICE table, CMD buffer, and LOG buffer in
 * memory and setup DMA access to this memory location
 */
(void) amd_iommu_fini(iommu);
return (NULL);
}
(void) amd_iommu_fini(iommu);
return (NULL);
}
(void) amd_iommu_fini(iommu);
return (NULL);
}
/*
 * need to setup domain table before gfx bypass
 */
/*
 * Set pass-thru for special devices like IOAPIC and HPET
 *
 * Also, gfx devices don't use DDI for DMA. No need to register
 * before setting up gfx passthru
 */
(void) amd_iommu_fini(iommu);
return (NULL);
}
(void) amd_iommu_fini(iommu);
return (NULL);
}
(void) amd_iommu_fini(iommu);
return (NULL);
}
if (amd_iommu_debug) {
}
return (iommu);
}
/*
 * NOTE(review): amd_iommu_fini, amd_iommu_setup, amd_iommu_teardown
 * fragments. setup walks the PCI capability list of the IOMMU function,
 * validating cap ID (PCI_CAP_ID_SECURE_DEV) and cap type (AMD_IOMMU_CAP)
 * before initializing each unit; teardown iterates units, counting
 * failures into "error". Most statements are missing from this extract.
 */
static int
{
const char *f = "amd_iommu_fini";
return (DDI_FAILURE);
}
}
return (DDI_SUCCESS);
}
int
{
int idx;
const char *f = "amd_iommu_setup";
/* First setup PCI access to config space */
return (DDI_FAILURE);
}
/*
 * The AMD IOMMU is part of an independent PCI function. There may be
 * more than one IOMMU in that PCI function
 */
"subclass(0x%x)/programming interface(0x%x)", f, driver,
return (DDI_FAILURE);
}
/*
 * Find and initialize all IOMMU units in this function
 */
break;
/* check if cap ID is secure device cap id */
if (id != PCI_CAP_ID_SECURE_DEV) {
if (amd_iommu_debug) {
"%s: %s%d: skipping IOMMU: idx(0x%x) "
"cap ID (0x%x) != secure dev capid (0x%x)",
}
continue;
}
/* check if cap type is IOMMU cap type */
if (cap_type != AMD_IOMMU_CAP) {
"cap type (0x%x) != AMD IOMMU CAP (0x%x)", f,
continue;
}
"failed to init IOMMU", f,
continue;
}
} else {
}
statep->aioms_nunits++;
}
if (amd_iommu_debug) {
}
return (DDI_SUCCESS);
}
int
{
int teardown;
int error = DDI_SUCCESS;
const char *f = "amd_iommu_teardown";
teardown = 0;
/* a unit that fails to tear down leaves error set and is skipped */
error = DDI_FAILURE;
continue;
}
statep->aioms_nunits--;
teardown++;
}
return (error);
}
/* Interface with IOMMULIB */
/*
 * NOTE(review): probe fragment -- appears to scan
 * amd_iommu_disable_list for the device's driver name and return
 * DDI_FAILURE on a match (':' or end-of-token delimited); the string
 * search itself is missing from this extract.
 */
/*ARGSUSED*/
static int
{
char *s;
if (amd_iommu_disable_list) {
if (s == NULL)
return (DDI_SUCCESS);
if (*s == '\0' || *s == ':') {
return (DDI_FAILURE);
}
}
}
return (DDI_SUCCESS);
}
/* NOTE(review): allochdl passthrough fragment -- delegates via iommulib. */
/*ARGSUSED*/
static int
{
arg, dma_handlep));
}
/* NOTE(review): freehdl passthrough fragment. */
/*ARGSUSED*/
static int
{
}
/*
 * NOTE(review): map_current_window / unmap_current_window fragments.
 * map walks the cookie array creating IOMMU page-table mappings and
 * rolls back (unmaps the first i cookies) on partial failure; unmap
 * removes mappings per cookie, optionally taking the lock itself when
 * "locked" is false. The mapping calls are missing from this extract.
 */
/*ARGSUSED*/
static int
int km_flags)
{
int i;
char *path;
int error = DDI_FAILURE;
const char *f = "map_current_window";
return (DDI_DMA_NORESOURCES);
}
if (amd_iommu_debug == AMD_IOMMU_DEBUG_PAGE_TABLES) {
"from handle for device %s",
}
start_va = 0;
for (i = 0; i < ccount; i++) {
break;
}
cookie_array[i].dmac_type = 0;
}
if (i != ccount) {
/* partial failure: unwind the cookies mapped so far */
ccount, i, 1);
goto out;
}
}
out:
return (error);
}
/*ARGSUSED*/
static int
{
int i;
int error = DDI_FAILURE;
char *path;
int pathfree;
const char *f = "unmap_current_window";
if (!locked)
if (path) {
pathfree = 1;
} else {
path = "<path-mem-alloc-failed>";
pathfree = 0;
}
if (ncookies == -1)
for (i = 0; i < ncookies; i++) {
AMD_IOMMU_VMEM_MAP) != DDI_SUCCESS) {
break;
}
}
!= DDI_SUCCESS) {
f, path);
}
if (i != ncookies) {
error = DDI_FAILURE;
goto out;
}
error = DDI_SUCCESS;
out:
if (pathfree)
if (!locked)
return (error);
}
/*
 * NOTE(review): bindhdl/unbindhdl fragments. bind delegates to
 * iommulib, fetches the resulting cookie array, maps it through the
 * IOMMU, and returns the first cookie; unbind unmaps then delegates the
 * unbind. The delegation calls and cookie-array fetches are missing
 * from this extract.
 */
/*ARGSUSED*/
static int
{
int dma_error = DDI_DMA_NOMAPPING;
int error;
char *path;
int km_flags;
const char *f = "amd_iommu_bindhdl";
return (dma_error);
if (path) {
} else {
goto unbind;
}
if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
f, path,
(void *)cookiep->dmac_cookie_addr,
*ccountp);
}
cookie_array = NULL;
ccount = 0;
goto unbind;
}
if (error != DDI_SUCCESS) {
goto unbind;
}
goto unbind;
}
*cookiep = cookie_array[0];
if (amd_iommu_debug & AMD_IOMMU_DEBUG_BIND) {
f, path,
*ccountp);
}
return (dma_error);
return (dma_error);
}
/*ARGSUSED*/
static int
{
int error = DDI_FAILURE;
const char *f = "amd_iommu_unbindhdl";
cookie_array = NULL;
ccount = 0;
&ccount) != DDI_SUCCESS) {
error = DDI_FAILURE;
goto out;
}
error = DDI_FAILURE;
goto out;
}
!= DDI_SUCCESS) {
error = DDI_FAILURE;
goto out;
}
!= DDI_SUCCESS) {
error = DDI_FAILURE;
} else {
error = DDI_SUCCESS;
}
out:
if (cookie_array)
return (error);
}
/*
 * NOTE(review): sync and window fragments. sync fetches the cookie
 * array, delegates the sync, and restores the cookies; win unmaps the
 * current window's cookies, delegates the window switch, remaps the new
 * window's cookies and returns its first cookie. Delegation calls are
 * missing from this extract.
 */
/*ARGSUSED*/
static int
{
int error;
const char *f = "amd_iommu_sync";
cookie_array = NULL;
ccount = 0;
&ccount) != DDI_SUCCESS) {
"for device %p", f, (void *)rdip);
error = DDI_FAILURE;
goto out;
}
"for device %p", f, (void *)rdip);
error = DDI_FAILURE;
goto out;
}
len, cache_flags);
ccount) != DDI_SUCCESS) {
"for device %p", f, (void *)rdip);
error = DDI_FAILURE;
} else {
cookie_array = NULL;
ccount = 0;
}
out:
if (cookie_array)
return (error);
}
/*ARGSUSED*/
static int
{
int error = DDI_FAILURE;
int km_flags;
struct ddi_dma_req sdmareq = {0};
const char *f = "amd_iommu_win";
cookie_array = NULL;
ccount = 0;
&ccount) != DDI_SUCCESS) {
error = DDI_FAILURE;
goto out;
}
error = DDI_FAILURE;
goto out;
}
error = DDI_FAILURE;
goto out;
}
if (cookie_array) {
cookie_array = NULL;
ccount = 0;
}
/* fetch cookies for the newly selected window */
cookie_array = NULL;
ccount = 0;
&ccount) != DDI_SUCCESS) {
error = DDI_FAILURE;
goto out;
}
ccount) != DDI_SUCCESS) {
error = DDI_FAILURE;
goto out;
}
*cookiep = cookie_array[0];
out:
if (cookie_array)
return (error);
}
/* Obsoleted DMA routines */
/*
 * NOTE(review): the two ASSERT(0) stubs are obsolete DMA entry points
 * that must never be called. The two split_t routines below them look
 * like 64-bit register accessors assembled from 32-bit halves; their
 * signatures and the half-word reads are missing from this extract.
 */
/*ARGSUSED*/
static int
{
ASSERT(0);
}
/*ARGSUSED*/
static int
{
ASSERT(0);
}
{
split_t s;
}
{
split_t s;
return (s.u64);
}
/*
 * NOTE(review): boot-property and .conf-property readers. Both set
 * amd_iommu_disable when the user asked to disable the IOMMU, and copy
 * a per-driver disable list into allocated memory (KM_SLEEP). The
 * property lookups themselves are missing from this extract.
 */
void
{
char *propval;
/*
 * ignore AMD iommu
 */
amd_iommu_disable = 1;
}
}
/*
 * Copy the list of drivers for which IOMMU is disabled by user.
 */
== DDI_SUCCESS) {
KM_SLEEP);
}
}
void
{
char *disable;
== DDI_PROP_SUCCESS) {
amd_iommu_disable = 1;
}
}
&disable) == DDI_PROP_SUCCESS) {
KM_SLEEP);
}
}