/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/amd_iommu.h>
#include <sys/bootconf.h>
#include <sys/sysmacros.h>
#include <sys/ddidmareq.h>
#include "amd_iommu_impl.h"
#include "amd_iommu_acpi.h"
#include "amd_iommu_page_tables.h"
ddi_dma_attr_t amd_iommu_pgtable_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	0U,				/* dma_attr_addr_lo */
	0xffffffffffffffffULL,		/* dma_attr_addr_hi */
	0xffffffffU,			/* dma_attr_count_max */
	(uint64_t)4096,			/* dma_attr_align */
	1,				/* dma_attr_burstsizes */
	64,				/* dma_attr_minxfer */
	0xffffffffU,			/* dma_attr_maxxfer */
	0xffffffffU,			/* dma_attr_seg */
	1,				/* dma_attr_sgllen, variable */
	64,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};
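
/*
 * Note: with sgllen = 1 and 4K alignment, a page table allocated with
 * these attributes is expected to bind to a single 4K-aligned DMA
 * cookie; amd_iommu_alloc_pgtable() below depends on that invariant.
 */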
static amd_iommu_domain_t **amd_iommu_domain_table;
static amd_iommu_page_table_t **amd_iommu_page_table_hash;

static struct {
	int f_count;
	amd_iommu_page_table_t *f_list;
} amd_iommu_pgtable_freelist;

/* cap on the number of cached (recycled) page tables */
#define	AMD_IOMMU_PGTABLE_FREELIST_MAX	256

/* driver tunables */
int amd_iommu_no_pgtable_freelist;
int amd_iommu_no_RW_perms;
int amd_iommu_no_unmap;
int amd_iommu_pageva_inval_all;
/*ARGSUSED*/
static int
amd_iommu_get_src_bdf(amd_iommu_t *iommu, int32_t bdf, int32_t *src_bdfp)
{
	amd_iommu_acpi_ivhd_t *hinfop;

	hinfop = amd_iommu_lookup_ivhd(bdf);
	if (hinfop == NULL) {
		if (bdf == -1) {
			*src_bdfp = bdf;
		} else {
			cmn_err(CE_WARN, "No IVHD entry for 0x%x", bdf);
			return (DDI_FAILURE);
		}
	} else {
		*src_bdfp = hinfop->ach_src_deviceid;
	}

	return (DDI_SUCCESS);
}
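
/*
 * Note on aliases: the ACPI IVHD table may report that a device's DMA
 * requests appear upstream with a different source BDF (e.g. behind a
 * PCIe-to-PCI bridge). Translations must then be keyed on that source
 * deviceid rather than on the device's own BDF.
 */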
/*ARGSUSED*/
static int
amd_iommu_get_domain(amd_iommu_t *iommu, dev_info_t *rdip, int alias,
    uint16_t deviceid, domain_id_t *domainid, const char *path)
{
	const char *f = "amd_iommu_get_domain";

	switch (deviceid) {
	case AMD_IOMMU_INVALID_DOMAIN:
	case AMD_IOMMU_SYS_DOMAIN:
		*domainid = AMD_IOMMU_SYS_DOMAIN;
		break;
	default:
		*domainid = deviceid;
		break;
	}

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
		cmn_err(CE_NOTE, "%s: domainid for %s = %d",
		    f, path, *domainid);
	}

	return (DDI_SUCCESS);
}
static uint16_t
amd_iommu_domain_hash_fn(domain_id_t domainid)
{
return (domainid % AMD_IOMMU_DOMAIN_HASH_SZ);
}
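
/*
 * Domains are tracked in a simple hash table keyed by domain ID; a
 * modulo hash suffices since domain IDs are derived directly from
 * device IDs (see amd_iommu_get_domain() above).
 */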
/*ARGSUSED*/
void
amd_iommu_init_domain_tables(amd_iommu_t *iommu)
{
	amd_iommu_domain_table = kmem_zalloc(
	    sizeof (amd_iommu_domain_t *) * AMD_IOMMU_DOMAIN_HASH_SZ,
	    KM_SLEEP);
}
/*ARGSUSED*/
void
amd_iommu_fini_domain_tables(amd_iommu_t *iommu)
{
	if (amd_iommu_domain_table) {
		kmem_free(amd_iommu_domain_table,
		    sizeof (amd_iommu_domain_t *) * AMD_IOMMU_DOMAIN_HASH_SZ);
		amd_iommu_domain_table = NULL;
	}
}
static amd_iommu_domain_t *
amd_iommu_lookup_domain(amd_iommu_t *iommu, domain_id_t domainid,
    map_type_t type, int km_flags)
{
	uint16_t idx;
	uint64_t base;
	amd_iommu_domain_t *dp;
	char name[64];

	ASSERT(amd_iommu_domain_table);

	idx = amd_iommu_domain_hash_fn(domainid);
	for (dp = amd_iommu_domain_table[idx]; dp; dp = dp->d_next) {
		if (dp->d_domainid == domainid)
			return (dp);
	}

	/* lookup-only callers must not trigger domain creation */
	if (type == AMD_IOMMU_INVALID_MAP)
		return (NULL);

	dp = kmem_zalloc(sizeof (*dp), km_flags);
	if (dp == NULL)
		return (NULL);

	dp->d_domainid = domainid;
	dp->d_pgtable_root_4K = 0;
	dp->d_ref = 0;

	if (type == AMD_IOMMU_VMEM_MAP) {
		/* skip page 0: a DVA of 0 is indistinguishable from failure */
		base = MMU_PAGESIZE;
		(void) snprintf(name, sizeof (name), "amd_iommu_%d_domain_%d",
		    iommu->aiomt_idx, domainid);
		dp->d_vmem = vmem_create(name, (void *)(uintptr_t)base,
		    AMD_IOMMU_SIZE_4G - base, MMU_PAGESIZE,
		    NULL, NULL, NULL, 0,
		    km_flags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
		if (dp->d_vmem == NULL) {
			kmem_free(dp, sizeof (*dp));
			return (NULL);
		}
	} else {
		dp->d_vmem = NULL;
	}

	dp->d_prev = NULL;
	dp->d_next = amd_iommu_domain_table[idx];
	if (dp->d_next)
		dp->d_next->d_prev = dp;
	amd_iommu_domain_table[idx] = dp;

	return (dp);
}
static void
amd_iommu_teardown_domain(amd_iommu_t *iommu, amd_iommu_domain_t *dp)
{
	uint16_t idx;
	int flags;
	amd_iommu_cmdargs_t cmdargs = {0};
	const char *f = "amd_iommu_teardown_domain";

	ASSERT(dp->d_ref == 0);

	idx = amd_iommu_domain_hash_fn(dp->d_domainid);

	if (dp->d_prev == NULL)
		amd_iommu_domain_table[idx] = dp->d_next;
	else
		dp->d_prev->d_next = dp->d_next;

	if (dp->d_next)
		dp->d_next->d_prev = dp->d_prev;

	/* flush any cached translations for this domain */
	cmdargs.ca_domainid = (uint16_t)dp->d_domainid;
	cmdargs.ca_addr = (uintptr_t)0x7FFFFFFFFFFFF000;
	flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
	    AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;

	if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_IOMMU_PAGES,
	    &cmdargs, flags, 0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: idx=%d: domainid=%d. "
		    "Failed to invalidate domain in IOMMU HW cache",
		    f, iommu->aiomt_idx, cmdargs.ca_domainid);
	}

	if (dp->d_vmem != NULL) {
		vmem_destroy(dp->d_vmem);
		dp->d_vmem = NULL;
	}

	kmem_free(dp, sizeof (*dp));
}
static int
amd_iommu_get_deviceid(amd_iommu_t *iommu, dev_info_t *rdip,
    int32_t *deviceid, int *aliasp, const char *path)
{
	int bus = -1;
	int device = -1;
	int func = -1;
	uint16_t bdf;
	int32_t src_bdf;
	dev_info_t *pci_dip;
	const char *driver = rdip ? ddi_driver_name(rdip) : "special-device";
	int instance = rdip ? ddi_get_instance(rdip) : 0;
	const char *f = "amd_iommu_get_deviceid";

	/* be conservative. Always assume an alias */
	*aliasp = 1;
	*deviceid = 0;

	/* Check for special devices (rdip == NULL) */
	if (rdip == NULL) {
		if (amd_iommu_get_src_bdf(iommu, -1, &src_bdf)
		    != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "%s: %s%d: idx=%d, failed to get SRC BDF "
			    "for special-device",
			    f, driver, instance, iommu->aiomt_idx);
			return (DDI_DMA_NOMAPPING);
		}
		*deviceid = src_bdf;
		*aliasp = 1;
		return (DDI_SUCCESS);
	}

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
		cmn_err(CE_NOTE, "%s: attempting to get deviceid for %s",
		    f, path);
	}

	pci_dip = amd_iommu_pci_dip(rdip, path);
	if (pci_dip == NULL) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d, failed to get PCI dip "
		    "for rdip=%p, path = %s",
		    f, driver, instance, iommu->aiomt_idx, (void *)rdip,
		    path);
		return (DDI_DMA_NOMAPPING);
	}

	if (acpica_get_bdf(pci_dip, &bus, &device, &func) != DDI_SUCCESS) {
		ndi_rele_devi(pci_dip);
		cmn_err(CE_WARN, "%s: %s%d: idx=%d, failed to get BDF for "
		    "PCI dip (%p). rdip path = %s",
		    f, driver, instance, iommu->aiomt_idx,
		    (void *)pci_dip, path);
		return (DDI_DMA_NOMAPPING);
	}

	ndi_rele_devi(pci_dip);

	if (bus < 0 || bus > 0xff || device < 0 || device > 0x1f ||
	    func < 0 || func > 0x7) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d, invalid BDF (%d,%d,%d)",
		    f, driver, instance, iommu->aiomt_idx, bus, device, func);
		return (DDI_DMA_NOMAPPING);
	}

	bdf = ((uint16_t)bus << 8) | ((uint16_t)device << 3) | (uint16_t)func;

	if (amd_iommu_get_src_bdf(iommu, bdf, &src_bdf) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d, failed to get SRC BDF "
		    "for PCI dip (%p) rdip path = %s.",
		    f, driver, instance, iommu->aiomt_idx, (void *)pci_dip,
		    path);
		return (DDI_DMA_NOMAPPING);
	}

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
		cmn_err(CE_NOTE, "%s: deviceid = %u for path = %s",
		    f, src_bdf, path);
	}

	*deviceid = src_bdf;
	*aliasp = (src_bdf != bdf);

	return (DDI_SUCCESS);
}
/*ARGSUSED*/
static int
init_devtbl(amd_iommu_t *iommu, uint64_t *devtbl_entry, domain_id_t domainid,
    amd_iommu_domain_t *dp)
{
	uint64_t entry[4] = {0};
	int i;

	/* If already passthru, don't touch */
	if (AMD_IOMMU_REG_GET64(&(devtbl_entry[0]),
	    AMD_IOMMU_DEVTBL_V) == 0 &&
	    AMD_IOMMU_REG_GET64(&(devtbl_entry[0]),
	    AMD_IOMMU_DEVTBL_TV) == 0) {
		return (0);
	}

	/* Already valid and translating: must be the same domain */
	if (AMD_IOMMU_REG_GET64(&(devtbl_entry[0]),
	    AMD_IOMMU_DEVTBL_V) == 1 &&
	    AMD_IOMMU_REG_GET64(&(devtbl_entry[0]),
	    AMD_IOMMU_DEVTBL_TV) == 1) {
		ASSERT(AMD_IOMMU_REG_GET64(&(devtbl_entry[1]),
		    AMD_IOMMU_DEVTBL_DOMAINID) == domainid);
		return (0);
	}

	/* New devtbl entry for this domain. Bump up the domain ref-count */
	dp->d_ref++;

	entry[3] = 0;
	entry[2] = 0;
	entry[1] = 0;
	AMD_IOMMU_REG_SET64(&(entry[1]), AMD_IOMMU_DEVTBL_DOMAINID,
	    domainid);
	entry[0] = 0;
	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_ROOT_PGTBL,
	    dp->d_pgtable_root_4K);
	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_PG_MODE,
	    AMD_IOMMU_PGTABLE_MAXLEVEL);
	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_IR, 1);
	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_IW, 1);
	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_TV, 1);
	AMD_IOMMU_REG_SET64(&(entry[0]), AMD_IOMMU_DEVTBL_V, 1);

	/*
	 * Write the upper three quadwords first; entry[0] carries the
	 * V (valid) bit, so store it last to ensure the IOMMU never
	 * observes a partially initialized device table entry.
	 */
	for (i = 1; i < 4; i++) {
		devtbl_entry[i] = entry[i];
	}
	devtbl_entry[0] = entry[0];

	/* we did an actual init */
	return (1);
}
void
amd_iommu_set_passthru(amd_iommu_t *iommu, dev_info_t *rdip)
{
	int32_t deviceid;
	int alias;
	uint64_t *devtbl_entry;
	amd_iommu_cmdargs_t cmdargs = {0};
	char *path;
	int pathfree;
	int V;
	int TV;
	int instance;
	const char *driver;
	const char *f = "amd_iommu_set_passthru";

	if (rdip) {
		driver = ddi_driver_name(rdip);
		instance = ddi_get_instance(rdip);
	} else {
		driver = "special-device";
		instance = 0;
	}

	path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
	if (path) {
		if (rdip)
			(void) ddi_pathname(rdip, path);
		else
			(void) strcpy(path, "special-device");
		pathfree = 1;
	} else {
		pathfree = 0;
		path = "<path-mem-alloc-failed>";
	}

	if (amd_iommu_get_deviceid(iommu, rdip, &deviceid, &alias, path)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p. "
		    "Failed to get device ID for device %s.", f, driver,
		    instance, iommu->aiomt_idx, (void *)rdip, path);
		goto out;
	}

	/* No deviceid */
	if (deviceid == -1) {
		goto out;
	}

	if ((deviceid + 1) * AMD_IOMMU_DEVTBL_ENTRY_SZ >
	    iommu->aiomt_devtbl_sz) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d: deviceid (%u) "
		    "for rdip (%p) exceeds device table size (%u), path=%s",
		    f, driver, instance,
		    iommu->aiomt_idx, deviceid, (void *)rdip,
		    iommu->aiomt_devtbl_sz, path);
		goto out;
	}

	/*LINTED*/
	devtbl_entry = (uint64_t *)&iommu->aiomt_devtbl
	    [deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];

	V = AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V);
	TV = AMD_IOMMU_REG_GET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV);

	/* Already passthru */
	if (V == 0 && TV == 0) {
		goto out;
	}

	/* Existing translations */
	if (V == 1 && TV == 1) {
		goto out;
	}

	/* Invalid setting */
	if (V == 0 && TV == 1) {
		goto out;
	}

	AMD_IOMMU_REG_SET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_V, 0);

	cmdargs.ca_deviceid = (uint16_t)deviceid;
	(void) amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
	    &cmdargs, 0, 0);

out:
	if (pathfree)
		kmem_free(path, MAXPATHLEN);
}
static int
amd_iommu_set_devtbl_entry(amd_iommu_t *iommu, dev_info_t *rdip,
    domain_id_t domainid, uint16_t deviceid, amd_iommu_domain_t *dp,
    const char *path)
{
	uint64_t *devtbl_entry;
	amd_iommu_cmdargs_t cmdargs = {0};
	int error;
	const char *driver = ddi_driver_name(rdip);
	int instance = ddi_get_instance(rdip);
	const char *f = "amd_iommu_set_devtbl_entry";

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
		cmn_err(CE_NOTE, "%s: attempting to set devtbl entry for %s",
		    f, path);
	}

	if ((deviceid + 1) * AMD_IOMMU_DEVTBL_ENTRY_SZ >
	    iommu->aiomt_devtbl_sz) {
		cmn_err(CE_WARN, "%s: %s%d: deviceid (%u) "
		    "for rdip (%p) exceeds device table size (%u), path=%s",
		    f, driver, instance, deviceid, (void *)rdip,
		    iommu->aiomt_devtbl_sz, path);
		return (DDI_DMA_NOMAPPING);
	}

	/*LINTED*/
	devtbl_entry = (uint64_t *)&iommu->aiomt_devtbl
	    [deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
		cmn_err(CE_NOTE, "%s: deviceid=%u devtbl entry (%p) = %p",
		    f, deviceid, (void *)devtbl_entry,
		    (void *)(uintptr_t)*devtbl_entry);
	}

	/*
	 * Flush internal caches. This is needed if we came up via fast
	 * reboot, which can leave stale entries for this device in the
	 * IOMMU's device table and translation caches.
	 */
	cmdargs.ca_deviceid = deviceid;
	error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
	    &cmdargs, 0, 0);
	if (error != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: idx=%d: deviceid=%d. "
		    "Failed to invalidate domain in IOMMU HW cache",
		    f, iommu->aiomt_idx, deviceid);
		return (error);
	}

	cmdargs.ca_domainid = (uint16_t)domainid;
	cmdargs.ca_addr = (uintptr_t)0x7FFFFFFFFFFFF000;
	error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_IOMMU_PAGES,
	    &cmdargs,
	    AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
	    AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S, 0);
	if (error != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: idx=%d: domainid=%d. "
		    "Failed to invalidate translations in IOMMU HW cache",
		    f, iommu->aiomt_idx, domainid);
		return (error);
	}

	/* Initialize device table entry */
	if (init_devtbl(iommu, devtbl_entry, domainid, dp)) {
		cmdargs.ca_deviceid = deviceid;
		error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
		    &cmdargs, 0, 0);
	}

	return (error);
}
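
/*
 * Clear a device table entry: turn off translation (TV) for the device,
 * flush the entry from the IOMMU hardware cache, and drop the device's
 * reference on its domain, flagging via domain_freed when the last
 * reference goes away.
 */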
int
amd_iommu_clear_devtbl_entry(amd_iommu_t *iommu, dev_info_t *rdip,
    domain_id_t domainid, uint16_t deviceid, amd_iommu_domain_t *dp,
    int *domain_freed, char *path)
{
	uint64_t *devtbl_entry;
	amd_iommu_cmdargs_t cmdargs = {0};
	int error;
	const char *driver = ddi_driver_name(rdip);
	int instance = ddi_get_instance(rdip);
	const char *f = "amd_iommu_clear_devtbl_entry";

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
		cmn_err(CE_NOTE, "%s: attempting to clear devtbl entry for "
		    "domainid = %d, deviceid = %u, path = %s",
		    f, domainid, deviceid, path);
	}

	if ((deviceid + 1) * AMD_IOMMU_DEVTBL_ENTRY_SZ >
	    iommu->aiomt_devtbl_sz) {
		cmn_err(CE_WARN, "%s: %s%d: deviceid (%u) "
		    "for rdip (%p) exceeds device table size (%u), path = %s",
		    f, driver, instance, deviceid, (void *)rdip,
		    iommu->aiomt_devtbl_sz, path);
		return (DDI_FAILURE);
	}

	/*LINTED*/
	devtbl_entry = (uint64_t *)&iommu->aiomt_devtbl
	    [deviceid * AMD_IOMMU_DEVTBL_ENTRY_SZ];

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_DEVTBL) {
		cmn_err(CE_NOTE, "%s: deviceid=%u devtbl entry (%p) = %p",
		    f, deviceid, (void *)devtbl_entry,
		    (void *)(uintptr_t)*devtbl_entry);
	}

	if (AMD_IOMMU_REG_GET64(&(devtbl_entry[0]),
	    AMD_IOMMU_DEVTBL_TV) == 0) {
		/* Nothing to do */
		return (DDI_SUCCESS);
	}

	ASSERT(dp->d_ref > 0);
	dp->d_ref--;

	if (dp->d_ref == 0) {
		*domain_freed = 1;
	}

	AMD_IOMMU_REG_SET64(&(devtbl_entry[0]), AMD_IOMMU_DEVTBL_TV, 0);

	cmdargs.ca_deviceid = deviceid;
	error = amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_DEVTAB_ENTRY,
	    &cmdargs, 0, 0);
	if (error != DDI_SUCCESS)
		error = DDI_FAILURE;

	return (error);
}
/*ARGSUSED*/
int
amd_iommu_init_page_tables(amd_iommu_t *iommu)
{
	amd_iommu_page_table_hash = kmem_zalloc(
	    sizeof (amd_iommu_page_table_t *) * AMD_IOMMU_PGTABLE_HASH_SZ,
	    KM_SLEEP);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
void
amd_iommu_fini_page_tables(amd_iommu_t *iommu)
{
	if (amd_iommu_page_table_hash) {
		kmem_free(amd_iommu_page_table_hash,
		    sizeof (amd_iommu_page_table_t *) *
		    AMD_IOMMU_PGTABLE_HASH_SZ);
		amd_iommu_page_table_hash = NULL;
	}
}
static uint32_t
pt_hashfn(uint64_t pa_4K)
{
return (pa_4K % AMD_IOMMU_PGTABLE_HASH_SZ);
}
static void
amd_iommu_insert_pgtable_hash(amd_iommu_page_table_t *pt)
{
	uint64_t pa_4K = pt->pt_cookie.dmac_laddress >> MMU_PAGESHIFT;
	uint32_t idx = pt_hashfn(pa_4K);

	ASSERT((pt->pt_cookie.dmac_laddress & AMD_IOMMU_PGTABLE_ALIGN) == 0);

	pt->pt_next = amd_iommu_page_table_hash[idx];
	pt->pt_prev = NULL;
	if (pt->pt_next)
		pt->pt_next->pt_prev = pt;
	amd_iommu_page_table_hash[idx] = pt;
}
static void
amd_iommu_remove_pgtable_hash(amd_iommu_page_table_t *pt)
{
	uint64_t pa_4K = pt->pt_cookie.dmac_laddress >> MMU_PAGESHIFT;
	uint32_t idx = pt_hashfn(pa_4K);

	if (pt->pt_prev)
		pt->pt_prev->pt_next = pt->pt_next;
	else
		amd_iommu_page_table_hash[idx] = pt->pt_next;

	if (pt->pt_next)
		pt->pt_next->pt_prev = pt->pt_prev;

	pt->pt_next = NULL;
	pt->pt_prev = NULL;
}
static amd_iommu_page_table_t *
amd_iommu_lookup_pgtable_hash(domain_id_t domainid, uint64_t pgtable_pa_4K)
{
	amd_iommu_page_table_t *pt;
	uint32_t idx = pt_hashfn(pgtable_pa_4K);

	for (pt = amd_iommu_page_table_hash[idx]; pt; pt = pt->pt_next) {
		if (pt->pt_domainid != domainid)
			continue;
		if ((pt->pt_cookie.dmac_laddress >> MMU_PAGESHIFT) !=
		    pgtable_pa_4K)
			continue;
		ASSERT((pt->pt_cookie.dmac_laddress &
		    AMD_IOMMU_PGTABLE_ALIGN) == 0);
		break;
	}

	return (pt);
}
/*ARGSUSED*/
static amd_iommu_page_table_t *
amd_iommu_lookup_pgtable(amd_iommu_t *iommu, amd_iommu_page_table_t *ppt,
    amd_iommu_domain_t *dp, int level, uint16_t index)
{
	uint64_t *pdtep;
	uint64_t pgtable_pa_4K;

	ASSERT(level > 0 && level <= AMD_IOMMU_PGTABLE_MAXLEVEL);

	if (level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
		ASSERT(ppt == NULL);
		ASSERT(index == 0);
		pgtable_pa_4K = dp->d_pgtable_root_4K;
	} else {
		ASSERT(ppt);
		pdtep = &(ppt->pt_pgtblva[index]);
		if (AMD_IOMMU_REG_GET64(pdtep, AMD_IOMMU_PTDE_PR) == 0) {
			return (NULL);
		}
		pgtable_pa_4K = AMD_IOMMU_REG_GET64(pdtep,
		    AMD_IOMMU_PTDE_ADDR);
	}

	return (amd_iommu_lookup_pgtable_hash(dp->d_domainid, pgtable_pa_4K));
}
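
/*
 * Freelist of 4K page tables: fully emptied tables are cached here
 * (unless amd_iommu_no_pgtable_freelist is set) so they can be reused
 * without another DMA allocate/bind cycle.
 */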
static amd_iommu_page_table_t *
amd_iommu_alloc_from_freelist(void)
{
	int i;
	uint64_t *pte_array;
	amd_iommu_page_table_t *pt;

	if (amd_iommu_no_pgtable_freelist == 1)
		return (NULL);

	if (amd_iommu_pgtable_freelist.f_count == 0)
		return (NULL);

	pt = amd_iommu_pgtable_freelist.f_list;
	amd_iommu_pgtable_freelist.f_list = pt->pt_next;
	amd_iommu_pgtable_freelist.f_count--;

	pte_array = pt->pt_pgtblva;
	for (i = 0; i < AMD_IOMMU_PGTABLE_SZ / (sizeof (*pte_array)); i++) {
		ASSERT(pt->pt_pte_ref[i] == 0);
		ASSERT(AMD_IOMMU_REG_GET64(&(pte_array[i]),
		    AMD_IOMMU_PTDE_PR) == 0);
	}

	return (pt);
}
static int
amd_iommu_alloc_pgtable(amd_iommu_t *iommu, domain_id_t domainid,
    const char *path, amd_iommu_page_table_t **ptp, int km_flags)
{
	int err;
	uint_t ncookies;
	amd_iommu_page_table_t *pt;
	static ddi_device_acc_attr_t mem_acc_attr = {
		DDI_DEVICE_ATTR_V0,
		DDI_NEVERSWAP_ACC,
		DDI_STRICTORDER_ACC
	};
	dev_info_t *idip = iommu->aiomt_dip;
	const char *driver = ddi_driver_name(idip);
	int instance = ddi_get_instance(idip);
	const char *f = "amd_iommu_alloc_pgtable";

	*ptp = NULL;

	pt = amd_iommu_alloc_from_freelist();
	if (pt)
		goto init_pgtable;

	pt = kmem_zalloc(sizeof (amd_iommu_page_table_t), km_flags);
	if (pt == NULL)
		return (DDI_DMA_NORESOURCES);

	/*
	 * Each page table is 4K in size
	 */
	pt->pt_mem_reqsz = AMD_IOMMU_PGTABLE_SZ;

	/*
	 * Alloc a DMA handle. Use the IOMMU dip as we want this DMA
	 * to *not* enter the IOMMU - no recursive entrance.
	 */
	err = ddi_dma_alloc_handle(idip, &amd_iommu_pgtable_dma_attr,
	    km_flags == KM_SLEEP ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
	    NULL, &pt->pt_dma_hdl);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: domainid = %d, path = %s. "
		    "Cannot alloc DMA handle for IO Page Table",
		    f, driver, instance, domainid, path);
		kmem_free(pt, sizeof (amd_iommu_page_table_t));
		return (err == DDI_DMA_NORESOURCES ? err : DDI_DMA_NOMAPPING);
	}

	/*
	 * Alloc memory for IO Page Table.
	 * XXX remove size_t cast kludge
	 */
	err = ddi_dma_mem_alloc(pt->pt_dma_hdl, AMD_IOMMU_PGTABLE_SZ,
	    &mem_acc_attr, DDI_DMA_CONSISTENT,
	    km_flags == KM_SLEEP ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
	    NULL, (caddr_t *)&pt->pt_pgtblva,
	    (size_t *)&pt->pt_mem_realsz, &pt->pt_mem_hdl);
	if (err != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: domainid = %d, path = %s. "
		    "Cannot allocate DMA memory for IO Page table",
		    f, driver, instance, domainid, path);
		ddi_dma_free_handle(&pt->pt_dma_hdl);
		kmem_free(pt, sizeof (amd_iommu_page_table_t));
		return (DDI_DMA_NORESOURCES);
	}

	/*
	 * The Page table DMA VA must be 4K aligned and
	 * the size must be >= the requested memory.
	 */
	ASSERT(((uintptr_t)pt->pt_pgtblva & AMD_IOMMU_PGTABLE_ALIGN)
	    == 0);
	ASSERT(pt->pt_mem_realsz >= pt->pt_mem_reqsz);

	/*
	 * Now bind the handle
	 */
	err = ddi_dma_addr_bind_handle(pt->pt_dma_hdl, NULL,
	    (caddr_t)pt->pt_pgtblva, pt->pt_mem_realsz,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    km_flags == KM_SLEEP ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
	    NULL, &pt->pt_cookie, &ncookies);
	if (err != DDI_DMA_MAPPED) {
		cmn_err(CE_WARN, "%s: %s%d: domainid = %d, path = %s. "
		    "Cannot bind memory for DMA to IO Page Tables. "
		    "bufrealsz=%p",
		    f, driver, instance, domainid, path,
		    (void *)(uintptr_t)pt->pt_mem_realsz);
		ddi_dma_mem_free(&pt->pt_mem_hdl);
		ddi_dma_free_handle(&pt->pt_dma_hdl);
		kmem_free(pt, sizeof (amd_iommu_page_table_t));
		return (err == DDI_DMA_NORESOURCES ? err : DDI_DMA_NOMAPPING);
	}

	/*
	 * We assume the DMA engine on the IOMMU is capable of handling the
	 * whole page table in a single cookie. If not and multiple cookies
	 * are needed we fail.
	 */
	if (ncookies != 1) {
		cmn_err(CE_WARN, "%s: %s%d: domainid = %d, path = %s. "
		    "Cannot handle multiple "
		    "cookies for DMA to IO page Table, #cookies=%u",
		    f, driver, instance, domainid, path, ncookies);
		(void) ddi_dma_unbind_handle(pt->pt_dma_hdl);
		ddi_dma_mem_free(&pt->pt_mem_hdl);
		ddi_dma_free_handle(&pt->pt_dma_hdl);
		kmem_free(pt, sizeof (amd_iommu_page_table_t));
		return (DDI_DMA_NOMAPPING);
	}

init_pgtable:
	/*
	 * The address in the cookie must be 4K aligned and >= table size
	 */
	ASSERT((pt->pt_cookie.dmac_laddress & AMD_IOMMU_PGTABLE_ALIGN) == 0);
	ASSERT(pt->pt_cookie.dmac_size >= pt->pt_mem_reqsz);

	pt->pt_domainid = domainid;
	pt->pt_ref = 0;
	pt->pt_parent = NULL;

	bzero(pt->pt_pgtblva, pt->pt_mem_realsz);
	(void) ddi_dma_sync(pt->pt_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);

	amd_iommu_insert_pgtable_hash(pt);

	*ptp = pt;

	return (DDI_SUCCESS);
}
static int
amd_iommu_free_to_freelist(amd_iommu_page_table_t *pt)
{
	if (amd_iommu_no_pgtable_freelist == 1)
		return (DDI_FAILURE);

	if (amd_iommu_pgtable_freelist.f_count ==
	    AMD_IOMMU_PGTABLE_FREELIST_MAX)
		return (DDI_FAILURE);

	pt->pt_prev = NULL;
	pt->pt_parent = NULL;
	pt->pt_next = amd_iommu_pgtable_freelist.f_list;
	amd_iommu_pgtable_freelist.f_list = pt;
	amd_iommu_pgtable_freelist.f_count++;

	return (DDI_SUCCESS);
}
static void
amd_iommu_free_pgtable(amd_iommu_t *iommu, amd_iommu_page_table_t *pt)
{
	int i;
	uint64_t *pte_array = pt->pt_pgtblva;
	const char *driver = ddi_driver_name(iommu->aiomt_dip);
	int instance = ddi_get_instance(iommu->aiomt_dip);
	const char *f = "amd_iommu_free_pgtable";

	ASSERT(pt->pt_ref == 0);

	amd_iommu_remove_pgtable_hash(pt);

	for (i = 0; i < AMD_IOMMU_PGTABLE_SZ / (sizeof (*pte_array)); i++) {
		ASSERT(pt->pt_pte_ref[i] == 0);
		ASSERT(AMD_IOMMU_REG_GET64(&(pte_array[i]),
		    AMD_IOMMU_PTDE_PR) == 0);
	}

	if (amd_iommu_free_to_freelist(pt) == DDI_SUCCESS)
		return;

	/* Unbind the handle */
	if (ddi_dma_unbind_handle(pt->pt_dma_hdl) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d, domainid=%d. "
		    "Failed to unbind handle: %p for IOMMU Page Table",
		    f, driver, instance, iommu->aiomt_idx, pt->pt_domainid,
		    (void *)pt->pt_dma_hdl);
	}

	/* Free the table memory allocated for DMA */
	ddi_dma_mem_free(&pt->pt_mem_hdl);

	/* Free the DMA handle */
	ddi_dma_free_handle(&pt->pt_dma_hdl);

	kmem_free(pt, sizeof (amd_iommu_page_table_t));
}
static int
init_pde(amd_iommu_page_table_t *ppt, amd_iommu_page_table_t *pt)
{
	uint64_t *pdep = &(ppt->pt_pgtblva[pt->pt_index]);
	uint64_t next_pgtable_pa_4K = pt->pt_cookie.dmac_laddress >>
	    MMU_PAGESHIFT;

	/* nothing to set. PDE is already set */
	if (AMD_IOMMU_REG_GET64(pdep, AMD_IOMMU_PTDE_PR) == 1) {
		ASSERT(AMD_IOMMU_REG_GET64(pdep, AMD_IOMMU_PTDE_ADDR)
		    == next_pgtable_pa_4K);
		return (DDI_SUCCESS);
	}

	ppt->pt_ref++;

	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_ADDR, next_pgtable_pa_4K);
	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_NEXT_LEVEL, pt->pt_level);

	/* Page Directories are always RW */
	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_IR, 1);
	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_IW, 1);
	AMD_IOMMU_REG_SET64(pdep, AMD_IOMMU_PTDE_PR, 1);

	return (DDI_SUCCESS);
}
static int
init_pte(amd_iommu_page_table_t *pt, uint64_t pa, uint16_t index,
    struct ddi_dma_req *dmareq)
{
	uint64_t *ptep = &(pt->pt_pgtblva[index]);
	uint64_t pa_4K = pa >> MMU_PAGESHIFT;
	int R;
	int W;

	/* nothing to set if PTE is already set */
	if (AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_PR) == 1) {
		/*
		 * Adjust current permissions
		 * DDI_DMA_WRITE means direction of DMA is MEM -> I/O
		 * so that requires Memory READ permissions i.e. sense
		 * is inverted.
		 * Note: either or both of DDI_DMA_READ/WRITE may be set
		 */
		if (amd_iommu_no_RW_perms == 0) {
			R = AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_IR);
			W = AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_IW);
			if (R == 0 && (dmareq->dmar_flags & DDI_DMA_WRITE)) {
				AMD_IOMMU_REG_SET64(ptep,
				    AMD_IOMMU_PTDE_IR, 1);
			}
			if (W == 0 && (dmareq->dmar_flags & DDI_DMA_READ)) {
				AMD_IOMMU_REG_SET64(ptep,
				    AMD_IOMMU_PTDE_IW, 1);
			}
		}
		ASSERT(AMD_IOMMU_REG_GET64(ptep, AMD_IOMMU_PTDE_ADDR)
		    == pa_4K);
		pt->pt_pte_ref[index]++;
		return (DDI_SUCCESS);
	}

	pt->pt_ref++;
	ASSERT(pt->pt_pte_ref[index] == 0);
	pt->pt_pte_ref[index] = 1;

	if (amd_iommu_no_RW_perms == 0) {
		if (dmareq->dmar_flags & DDI_DMA_WRITE) {
			AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 1);
		}
		if (dmareq->dmar_flags & DDI_DMA_READ) {
			AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 1);
		}
	} else {
		AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IR, 1);
		AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_IW, 1);
	}

	/* TODO what is correct for FC and U */
	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTE_FC, 0);
	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTE_U, 0);
	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_ADDR, pa_4K);
	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_NEXT_LEVEL, 0);
	AMD_IOMMU_REG_SET64(ptep, AMD_IOMMU_PTDE_PR, 1);

	return (DDI_SUCCESS);
}
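
/*
 * For example (illustrative values only): a ddi_dma_req with
 * dmar_flags = DDI_DMA_WRITE describes a MEM -> I/O transfer, so the
 * PTE needs the IOMMU read (IR) permission; DDI_DMA_READ (I/O -> MEM)
 * needs IOMMU write (IW); DDI_DMA_RDWR needs both.
 */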
/*
 * Link a page table into the tree: the top-level table becomes the
 * domain's root; lower-level tables hang off their parent directory.
 */
static void
link_pgtable(amd_iommu_domain_t *dp, amd_iommu_page_table_t *ppt,
    amd_iommu_page_table_t *pt, int level)
{
	if (level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
		dp->d_pgtable_root_4K =
		    pt->pt_cookie.dmac_laddress >> MMU_PAGESHIFT;
	} else {
		pt->pt_parent = ppt;
	}
}
static int
amd_iommu_setup_1_pgtable(amd_iommu_t *iommu, dev_info_t *rdip,
    struct ddi_dma_req *dmareq,
    domain_id_t domainid, amd_iommu_domain_t *dp,
    amd_iommu_page_table_t *ppt, uint16_t index, int level,
    uint64_t va, uint64_t pa,
    amd_iommu_page_table_t **ptp, uint16_t *next_idxp, const char *path,
    int km_flags)
{
	int error;
	amd_iommu_page_table_t *pt;
	const char *driver = ddi_driver_name(rdip);
	int instance = ddi_get_instance(rdip);
	const char *f = "amd_iommu_setup_1_pgtable";

	*ptp = NULL;
	*next_idxp = 0;
	error = DDI_SUCCESS;

	ASSERT(level > 0 && level <= AMD_IOMMU_PGTABLE_MAXLEVEL);

	if (level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
		ASSERT(ppt == NULL);
		ASSERT(index == 0);
	} else {
		ASSERT(ppt);
	}

	/* Check if page table is already allocated */
	if ((pt = amd_iommu_lookup_pgtable(iommu, ppt, dp, level,
	    index)) != NULL) {
		ASSERT(pt->pt_domainid == domainid);
		ASSERT(pt->pt_level == level);
		ASSERT(pt->pt_index == index);
		goto out;
	}

	if ((error = amd_iommu_alloc_pgtable(iommu, domainid, path, &pt,
	    km_flags)) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: idx = %u, domainid = %d, "
		    "va = %p, path = %s", f, driver, instance,
		    iommu->aiomt_idx, domainid, (void *)(uintptr_t)va, path);
		return (error);
	}

	pt->pt_level = level;
	pt->pt_index = index;

	link_pgtable(dp, ppt, pt, level);

out:
	if (level != AMD_IOMMU_PGTABLE_MAXLEVEL) {
		error = init_pde(ppt, pt);
	}

	if (level == 1) {
		/* 512-entry tables: 9 VA bits select an entry per level */
		error = init_pte(pt, pa,
		    (uint16_t)((va >> MMU_PAGESHIFT) & 0x1FF), dmareq);
	} else {
		*next_idxp = (uint16_t)((va >>
		    (MMU_PAGESHIFT + (level - 1) * 9)) & 0x1FF);
		*ptp = pt;
	}

	return (error);
}
typedef enum {
	PDTE_NOT_TORN,
	PDTE_TORN_DOWN,
	PGTABLE_TORN_DOWN
} pdte_tear_t;
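
/*
 * Teardown of a single PDE/PTE has three possible outcomes: the entry
 * was still referenced and left in place (PDTE_NOT_TORN), the entry was
 * cleared but its table still holds other live entries (PDTE_TORN_DOWN),
 * or the entry was cleared and the whole table became empty and was
 * freed (PGTABLE_TORN_DOWN).
 */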
static pdte_tear_t
amd_iommu_teardown_pdte(amd_iommu_t *iommu, amd_iommu_page_table_t *pt,
    int index)
{
	uint8_t next_level;
	pdte_tear_t retval = PDTE_NOT_TORN;
	uint64_t *ptdep = &(pt->pt_pgtblva[index]);

	next_level = (uint8_t)AMD_IOMMU_REG_GET64(ptdep,
	    AMD_IOMMU_PTDE_NEXT_LEVEL);

	if (AMD_IOMMU_REG_GET64(ptdep, AMD_IOMMU_PTDE_PR) == 1) {
		if (pt->pt_level == 1) {
			ASSERT(next_level == 0);
			/* PTE */
			pt->pt_pte_ref[index]--;
			if (pt->pt_pte_ref[index] != 0) {
				return (PDTE_NOT_TORN);
			}
		} else {
			ASSERT(next_level != 0);
		}
		ASSERT(pt->pt_pte_ref[index] == 0);
		retval = PDTE_TORN_DOWN;
		*ptdep = 0;
		ASSERT(AMD_IOMMU_REG_GET64(ptdep,
		    AMD_IOMMU_PTDE_PR) == 0);
		ASSERT(pt->pt_ref > 0);
		pt->pt_ref--;
		if (pt->pt_ref == 0) {
			amd_iommu_free_pgtable(iommu, pt);
			return (PGTABLE_TORN_DOWN);
		}
	} else {
		ASSERT(0);
	}

	return (retval);
}
static int
amd_iommu_create_pgtables(amd_iommu_t *iommu, dev_info_t *rdip,
    struct ddi_dma_req *dmareq, uint64_t va, uint64_t pa,
    uint16_t deviceid, domain_id_t domainid, amd_iommu_domain_t *dp,
    const char *path, int km_flags)
{
	int level;
	uint16_t index;
	uint16_t next_idx;
	amd_iommu_page_table_t *pt;
	amd_iommu_page_table_t *ppt;
	int error;
	const char *driver = ddi_driver_name(rdip);
	int instance = ddi_get_instance(rdip);
	const char *f = "amd_iommu_create_pgtables";

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PA2VA) {
		cmn_err(CE_NOTE, "%s: %s%d: idx = %u, domainid = %d, "
		    "deviceid = %u, va = %p, pa = %p, path = %s",
		    f, driver, instance,
		    iommu->aiomt_idx, domainid, deviceid,
		    (void *)(uintptr_t)va, (void *)(uintptr_t)pa, path);
	}

	if (domainid == AMD_IOMMU_PASSTHRU_DOMAIN) {
		/* No need for pagetables. Just set up device table entry */
		goto passthru;
	}

	ppt = NULL;
	index = 0;
	for (level = AMD_IOMMU_PGTABLE_MAXLEVEL; level > 0; level--) {
		if ((error = amd_iommu_setup_1_pgtable(iommu, rdip, dmareq,
		    domainid, dp, ppt, index, level, va, pa, &pt,
		    &next_idx, path, km_flags)) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s: %s%d: idx=%d: domainid=%d, "
			    "deviceid=%u, va= %p, pa = %p, Failed to setup "
			    "page table(s) at level = %d, path = %s.",
			    f, driver, instance, iommu->aiomt_idx,
			    domainid, deviceid, (void *)(uintptr_t)va,
			    (void *)(uintptr_t)pa, level, path);
			return (error);
		}

		if (level > 1) {
			ASSERT(pt);
			ASSERT(pt->pt_domainid == domainid);
			ppt = pt;
			index = next_idx;
		} else {
			ppt = NULL;
			index = 0;
		}
	}

passthru:
	if ((error = amd_iommu_set_devtbl_entry(iommu, rdip, domainid,
	    deviceid, dp, path)) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s%d: idx=%d: rdip=%p, deviceid=%u, "
		    "domainid=%d."
		    "Failed to set device table entry for path %s.",
		    f, driver, instance,
		    iommu->aiomt_idx, (void *)rdip, deviceid, domainid, path);
		return (error);
	}

	return (DDI_SUCCESS);
}
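
/*
 * Walk the page table tree for a (domain, va) pair from the leaf level
 * back toward the root, tearing down the PTE and then any page
 * directories that become empty, and invalidating the IOMMU hardware
 * caches as needed.
 */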
static int
amd_iommu_destroy_pgtables(amd_iommu_t *iommu, dev_info_t *rdip,
    uint64_t pageva, uint16_t deviceid, domain_id_t domainid,
    amd_iommu_domain_t *dp, int *domain_freed, char *path)
{
	int level;
	int flags;
	amd_iommu_cmdargs_t cmdargs = {0};
	uint16_t index;
	uint16_t prev_index;
	amd_iommu_page_table_t *pt;
	amd_iommu_page_table_t *ppt;
	pdte_tear_t retval;
	int tear_level;
	int invalidate_pte;
	int invalidate_pde;
	int error = DDI_FAILURE;
	const char *driver = ddi_driver_name(rdip);
	int instance = ddi_get_instance(rdip);
	const char *f = "amd_iommu_destroy_pgtables";

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PA2VA) {
		cmn_err(CE_NOTE, "%s: %s%d: idx = %u, domainid = %d, "
		    "deviceid = %u, va = %p, path = %s",
		    f, driver, instance,
		    iommu->aiomt_idx, domainid, deviceid,
		    (void *)(uintptr_t)pageva, path);
	}

	tear_level = -1;
	invalidate_pde = 0;
	invalidate_pte = 0;

	if (domainid == AMD_IOMMU_PASSTHRU_DOMAIN) {
		/*
		 * there are no pagetables for the passthru domain.
		 * Just the device table entry
		 */
		error = DDI_SUCCESS;
		goto passthru;
	}

	/* Walk down from the root to the leaf (level 1) table */
	ppt = NULL;
	pt = NULL;
	index = 0;
	for (level = AMD_IOMMU_PGTABLE_MAXLEVEL; level > 0; level--) {
		pt = amd_iommu_lookup_pgtable(iommu, ppt, dp, level, index);
		if (pt) {
			ppt = pt;
			index = (uint16_t)((pageva >>
			    (MMU_PAGESHIFT + (level - 1) * 9)) & 0x1FF);
			continue;
		}
		break;
	}

	if (level != 0) {
		/* no translations present for this va */
		error = DDI_SUCCESS;
		goto out;
	}

	ASSERT(pt);
	ASSERT(pt->pt_domainid == dp->d_domainid);

	/* Walk back up, tearing down the PTE and any emptied PDEs */
	for (++level; level <= AMD_IOMMU_PGTABLE_MAXLEVEL; level++) {
		prev_index = pt->pt_index;
		ppt = pt->pt_parent;
		retval = amd_iommu_teardown_pdte(iommu, pt, index);
		switch (retval) {
		case PDTE_NOT_TORN:
			goto invalidate;
		case PDTE_TORN_DOWN:
			invalidate_pte = 1;
			goto invalidate;
		case PGTABLE_TORN_DOWN:
			invalidate_pte = 1;
			invalidate_pde = 1;
			tear_level = level;
			break;
		}
		index = prev_index;
		pt = ppt;
	}

invalidate:
	/*
	 * Now teardown the IOMMU HW caches if applicable
	 */
	if (invalidate_pte) {
		cmdargs.ca_domainid = (uint16_t)domainid;
		if (amd_iommu_pageva_inval_all) {
			cmdargs.ca_addr = (uintptr_t)0x7FFFFFFFFFFFF000;
			flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL |
			    AMD_IOMMU_CMD_FLAGS_PAGE_INVAL_S;
		} else if (invalidate_pde) {
			cmdargs.ca_addr = (uintptr_t)pageva;
			flags = AMD_IOMMU_CMD_FLAGS_PAGE_PDE_INVAL;
		} else {
			cmdargs.ca_addr = (uintptr_t)pageva;
			flags = 0;
		}
		if (amd_iommu_cmd(iommu, AMD_IOMMU_CMD_INVAL_IOMMU_PAGES,
		    &cmdargs, flags, 0) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s: %s%d: idx=%d: domainid=%d, "
			    "rdip=%p. Failed to invalidate IOMMU HW cache "
			    "for %s", f, driver, instance,
			    iommu->aiomt_idx, domainid, (void *)rdip, path);
			error = DDI_FAILURE;
			goto out;
		}
	}

passthru:
	if (tear_level == AMD_IOMMU_PGTABLE_MAXLEVEL) {
		error = amd_iommu_clear_devtbl_entry(iommu, rdip, domainid,
		    deviceid, dp, domain_freed, path);
	} else {
		error = DDI_SUCCESS;
	}

out:
	return (error);
}
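
/*
 * Map internal error codes onto the set of bind errors that the DDI
 * DMA bind path is allowed to return.
 */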
static int
cvt_bind_error(int error)
{
	switch (error) {
	case DDI_DMA_MAPPED:
	case DDI_DMA_PARTIAL_MAP:
	case DDI_DMA_NORESOURCES:
	case DDI_DMA_NOMAPPING:
		break;
	default:
		cmn_err(CE_PANIC, "Unsupported bind error: %d", error);
		/*NOTREACHED*/
	}
	return (error);
}
int
amd_iommu_map_pa2va(amd_iommu_t *iommu, dev_info_t *rdip,
    ddi_dma_attr_t *attrp, struct ddi_dma_req *dmareq,
    uint64_t start_pa, uint64_t pa_sz, map_type_t type,
    uint64_t *start_vap, int km_flags)
{
	int alias;
	int32_t deviceid;
	domain_id_t domainid;
	amd_iommu_domain_t *dp;
	uint64_t end_pa;
	uint64_t start_va;
	uint64_t va;
	uint64_t va_sz;
	uint64_t pg_start;
	uint64_t pg_end;
	uint64_t pg;
	char *path;
	int error = DDI_DMA_NOMAPPING;
	const char *f = "amd_iommu_map_pa2va";

	ASSERT(pa_sz != 0);
	ASSERT(rdip);

	*start_vap = 0;

	path = kmem_alloc(MAXPATHLEN, km_flags);
	if (path == NULL) {
		error = DDI_DMA_NORESOURCES;
		goto out;
	}
	(void) ddi_pathname(rdip, path);

	/*
	 * First get deviceid
	 */
	if (amd_iommu_get_deviceid(iommu, rdip, &deviceid, &alias, path)
	    != DDI_SUCCESS) {
		error = DDI_DMA_NOMAPPING;
		goto out;
	}

	/*
	 * Next get the domain for this rdip
	 */
	if (amd_iommu_get_domain(iommu, rdip, alias, deviceid, &domainid,
	    path) != DDI_SUCCESS) {
		error = DDI_DMA_NOMAPPING;
		goto out;
	}

	dp = amd_iommu_lookup_domain(iommu, domainid, type, km_flags);
	if (dp == NULL) {
		error = DDI_DMA_NORESOURCES;
		goto out;
	}

	ASSERT(dp->d_domainid == domainid);

	end_pa = start_pa + pa_sz - 1;
	va_sz = mmu_ptob(mmu_btopr(end_pa + 1) - mmu_btop(start_pa));

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PA2VA) {
		cmn_err(CE_NOTE, "%s: start_pa = %p, va_sz = %p, "
		    "pgshift = %d",
		    f, (void *)(uintptr_t)start_pa,
		    (void *)(uintptr_t)va_sz, MMU_PAGESHIFT);
	}

	if (type == AMD_IOMMU_UNITY_MAP) {
		/* unity (1:1) mapping: DVA == PA */
		start_va = start_pa & ~((uint64_t)MMU_PAGEOFFSET);
	} else {
		start_va = (uintptr_t)vmem_xalloc(dp->d_vmem, va_sz,
		    MAX(attrp->dma_attr_align, MMU_PAGESIZE),
		    0,
		    attrp->dma_attr_seg + 1,
		    (void *)(uintptr_t)attrp->dma_attr_addr_lo,
		    (void *)(uintptr_t)MIN((attrp->dma_attr_addr_hi + 1),
		    AMD_IOMMU_SIZE_4G), /* XXX rollover */
		    km_flags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
		if (start_va == 0) {
			error = DDI_DMA_NORESOURCES;
			goto out;
		}
	}

	pg_start = mmu_btop(start_pa);
	pg_end = mmu_btop(end_pa);

	va = start_va;
	for (pg = pg_start; pg <= pg_end; pg++, va += MMU_PAGESIZE) {
		if ((error = amd_iommu_create_pgtables(iommu, rdip, dmareq,
		    va,
		    pg << MMU_PAGESHIFT,
		    (uint16_t)deviceid, domainid, dp, path,
		    km_flags)) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s: failed to create page table(s) "
			    "for pfn = %p, va = %p, path = %s",
			    f, (void *)(uintptr_t)pg, (void *)(uintptr_t)va,
			    path);
			goto out;
		}
		if (amd_iommu_debug & AMD_IOMMU_DEBUG_PA2VA) {
			cmn_err(CE_NOTE, "%s: created page table(s) "
			    "for pfn = %p, vapg = %p, path = %s",
			    f, (void *)(uintptr_t)pg, (void *)(uintptr_t)va,
			    path);
		}
	}

	*start_vap = start_va + (start_pa & MMU_PAGEOFFSET);
	error = DDI_DMA_MAPPED;

	if (amd_iommu_debug & AMD_IOMMU_DEBUG_PA2VA) {
		cmn_err(CE_NOTE, "%s: *start_vap = %p", f,
		    (void *)(uintptr_t)*start_vap);
	}

out:
	if (path)
		kmem_free(path, MAXPATHLEN);
	return (cvt_bind_error(error));
}
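
/*
 * A minimal usage sketch (illustrative only; the surrounding cookie and
 * attribute objects are hypothetical, not part of this file): a DMA
 * bind implementation would call amd_iommu_map_pa2va() for each
 * physical cookie, substitute the returned DVA, and undo it on unbind:
 *
 *	uint64_t dva;
 *	if (amd_iommu_map_pa2va(iommu, rdip, attrp, dmareq, cookie_pa,
 *	    cookie_sz, AMD_IOMMU_VMEM_MAP, &dva, KM_SLEEP)
 *	    == DDI_DMA_MAPPED)
 *		cookie->dmac_laddress = dva;
 *	...
 *	(void) amd_iommu_unmap_va(iommu, rdip, dva, cookie_sz,
 *	    AMD_IOMMU_VMEM_MAP);
 */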
int
amd_iommu_unmap_va(amd_iommu_t *iommu, dev_info_t *rdip, uint64_t start_va,
    uint64_t va_sz, map_type_t type)
{
	uint64_t end_va;
	uint64_t pg_start;
	uint64_t pg_end;
	uint64_t pg;
	uint64_t actual_sz;
	char *path;
	int pathfree;
	int alias;
	int32_t deviceid;
	domain_id_t domainid;
	amd_iommu_domain_t *dp;
	int error;
	int domain_freed;
	const char *f = "amd_iommu_unmap_va";

	if (amd_iommu_no_unmap)
		return (DDI_SUCCESS);

	path = kmem_alloc(MAXPATHLEN, KM_NOSLEEP);
	if (path) {
		(void) ddi_pathname(rdip, path);
		pathfree = 1;
	} else {
		pathfree = 0;
		path = "<path-mem-alloc-failed>";
	}

	/*
	 * First get deviceid
	 */
	if (amd_iommu_get_deviceid(iommu, rdip, &deviceid, &alias, path)
	    != DDI_SUCCESS) {
		error = DDI_FAILURE;
		goto out;
	}

	/*
	 * Next get the domain for this rdip
	 */
	if (amd_iommu_get_domain(iommu, rdip, alias, deviceid, &domainid,
	    path) != DDI_SUCCESS) {
		error = DDI_FAILURE;
		goto out;
	}

	/* should never result in domain allocation/vmem_create */
	dp = amd_iommu_lookup_domain(iommu, domainid, AMD_IOMMU_INVALID_MAP,
	    KM_NOSLEEP);
	if (dp == NULL) {
		error = DDI_FAILURE;
		goto out;
	}

	ASSERT(dp->d_domainid == domainid);

	pg_start = start_va >> MMU_PAGESHIFT;
	end_va = start_va + va_sz - 1;
	pg_end = end_va >> MMU_PAGESHIFT;
	actual_sz = (pg_end - pg_start + 1) << MMU_PAGESHIFT;

	domain_freed = 0;
	for (pg = pg_start; pg <= pg_end; pg++) {
		domain_freed = 0;
		if (amd_iommu_destroy_pgtables(iommu, rdip,
		    pg << MMU_PAGESHIFT, (uint16_t)deviceid, domainid, dp,
		    &domain_freed, path) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s: failed to destroy page tables "
			    "for va = %p, path = %s", f,
			    (void *)(uintptr_t)(pg << MMU_PAGESHIFT), path);
			error = DDI_FAILURE;
			goto out;
		}
		if (domain_freed) {
			break;
		}
	}

	/*
	 * vmem_xalloc() must be paired with vmem_xfree
	 */
	if (type == AMD_IOMMU_VMEM_MAP) {
		vmem_xfree(dp->d_vmem,
		    (void *)(uintptr_t)(pg_start << MMU_PAGESHIFT), actual_sz);
	}

	if (domain_freed)
		amd_iommu_teardown_domain(iommu, dp);

	error = DDI_SUCCESS;
out:
	if (pathfree)
		kmem_free(path, MAXPATHLEN);
	return (error);
}