/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* All rights reserved.
*/
/*
* immu_regs.c - File that operates on an IMMU unit's registers
*/
#include <sys/dditypes.h>
#include <sys/archsystm.h>
#include <sys/x86_archext.h>
#include <sys/sysmacros.h>
((immu)->immu_regs_handle, \
((immu)->immu_regs_handle, \
};
/*
* wait max 60s for the hardware completion
*/
{ \
while (1) { \
ntick = ddi_get_lbolt(); \
if (completion) { \
break; \
} \
"immu wait completion time out"); \
/*NOTREACHED*/ \
} else { \
ht_pause();\
}\
}\
}
};
/*
* iotlb_flush()
* flush the iotlb cache
*/
static void
{
/* no lock needed since cap and excap fields are RDONLY */
/*
*/
}
}
/*
* if the hardware doesn't support page selective invalidation, we
* will use domain type. Otherwise, use global type
*/
switch (type) {
case IOTLB_PSI:
break;
case IOTLB_DSI:
break;
case IOTLB_GLOBAL:
break;
default:
return;
}
if (iva)
}
/*
* immu_regs_iotlb_psi()
* iotlb page specific invalidation
*/
/*ARGSUSED*/
void
{
int dvma_am;
int npg_am;
int max_am;
int am;
int npages_left;
int npages;
int i;
return;
}
for (i = 0; i < immu_flush_gran && npages_left > 0; i++) {
/* First calculate alignment of DVMA */
if (dvma == 0) {
} else {
;
dvma_am--;
}
/* Calculate the npg_am */
;
npages_left -= npages;
}
if (npages_left) {
}
}
/*
* immu_regs_iotlb_dsi()
* domain specific invalidation
*/
/*ARGSUSED*/
void
{
}
/*
* immu_regs_iotlb_gbl()
* global iotlb invalidation
*/
/*ARGSUSED*/
void
{
}
static int
{
int r, agaw;
if (r == 0)
else
if (agaw > 64)
agaw = 64;
return (agaw);
}
/*
* set_immu_agaw()
* calculate agaw for an IOMMU unit
*/
static int
{
int nlevels;
/*
* mgaw is the maximum guest address width.
* Addresses above this value will be
* blocked by the IOMMU unit.
* sagaw is a bitmask that lists all the
* AGAWs supported by this IOMMU unit.
*/
/*
* Get bitpos corresponding to
* magaw
*/
/*
* Maximum SAGAW is specified by
* Vt-d spec.
*/
if (sagaw_mask > max_sagaw_mask) {
"is larger than maximu SAGAW bitmask "
"(%x) specified by Intel Vt-d spec",
return (DDI_FAILURE);
}
/*
* Find a supported AGAW <= magaw
*
* sagaw_mask bitpos AGAW (bits) nlevels
* ==============================================
* 0 0 0 0 1 0 30 2
* 0 0 0 1 0 1 39 3
* 0 0 1 0 0 2 48 4
* 0 1 0 0 0 3 57 5
* 1 0 0 0 0 4 64(66) 6
*/
mask = 1;
nlevels = 0;
agaw = 0;
if (mask & sagaw_mask) {
}
}
/* calculated agaw can be > 64 */
"is outside valid limits [30,%d] specified by Vt-d spec "
return (DDI_FAILURE);
}
"level (%d) is outside valid limits [2,6]",
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static int
{
int error;
/*
* This lock may be acquired by the IOMMU interrupt handler
*/
(void *)ipltospl(IMMU_INTR_IPL));
/*
* map the register address space
*/
&(immu->immu_regs_handle));
if (error == DDI_FAILURE) {
return (DDI_FAILURE);
}
/*
* get the register value
*/
/*
* if the hardware access is non-coherent, we need clflush
*/
} else {
"immu unit %s can't be enabled due to "
return (DDI_FAILURE);
}
}
/* Setup SNP and TM reserved fields */
else
immu->immu_ptemask = 0;
/*
* Check for Mobile 4 series chipset
*/
if (immu_quirk_mobile4 == B_TRUE &&
"IMMU: Mobile 4 chipset quirk detected. "
"Force-setting RWBF");
}
/*
* retrieve the maximum number of domains
*/
/*
* calculate the agaw
*/
return (DDI_FAILURE);
}
immu->immu_regs_cmdval = 0;
return (DDI_SUCCESS);
}
/* ############### Functions exported ################## */
/*
* immu_regs_setup()
* Setup mappings to a IMMU unit's registers
*/
void
{
int i;
for (i = 0; i < IMMU_MAXSEG; i++) {
/* do your best, continue on error */
} else {
}
}
}
}
/*
* immu_regs_map()
*/
int
{
int error;
/*
* remap the register address space
*/
&(immu->immu_regs_handle));
if (error != DDI_SUCCESS) {
return (DDI_FAILURE);
}
(void) immu_intr_handler(immu);
return (error);
}
/*
* immu_regs_suspend()
*/
void
{
/* Finally, unmap the regs */
}
/*
* immu_regs_startup()
* set an IMMU unit's registers to start up the unit
*/
void
{
return;
}
}
/*
* immu_regs_shutdown()
* shutdown a unit
*/
void
{
return;
}
}
/*
* immu_regs_intr()
* Set an IMMU unit's regs to set up the unit's
* interrupt handler
*/
void
{
}
/*
* immu_regs_passthru_supported()
* Returns B_TRUE if passthru is supported
*/
{
return (B_TRUE);
}
return (B_FALSE);
}
/*
* immu_regs_is_TM_reserved()
* Returns B_TRUE if TM field is reserved
*/
{
return (B_FALSE);
}
return (B_TRUE);
}
/*
* immu_regs_is_SNP_reserved()
* Returns B_TRUE if SNP field is reserved
*/
{
}
/*
* immu_regs_wbf_flush()
* If required and supported, write to IMMU
* unit's regs to flush DMA write buffer(s)
*/
void
{
return;
}
}
/*
* immu_regs_cpu_flush()
* flush the cpu cache line after CPU memory writes, so
* IOMMU can see the writes
*/
void
{
return;
}
mfence_insn();
}
/*
* immu_regs_context_flush()
* flush the context cache
*/
static void
{
/*
* define the command
*/
switch (type) {
case CONTEXT_FSI:
| CCMD_INV_DID(did)
break;
case CONTEXT_DSI:
| CCMD_INV_DID(did);
break;
case CONTEXT_GLOBAL:
break;
default:
"%s: incorrect context cache flush type",
/*NOTREACHED*/
}
}
/*ARGSUSED*/
void
{
}
/*ARGSUSED*/
void
{
}
/*ARGSUSED*/
void
{
}
/*
* Nothing to do, all register operations are synchronous.
*/
/*ARGSUSED*/
static void
{
}
void
{
}
/* enable queued invalidation interface */
void
{
if (immu_qinv_enable == B_FALSE)
return;
/* Initialize the Invalidation Queue Tail register to zero */
/* set invalidation queue base address register */
/* enable queued invalidation interface */
}
/* enable interrupt remapping hardware unit */
void
{
if (immu_intrmap_enable == B_FALSE)
return;
/* set interrupt remap table pointer */
/* global flush intr entry cache */
/* enable interrupt remapping */
status);
/* set compatible mode */
status);
}
{
}
{
}
void
{
}
void
{
}