/* immu_qinv.c revision d2256d265bf2bcad0d811b81411de3802a4b97c6 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* All rights reserved.
*/
/*
* Copyright (c) 2009, Intel Corporation.
* All rights reserved.
*/
#include <sys/archsystm.h>
#include <sys/sysmacros.h>
/* invalidation queue table entry size (16 bytes per descriptor) */
#define QINV_ENTRY_SIZE 0x10
/* max value of Queue Size field of Invalidation Queue Address Register */
#define QINV_MAX_QUEUE_SIZE 0x7
/* status data size of invalidation wait descriptor */
#define QINV_SYNC_DATA_SIZE 0x4
/* status data values written on invalidation wait descriptor completion */
#define QINV_SYNC_DATA_FENCE 1
#define QINV_SYNC_DATA_UNFENCE 2
/* invalidation queue head and tail */
#define QINV_IQA_TAIL_SHIFT 4
/* invalidation queue entry structure */
/*
 * NOTE(review): this copy of the file is truncated -- the struct members
 * (presumably the low/high 64-bit descriptor words) are missing here.
 * Restore from the full source before relying on this definition.
 */
typedef struct qinv_inv_dsc {
} qinv_dsc_t;
/*
 * struct iotlb_cache_node
 * the pending data for iotlb flush
 *
 * NOTE(review): the member lists and closing braces of the two pend
 * typedefs below are missing from this copy -- truncated source.
 */
typedef struct iotlb_pend_node {
/*
 * struct iotlb_cache_head
 * the pending head for the iotlb flush
 */
typedef struct iotlb_pend_head {
/* the pending node cache list */
/*
 * qinv_iotlb_t
 * pending data for queued invalidation iotlb flush
 */
typedef struct qinv_iotlb {
} qinv_iotlb_t;
/* physical contiguous pages for invalidation queue */
/*
 * NOTE(review): struct members are missing in this copy -- truncated
 * source; compare against the full file before editing.
 */
typedef struct qinv_mem {
} qinv_mem_t;
/*
 * invalidation queue state
 * This structure describes the state information of the
 * invalidation queue table and related status memory for
 * invalidation wait descriptor
 *
 * qinv_table - invalidation queue table
 * qinv_sync - sync status memory for invalidation wait descriptor
 * qinv_iotlb_pend_node - pending iotlb node
 *
 * NOTE(review): the members documented above are missing from this
 * truncated copy of the struct body.
 */
typedef struct qinv {
} qinv_t;
/*
 * Flush-operation dispatch table for the queued-invalidation interface.
 * NOTE(review): the initializer body (the function-pointer assignments)
 * is missing from this truncated copy.
 */
static struct immu_flushops immu_qinv_flushops = {
};
/* helper macro for making queue invalidation descriptor */
/*
 * NOTE(review): the #define header lines for the *_INV_DSC_LOW helper
 * macros have been lost in this copy -- only backslash-continuation
 * fragments remain below.  The trailing small integers (1..5) look like
 * the descriptor type codes; confirm against the full source.
 */
#define CC_INV_DSC_HIGH (0)
((uint64_t)(g) << 4) | \
1)
((uint64_t)(g) << 4) | \
2)
3)
#define IEC_INV_DSC_HIGH (0)
((uint64_t)(g) << 4) | \
4)
5)
/*
* QS field of Invalidation Queue Address Register
* the size of invalidation queue is 1 << (qinv_iqa_qs + 8)
*/
/*
 * the invalidate descriptor type of queued invalidation interface
 */
/*
 * Human-readable names for the queued-invalidation descriptor types,
 * indexed by the descriptor type code.  The last entry is the catch-all
 * used for out-of-range type values.  const-qualified: the table holds
 * string literals and is only ever read (fault reporting).
 */
static const char *qinv_dsc_type[] = {
	"Reserved",
	"Context Cache Invalidate Descriptor",
	"IOTLB Invalidate Descriptor",
	"Device-IOTLB Invalidate Descriptor",
	"Interrupt Entry Cache Invalidate Descriptor",
	"Invalidation Wait Descriptor",
	"Incorrect queue invalidation type"
};
/* number of entries in qinv_dsc_type[] */
#define QINV_MAX_DSC_TYPE (sizeof (qinv_dsc_type) / sizeof (char *))
/*
* the queued invalidation interface functions
*/
/*LINTED*/
/*LINTED*/
/* submit invalidation request descriptor to invalidation queue */
/*
 * NOTE(review): truncated definition -- the function name, parameter
 * list, and most of the body are missing from this copy.  The surviving
 * lines wrap the queue tail index back to 0 and (per the retained
 * comment) wait for hardware when the queue is exhausted.
 */
static void
{
qinv_table->qinv_mem_tail = 0;
/*
 * inv queue table exhausted, wait hardware to fetch
 * next descriptor
 */
}
}
/* queued invalidation interface -- invalidate context cache */
/*
 * NOTE(review): truncated definition -- signature and entire body are
 * missing from this copy; restore from the full source before editing.
 */
static void
{
}
/* queued invalidation interface -- invalidate iotlb */
/*
 * NOTE(review): truncated definition -- signature and many statements
 * are missing.  What survives: dr/dw (drain read/write) are forced to 1,
 * and a switch on the invalidation granularity falls back to ignoring a
 * malformed page-specific request (page-offset bits set in addr).
 */
static void
{
dr = 1;
dw = 1;
switch (type) {
case TLB_INV_G_PAGE:
addr & IMMU_PAGEOFFSET) {
goto qinv_ignore_psi;
}
break;
case TLB_INV_G_DOMAIN:
break;
case TLB_INV_G_GLOBAL:
break;
default:
return;
}
}
/* queued invalidation interface -- invalidate dev_iotlb */
/*
 * NOTE(review): both definitions below are truncated -- signatures and
 * bodies are missing from this copy.
 */
static void
{
}
/* queued invalidation interface -- invalidate interrupt entry cache */
static void
{
}
/*
 * alloc free entry from sync status table
 */
/*
 * NOTE(review): truncated definition -- signature and most of the body
 * are missing.  Surviving lines wrap the sync-memory tail index to 0
 * and return the allocated tail slot.
 */
static uint_t
{
sync_mem->qinv_mem_tail = 0;
/* should never happen */
goto sync_mem_exhausted;
}
return (tail);
}
/*
 * queued invalidation interface -- invalidation wait descriptor
 * fence flag not set, need status data to indicate the invalidation
 * wait descriptor completion
 */
/*
 * NOTE(review): truncated definition -- signature and descriptor-build
 * statements are missing; only the explanatory comments survive.
 */
static void
{
/* plant an iotlb pending node */
/*
 * sdata = QINV_SYNC_DATA_UNFENCE, fence = 0, sw = 1, if = 0
 * indicate the invalidation wait descriptor completion by
 * performing a coherent DWORD write to the status address,
 * not by generating an invalidation completion event
 */
}
/*
 * queued invalidation interface -- invalidation wait descriptor
 * fence flag set, indicate descriptors following the invalidation
 * wait descriptor must be processed by hardware only after the
 * invalidation wait descriptor completes.
 */
/*
 * NOTE(review): truncated definition -- signature and body statements
 * are missing from this copy.
 */
static void
{
/* sw = 0, fence = 1, iflag = 0 */
}
/*
 * queued invalidation interface -- invalidation wait descriptor
 * wait until the invalidation request finished
 */
/*
 * NOTE(review): truncated definition -- signature missing.  The
 * surviving spin loop polls the status DWORD until hardware writes
 * QINV_SYNC_DATA_FENCE; the loop body statement is also missing.
 */
static void
{
/*
 * sdata = QINV_SYNC_DATA_FENCE, fence = 1, sw = 1, if = 0
 * indicate the invalidation wait descriptor completion by
 * performing a coherent DWORD write to the status address,
 * not by generating an invalidation completion event
 */
while ((*status) != QINV_SYNC_DATA_FENCE)
}
/* get already completed invalidation wait requests */
/*
 * NOTE(review): truncated definition -- signature and several
 * statements missing.  What survives: walk completed UNFENCE status
 * slots, zero each, count them in *cnt, wrap the head index, and
 * return an index when anything completed, else -1.
 */
static int
{
int index;
while (*value == QINV_SYNC_DATA_UNFENCE) {
*value = 0;
(*cnt)++;
sync_mem->qinv_mem_head = 0;
} else
}
if ((*cnt) > 0)
return (index);
else
return (-1);
}
/*
 * call ddi_dma_mem_alloc to allocate physical contiguous
 * pages for invalidation queue table
 */
/*
 * NOTE(review): truncated definition -- the signature, the DMA
 * attribute structure's member context, and most call sites are
 * missing.  The surviving skeleton shows the intended flow: allocate
 * DMA handles and memory for the queue table and the sync status area,
 * allocate the iotlb pend-node array, and unwind via goto labels on
 * failure (returns DDI_SUCCESS / DDI_FAILURE).
 */
static int
{
0U,
0xffffffffffffffffULL,
0xffffffffU,
MMU_PAGESIZE, /* page aligned */
0x1,
0x1,
0xffffffffU,
0xffffffffffffffffULL,
1,
4,
0
};
};
immu_qinv_enable == B_FALSE) {
return (DDI_SUCCESS);
}
if (qinv_iqa_qs > QINV_MAX_QUEUE_SIZE)
"alloc invalidation queue table handler failed");
}
"alloc invalidation queue sync mem handler failed");
goto sync_table_handle_failed;
}
/* alloc physical contiguous pages for invalidation queue */
size,
NULL,
&size,
"alloc invalidation queue table failed");
goto queue_table_mem_failed;
}
/* get the base physical address of invalidation request queue */
/* alloc status memory for invalidation wait descriptor */
size,
NULL,
&size,
"alloc invalidation queue sync mem failed");
goto sync_table_mem_failed;
}
/*
 * init iotlb pend node for submitting invalidation iotlb
 * queue request
 */
* sizeof (qinv_iotlb_pend_node_t *), KM_SLEEP);
/* set invalidation queue structure */
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
/*
* ###########################################################################
*
* Functions exported by immu_qinv.c
*
* ###########################################################################
*/
/*
 * initialize invalidation request queue structure.
 */
/*
 * NOTE(review): truncated definition -- function name, parameters, and
 * the loop this fragment belongs to are missing.  Surviving lines gate
 * on immu_qinv_enable and count failures in nerr.
 */
int
{
int nerr;
if (immu_qinv_enable == B_FALSE) {
return (DDI_FAILURE);
}
nerr = 0;
} else {
nerr++;
break;
}
}
}
/*
 * NOTE(review): truncated definition -- name, parameters, and most of
 * the body are missing from this copy.
 */
void
{
return;
}
}
/*
 * queued invalidation interface
 * function based context cache invalidation
 */
/*
 * NOTE(review): the three wrapper definitions below are truncated --
 * signatures and bodies are missing; only the block comments survive.
 */
void
{
}
/*
 * queued invalidation interface
 * domain based context cache invalidation
 */
void
{
}
/*
 * queued invalidation interface
 * invalidation global context cache
 */
void
{
}
/*
 * queued invalidation interface
 * page based iotlb invalidation
 */
/*
 * NOTE(review): truncated definition -- signature and several
 * statements missing.  Surviving logic: grow the address-mask order
 * (am) until the range fits ADDR_AM_MAX(am) for a page-specific
 * invalidation, otherwise fall back to a domain-wide invalidation.
 */
void
{
/* choose page specified invalidation */
<= ADDR_AM_MAX(am)) {
break;
}
am++;
}
}
/* choose domain invalidation */
} else {
0, hint, TLB_INV_G_DOMAIN);
}
}
/*
 * queued invalidation interface
 * domain based iotlb invalidation
 */
/*
 * NOTE(review): both wrapper definitions below are truncated --
 * signatures and bodies are missing from this copy.
 */
void
{
}
/*
 * queued invalidation interface
 * global iotlb invalidation
 */
void
{
}
/*
 * the plant wait operation for queued invalidation interface
 */
/*
 * NOTE(review): truncated definition -- signature and most statements
 * missing.  Surviving comments show the intent: reuse a cached pend
 * node (alloc one if the cache is empty) and plant a wait descriptor
 * without blocking on its completion.
 */
void
{
if (node) {
}
/* no cache, alloc one */
}
/* plant an invalidation wait descriptor, not wait its completion */
}
/*
 * the reap wait operation for queued invalidation interface
 */
/*
 * NOTE(review): truncated definition -- signature and loop-body
 * statements are missing; only the counting/wrapping skeleton remains.
 */
void
{
while (cnt--) {
continue;
index++;
index = 0;
}
}
/* queued invalidation interface -- global invalidate interrupt entry cache */
/*
 * NOTE(review): both definitions below are truncated -- signatures and
 * bodies are missing from this copy.
 */
void
{
}
/* queued invalidation interface -- invalidate single interrupt entry cache */
void
{
}
/* queued invalidation interface -- invalidate interrupt entry caches */
/*
 * NOTE(review): truncated definition -- signature and loop bodies
 * missing.  Surviving skeleton: if cnt is not a power of 2, invalidate
 * the entries one at a time; otherwise compute a mask width and issue
 * per-entry requests.
 */
void
{
/* requested interrupt count is not a power of 2 */
for (i = 0; i < cnt; i++) {
}
return;
}
mask++;
}
for (i = 0; i < cnt; i++) {
}
return;
}
}
/*
 * NOTE(review): truncated definition -- name and parameters missing.
 * Surviving lines locate the faulting descriptor from the head register
 * (head * QINV_ENTRY_SIZE) and log a queued-invalidation fetch fault,
 * naming the descriptor type via qinv_dsc_type[].  The format-string
 * fragments below are runtime strings and must not be altered.
 */
void
{
/* access qinv data */
+ (head * QINV_ENTRY_SIZE));
/* report the error */
"generated a fault when fetching a descriptor from the"
"\tinvalidation queue, or detects that the fetched"
"\tdescriptor is invalid. The head register is "
"0x%" PRIx64
"\tthe type is %s",
head,
}