zulu_hat.c revision e0731422366620894c16c1ee6515551c5f00733d
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/tnf_probe.h>
#include <vm/hat_sfmmu.h>
#include <vm/xhat_sfmmu.h>
#include <sys/zulu_hat.h>
/*
* This file contains the implementation of zulu_hat: an XHAT provider
* to support the MMU for the XVR-4000 graphics accelerator (code name zulu).
*
* The zulu hat is linked into the kernel misc module zuluvm.
* zuluvm provides services that the zulu device driver module requires
* that are not part of the standard ddi. See PSARC 2002/231.
*
* The zulu driver is delivered by the graphics consolidation.
* zuluvm is in ON workspace.
*
* There are two types of interfaces provided by zulu_hat
* 1. The set of functions and data structures used by zuluvm to obtain
* tte entries for the zulu MMU and to manage the association between
* user process's address spaces and zulu graphics contexts.
*
* 2. The entry points required for an XHAT provider: zulu_hat_ops
*/
/*
* zulu_ctx_tab contains an array of pointers to the zulu_hats.
*
* During zulu graphics context switch, the zulu MMU's current context register
* is set to the index of the process's zulu hat's location in the array
* zulu_ctx_tab.
*
* This allows the TL=1 TLB miss handler to quickly find the zulu hat and
* lookup a tte in the zulu hat's TSB.
*
* To synchronize with the trap handler we use bit zero of
* the pointer as a lock bit. See the function zulu_ctx_tsb_lock_enter().
*
* If the trap handler finds the ctx locked it doesn't wait, it
* posts a soft interrupt which is handled at TL=0.
*/
#define ZULU_HAT_MAX_CTX 32
/*
* To avoid searching through the whole zulu_ctx_tab for a free slot,
* we maintain the value of zulu_ctx_search_start.
*
* This value is a guess as to where a free slot in the context table might be.
* All slots < zulu_ctx_search_start are definitely occupied.
*/
static int zulu_ctx_search_start = 0;
/*
* this mutex protects the zulu_ctx_tab and zulu_ctx_search_start
*/
static kmutex_t zulu_ctx_lock;
static uint64_t zulu_tsb_miss = 0;
static uint64_t zulu_as_fault = 0;
/*
* The zulu device has two zulu data mmus.
* We use the base pagesize for one of them and 4M for the other.
*/
extern int zuluvm_base_pgsize;
/*
* call zuluvm to remove translations for a page
*/
static void
{
/* context has been stolen, so page is already demapped */
return;
}
}
static void
{
if (zulu_ctx < 0) {
/* context has been stolen */
return;
}
}
/*
* steal the least recently used context slot.
*/
static int
{
int ctx;
/*
* we shouldn't be here unless all slots are occupied
*/
zhat_oldest = zhat;
}
}
/* Nobody should have the tsb lock bit set here */
== 0);
return (ctx);
}
/*
* find a slot in the context table for a zulu_hat
*/
static void
{
int ctx;
if (ZULU_CTX_IS_FREE(ctx)) {
break;
}
}
if (ctx == ZULU_HAT_MAX_CTX) {
/* table is full need to steal an entry */
ctx = zulu_hat_steal_ctx();
}
}
/*
* zulu_hat_validate_ctx: Called before the graphics context associated
* with a given zulu hat becomes the current zulu graphics context.
* Make sure that the hat has a slot in zulu_ctx_tab.
*/
void
{
}
}
static void
{
}
}
}
/*
* Lock the zulu tsb for a given zulu_hat.
*
* We're just protecting against the TLB trap handler here. Other operations
* on the zulu_hat require entering the zhat's lock.
*/
static void
{
return;
}
for (; ; ) {
if (!(lck & ZULU_CTX_LOCK)) {
/*
* success
*/
break;
}
}
}
}
static void
{
if (zulu_ctx < 0) {
return;
}
lck &= ~ZULU_CTX_LOCK;
}
/*
* Each zulu hat has a "shadow tree" which is a table of 4MB address regions
* for which the zhat has mappings.
*
* This table is maintained in an avl tree.
* Nodes in the tree are called shadow blocks (or sblks)
*
* This data structure allows unload operations by (address, range) to be
* much more efficient.
*
* We get called a lot for address ranges that have never been supplied
* to zulu.
*/
/*
* compare the base address of two nodes in the shadow tree
*/
static int
zulu_shadow_tree_compare(const void *a, const void *b)
{
return (-1);
return (1);
} else {
return (0);
}
}
/*
* lookup the entry in the shadow tree for a given virtual address
*/
static struct zulu_shadow_blk *
{
struct zulu_shadow_blk proto;
struct zulu_shadow_blk *sblk;
/*
* pages typically fault in in order so we cache the last shadow
* block that was referenced so we usually get to reduce calls to
* avl_find.
*/
} else {
}
return (sblk);
}
/*
* insert a sblk into the shadow tree for a given zblk.
* If a sblk already exists, just increment its refcount.
*/
static void
{
}
/*
* a blk can set both the minimum and maximum when it
* is the first zblk added to a previously emptied sblk
*/
}
} else {
}
}
/*
* decrement the ref_count for the sblk that corresponds to a given zblk.
* When the ref_count goes to zero remove the sblk from the tree and free it.
*/
static void
{
struct zulu_shadow_blk *sblk;
}
} else {
/*
* Update the high and low water marks for this sblk.
* These are estimates, because we don't know if the previous
* or next region are actually occupied, but we can tell
* whether the previous values have become invalid.
*
* In the most often applied case a segment is being
* unloaded, and the min_addr will be kept up to date as
* the zblks are deleted in order.
*/
}
}
}
}
static void
{
struct zulu_shadow_blk *sblk;
}
}
/*
* zulu_hat_insert_map:
*
* Add a zulu_hat_blk to the a zhat's mappings list.
*
* Several data structures are used
* tsb: for simple fast lookups by the trap handler
* hash table: for efficient lookups by address, range
* A shadow tree of 4MB ranges with mappings for unloading big regions.
*/
static void
{
int tsb_hash;
/*
* The hash table is an array of buckets. Each bucket is the
* head of a linked list of mappings whose address hashes to the bucket
* New entries go to the head of the list.
*/
if (zblk->zulu_hash_next) {
}
}
/*
* remove a block from a zhat
*/
static void
{
/*
* first remove zblk from hash table
*/
if (zblk->zulu_hash_prev) {
} else {
}
if (zblk->zulu_hash_next) {
}
/*
* then remove the tsb entry
*/
}
}
/*
* look for a mapping to a given vaddr and page size
*/
static struct zulu_hat_blk *
{
struct zulu_hat_blk *zblkp;
int blks_checked = 0;
blks_checked++;
int tsb_hash;
break;
}
}
return (zblkp);
}
/*
* Lookup a zblk for a given virtual address.
*/
static struct zulu_hat_blk *
{
/*
* if the hat is using 4M pages, look first for a 4M page
*/
return (zblkp);
}
}
/*
* Otherwise look for a 8k page
* Note: if base pagesize gets increased to 64K remove this test
*/
return (zblkp);
}
}
/*
* only if the page isn't found in the sizes that match the zulu mmus
* look for the inefficient 64K or 512K page sizes
*/
return (zblkp);
}
}
}
return (zblkp);
}
/*
* zulu_hat_load: Load translation for given vaddr
*/
int
{
struct zulu_hat_blk *zblkp;
int rval;
/*
* lookup in our tsb first
*/
if (flags_pfn) {
p++; /* ignore the tag */
*p = flags_pfn; /* load the flags */
}
zulu_tsb_hit++;
return (0);
}
if (zblkp) {
}
return (0);
}
/*
* Set a flag indicating that we're processing a fault.
* See comments in zulu_hat_unload_region.
*/
/*
* caller wants to know the page size (used by preload)
*/
} else {
*ppg_size = -1;
}
}
if (as_err != 0) {
rval = -1;
} else {
rval = 0;
}
return (rval);
}
/*
 * xhat_alloc entry point: allocate a zulu hat for an address space.
 * NOTE(review): the body appears truncated in this revision -- the
 * function is declared to return struct xhat * but no allocation or
 * return statement is visible; confirm against the full source before
 * relying on this listing.
 */
static struct xhat *
zulu_hat_alloc(void *arg)
{
(void) arg;
/*
 * The zulu hat has a few opaque data structs embedded in it.
 * This tag makes finding our data easier with a debugger.
 */
}
static void
{
}
static void
{
(void) xhat;
}
/*
* zulu_hat_memload: This is the callback where the vm system gives us our
* translations
*/
static void
{
void *blk;
struct zulu_hat_blk *zblk;
/*
* keep track of the highest address that this zhat has had
* a mapping for.
* We use this in unload to avoid searching for regions that
* we've never seen.
*
* This is particularly useful for avoiding repeated searches for
* the process's mappings to the zulu hardware. These mappings
* are explicitly unloaded at each graphics context switch.
*
* This takes advantage of the fact that the device addresses
* are always above the heap where most DMA data is stored.
*/
}
/*
* The perm bit is actually in the tte which gets copied to the TSB
*/
switch (zblk->zulu_hat_blk_size) {
case ZULU_TTE8K:
break;
case ZULU_TTE64K:
break;
case ZULU_TTE512K:
break;
case ZULU_TTE4M:
break;
default:
panic("zulu_hat illegal page size\n");
}
}
}
static void
{
}
static void
{
(void) size;
}
static void
{
}
}
static void
{
struct zulu_hat_blk *zblkp;
struct zulu_hat_blk *next;
}
}
static void
{
*pfree_list = zblk;
}
static void
{
int found = 0;
/*
* check address against the low and highwater marks for mappings
* in this sblk
*/
}
}
/*
* REMIND: It's not safe to touch the sblk after we enter this loop
* because it may get deleted.
*/
struct zulu_hat_blk *zblkp;
continue;
}
found++;
/*
* skip demap page if as_free has already been entered
* zuluvm demapped the context already
*/
/*
* We're being called from within as_fault to
* unload the last translation we loaded.
*
* This is probably due to watchpoint handling.
* Delay the demap for a millisecond
* to allow zulu to make some progress.
*/
drv_usecwait(1000);
zhat->fault_ivaddr_last = 0;
}
}
break;
}
}
}
static void
{
int found = 0;
(void) flags;
/*
* The following test prevents us from searching for the user's
* mappings to the zulu device registers. Those mappings get unloaded
* every time a graphics context switch away from a given context
* occurs.
*
* Since the heap is located at smaller virtual addresses than the
* registers, this simple test avoids quite a bit of useless work.
*/
/*
* all existing mappings have lower addresses than vaddr
* no need to search further.
*/
return;
}
do {
struct zulu_shadow_blk *sblk;
found++;
} else {
}
&free_list);
}
}
static void
{
(void) size;
(void) pcb;
}
/*
* unload one page
*/
static int
void *xblk)
{
int do_delete;
(void) pp;
(void) flags;
do_delete = 1;
/*
* now that the entry is removed from the TSB, remove the
* translation from the zulu hardware.
*
* Skip the demap if this as is in the process of being freed.
* The zuluvm as callback has demapped the whole context.
*/
}
} else {
/*
* This block has already been removed from the zulu_hat,
* it's on a free list waiting for our thread to release
* a mutex so it can be freed
*/
do_delete = 0;
/* CSTYLED */);
}
if (do_delete) {
}
return (0);
}
static void
{
struct zulu_hat_blk *zblk;
int i;
int nblks = 0;
/*
* real swapout calls are rare so we don't do anything in
* particular to optimize them.
*
* Just loop over all buckets in the hash table and free each
* zblk.
*/
for (i = 0; i < ZULU_HASH_TBL_NUM; i++) {
struct zulu_hat_blk *next;
nblks++;
}
}
/*
* remove all mappings for this context from zulu hardware.
*/
}
static void
{
}
/*
* Functions to manage changes in protections for mappings.
*
* These are rarely called in normal operation so for now just unload
* the region.
* If the mapping is still needed, it will fault in later with the new
* attributes.
*/
typedef enum {
static void
{
}
static void
{
#ifdef DEBUG
printf("zulu_hat_chgprot: ctx: %d addr: %lx, size: %lx flags: %x\n",
#endif
}
static void
{
#ifdef DEBUG
printf("zulu_hat_setattr: ctx: %d addr: %lx, size: %lx flags: %x\n",
#endif
}
static void
{
#ifdef DEBUG
printf("zulu_hat_clrattr: ctx: %d addr: %lx, size: %lx flags: %x\n",
#endif
}
static void
{
#ifdef DEBUG
printf("zulu_hat_chgattr: ctx: %d addr: %lx, size: %lx flags: %x\n",
#endif
}
/*
 * XHAT provider dispatch table registered with the xhat framework.
 * The initializers are positional, so their order must match the
 * member order of struct xhat_ops (declared in vm/xhat.h); the
 * trailing comments record the intended member for each slot.
 * NULL entries are operations this provider does not implement.
 */
struct xhat_ops zulu_hat_ops = {
zulu_hat_alloc, /* xhat_alloc */
zulu_hat_free, /* xhat_free */
zulu_hat_free_start, /* xhat_free_start */
NULL, /* xhat_free_end */
NULL, /* xhat_dup */
NULL, /* xhat_swapin */
zulu_hat_swapout, /* xhat_swapout */
zulu_hat_memload, /* xhat_memload */
zulu_hat_memload_array, /* xhat_memload_array */
zulu_hat_devload, /* xhat_devload */
zulu_hat_unload, /* xhat_unload */
zulu_hat_unload_callback, /* xhat_unload_callback */
zulu_hat_setattr, /* xhat_setattr */
zulu_hat_clrattr, /* xhat_clrattr */
zulu_hat_chgattr, /* xhat_chgattr */
zulu_hat_unshare, /* xhat_unshare */
zulu_hat_chgprot, /* xhat_chgprot */
zulu_hat_pageunload, /* xhat_pageunload */
};
NULL,
NULL,
NULL,
};
0,
NULL,
NULL,
"zulu_hat_provider",
sizeof (struct zulu_hat_blk) + sizeof (struct xhat_hme_blk)
};
/*
* The following functions are the entry points that zuluvm uses.
*/
/*
* initialize this module. Called from zuluvm's _init function
*/
int
{
int c;
int rval;
for (c = 0; c < ZULU_HAT_MAX_CTX; c++) {
}
if (rval != 0) {
}
return (rval);
}
/*
* un-initialize this module. Called from zuluvm's _fini function
*/
int
{
if (xhat_provider_unregister(&zulu_hat_provider) != 0) {
return (-1);
}
return (0);
}
/*
 * Attach hook called by zuluvm when a device instance comes up.
 * zulu_hat keeps no per-attachment state, so there is nothing to
 * initialize here; always report success.
 */
int
zulu_hat_attach(void *arg)
{
	(void) arg;

	return (0);
}
/*
 * Detach hook called by zuluvm when a device instance goes away.
 * Nothing was set up at attach time, so there is nothing to tear
 * down; always report success.
 */
int
zulu_hat_detach(void *arg)
{
	(void) arg;

	return (0);
}
/*
* create a zulu hat for this address space.
*/
struct zulu_hat *
{
int xhat_rval;
}
return (zhat);
}
void
{
}
/*
* zulu_hat_terminate
*
* Disables any further TLB miss processing for this hat
* Called by zuluvm's as_free callback. The primary purpose of this
* function is to cause any pending zulu DMA to abort quickly.
*/
void
{
/*
* zap the tsb
*/
}