/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
 * Machine frame segment driver. This segment driver allows dom0 processes to
 * map machine pages of other domains (e.g. during save/restore): ioctl()s on
 * the privcmd driver provide the MFN values backing each mapping, and we map
 * them into the process's address space at this time. Demand-faulting is not
 * supported by this driver due to the requirements upon some of the ioctl()s.
 */
#include <sys/hypervisor.h>
/*
 * NOTE(review): the type definitions below appear truncated -- member
 * declarations have been stripped from the structs, and the enum is left
 * unclosed, so this section does not compile as-is.  TODO: restore the
 * missing members from the original source.
 */
/* Per-page state for a foreign-MFN mapping; members missing -- TODO restore. */
typedef struct segmf_mfn_s {
} segmf_mfn_t;
/* g_flags */
/* Per-page state for a grant-reference mapping; members missing -- TODO restore. */
typedef struct segmf_gref_s {
} segmf_gref_t;
/*
 * Per-page payload: either a foreign-MFN mapping (m) or a grant-reference
 * mapping (g); which arm is valid is presumably selected by the owning map
 * entry's type -- TODO confirm once the enum below is restored.
 */
typedef union segmf_mu_u {
segmf_mfn_t m;
segmf_gref_t g;
} segmf_mu_t;
typedef enum {
SEGMF_MAP_EMPTY = 0,
/*
 * NOTE(review): the remaining enum members and the closing
 * "} <map-type>_t;" line are missing, so the struct typedef below is
 * syntactically fused into the enum.  TODO restore.
 */
typedef struct segmf_map_s {
segmf_mu_t u;
} segmf_map_t;
/* Per-segment private data; members appear to have been stripped -- TODO restore. */
struct segmf_data {
};
/*
 * Allocate and initialize the per-segment private data.
 * NOTE(review): the function name/parameter line is missing between the
 * return type and the opening brace, and the statements that allocate and
 * fill in "data" are gone -- as written, "data" is returned uninitialized.
 * TODO restore from the original source.
 */
static struct segmf_data *
{
return (data);
}
/*
 * Segment-driver create entry point (presumably segmf_create): attaches
 * this driver to a new segment described by the create args.
 * NOTE(review): the signature line, the declarations of "i"/"npages", the
 * per-page initialization inside the loop, the assignment of "error", and
 * the final success return are all missing -- truncated.  As written,
 * "error" is read uninitialized and "a" is set but unused.
 */
int
{
struct segmf_crargs *a = args;
int error;
for (i = 0; i < npages; i++) {
}
if (error != 0)
return (error);
}
/*
 * Duplicate a seg and return new segment in newseg.
 */
static int
/*
 * NOTE(review): the name/parameter line and the entire body are missing;
 * as written this value-returning function falls off the end (undefined
 * behavior if the result is used).  TODO restore.
 */
{
}
/*
 * We only support unmapping the whole segment, and we automatically unlock
 * what we previously soft-locked.
 */
static int
/*
 * NOTE(review): the signature and the guard conditions are missing.  The
 * panic(), ENOTSUP return and success return were presumably on separate
 * branches (bad range -> panic, outstanding softlocks -> ENOTSUP); as
 * written the first statement always panics and both returns are
 * unreachable.  TODO restore.
 */
{
panic("segmf_unmap");
return (ENOTSUP);
return (0);
}
/*
 * Segment destructor (presumably segmf_free) -- would release the
 * per-segment private data.  NOTE(review): name line and body missing.
 */
static void
{
}
/* Debug knob: non-zero enables the uprintf() tracing in the fault path below. */
static int segmf_faultpage_debug = 0;
/*ARGSUSED*/
/*
 * Fault a single page into the process's address space (presumably
 * segmf_faultpage): loads a placeholder HAT mapping and overwrites it with
 * the foreign-domain PTE.
 * NOTE(review): the signature line, the lookups that produce "data"/"pte",
 * the uprintf() argument list (the format expects four values), the HAT
 * load/update calls, and the failure condition guarding the EFAULT return
 * are all missing -- truncated.  TODO restore.
 */
static int
{
if (type == F_SOFTLOCK) {
/* account for an outstanding soft-lock on this page */
data->softlockcnt++;
} else
if (segmf_faultpage_debug > 0) {
uprintf("segmf_faultpage: addr %p domid %x mfn %lx prot %x\n",
}
/*
 * Ask the HAT to load a throwaway mapping to page zero, then
 * overwrite it with our foreign domain mapping. It gets removed
 * later via hat_unload()
 */
if (prot & PROT_WRITE)
pte |= PT_WRITABLE;
if (type == F_SOFTLOCK) {
/* undo the soft-lock accounting on failure */
data->softlockcnt--;
}
return (FC_MAKE_ERR(EFAULT));
}
return (0);
}
/*
 * Map a seg_rw access type to the page protection needed to satisfy it
 * (S_READ -> PROT_READ, etc).
 * NOTE(review): the name/parameter line is missing, and the S_OTHER /
 * default path falls off the end of a value-returning function (undefined
 * behavior) -- the original presumably returned a combined protection
 * mask after the switch.  TODO restore.
 */
static int
{
switch (rw) {
case S_READ:
return (PROT_READ);
case S_WRITE:
return (PROT_WRITE);
case S_EXEC:
return (PROT_EXEC);
case S_OTHER:
default:
break;
}
}
/*
 * Soft-unlock path (presumably segmf_softunlock): drops soft-lock
 * accounting and wakes up anyone blocked in as_unmap().
 * NOTE(review): the signature, the decrement of softlockcnt, and the
 * mutex/wakeup statements are missing; the repeated AS_ISUNMAPWAIT()
 * check was presumably a re-test under the AS lock -- TODO confirm
 * against the original.
 */
static void
{
if (data->softlockcnt == 0) {
if (AS_ISUNMAPWAIT(as)) {
if (AS_ISUNMAPWAIT(as)) {
}
}
}
}
/*
 * Fault handler over an address range (presumably segmf_fault_range):
 * validates protections, faults each page in turn, and unwinds the pages
 * already done if one fails.
 * NOTE(review): the signature, the protection-check condition guarding
 * the FC_PROT return, the loop header, and the undo call under
 * "if (done > 0)" are all missing -- as written, FC_PROT is returned
 * unconditionally and the rest is unreachable.  TODO restore.
 */
static int
{
int error = 0;
caddr_t a;
return (FC_PROT);
/* loop over the address range handling each fault */
if (error != 0)
break;
}
/*
 * Undo what's been done so far.
 */
if (done > 0)
}
return (error);
}
/*
 * We never demand-fault for seg_mf.
 */
/*ARGSUSED*/
static int
/*
 * NOTE(review): name/parameter line missing (likely segmf_faulta or the
 * demand-fault arm of segmf_fault); unconditionally reports EFAULT.
 */
{
return (FC_MAKE_ERR(EFAULT));
}
/*
 * NOTE(review): the following run of segment-ops stubs has lost every
 * function name/parameter line (only return types, braces and bodies
 * remain), so the exact op each stub implements cannot be confirmed from
 * this chunk.  Typical candidates: setprot, checkprot, kluster, sync,
 * incore, lockop.  TODO restore signatures from the original source.
 */
/*ARGSUSED*/
static int
/* Trivial success stub. */
{
return (0);
}
/*ARGSUSED*/
static int
/* Operation not supported on this segment type. */
{
return (EINVAL);
}
/*ARGSUSED*/
static int
/* Operation not supported on this segment type. */
{
return (EINVAL);
}
/*ARGSUSED*/
static int
/* Clustering not supported (-1). */
{
return (-1);
}
/*ARGSUSED*/
static int
/* Trivial success stub. */
{
return (0);
}
/*
 * XXPV Hmm. Should we say that mf mapping are "in core?"
 */
/*ARGSUSED*/
static size_t
/*
 * NOTE(review): incore-style op; the loop that fills the vector and the
 * computation of "v" are missing -- "v" is returned uninitialized.
 */
{
size_t v;
*vec++ = 1;
return (v);
}
/*ARGSUSED*/
static int
/* Trivial success stub. */
{
return (0);
}
static int
/*
 * NOTE(review): the do/while construct below is syntactically broken --
 * the loop body and trailing "while" clause were evidently dropped.
 * TODO restore.
 */
{
if (pgno != 0) {
do
while (pgno != 0)
;
}
return (0);
}
/*
 * NOTE(review): more ops stubs with their name/parameter lines stripped.
 * Typical candidates in order: getoffset, gettype, getvp, advise, dump,
 * pagelock, setpagesize, getmemid, getpolicy, capable.  TODO restore.
 */
static u_offset_t
/* NOTE(review): body missing; falls off the end of a value-returning function. */
{
}
/*ARGSUSED*/
static int
/* These segments are always MAP_SHARED. */
{
return (MAP_SHARED);
}
/*ARGSUSED1*/
static int
/* Trivial success stub. */
{
return (0);
}
/*ARGSUSED*/
static int
/* Trivial success stub. */
{
return (0);
}
/*ARGSUSED*/
static void
/* No-op (likely the dump op). */
{}
/*ARGSUSED*/
static int
/* Page locking not supported. */
{
return (ENOTSUP);
}
/*ARGSUSED*/
static int
/* Large-page resize not supported. */
{
return (ENOTSUP);
}
static int
/* Trivial success stub (likely getmemid). */
{
return (0);
}
/*ARGSUSED*/
static lgrp_mem_policy_info_t *
/* No lgroup memory policy for these segments. */
{
return (NULL);
}
/*ARGSUSED*/
static int
/* No special capabilities. */
{
return (0);
}
/*
 * Add a set of contiguous foreign MFNs to the segment. soft-locking them. The
 * pre-faulting is necessary due to live migration; in particular we must
 * return an error in response to IOCTL_PRIVCMD_MMAPBATCH rather than faulting
 * later on a bad MFN. Whilst this isn't necessary for the other MMAP
 * ioctl()s, we lock them too, as they should be transitory.
 */
int
/*
 * NOTE(review): presumably segmf_add_mfns.  The signature line, the range
 * validation guarding EINVAL, the domid checks guarding EACCES, the
 * per-page map-entry setup inside the first loop, the fault call that
 * sets "fc", and the unwind statements in the failure loop are all
 * missing -- truncated.  TODO restore.
 */
{
pgcnt_t i;
int error = 0;
return (EINVAL);
/*
 * Don't mess with dom0.
 *
 * Only allow the domid to be set once for the segment.
 * After that attempts to add mappings to this segment for
 * other domains explicitly fails.
 */
return (EACCES);
goto out;
}
for (i = 0; i < pgcnt; i++) {
}
if (fc != 0) {
for (i = 0; i < pgcnt; i++) {
}
}
out:
return (error);
}
int
/*
 * NOTE(review): presumably segmf_add_grefs -- records a caller-supplied
 * array of grant references against the segment and pre-faults them.
 * The signature line, the range/domid validation conditions, the
 * per-entry gref storage inside the first loop, the fault call that sets
 * "fc", and the failure-unwind statements are all missing -- truncated.
 * TODO restore.
 */
{
uint_t i;
int e;
return (EINVAL);
/*
 * Don't mess with dom0.
 *
 * Only allow the domid to be set once for the segment.
 * After that attempts to add mappings to this segment for
 * other domains explicitly fails.
 */
return (EACCES);
e = 0;
e = EINVAL;
goto out;
}
/* store away the grefs passed in then fault in the pages */
for (i = 0; i < cnt; i++) {
if (flags & SEGMF_GREF_WR) {
/* NOTE(review): write-flag handling statements missing here. */
}
}
if (fc != 0) {
for (i = 0; i < cnt; i++) {
}
}
out:
return (e);
}
int
/*
 * NOTE(review): presumably segmf_release_grefs -- unmaps previously
 * mapped grant references.  The signature line, the "mapop" array
 * declaration, the per-entry handle/host_addr setup and empty/mapped
 * checks inside the loop, and the hypervisor unmap call that sets "e"
 * are all missing -- truncated.  TODO restore.
 */
{
long e;
int i;
int n;
if (cnt > SEGMF_MAX_GREFS) {
return (-1);
}
/*
 * for each entry which isn't empty and is currently mapped,
 * set it up for an unmap then mark them empty.
 */
n = 0;
for (i = 0; i < cnt; i++) {
mapop[n].dev_bus_addr = 0;
n++;
}
}
/* if there's nothing to unmap, just return */
if (n == 0) {
return (0);
}
if (e != 0) {
return (-1);
}
return (0);
}
void
/*
 * NOTE(review): public void helper with its name/parameter line and body
 * missing -- cannot be identified from this chunk.  TODO restore.
 */
{
}
static int
/*
 * NOTE(review): presumably the gref page-mapping helper -- asks the
 * hypervisor to install grant mappings by handing it the MA of each PTE.
 * The signature line, the per-page mapop setup inside the first loop,
 * the hypervisor call and its failure condition (guarding the EFAULT
 * return), and the handle-saving statements in the final loop are all
 * missing -- truncated.  TODO restore.
 */
{
int e;
int i;
if (cnt > SEGMF_MAX_GREFS) {
return (-1);
}
/*
 * map in each page passed in into the user apps AS. We do this by
 * passing the MA of the actual pte of the mapping to the hypervisor.
 */
for (i = 0; i < cnt; i++) {
}
}
return (FC_MAKE_ERR(EFAULT));
}
/* save handle for segmf_release_grefs() and mark it as mapped */
for (i = 0; i < cnt; i++) {
}
return (0);
}
/*
 * NOTE(review): tail of the segment ops vector (struct seg_ops) -- the
 * declaration line and all earlier entries are missing from this chunk.
 */
(int (*)())segmf_kluster,
};