/* swap_vnops.c, revision c6939658adb0a356a77bc28f7df252ceb4a8f6cc */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/cred.h>
#include <sys/debug.h>
#include <sys/errno.h>
#include <sys/proc.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/swap.h>
#include <sys/cmn_err.h>
#include <sys/sysmacros.h>
#include <sys/vtrace.h>
#include <vm/anon.h>
#include <vm/hat.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <fs/fs_subr.h>
#include <sys/fs/swapnode.h>
/*
* Define the routines within this file.
*/
static int      swap_getpage(struct vnode *vp, offset_t off, size_t len,
    uint_t *protp, page_t *pl[], size_t plsz, struct seg *seg,
    caddr_t addr, enum seg_rw rw, struct cred *cr);

static int      swap_putpage(struct vnode *vp, offset_t off, size_t len,
    int flags, struct cred *cr);

static void     swap_inactive(struct vnode *vp, struct cred *cr);

static void     swap_dispose(vnode_t *vp, page_t *pp, int fl, int dn,
    cred_t *cr);

static int      swap_getapage(struct vnode *vp, u_offset_t off, size_t len,
    uint_t *protp, page_t *pl[], size_t plsz, struct seg *seg,
    caddr_t addr, enum seg_rw rw, struct cred *cr);

int     swap_getconpage(struct vnode *vp, u_offset_t off, size_t len,
    uint_t *protp, page_t *pl[], size_t plsz, page_t *conpp,
    uint_t *pszc, spgcnt_t *nreloc, struct seg *seg, caddr_t addr,
    enum seg_rw rw, struct cred *cr);

static int      swap_putapage(struct vnode *vp, page_t *pp, u_offset_t *offp,
    size_t *lenp, int flags, struct cred *cr);

const fs_operation_def_t swap_vnodeops_template[] = {
        VOPNAME_INACTIVE, (fs_generic_func_p) swap_inactive,
        VOPNAME_GETPAGE, swap_getpage,
        VOPNAME_PUTPAGE, swap_putpage,
        VOPNAME_DISPOSE, (fs_generic_func_p) swap_dispose,
        NULL, NULL
};
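#if 0
/*
 * Illustrative sketch only (not part of this file): at swapfs
 * initialization the template above is compiled into a vnodeops vector
 * with vn_make_ops(), and the resulting "swap_vnodeops" is what swapfs
 * vnodes point at.  The wrapper function name here is hypothetical; in
 * the real tree the call sits in swapfs's init path.
 */
static struct vnodeops *swap_vnodeops;

static void
swap_vnodeops_init(void)
{
        if (vn_make_ops("swapfs", swap_vnodeops_template,
            &swap_vnodeops) != 0)
                cmn_err(CE_PANIC, "swapfs: bad vnode ops template");
}
#endif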
/* ARGSUSED */
static void
swap_inactive(
        struct vnode *vp,
        struct cred *cr)
{
        SWAPFS_PRINT(SWAP_VOPS, "swap_inactive: vp %x\n", vp, 0, 0, 0, 0);
}
/*
 * Return all the pages from [off..off+len] in given file
 */
static int
swap_getpage(
        struct vnode *vp,
        offset_t off,
        size_t len,
        uint_t *protp,
        page_t *pl[],
        size_t plsz,
        struct seg *seg,
        caddr_t addr,
        enum seg_rw rw,
        struct cred *cr)
{
        int err;

        SWAPFS_PRINT(SWAP_VOPS, "swap_getpage: vp %p, off %llx, len %lx\n",
            (void *)vp, off, len, 0, 0);

        TRACE_3(TR_FAC_SWAPFS, TR_SWAPFS_GETPAGE,
            "swapfs getpage:vp %p off %llx len %ld",
            (void *)vp, off, len);

        if (len <= PAGESIZE) {
                err = swap_getapage(vp, (u_offset_t)off, len, protp, pl,
                    plsz, seg, addr, rw, cr);
        } else {
                err = pvn_getpages(swap_getapage, vp, (u_offset_t)off, len,
                    protp, pl, plsz, seg, addr, rw, cr);
        }

        return (err);
}
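#if 0
/*
 * Illustrative sketch only (hypothetical function name): anonymous
 * memory reaches swap_getpage() through the VOP layer.  Reading one
 * anon page is essentially:
 */
static int
anon_getpage_sketch(struct anon *ap, uint_t *protp, page_t *pl[],
    size_t plsz, struct seg *seg, caddr_t addr, enum seg_rw rw,
    struct cred *cred)
{
        /* the swapfs vnode/offset pair names the anonymous page */
        return (VOP_GETPAGE(ap->an_vp, (offset_t)ap->an_off, PAGESIZE,
            protp, pl, plsz, seg, addr, rw, cred));
}
#endif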
/*
 * Called from pvn_getpages or swap_getpage to get a particular page.
 */
/*ARGSUSED*/
static int
swap_getapage(
        struct vnode *vp,
        u_offset_t off,
        size_t len,
        uint_t *protp,
        page_t *pl[],
        size_t plsz,
        struct seg *seg,
        caddr_t addr,
        enum seg_rw rw,
        struct cred *cr)
{
        struct page *pp, *rpp;
        int flags;
        int err = 0;
        struct vnode *pvp = NULL;
        u_offset_t poff;
        int flag_noreloc;
        se_t lock;
        extern int kcage_on;
        int upgrade = 0;

        SWAPFS_PRINT(SWAP_VOPS, "swap_getapage: vp %p, off %llx, len %lx\n",
            vp, off, len, 0, 0);

        /*
         * Until there is a call-back mechanism to cause SEGKP
         * pages to be unlocked, make them non-relocatable.
         */
        if (SEG_IS_SEGKP(seg))
                flag_noreloc = PG_NORELOC;
        else
                flag_noreloc = 0;

        if (protp != NULL)
                *protp = PROT_ALL;

        lock = (rw == S_CREATE ? SE_EXCL : SE_SHARED);

again:
        if (pp = page_lookup(vp, off, lock)) {
                /*
                 * In very rare instances, a segkp page may have been
                 * relocated outside of the kernel by the kernel cage
                 * due to the window between page_unlock() and
                 * VOP_PUTPAGE() in segkp_unlock().  Due to the
                 * rareness of these occurrences, the solution is to
                 * relocate the page to a P_NORELOC page.
                 */
                if (flag_noreloc != 0) {
                        if (!PP_ISNORELOC(pp) && kcage_on) {
                                /*
                                 * The page requires an exclusive lock
                                 * before it can be relocated.
                                 */
                                if (!PAGE_EXCL(pp)) {
                                        upgrade = 1;
                                        if (!page_tryupgrade(pp)) {
                                                page_unlock(pp);
                                                lock = SE_EXCL;
                                                goto again;
                                        }
                                }

                                if (page_relocate_cage(&pp, &rpp) != 0)
                                        panic("swap_getapage: "
                                            "page_relocate_cage failed");

                                pp = rpp;
                        }
                }

                if (pl) {
                        if (upgrade)
                                page_downgrade(pp);

                        pl[0] = pp;
                        pl[1] = NULL;
                } else {
                        page_unlock(pp);
                }
        } else {
                pp = page_create_va(vp, off, PAGESIZE,
                    PG_WAIT | PG_EXCL | flag_noreloc, seg, addr);
                /*
                 * Someone raced in and created the page after we did the
                 * lookup but before we did the create, so go back and
                 * try to look it up again.
                 */
                if (pp == NULL)
                        goto again;
                if (rw != S_CREATE) {
                        err = swap_getphysname(vp, off, &pvp, &poff);
                        if (pvp) {
                                struct anon *ap;
                                kmutex_t *ahm;

                                flags = (pl == NULL ? B_ASYNC | B_READ :
                                    B_READ);
                                err = VOP_PAGEIO(pvp, pp, poff,
                                    PAGESIZE, flags, cr);

                                if (!err) {
                                        ahm = &anonhash_lock[AH_LOCK(vp, off)];
                                        mutex_enter(ahm);

                                        ap = swap_anon(vp, off);
                                        if (ap == NULL)
                                                panic("swap_getapage: null anon");

                                        /*
                                         * The slot contents are now in
                                         * memory; free the physical slot
                                         * so it can be reused.
                                         */
                                        if (ap->an_pvp == pvp &&
                                            ap->an_poff == poff) {
                                                swap_phys_free(pvp, poff,
                                                    PAGESIZE);
                                                ap->an_pvp = NULL;
                                                ap->an_poff = NULL;
                                                hat_setmod(pp);
                                        }

                                        mutex_exit(ahm);
                                }
                        } else {
                                if (!err)
                                        pagezero(pp, 0, PAGESIZE);

                                /*
                                 * If it's a fault ahead, release page_io_lock
                                 * and SE_EXCL we grabbed in page_create_va
                                 *
                                 * If we are here, we haven't called
                                 * VOP_PAGEIO, so calling
                                 * pvn_read_done(pp, B_READ) below could
                                 * mislead callers into thinking we did i/o.
                                 * Besides, in the async case pvn_read_done()
                                 * should not be called by *getpage().
                                 */
                                if (pl == NULL) {
                                        /*
                                         * swap_getphysname can return an
                                         * error only when we are called
                                         * from swapslot_free, which passes
                                         * a non-NULL pl to VOP_GETPAGE.
                                         */
                                        ASSERT(err == 0);
                                        page_io_unlock(pp);
                                        page_unlock(pp);
                                }
                        }
                }

                ASSERT(pp != NULL);

                if (err && pl)
                        pvn_read_done(pp, B_ERROR);

                if (!err && pl)
                        pvn_plist_init(pp, pl, plsz, off, PAGESIZE, rw);
        }
        TRACE_3(TR_FAC_SWAPFS, TR_SWAPFS_GETAPAGE,
            "swapfs getapage:pp %p vp %p off %llx", pp, vp, off);
        return (err);
}
/*
 * Called from large page anon routines only! This is an ugly hack where
 * the anon layer directly calls into swapfs with a preallocated large page.
 * Another method would have been to change the VOP and add an extra arg for
 * the preallocated large page. This all could be cleaned up later when we
 * solve the anonymous naming problem and no longer need to loop across the
 * VOP in PAGESIZE increments to fill in or initialize a large page as is
 * done today. I think the latter is better since it avoids a change to
 * the VOP interface that could later be avoided.
 */
int
swap_getconpage(
        struct vnode *vp,
        u_offset_t off,
        size_t len,
        uint_t *protp,
        page_t *pl[],
        size_t plsz,
        page_t *conpp,
        uint_t *pszc,
        spgcnt_t *nreloc,
        struct seg *seg,
        caddr_t addr,
        enum seg_rw rw,
        struct cred *cr)
{
        struct page     *pp;
        int             err = 0;
        struct vnode    *pvp = NULL;
        u_offset_t      poff;

        ASSERT(len == PAGESIZE);
        ASSERT(pl != NULL);
        ASSERT(plsz == PAGESIZE);
        ASSERT(nreloc != NULL);
        ASSERT(!SEG_IS_SEGKP(seg)); /* XXX for now not supported */

        /*
         * If we are not using a preallocated page then we know one already
         * exists. So just let the old code handle it.
         */
        if (conpp == NULL) {
                err = swap_getapage(vp, (u_offset_t)off, len, protp, pl, plsz,
                    seg, addr, rw, cr);
                return (err);
        }
        ASSERT(PAGE_EXCL(conpp));

        *nreloc = 0;
        pp = page_lookup_create(vp, off, SE_SHARED, conpp, nreloc, 0);

        /*
         * If existing page is found we may need to relocate.
         */
        if (pp != conpp) {
                if (!PAGE_EXCL(pp) && *nreloc != 0) {
                        page_unlock(pp);
                        err = -1;
                } else if (pszc != NULL && pp->p_szc < conpp->p_szc) {
                        /*
                         * Assumed condition: the existing page belongs to
                         * a smaller large page than the preallocated one;
                         * report its size and let the caller retry.
                         */
                        *pszc = pp->p_szc;
                        page_unlock(pp);
                        err = -2;
                } else {
                        /* the anon layer only hands us large page roots */
                        if (page_pptonum(pp) &
                            (page_get_pagecnt(conpp->p_szc) - 1))
                                cmn_err(CE_PANIC,
                                    "swap_getconpage: no root");
                        pl[0] = pp;
                        pl[1] = NULL;
                }
                return (err);
        }

        /*
         * If page_lookup_create() relocated an existing page into conpp,
         * its contents are already valid and no page io is needed.
         */
        if (*nreloc != 0) {
                return (0);
        }

        /* conpp went in as a brand new page; tell the caller it was used */
        *nreloc = 1;

        /*
         * If necessary do the page io.
         */
        if (rw != S_CREATE) {
                /*
                 * Since we are only called now on behalf of an
                 * address space operation it's impossible for
                 * us to fail, unlike swap_getapage() which
                 * also gets called from swapslot_free().
                 */
                if (swap_getphysname(vp, off, &pvp, &poff)) {
                        cmn_err(CE_PANIC,
                            "swap_getconpage: swap_getphysname failed!");
                }
                if (pvp) {
                        err = VOP_PAGEIO(pvp, pp, poff, PAGESIZE, B_READ, cr);
                } else {
                        pagezero(pp, 0, PAGESIZE);
                }
        }

        /*
         * Normally we would let pvn_read_done() destroy
         * the page on IO error. But since this is a preallocated
         * page we'll let the anon layer handle it.
         */
        page_io_unlock(pp);
        if (err != 0)
                page_hashout(pp, NULL);
        pl[0] = pp;
        pl[1] = NULL;
        return (err);
}
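#if 0
/*
 * Illustrative sketch only (hypothetical names): a large page anon
 * routine hands swap_getconpage() the preallocated large page "conpp"
 * and walks it in PAGESIZE steps, which is exactly the looping the
 * block comment above swap_getconpage() complains about.
 */
static int
anon_fill_largepage_sketch(struct anon **app, pgcnt_t pgcnt, page_t *conpp,
    struct seg *seg, caddr_t addr, struct cred *cred)
{
        page_t *pl[2];
        uint_t pszc;
        spgcnt_t nreloc;
        pgcnt_t i;
        int err;

        for (i = 0; i < pgcnt; i++, addr += PAGESIZE) {
                err = swap_getconpage(app[i]->an_vp, app[i]->an_off,
                    PAGESIZE, NULL, pl, PAGESIZE, conpp, &pszc, &nreloc,
                    seg, addr, S_CREATE, cred);
                if (err != 0)
                        return (err);   /* -1/-2 ask the caller to retry */
        }
        return (0);
}
#endif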
/* Async putpage klustering stuff */
int sw_pending_size;
extern int klustsize;
extern struct async_reqs *sw_getreq();
extern void sw_putreq(struct async_reqs *);
extern void sw_putbackreq(struct async_reqs *);
extern struct async_reqs *sw_getfree();
extern void sw_putfree(struct async_reqs *);

/* pageout scanner push accounting (declared in vm_pageout.c; type assumed) */
extern pgcnt_t pushes;

/* swapfs putpage statistics */
static ulong_t swap_putpagecnt, swap_pagespushed;
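#if 0
/*
 * Illustrative sketch only (assumed queue semantics): producers take a
 * free slot with sw_getfree(), fill it in and queue it with sw_putreq();
 * the async pageout thread later drains requests with sw_getreq() and
 * either retires them (sw_putfree()) or requeues them (sw_putbackreq()).
 */
static void
sw_queue_one_page(struct vnode *vp, u_offset_t off)
{
        struct async_reqs *arg;

        if ((arg = sw_getfree()) == NULL)
                return;         /* no free slot: push synchronously instead */

        arg->a_vp = vp;
        arg->a_off = off;
        arg->a_len = PAGESIZE;
        arg->a_flags = B_ASYNC | B_FREE;
        arg->a_cred = kcred;
        sw_putreq(arg);
}
#endif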
/*
* Flags are composed of {B_INVAL, B_DIRTY, B_FREE, B_DONTNEED}.
* If len == 0, do from off to EOF.
*/
static int swap_nopage = 0; /* Don't do swap_putpage's if set */
/* ARGSUSED */
static int
swap_putpage(
        struct vnode *vp,
        offset_t off,
        size_t len,
        int flags,
        struct cred *cr)
{
        page_t *pp;
        u_offset_t io_off;
        size_t io_len = 0;
        int err = 0;
        struct async_reqs *arg;

        if (swap_nopage)
                return (0);

        ASSERT(vp->v_count != 0);

        SWAPFS_PRINT(SWAP_VOPS,
            "swap_putpage: vp %p, off %llx len %lx, flags %x\n",
            (void *)vp, off, len, flags, 0);

        if (vp->v_flag & VNOMAP)
                return (ENOSYS);

        if (!vn_has_cached_data(vp))
                return (0);

        if (len == 0) {
                if (curproc == proc_pageout)
                        cmn_err(CE_PANIC, "swapfs: pageout can't block");

                /* Search the entire vp list for pages >= off. */
                err = pvn_vplist_dirty(vp, (u_offset_t)off, swap_putapage,
                    flags, cr);
        } else {
                u_offset_t eoff;

                /*
                 * Loop over all offsets in the range [off...off + len]
                 * looking for pages to deal with.
                 */
                eoff = off + len;
                for (io_off = (u_offset_t)off; io_off < eoff;
                    io_off += io_len) {
                        io_len = PAGESIZE;

                        /*
                         * If we run out of async request slots, put the page
                         * now instead of queuing.
                         */
                        if (flags == (B_ASYNC | B_FREE) &&
                            sw_pending_size < klustsize &&
                            (arg = sw_getfree())) {
                                /*
                                 * If we are clustering, we should allow
                                 * pageout to feed us more pages because a
                                 * cluster is counted as a single I/O.
                                 */
                                if (pushes)
                                        pushes--;

                                arg->a_vp = vp;
                                arg->a_off = io_off;
                                arg->a_len = PAGESIZE;
                                arg->a_flags = B_ASYNC | B_FREE;
                                arg->a_cred = kcred;
                                sw_putreq(arg);
                                continue;
                        }

                        /*
                         * If we are not invalidating pages, use the
                         * routine page_lookup_nowait() to prevent
                         * reclaiming them from the free list.
                         */
                        if ((flags & B_INVAL) ||
                            (flags & (B_ASYNC | B_FREE)) == B_FREE)
                                pp = page_lookup(vp, io_off, SE_EXCL);
                        else
                                pp = page_lookup_nowait(vp, io_off,
                                    (flags & B_FREE) ? SE_EXCL : SE_SHARED);

                        if (pp == NULL || pvn_getdirty(pp, flags) == 0)
                                continue;
                        else {
                                err = swap_putapage(vp, pp, &io_off, &io_len,
                                    flags, cr);
                                if (err != 0)
                                        break;
                        }
                }
        }
        /* If invalidating, verify all pages on vnode list are gone. */
        if (len == 0 && (flags & B_INVAL) && vn_has_cached_data(vp)) {
                cmn_err(CE_WARN,
                    "swap_putpage: B_INVAL, pages not gone");
        }
        return (err);
}
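#if 0
/*
 * Illustrative sketch only (hypothetical caller): the pageout scanner
 * pushes dirty anonymous pages with B_ASYNC | B_FREE, the exact flag
 * combination the queuing fast path in swap_putpage() tests for.
 */
static void
pageout_push_sketch(struct vnode *vp, u_offset_t off, struct cred *cred)
{
        (void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
            B_ASYNC | B_FREE, cred);
}
#endif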
/*
 * Write out a single page.
 * For swapfs this means choose a physical swap slot and write the page
 * out using VOP_PAGEIO.
 * In the (B_ASYNC | B_FREE) case we try to find a bunch of other dirty
 * swapfs pages and a bunch of contiguous swap slots, then write them
 * all out in one clustered i/o.
 */
/*ARGSUSED*/
static int
swap_putapage(
        struct vnode *vp,
        page_t *pp,
        u_offset_t *offp,
        size_t *lenp,
        int flags,
        struct cred *cr)
{
        int err;
        u_offset_t klstart = 0;
        struct vnode *klvp = NULL;
        size_t klsz = 0;
        struct vnode *pvp;
        u_offset_t poff, off;
        u_offset_t doff;
        size_t dlen;
        page_t *pplist;
        se_t se;
        struct async_reqs *arg;
        size_t swap_klustsize;

        /*
         * Clear force flag so that p_lckcnt pages are not invalidated.
         */
        flags &= ~B_FORCE;

        /*
         * This check is added for callers who call swap_putpage with len = 0.
         * swap_putpage calls swap_putapage page-by-page via pvn_vplist_dirty.
         * The same queuing must happen here when the caller passes the same
         * B_ASYNC|B_FREE flags.
         */
        if (flags == (B_ASYNC | B_FREE) &&
            sw_pending_size < klustsize && (arg = sw_getfree())) {
                hat_setmod(pp);
                page_io_unlock(pp);
                page_unlock(pp);

                arg->a_vp = vp;
                arg->a_off = pp->p_offset;
                arg->a_len = PAGESIZE;
                arg->a_flags = B_ASYNC | B_FREE;
                arg->a_cred = kcred;
                sw_putreq(arg);
                return (0);
        }

        SWAPFS_PRINT(SWAP_PUTP,
            "swap_putapage: pp %p, vp %p, off %llx, flags %x\n",
            pp, vp, pp->p_offset, flags, 0);

        ASSERT(PAGE_LOCKED(pp));

        off = pp->p_offset;
        doff = off;
        dlen = PAGESIZE;

        if (err = swap_newphysname(vp, off, &doff, &dlen, &pvp, &poff)) {
                err = (flags == (B_ASYNC | B_FREE) ? ENOMEM : 0);
                hat_setmod(pp);
                page_io_unlock(pp);
                page_unlock(pp);
                goto out;
        }

        klvp = pvp;
        klstart = poff;
        pplist = pp;
        /*
         * If this is ASYNC | FREE and we've accumulated a bunch of such
         * pending requests, kluster.
         */
        if (flags == (B_ASYNC | B_FREE))
                swap_klustsize = klustsize;
        else
                swap_klustsize = PAGESIZE;
        se = (flags & B_FREE ? SE_EXCL : SE_SHARED);
        klsz = PAGESIZE;
        while (klsz < swap_klustsize) {
                if ((arg = sw_getreq()) == NULL) {
                        break;
                }
                ASSERT(vn_has_cached_data(arg->a_vp));
                pp = page_lookup_nowait(arg->a_vp, arg->a_off, se);
                if (pp == NULL) {
                        sw_putbackreq(arg);
                        break;
                }
                if (pvn_getdirty(pp, flags | B_DELWRI) == 0) {
                        sw_putfree(arg);
                        continue;
                }
                /* Get new physical backing store for the page */
                doff = arg->a_off;
                dlen = PAGESIZE;
                if (swap_newphysname(arg->a_vp, arg->a_off, &doff, &dlen,
                    &pvp, &poff)) {
                        hat_setmod(pp);
                        page_io_unlock(pp);
                        page_unlock(pp);
                        sw_putbackreq(arg);
                        break;
                }
                /* Try to cluster new physical name with previous ones */
                if (klvp == pvp && poff == klstart + klsz) {
                        klsz += PAGESIZE;
                        page_add(&pplist, pp);
                        pplist = pplist->p_next;
                        sw_putfree(arg);
                } else if (klvp == pvp && poff == klstart - PAGESIZE) {
                        klsz += PAGESIZE;
                        klstart -= PAGESIZE;
                        page_add(&pplist, pp);
                        sw_putfree(arg);
                } else {
                        hat_setmod(pp);
                        page_io_unlock(pp);
                        page_unlock(pp);
                        sw_putbackreq(arg);
                        break;
                }
        }

        err = VOP_PAGEIO(klvp, pplist, klstart, klsz,
            B_WRITE | flags, cr);

        if ((flags & B_ASYNC) == 0)
                pvn_write_done(pplist, ((err) ? B_ERROR : 0) | B_WRITE | flags);

        /* Statistics */
        if (!err) {
                swap_putpagecnt++;
                swap_pagespushed += btop(klsz);
        }
out:
        TRACE_4(TR_FAC_SWAPFS, TR_SWAPFS_PUTAPAGE,
            "swapfs putapage:vp %p klvp %p, klstart %lx, klsz %lx",
            vp, klvp, klstart, klsz);
        if (lenp)
                *lenp = PAGESIZE;
        return (err);
}
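/*
 * Worked example of the klustering above (numbers assumed, 8K pages,
 * klustsize = 56K): up to seven queued pages whose freshly allocated
 * swap slots turn out to be physically contiguous are chained together
 * with page_add() and written by a single VOP_PAGEIO() of klsz bytes,
 * instead of seven separate PAGESIZE writes.  A queued page extends the
 * cluster only if its new slot satisfies one of:
 *
 *      poff == klstart + klsz          (contiguous past the end)
 *      poff == klstart - PAGESIZE      (contiguous before the start)
 */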
static void
swap_dispose(vnode_t *vp, page_t *pp, int fl, int dn, cred_t *cr)
{
        int err;
        u_offset_t off = pp->p_offset;
        vnode_t *pvp;
        u_offset_t poff;

        /*
         * The caller will free/invalidate the large page in one shot
         * instead of one small page at a time.
         */
        if (pp->p_szc != 0) {
                fs_dispose(vp, pp, fl, dn, cr);
                return;
        }

        err = swap_getphysname(vp, off, &pvp, &poff);
        if (!err && pvp != NULL)
                VOP_DISPOSE(pvp, pp, fl, dn, cr);
        else
                fs_dispose(vp, pp, fl, dn, cr);
}