vm_seg.c revision 0209230bf1261579beab4f55226bb509e6b850cb
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
/*
* University Copyright- Copyright (c) 1982, 1986, 1988
* The Regents of the University of California
* All Rights Reserved
*
* University Acknowledgment- Portions of this document are derived from
* software developed by the University of California, Berkeley, and its
* contributors.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* VM - segment management.
*/
#include <sys/types.h>
#include <sys/inttypes.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/kstat.h>
#include <sys/vmsystm.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/callb.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/mem_config.h>

#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kmem.h>
#include <vm/seg_spt.h>
#include <vm/seg_vn.h>
/*
 * kstats for segment advise
 */
segadvstat_t segadvstat = {
	{ "MADV_FREE_hit",	KSTAT_DATA_ULONG },
	{ "MADV_FREE_miss",	KSTAT_DATA_ULONG },
};

kstat_named_t *segadvstat_ptr = (kstat_named_t *)&segadvstat;
uint_t segadvstat_ndata = sizeof (segadvstat) / sizeof (kstat_named_t);
/* #define	PDEBUG */
#if defined(DEBUG) && defined(PDEBUG)
int pdebug = 0;
#else
#define pdebug 0
#endif /* PDEBUG */
#define	P_HASHMASK		(p_hashsize - 1)
#define	P_BASESHIFT		(6)
/*
* entry in the segment page cache
*/
struct seg_pcache {
	struct seg_pcache *p_hnext;	/* list for hashed blocks */
	struct seg_pcache *p_hprev;
	int p_active;			/* active count */
	int p_ref;			/* ref bit */
	size_t p_len;			/* segment length */
	caddr_t p_addr;			/* base address */
	struct seg *p_seg;		/* segment */
	struct page **p_pp;		/* pp shadow list */
	enum seg_rw p_rw;		/* rw */
	uint_t p_flags;			/* bit flags */
	int (*p_callback)(struct seg *, caddr_t, size_t,
	    struct page **, enum seg_rw);
};
struct seg_phash {
	struct seg_pcache *p_hnext;	/* list for hashed blocks */
	struct seg_pcache *p_hprev;
	int p_qlen;			/* Q length */
	kmutex_t p_hmutex;		/* protects hash bucket */
};
static int seg_preap_time = 20;	/* reclaim every 20 secs */
static int seg_pmaxqlen = 5;	/* max Q length in hash list */
static int seg_ppcount = 5;	/* max # of purges per reclaim interval */
static int seg_plazy = 1;	/* if 1, pages are cached after pageunlock */
static pgcnt_t seg_pwindow;	/* max # of pages that can be cached */
static pgcnt_t seg_plocked;	/* # of pages which are cached by pagelock */
static pgcnt_t seg_plocked_window; /* # pages from window */
int seg_preapahead;

static uint_t seg_pdisable = 0;	/* if not 0, caching temporarily disabled */

static int seg_pupdate_active = 1;	/* background reclaim thread */
static clock_t seg_preap_interval;	/* reap interval in ticks */

static ksema_t seg_psaync_sem;	/* sema for reclaim thread */
static struct seg_phash *p_hashtab;
static int p_hashsize = 0;
#define	p_hash(seg) \
	(P_HASHMASK & \
	((uintptr_t)(seg) >> P_BASESHIFT))
/*
* lookup an address range in pagelock cache. Return shadow list
* and bump up active count.
*/
struct page **
seg_plookup(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
{
struct seg_pcache *pcp;
/*
* Skip pagelock cache, while DR is in progress or
* seg_pcache is off.
*/
if (seg_pdisable || seg_plazy == 0) {
return (NULL);
}
PPRINT5("seg_plookup hit: seg %p, addr %p, "
"len %lx, count %d, pplist %p \n",
}
}
PPRINT("seg_plookup miss:\n");
return (NULL);
}
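/*
 * Illustrative sketch (hypothetical driver code, not part of this file's
 * interfaces beyond seg_plookup() itself): a segment driver's pagelock
 * entry point consults the cache first and only builds a shadow list on
 * a miss. xseg_pagelock_fastpath and its slow-path return are invented
 * names for the example.
 */
#ifdef	SEG_EXAMPLES
static int
xseg_pagelock_fastpath(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum seg_rw rw)
{
	struct page **pplist;

	/* fast path: reuse a cached shadow list; bumps the active count */
	if ((pplist = seg_plookup(seg, addr, len, rw)) != NULL) {
		*ppp = pplist;
		return (0);
	}
	/* miss: the driver must lock the pages and build the list itself */
	return (ENOTSUP);
}
#endif	/* SEG_EXAMPLES */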
/*
* mark address range inactive. If the cache is off or the address
* range is not in the cache we call the segment driver to reclaim
* the pages. Otherwise just decrement active count and set ref bit.
*/
void
seg_pinactive(struct seg *seg, caddr_t addr, size_t len, struct page **pp,
	enum seg_rw rw, int (*callback)(struct seg *, caddr_t, size_t,
	struct page **, enum seg_rw))
{
struct seg_pcache *pcp;
if (seg_plazy == 0) {
goto out;
}
int npages;
/* free the entry */
seg_plocked -= npages;
}
goto out;
}
return;
}
}
out:
	/* not cached (or cache off): reclaim through the segment driver */
	(void) (*callback)(seg, addr, len, pp, rw);
}
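/*
 * Illustrative sketch (hypothetical): the matching pageunlock path hands
 * the shadow list back through seg_pinactive(). The callback must be the
 * same function the driver passed to seg_pinsert(), so the cache can call
 * back into the driver when it finally frees the entry. xseg_reclaim is
 * an invented name, defined in the sketch after seg_pinsert() below.
 */
#ifdef	SEG_EXAMPLES
static int xseg_reclaim(struct seg *, caddr_t, size_t, struct page **,
    enum seg_rw);

static void
xseg_pageunlock(struct seg *seg, caddr_t addr, size_t len,
    struct page **pplist, enum seg_rw rw)
{
	/* drops the active count; pages are reclaimed now or lazily */
	seg_pinactive(seg, addr, len, pplist, rw, xseg_reclaim);
}
#endif	/* SEG_EXAMPLES */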
/*
 * seg_pinsert_check() is used by segment drivers to predict whether
* a call to seg_pinsert will fail and thereby avoid wasteful pre-processing.
*/
int
seg_pinsert_check(struct seg *seg, size_t len, uint_t flags)
{
if (seg_plazy == 0) {
return (SEGP_FAIL);
}
if (seg_pdisable != 0) {
return (SEGP_FAIL);
}
return (SEGP_FAIL);
}
/*
* If the SEGP_FORCE_WIRED flag is set,
* we skip the check for seg_pwindow.
*/
	if ((flags & SEGP_FORCE_WIRED) == 0) {
		if (btopr(len) + seg_plocked_window > seg_pwindow) {
			return (SEGP_FAIL);
		}
	}
return (SEGP_SUCCESS);
}
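/*
 * Illustrative sketch (hypothetical): a driver calls seg_pinsert_check()
 * before doing the work of building a shadow list, so a disabled or full
 * cache is detected cheaply up front.
 */
#ifdef	SEG_EXAMPLES
static int
xseg_should_cache(struct seg *seg, size_t len)
{
	/* SEGP_FAIL here predicts that seg_pinsert() would also fail */
	return (seg_pinsert_check(seg, len, 0) == SEGP_SUCCESS);
}
#endif	/* SEG_EXAMPLES */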
/*
* insert address range with shadow list into pagelock cache. If
* the cache is off or caching is temporarily disabled or the allowed
* 'window' is exceeded - return SEGP_FAIL. Otherwise return
* SEGP_SUCCESS.
*/
int
seg_pinsert(struct seg *seg, caddr_t addr, size_t len, struct page **pp,
	enum seg_rw rw, uint_t flags, int (*callback)(struct seg *, caddr_t,
	size_t, struct page **, enum seg_rw))
{
	struct seg_pcache *pcp;
	pgcnt_t npages = btop(len);
if (seg_plazy == 0) {
return (SEGP_FAIL);
}
if (seg_pdisable != 0) {
return (SEGP_FAIL);
}
return (SEGP_FAIL);
}
/*
* If the SEGP_FORCE_WIRED flag is set,
* we skip the check for seg_pwindow.
*/
if ((flags & SEGP_FORCE_WIRED) == 0) {
if (seg_plocked_window > seg_pwindow) {
return (SEGP_FAIL);
}
		seg_plocked_window += npages;
	}
seg_plocked += npages;
PPRINT4("seg_pinsert: seg %p, addr %p, len %lx, pplist %p\n",
return (SEGP_SUCCESS);
}
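/*
 * Illustrative sketch (hypothetical): the reclaim callback a driver
 * registers with seg_pinsert(). The cache invokes it, possibly much
 * later, to return the shadow list; the empty body stands in for the
 * driver's real page-unlock work.
 */
#ifdef	SEG_EXAMPLES
/*ARGSUSED*/
static int
xseg_reclaim(struct seg *seg, caddr_t addr, size_t len,
    struct page **pplist, enum seg_rw rw)
{
	/* unlock the pages and free the shadow list here */
	return (0);
}

static void
xseg_cache_shadow_list(struct seg *seg, caddr_t addr, size_t len,
    struct page **pplist, enum seg_rw rw)
{
	/*
	 * SEGP_FAIL just means the list was not cached; the pages stay
	 * locked and are reclaimed via xseg_reclaim() at unlock time.
	 */
	(void) seg_pinsert(seg, addr, len, pplist, rw, 0, xseg_reclaim);
}
#endif	/* SEG_EXAMPLES */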
/*
* purge all entries from the pagelock cache if not active
* and not recently used. Drop all locks and call through
* the address space into the segment driver to reclaim
* the pages. This makes sure we get the address space
* and segment driver locking right.
*/
static void
seg_ppurge_all(int force)
{
	struct seg_pcache *pcp;
	struct seg_pcache *delcallb_list = NULL;
	int purge_count = 0;
	pgcnt_t npages = 0;
	pgcnt_t npages_window = 0;
/*
 * if the cache is off or empty, return
*/
if (seg_plazy == 0 || seg_plocked == 0) {
return;
}
/*
 * While 'force' is set, seg_pasync_thread is not
 * throttled. This is to speed up flushing of seg_pcache
 * in preparation for DR.
 *
 * In the normal case, when 'force' is not set, we throttle
 * seg_pasync_thread so that we don't spend all our time
 * purging the cache.
*/
/*
* purge entries which are not active and
* have not been used recently and
* have the SEGP_ASYNC_FLUSH flag.
*
* In the 'force' case, we ignore the
* SEGP_ASYNC_FLUSH flag.
*/
			if (force)
				pcp->p_ref = 0;
/*
* try to get the readers lock on the address
* space before taking out the cache element.
* This ensures as_pagereclaim() can actually
* call through the address space and free
* the pages. If we don't get the lock, just
* skip this entry. The pages will be reclaimed
* by the segment driver at unmap time.
*/
			if (AS_LOCK_TRYENTER(pcp->p_seg->s_as,
			    &pcp->p_seg->s_as->a_lock, RW_READER)) {
				pcp->p_hprev = delcallb_list;
				delcallb_list = pcp;
purge_count++;
}
} else {
}
}
		if (!force && purge_count > seg_ppcount) {
			break;
		}
	}
/*
* run the delayed callback list. We don't want to hold the
* cache lock during a call through the address space.
*/
while (delcallb_list != NULL) {
pcp = delcallb_list;
PPRINT4("seg_ppurge_all: purge seg %p, addr %p, len %lx, "
}
}
	seg_plocked -= npages;
	seg_plocked_window -= npages_window;
}
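/*
 * Illustrative sketch (hypothetical helper, not the code above): the
 * aging policy just described is a second-chance scheme over the ref bit.
 */
#ifdef	SEG_EXAMPLES
static int
xseg_purge_eligible(struct seg_pcache *pcp, int force)
{
	if (pcp->p_active != 0)
		return (0);		/* in use; never purge */
	if (force)
		return (1);		/* DR flush ignores recency */
	if (pcp->p_ref) {
		pcp->p_ref = 0;		/* recently used: second chance */
		return (0);
	}
	return (1);			/* idle since the last pass */
}
#endif	/* SEG_EXAMPLES */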
/*
 * Remove cached pages for segment(s) entries from hashtable. The
 * segments are identified by a given client's callback function.
 * This is useful for multiple seg's cached on behalf of a dummy
 * segment (ISM/DISM) by segspt_dismpagelock().
 *
 * The client's callback function may return status indicating that
 * the last seg's entry has been purged. In such a case seg_ppurge_seg()
 * stops searching the hashtable and exits. Otherwise all hashtable
 * entries are scanned.
*/
void
seg_ppurge_seg(int (*callback)(struct seg *, caddr_t, size_t,
    struct page **, enum seg_rw))
{
	pgcnt_t npages = 0;
	pgcnt_t npages_window = 0;
	int done = 0;
/*
 * if the cache is off or empty, return
*/
if (seg_plazy == 0 || seg_plocked == 0) {
return;
}
seg_pdisable++;
/*
* purge entries which are not active
*/
done = 1;
}
}
}
if (done)
break;
}
if (done)
break;
}
seg_pdisable--;
	seg_plocked -= npages;
	seg_plocked_window -= npages_window;
}
/*
 * purge all entries for a given segment. Since we
 * call back into the segment driver directly for page
 * reclaim, the caller needs to hold the right locks.
*/
void
seg_ppurge(struct seg *seg)
{
	struct seg_pcache *pcp;
	struct seg_pcache *delcallb_list = NULL;
	pgcnt_t npages = 0;
	pgcnt_t npages_window = 0;
if (seg_plazy == 0) {
return;
}
break;
}
				pcp->p_hprev = delcallb_list;
				delcallb_list = pcp;
}
}
while (delcallb_list != NULL) {
pcp = delcallb_list;
PPRINT4("seg_ppurge: purge seg %p, addr %p, len %lx, "
}
}
	seg_plocked -= npages;
	seg_plocked_window -= npages_window;
}
static void seg_pinit_mem_config(void);
/*
 * set up the pagelock cache
*/
static void
seg_pinit(void)
{
	struct seg_phash *hp;
	int i;
	uint_t physmegs;

	physmegs = physmem >> (20 - PAGESHIFT);
if (p_hashsize == 0) {
/*
* Choose p_hashsize based on physmem.
*/
		if (physmegs < 64) {
			p_hashsize = 64;
		} else if (physmegs < 1024) {
			p_hashsize = 1024;
		} else if (physmegs < 10 * 1024) {
			p_hashsize = 8192;
			seg_pmaxqlen = 16;
		} else {
			p_hashsize = 128 * 1024;
			seg_pmaxqlen = 128;
		}
}
	p_hashtab = kmem_zalloc(p_hashsize * sizeof (struct seg_phash),
	    KM_SLEEP);
	for (i = 0; i < p_hashsize; i++) {
		hp = &p_hashtab[i];
		hp->p_hnext = (struct seg_pcache *)hp;
		hp->p_hprev = (struct seg_pcache *)hp;
		mutex_init(&hp->p_hmutex, NULL, MUTEX_DEFAULT, NULL);
	}
	if (seg_pwindow == 0) {
		if (physmegs < 24) {
			/* don't use cache */
			seg_plazy = 0;
		} else if (physmegs < 64) {
			seg_pwindow = physmem >> 5;	/* 3% of memory */
		} else if (physmegs < 10 * 1024) {
			seg_pwindow = physmem >> 3;	/* 12% of memory */
		} else {
			seg_pwindow = physmem >> 1;
		}
	}
	seg_pinit_mem_config();
}
/*
* called by pageout if memory is low
*/
void
seg_preap(void)
{
/*
 * if the cache is off or empty, return
*/
if (seg_plocked == 0 || seg_plazy == 0) {
return;
}
	/* wake the async reclaim thread */
	sema_v(&seg_psaync_sem);
}
static void seg_pupdate(void *);
/*
 * runs as a background thread and reclaims pagelock
 * pages which have not been used recently
*/
void
seg_pasync_thread(void)
{
	callb_cpr_t cpr_info;
	kmutex_t pasync_lock;	/* just for CPR stuff */

	mutex_init(&pasync_lock, NULL, MUTEX_DEFAULT, NULL);
	CALLB_CPR_INIT(&cpr_info, &pasync_lock,
	    callb_generic_cpr, "seg_pasync");
	if (seg_preap_interval == 0) {
		seg_preap_interval = seg_preap_time * hz;
	} else {
seg_preap_interval *= hz;
}
	if (seg_plazy && seg_pupdate_active) {
		(void) timeout(seg_pupdate, NULL, seg_preap_interval);
	}
	for (;;) {
		/* wait to be woken by seg_preap() or the update timeout */
		sema_p(&seg_psaync_sem);
		seg_ppurge_all(0);
	}
}
static void
seg_pupdate(void *dummy)
{
	sema_v(&seg_psaync_sem);

	if (seg_plazy && seg_pupdate_active) {
		(void) timeout(seg_pupdate, dummy, seg_preap_interval);
	}
}
static struct kmem_cache *seg_cache;
/*
* Initialize segment management data structures.
*/
void
seg_init(void)
{
	kstat_t *ksp;

	seg_cache = kmem_cache_create("seg_cache", sizeof (struct seg),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	ksp = kstat_create("unix", 0, "segadvstat", "vm", KSTAT_TYPE_NAMED,
	    segadvstat_ndata, KSTAT_FLAG_VIRTUAL);
	if (ksp) {
		ksp->ks_data = (void *)segadvstat_ptr;
		kstat_install(ksp);
	}

	seg_pinit();
}
/*
* Allocate a segment to cover [base, base+size]
* and attach it to the specified address space.
*/
struct seg *
seg_alloc(struct as *as, caddr_t base, size_t size)
{
	struct seg *new;

	new = kmem_cache_alloc(seg_cache, KM_SLEEP);
	new->s_ops = NULL;
	new->s_data = NULL;
	if (seg_attach(as, base, size, new) < 0) {
		kmem_cache_free(seg_cache, new);
		return (NULL);
	}
	/* caller must fill in ops, data */
	return (new);
}
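/*
 * Illustrative sketch (hypothetical, modeled on as_map()-style callers):
 * allocate the segment, let a driver create routine fill in s_ops and
 * s_data, and seg_free() on failure since no mapping was established.
 * xseg_map and xseg_create are invented names.
 */
#ifdef	SEG_EXAMPLES
static int
xseg_map(struct as *as, caddr_t addr, size_t size,
    int (*xseg_create)(struct seg *))
{
	struct seg *seg;
	int error;

	seg = seg_alloc(as, addr, size);
	if (seg == NULL)
		return (ENOMEM);
	if ((error = xseg_create(seg)) != 0)
		seg_free(seg);	/* driver never attached; free directly */
	return (error);
}
#endif	/* SEG_EXAMPLES */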
/*
* Attach a segment to the address space. Used by seg_alloc()
* and for kernel startup to attach to static segments.
*/
int
seg_attach(struct as *as, caddr_t base, size_t size, struct seg *seg)
{
	seg->s_as = as;
	seg->s_base = base;
	seg->s_size = size;

	/*
	 * as_addseg() will add the segment at the appropriate point
	 * in the list. It will return -1 if there is overlap with
	 * an already existing segment.
	 */
	return (as_addseg(as, seg));
}
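/*
 * Illustrative sketch (hypothetical): kernel startup attaches statically
 * allocated segments directly, bypassing seg_alloc(); a negative return
 * means the range overlaps an existing segment.
 */
#ifdef	SEG_EXAMPLES
static struct seg xseg_static;

static void
xseg_attach_static(caddr_t base, size_t size)
{
	if (seg_attach(&kas, base, size, &xseg_static) < 0)
		panic("xseg_attach_static: overlapping segment");
}
#endif	/* SEG_EXAMPLES */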
/*
* Unmap a segment and free it from its associated address space.
* This should be called by anybody who's finished with a whole segment's
 * mapping. Just calls SEGOP_UNMAP() on the whole mapping. It is the
 * responsibility of the segment driver to unlink the segment
* from the address space, and to free public and private data structures
* associated with the segment. (This is typically done by a call to
* seg_free()).
*/
void
seg_unmap(struct seg *seg)
{
#ifdef DEBUG
	int ret;
#endif /* DEBUG */

	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	/* Shouldn't have called seg_unmap if mapping isn't yet established */
	ASSERT(seg->s_data != NULL);

	/* Unmap the whole mapping */
#ifdef DEBUG
	ret = SEGOP_UNMAP(seg, seg->s_base, seg->s_size);
	ASSERT(ret == 0);
#else
	(void) SEGOP_UNMAP(seg, seg->s_base, seg->s_size);
#endif /* DEBUG */
}
/*
* Free the segment from its associated as. This should only be called
* if a mapping to the segment has not yet been established (e.g., if
* an error occurs in the middle of doing an as_map when the segment
* has already been partially set up) or if it has already been deleted
* (e.g., from a segment driver unmap routine if the unmap applies to the
* entire segment). If the mapping is currently set up then seg_unmap() should
* be called instead.
*/
void
seg_free(struct seg *seg)
{
	struct as *as = seg->s_as;
	struct seg *tseg = as_removeseg(as, seg);

	ASSERT(tseg == seg);

	/*
	 * If the segment private data field is NULL,
	 * then segment driver is not attached yet.
	 */
	if (seg->s_data != NULL)
		SEGOP_FREE(seg);

	kmem_cache_free(seg_cache, seg);
}
/*ARGSUSED*/
static void
seg_p_mem_config_post_add(
	void *arg,
	pgcnt_t delta_pages)
{
/* Nothing to do. */
}
/*
* Attempt to purge seg_pcache. May need to return before this has
* completed to allow other pre_del callbacks to unlock pages. This is
* ok because:
* 1) The seg_pdisable flag has been set so at least we won't
 * cache any more locks and the locks we couldn't purge
* will not be held if they do get released by a subsequent
* pre-delete callback.
*
* 2) The rest of the memory delete thread processing does not
* depend on the changes made in this pre-delete callback. No
* panics will result, the worst that will happen is that the
* DR code will timeout and cancel the delete.
*/
/*ARGSUSED*/
static int
seg_p_mem_config_pre_del(
	void *arg,
	pgcnt_t delta_pages)
{
	pgcnt_t old_plocked;
	int stall_count = 0;
seg_pdisable++;
ASSERT(seg_pdisable != 0);
/*
* Attempt to empty the cache. Terminate if seg_plocked does not
* diminish with SEGP_STALL_THRESHOLD consecutive attempts.
*/
	while (seg_plocked != 0) {
		old_plocked = seg_plocked;
		seg_ppurge_all(1);
if (seg_plocked == old_plocked) {
			if (stall_count++ > SEGP_STALL_THRESHOLD) {
				cmn_err(CE_NOTE, "!Pre-delete couldn't purge"
				    " pagelock cache - continuing");
				break;
}
} else
stall_count = 0;
		if (seg_plocked != 0)
			delay(hz / SEGP_PREDEL_DELAY_FACTOR);
	}
return (0);
}
/*ARGSUSED*/
static void
seg_p_mem_config_post_del(
	void *arg,
	pgcnt_t delta_pages,
	int cancelled)
{
ASSERT(seg_pdisable != 0);
seg_pdisable--;
}
static kphysm_setup_vector_t seg_p_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	seg_p_mem_config_post_add,
	seg_p_mem_config_pre_del,
	seg_p_mem_config_post_del,
};
static void
seg_pinit_mem_config(void)
{
	int ret;

	ret = kphysm_setup_func_register(&seg_p_mem_config_vec, (void *)NULL);
	/*
	 * Want to catch this in the debug kernel. At run time, if the
	 * callbacks don't get run all will be OK as the disable just makes
	 * it more likely that the pages can be collected.
	 */
	ASSERT(ret == 0);
}
extern struct seg_ops segspt_shmops;
/*
* Verify that segment is not a shared anonymous segment which reserves
 * swap. zone.max-swap accounting (zone->zone_max_swap) cannot be transferred
* from one zone to another if any segments are shared. This is because the
* last process to exit will credit the swap reservation. This could lead
* to the swap being reserved by one zone, and credited to another.
*/
boolean_t
seg_can_change_zones(struct seg *seg)
{
	struct segvn_data *svd;

	if (seg->s_ops == &segspt_shmops)
		return (B_FALSE);

	if (seg->s_ops == &segvn_ops) {
		svd = (struct segvn_data *)seg->s_data;
		if (svd->vp == NULL && svd->amp != NULL &&
		    svd->amp->swresv > 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}
/*
* Return swap reserved by a segment backing a private mapping.
*/
size_t
seg_swresv(struct seg *seg)
{
	struct segvn_data *svd;
	size_t swap = 0;

	if (seg->s_ops == &segvn_ops) {
		svd = (struct segvn_data *)seg->s_data;
		if (svd->type == MAP_PRIVATE && svd->swresv > 0)
			swap = svd->swresv;
	}
	return (swap);
}