/* vm_dep.h revision 7c478bd95313f5f23a4c958a745db2134aa03244 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* UNIX machine dependent virtual memory support.
*/
#ifndef _VM_DEP_H
#define _VM_DEP_H
#pragma ident "%Z%%M% %I% %E% SMI"
#ifdef __cplusplus
extern "C" {
#endif
#include <vm/hat_sfmmu.h>
#include <sys/archsystm.h>
/*
* Per page size free lists. Allocated dynamically.
*/
/*
 * Memory type indices used to select a free list: RELOC = relocatable
 * pages, NORELOC = pages that must not be relocated (presumably pages
 * in the kernel cage -- see the page_relocate_ready note later in this
 * file; TODO(review) confirm against the mtype consumers).
 */
#define MTYPE_RELOC 0
#define MTYPE_NORELOC 1
/*
* macros to loop through the mtype range - noops for sparc
*/
/* mtype init for page_get_replacement_page */
/*
* Internal PG_ flags.
*/
/*
* PGI mtype flags - should not overlap PGI flags
*/
/*
* couple of locks. There are mutexes for both the page freelist
* and the page cachelist. We want enough locks to make contention
* reasonable, but not too many -- otherwise page_freelist_lock() gets
* so expensive that it becomes the bottleneck!
*/
#define NPC_MUTEX 16
/* Find the bin for the given page if it was of size szc */
/*
 * Small per-page counter element type.  The name suggests "hot page
 * measurement counter", but the definitions that use it were stripped
 * from this chunk -- TODO(review): confirm semantics against the
 * original sun4 vm_dep.h.
 */
typedef char hpmctr_t;
/*
 * NOTE(review): empty conditional block -- its DEBUG/non-DEBUG contents
 * appear to have been removed by extraction.
 */
#ifdef DEBUG
#else
#endif
/*
 * NOTE(review): the page-list-count (plcnt) definitions below are badly
 * truncated by extraction.  Several lines are orphaned `\` macro
 * continuations and stray closing braces, the plcnt_t typedef body is
 * incomplete, and the PLCNT increment/decrement macros have lost their
 * `#define` header lines.  This region does NOT compile as-is and must
 * be restored from the original sun4u vm_dep.h before building.  The
 * code is left byte-identical here; only this note was added.
 */
#ifdef DEBUG
/* page list count */
typedef struct {
struct {
struct {
int plc_mts_colors;
} plc_mt[MAX_MEM_TYPES];
} plcnt_t[MAX_MEM_NODES];
int szc; \
} \
}
#define PLCNT_INIT(base) { \
plc_mts_colors = colors; \
} \
} \
} \
}
cnt); \
if (flags & PG_CACHE_LIST) \
plc_mts_pgcnt, cnt); \
}
if (flags & PG_LIST_ISINIT) \
}
}
#else
#define PLCNT_INIT(base)
}
}
#endif
/*
* get the ecache setsize for the current cpu.
*/
/*
* For sfmmu each larger page is 8 times the size of the previous
* size page.
*/
/*
* The counter base must be per page_counter element to prevent
* races when re-indexing, and the base page size element should
* be aligned on a boundary of the given region size.
*
* We also round up the number of pages spanned by the counters
* for a given region to PC_BASE_ALIGN in certain situations to simplify
* the coding for some non-performance critical routines.
*/
/* External (E$) cache alignment in bytes; exported as L2CACHE_ALIGN. */
extern int ecache_alignsize;
#define L2CACHE_ALIGN ecache_alignsize
/*
 * Page coloring policy selector; the values (0 = vaddr bits, 1 =
 * virtual==paddr, 2 = bin hopping) are documented at the AS_2_BIN
 * comment later in this file.
 */
extern int consistent_coloring;
/*
 * VAC (virtual address cache) geometry -- presumably initialized by the
 * cpu module at startup; TODO(review) confirm the per-variable meanings.
 */
extern uint_t vac_colors_mask;	/* mask of valid VAC color bits */
extern int vac_size;		/* VAC size in bytes -- confirm */
extern int vac_shift;		/* shift from address to VAC color -- confirm */
/*
* Auto large page selection support variables. Some CPU
* implementations may differ from the defaults and will need
* to change these.
*/
/*
 * NOTE(review): per-variable notes below are inferred from the names;
 * the comment above (L115-119 of the original) only says these are
 * CPU-tunable auto-large-page defaults -- confirm against the users.
 */
extern int auto_lpg_tlb_threshold;	/* presumably TLB-miss threshold */
extern int auto_lpg_minszc;		/* minimum page size code considered */
extern int auto_lpg_maxszc;		/* maximum page size code considered */
extern size_t auto_lpg_heap_default;	/* default large-page size: heap */
extern size_t auto_lpg_stack_default;	/* default large-page size: stack */
extern size_t auto_lpg_va_default;	/* default large-page size: anon va */
extern size_t auto_lpg_remap_threshold;	/* presumably bytes before remap */
/*
* AS_2_BIN macro controls the page coloring policy.
* 0 (default) uses various vaddr bits
* 1 virtual=paddr
* 2 bin hopping
*/
/*
 * NOTE(review): this switch is a fragment of the AS_2_BIN macro body
 * (every line ends in a `\` continuation) whose `#define AS_2_BIN(...)`
 * header and several interior lines were lost in extraction.  It does
 * NOT compile as-is; restore the full macro from the original file.
 * Per the policy comment above: default/case 0 computes a bin from
 * vaddr bits (with a slew), case 1 makes virtual == paddr coloring, and
 * case 2 is bin hopping, which advances cnt until the physical color
 * agrees with the VAC color of addr.  Code left byte-identical.
 */
switch (consistent_coloring) { \
default: \
"AS_2_BIN: bad consistent coloring value"); \
/* assume default algorithm -> continue */ \
case 0: { \
int slew = 0; \
\
\
\
break; \
} \
case 1: \
break; \
case 2: { \
/* make sure physical color aligns with vac color */ \
while ((cnt & vac_colors_mask) != \
addr_to_vcolor(addr)) { \
cnt++; \
} \
/* update per as page coloring fields */ \
} \
break; \
} \
} \
/*
* Function to get an ecache color bin: F(as, cnt, vcolor).
* the goal of this function is to:
* - to spread a processes' physical pages across the entire ecache to
* maximize its use.
* - to minimize vac flushes caused when we reuse a physical page on a
* different vac color than it was previously used.
* - to prevent all processes from using the same exact colors and trashing
*   each other.
*
* cnt is a bin ptr kept on a per as basis. As we page_create we increment
* the ptr so we spread out the physical pages to cover the entire ecache.
* The virtual color is made a subset of the physical color in order to
* minimize virtual cache flushing.
* We add in the as to spread out different as. This happens when we
* initialize the start count value.
* sizeof(struct as) is 60 so we shift by 3 to get into the bit range
* that will tend to change. For example, on spitfire based machines
* (vcshft == 1) contiguous as are spread by ~6 bins.
* vcshft provides for proper virtual color alignment.
* In theory cnt should be updated using cas only but if we are off by one
* or 2 it is no big deal.
* We also keep a start value which is used to randomize on what bin we
* start counting when it is time to start another loop. This avoids
* contiguous allocations of ecache size to point to the same bin.
* Why 3? Seems to work ok. Better than 7 or anything larger.
*/
#define PGCLR_LOOPFACTOR 3
/*
* When a bin is empty, and we can't satisfy a color request correctly,
* we scan. If we assume that the programs have reasonable spatial
* behavior, then it will not be a good idea to use the adjacent color.
* Using the adjacent color would result in virtually adjacent addresses
* mapping into the same spot in the cache. So, if we stumble across
* an empty bin, skip a bunch before looking. After the first skip,
* then just look one bin at a time so we don't miss our cache on
* every look. Be sure to check every bin. Page_create() will panic
* if we miss a page.
*
* This also explains the `<=' in the for loops in both page_get_freelist()
* and page_get_cachelist(). Since we checked the target bin, skipped
* a bunch, then continued one at a time, we wind up checking the target bin
* twice to make sure we get all of them bins.
*/
#define BIN_STEP 20
#ifdef VM_STATS
/*
 * Aggregate VM statistics, compiled in only under VM_STATS.
 * NOTE(review): the member list appears to have been stripped by
 * extraction -- an empty struct is a constraint violation in strict
 * ISO C -- so the counters must be restored from the original file.
 */
struct vmm_vmstats_str {
};
extern struct vmm_vmstats_str vmm_vmstats;
#endif /* VM_STATS */
/*
* Used to hold off page relocations into the cage until OBP has completed
* its boot-time handoff of its resources to the kernel.
*/
extern int page_relocate_ready;
/*
* cpu/mmu-dependent vm variables may be reset at bootup.
*/
/*
 * NOTE(review): per-variable notes are inferred from the names; the
 * original comment only states these cpu/mmu-dependent values may be
 * reset at bootup -- confirm against the sfmmu HAT layer.
 */
extern uint_t mmu_page_sizes;		/* page sizes currently in use */
extern uint_t max_mmu_page_sizes;	/* page sizes the MMU supports */
extern uint_t mmu_hashcnt;		/* presumably TSB/hash bucket count */
extern uint_t max_mmu_hashcnt;		/* presumably max hash bucket count */
extern size_t mmu_ism_pagesize;		/* page size used for ISM segments */
extern int mmu_exported_pagesize_mask;	/* bitmask of userland page sizes */
extern uint_t mmu_exported_page_sizes;	/* count of userland page sizes */
/* Translation tables between internal szc and the userland-visible szc. */
extern uint_t szc_2_userszc[];
extern uint_t userszc_2_szc[];
/*
* Platform specific map_pgsz large page hook routines.
*/
/*
* Platform specific page routines
*/
/*
* platform specific large pages for kernel heap support
*/
/*
 * Initialize the kernel's MMU context support.  Declared with an
 * explicit (void) prototype: the original empty parameter list is an
 * obsolescent K&R-style declaration that disables compile-time argument
 * checking (assumes the definition takes no arguments -- confirm at the
 * defining translation unit before relying on this).
 */
extern void mmu_init_kcontext(void);
/* Value destined for the kernel context register; defined in the HAT layer. */
extern uint64_t kcontextreg;
#ifdef __cplusplus
}
#endif
#endif /* _VM_DEP_H */