Lines Matching refs:smap

98 			struct smap *, enum seg_rw);
99 struct smap *get_smap_kpm(caddr_t, page_t **);
134 size_t len, enum seg_rw rw, struct smap *smp);
135 static void segmap_smapadd(struct smap *smp);
136 static struct smap *segmap_hashin(struct smap *smp, struct vnode *vp,
138 static void segmap_hashout(struct smap *smp);
178 * Translate addr into smap number within segment.
183 * Translate addr in seg into struct smap pointer.
200 static struct smap *smd_smap;
214 struct smap *scpu_last_smap;
230 * - per smap mutexes
232 * The lock ordering is to get the smap mutex to lock down the slot
238 * is no overlapping of hashchain and smap locks. After the slot is
243 * then locking the smap slot at the head of the freelist. This is
246 * The smap lock protects all fields in smap structure except for
303 struct smap *smp;
328 * Scale the number of smap freelists to be
362 * is known when the smap structures are initialized below.
375 * Allocate and initialize the smap hash chain headers.
395 * Allocate and initialize the smap structures.
397 * The smap array is large enough to affect boot time
404 kmem_alloc(sizeof (struct smap) * npages, KM_SLEEP);
408 struct smap *smpfreelist;
436 * sm_flag = 0 (no SM_QNDX_ZERO) implies smap on sm_freeq[1]
455 * smap freelists. Init the scpu_last_smap field to the first
456 * smap element so there is no need to check for NULL.
497 struct smap *smp)
515 * NOP in case of a kpm based smap, so dangerous things
591 struct smap *smp;
620 panic("segmap_fault: smap not found "
777 struct smap *smp;
795 panic("segmap_faulta: smap not found "
924 * Add smap to the appropriate free list.
927 segmap_smapadd(struct smap *smp)
930 struct smap *smpfreelist;
963 * then the smap just freed is already gone.
988 static struct smap *
989 segmap_hashin(struct smap *smp, struct vnode *vp, u_offset_t off, int hashid)
991 struct smap **hpp;
992 struct smap *tmp;
1026 * In the case where the vp has been freed and the smap
1029 * vnode/smap structure for a vnode which has the same
1048 segmap_hashout(struct smap *smp)
1050 struct smap **hpp, *hp;
1119 * Locks held on entry: smap lock
1120 * Locks held on exit : smap lock.
1124 grab_smp(struct smap *smp, page_t *pp)
1184 static struct smap *
1189 struct smap *smp, *first;
1276 * Fastpath the case we get the smap mutex
1285 * Skip to the next queue or smap.
1384 struct smap *smp;
1403 panic("segmap_pagecreate: smap not found "
1495 struct smap *smp;
1517 panic("segmap_pageunlock: smap not found "
1607 struct smap *smp, *nsmp;
1664 * Get smap lock and recheck its tag. The hash lock
1666 * and (vp, off) won't change when we have smap mtx.
1713 * Taking the last smap on freelist
1718 * Reclaiming 1st smap on list
1867 * can't hit us, we have bumped the smap
1936 * have bumped the smap refcnt and hat_pageunload needs the
1981 struct smap *smp;
1999 panic("segmap_release: smap not found "
2134 struct smap *smp, *smp_end;
2217 struct smap *smp, enum seg_rw rw)
2276 * Find the smap structure corresponding to the
2279 struct smap *
2282 struct smap *smp;
2302 * Assume the last smap used on this cpu is the one needed.
2311 * Assumption wrong, find the smap on the hash chain.
2343 struct smap *smp, enum seg_rw rw)
2349 struct smap *