Searched refs:page_t (Results 26 - 50 of 149) sorted by relevance

/illumos-gate/usr/src/uts/sun4u/os/
ppage.c
156 ppmapin(page_t *pp, uint_t vprot, caddr_t hint)
267 pp_load_tlb(processorid_t cpu, caddr_t **pslot, page_t *pp, uint_t prot)
364 ppcopy_common(page_t *fm_pp, page_t *to_pp)
416 ppcopy_kernel__relocatable(page_t *fm_pp, page_t *to_pp)
440 ppcopy(page_t *fm_pp, page_t *to_pp)
500 pagezero(page_t *pp, uint_t off, uint_t len)
/illumos-gate/usr/src/uts/i86pc/vm/
vm_machdep.c
107 static page_t *io_pool_4g; /* pool for 32 bit dma limited devices */
108 static page_t *io_pool_16m; /* pool for 24 bit dma limited legacy devices */
127 static void page_io_pool_sub(page_t **, page_t *, page_t *);
342 page_t ****page_freelists;
343 page_t ***page_cachelists;
1024 static page_t *
1034 page_t *pp;
1035 page_t *plis
[all...]
hat_i86.h
237 extern page_t *hat_kpm_vaddr2page(caddr_t);
241 extern hment_t *hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry);
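The vm_machdep.c hits above describe two reserve pools of page_t's kept for DMA-constrained devices: io_pool_16m for legacy engines that can only address 24 bits (16 MB) of physical memory, and io_pool_4g for 32-bit-limited (4 GB) devices. A toy sketch of how such an address limit could map onto a pool choice follows; the helper, the string labels, and the cutoff logic are illustrative assumptions, not the kernel's actual selection code.

#include <stdint.h>
#include <stdio.h>

/* Labels standing in for the i86pc I/O page pools named in vm_machdep.c. */
static const char *io_pool_16m = "io_pool_16m";   /* pages below 16 MB */
static const char *io_pool_4g  = "io_pool_4g";    /* pages below 4 GB  */

/*
 * Hypothetical helper: pick a pool from the highest physical address a
 * device's DMA engine can reach.  Purely illustrative.
 */
static const char *
pick_io_pool(uint64_t dma_addr_limit)
{
        if (dma_addr_limit <= 0xFFFFFFULL)        /* 24-bit legacy DMA */
                return (io_pool_16m);
        if (dma_addr_limit <= 0xFFFFFFFFULL)      /* 32-bit DMA */
                return (io_pool_4g);
        return ("general freelists");             /* no special constraint */
}

int
main(void)
{
        printf("%s\n", pick_io_pool(0xFFFFFFULL));    /* io_pool_16m */
        printf("%s\n", pick_io_pool(0xFFFFFFFFULL));  /* io_pool_4g  */
        return (0);
}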
/illumos-gate/usr/src/uts/common/os/
bp_map.c
88 page_t *pp;
89 page_t **pplist;
209 page_t *pp;
275 page_t **pplist;
281 page_t *page;
283 page_t *pp;
340 * for a pp or pplist, get the pfn, then go to the next page_t
mem_cage.c
237 static int kcage_invalidate_page(page_t *, pgcnt_t *);
238 static int kcage_setnoreloc_pages(page_t *, se_t);
909 page_t *pp;
912 extern void page_list_noreloc_startup(page_t *);
1309 kcage_setnoreloc_pages(page_t *rootpp, se_t se)
1312 page_t *pp;
1363 kcage_assimilate_page(page_t *pp, pgcnt_t *nfreedp)
1445 page_t *pp;
1558 kcage_relocate_page(page_t *pp, pgcnt_t *nfreedp)
1560 page_t *op
[all...]
mem_config.c
87 * page_t memory (metadata) for a memseg.
95 extern page_t *ppvm_base;
127 page_t *pp;
128 page_t *opp, *oepp, *segpp;
190 * Allocate the page_t's from existing memory;
196 ASSERT(btopr(npgs * sizeof (page_t)) <= metapgs);
203 * We store the page_t's for this new memory in the first
210 * of (PAGESIZE + sizeof (page_t)) bytes per page.
213 (PAGESIZE + sizeof (page_t)));
218 ASSERT(btopr(npgs * sizeof (page_t)) <
[all...]
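The mem_config.c hits above turn on a sizing rule: when memory is dynamically added, the new range must also hold its own page_t metadata, so each usable page effectively costs PAGESIZE + sizeof (page_t) bytes, and the ASSERTs check that the metadata for npgs pages fits in the reserved metapgs. A stand-alone sketch of that arithmetic follows; the PAGESIZE, PAGESHIFT, and page_t sizes are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Illustrative values; the kernel derives these per platform. */
#define PAGESIZE        4096UL
#define PAGESHIFT       12
#define BTOPR(b)        (((b) + PAGESIZE - 1) >> PAGESHIFT)  /* bytes -> pages, rounded up */

typedef struct toy_page { char pad[120]; } toy_page_t;       /* stand-in for page_t */

int
main(void)
{
        unsigned long span_bytes = 256UL * 1024 * 1024;      /* hypothetical 256 MB hot-add */

        /* Each usable page consumes PAGESIZE of data plus one page_t of metadata. */
        unsigned long npgs    = span_bytes / (PAGESIZE + sizeof (toy_page_t));
        unsigned long metapgs = BTOPR(npgs * sizeof (toy_page_t));

        printf("usable pages: %lu, metadata pages: %lu\n", npgs, metapgs);

        /* Mirrors the ASSERT above: metadata for npgs pages must fit in metapgs. */
        return (BTOPR(npgs * sizeof (toy_page_t)) <= metapgs) ? 0 : 1;
}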
/illumos-gate/usr/src/uts/common/vm/
page_retire.c
45 * The p_toxic field in the page_t is used to indicate which errors have
49 * bits, the page_t must be held exclusively locked.
153 static int page_retire_pp_finish(page_t *, void *, uint_t);
160 * retired, and if we find inconsistencies, we scan every page_t in the
170 page_t *pr_pending_q[PR_PENDING_QMAX];
462 page_settoxic(page_t *pp, uchar_t bits)
475 page_clrtoxic(page_t *pp, uchar_t bits)
486 page_retire_done(page_t *pp, int code)
530 page_retire_destroy(page_t *pp)
597 page_clear_transient_ue(page_t *p
[all...]
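The page_retire.c comments above state the contract for p_toxic: it is a per-page bit field recording which errors the page has seen, and the page_t must be held exclusively locked while those bits are changed. Below is a hedged sketch of that pattern with a toy page and invented bit names; only the shape (assert the exclusive lock, then set or clear bits) follows the snippets, and the real page_settoxic()/page_clrtoxic() differ in detail.

#include <assert.h>
#include <stdint.h>

/* Toy stand-in for page_t; field and bit names are invented. */
typedef struct toy_page {
        int     p_selock_excl;  /* pretend "held SE_EXCL" state */
        uint8_t p_toxic;        /* analogue of the p_toxic error bits */
} toy_page_t;

#define TOY_UE  0x01            /* "uncorrectable error seen" (illustrative) */
#define TOY_CE  0x02            /* "correctable error seen" (illustrative) */

/*
 * Record error bits; per the comment in page_retire.c, the caller must
 * already hold the page exclusively locked.
 */
static void
toy_settoxic(toy_page_t *pp, uint8_t bits)
{
        assert(pp->p_selock_excl);
        pp->p_toxic |= bits;
}

static void
toy_clrtoxic(toy_page_t *pp, uint8_t bits)
{
        assert(pp->p_selock_excl);
        pp->p_toxic &= ~bits;
}

int
main(void)
{
        toy_page_t pg = { 1, 0 };

        toy_settoxic(&pg, TOY_UE);
        toy_clrtoxic(&pg, TOY_UE);
        return (pg.p_toxic != 0);
}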
vm_pagelist.c
231 void page_ctr_add(int, int, page_t *, int);
232 void page_ctr_add_internal(int, int, page_t *, int);
233 void page_ctr_sub(int, int, page_t *, int);
234 void page_ctr_sub_internal(int, int, page_t *, int);
237 page_t *page_promote(int, pfn_t, uchar_t, int, int);
238 page_t *page_demote(int, pfn_t, pfn_t, uchar_t, uchar_t, int, int);
239 page_t *page_freelist_split(uchar_t,
241 page_t *page_get_mnode_cachelist(uint_t, uint_t, int, int);
242 static int page_trylock_cons(page_t *pp, se_t se);
807 page_ctr_add_internal(int mnode, int mtype, page_t *p
[all...]
seg_dev.h
108 page_t **dp_pparray; /* pages allocated for this cookie */
vpm.h
203 page_t *vpm_pp; /* page pointer */
seg_kmem.c
361 page_t *pp;
446 page_t *pp;
679 page_t ***ppp, enum lock_type type, enum seg_rw rw)
681 page_t **pplist, *pp;
697 nb = sizeof (page_t *) * npages;
824 page_t *
860 page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
862 page_t *ppl;
906 page_t *pp = ppl;
984 void (*func)(page_t *))
[all...]
anon.h
399 uint_t *, page_t *[], size_t, page_t *, uint_t *,
404 uint_t *, page_t *[], uint_t *,
408 page_t *[], struct vpage [], int, int, struct cred *);
seg_spt.c
180 page_t **ppa);
301 page_t *pp;
382 page_t **ppa;
414 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
591 kmem_free(ppa, ((sizeof (page_t *)) * npages));
628 page_t *rootpp;
991 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
1151 kmem_free(pl, sizeof (page_t *) * tot_npages);
1317 pplist = kmem_zalloc(sizeof (page_t *)
1409 kmem_free(pl, sizeof (page_t *) * btop
[all...]
seg_map.c
99 struct smap *get_smap_kpm(caddr_t, page_t **);
499 page_t *pp;
592 page_t *pp, **ppp;
595 page_t *pl[MAXPPB + 1];
827 & MAXBOFFSET))), PAGESIZE, (uint_t *)NULL, (page_t **)NULL, 0,
1097 page_t *pp;
1124 grab_smp(struct smap *smp, page_t *pp)
1192 page_t *pp = NULL;
1382 page_t *pp;
1497 page_t *p
[all...]
/illumos-gate/usr/src/uts/sun4/io/efcode/
fc_physio.c
66 page_t **pplist;
120 page_t **pplist = NULL;
/illumos-gate/usr/src/uts/sun4u/vm/
mach_kpm.c
42 static caddr_t sfmmu_kpm_mapin(page_t *);
43 static void sfmmu_kpm_mapout(page_t *, caddr_t);
44 static int sfmmu_kpme_lookup(struct kpme *, page_t *);
45 static void sfmmu_kpme_add(struct kpme *, page_t *);
46 static void sfmmu_kpme_sub(struct kpme *, page_t *);
47 static caddr_t sfmmu_kpm_getvaddr(page_t *, int *);
48 static int sfmmu_kpm_fault(caddr_t, struct memseg *, page_t *);
49 static int sfmmu_kpm_fault_small(caddr_t, struct memseg *, page_t *);
50 static void sfmmu_kpm_vac_conflict(page_t *, caddr_t);
51 void sfmmu_kpm_pageunload(page_t *);
[all...]
/illumos-gate/usr/src/uts/sun4v/vm/
mach_vm_dep.c
361 pagescrub(page_t *pp, uint_t off, uint_t len)
426 page_t *ppl;
427 page_t *rootpp;
430 page_t **ppa;
465 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
467 page_t *pp = ppl;
495 kmem_free(ppa, npages * sizeof (page_t *));
562 page_t *pp;
566 page_t *rootpp = NULL;
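The seg_kmem.c hits above and the mach_vm_dep.c hits here (and sfmmu.c further down) share one shape: allocate a temporary array of npages page_t pointers with kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP), walk a linked page list filling in the slots, and later kmem_free() the array with the same size. A user-level sketch of that shape follows, with calloc/free standing in for the kmem routines and toy_page_t for page_t.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for page_t; the real structure lives in the kernel's vm headers. */
typedef struct toy_page {
        struct toy_page *p_next;        /* forward link in the page list */
        unsigned long    p_pagenum;
} toy_page_t;

int
main(void)
{
        size_t npages = 4, i;
        toy_page_t pages[4], *ppl, **ppa, *pp;

        /* Build a small singly linked page list to walk. */
        for (i = 0; i < npages; i++) {
                pages[i].p_pagenum = i;
                pages[i].p_next = (i + 1 < npages) ? &pages[i + 1] : NULL;
        }
        ppl = &pages[0];

        /* Analogue of kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP). */
        ppa = calloc(npages, sizeof (toy_page_t *));
        if (ppa == NULL)
                return (1);

        /* Walk the page list, recording each page in the pointer array. */
        for (pp = ppl, i = 0; pp != NULL && i < npages; pp = pp->p_next, i++)
                ppa[i] = pp;

        for (i = 0; i < npages; i++)
                printf("slot %zu -> page %lu\n", i, ppa[i]->p_pagenum);

        /* Analogue of kmem_free(ppa, npages * sizeof (page_t *)). */
        free(ppa);
        return (0);
}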
/illumos-gate/usr/src/cmd/mdb/common/modules/genunix/
bio.c
142 page_t p;
165 if (mdb_vread(&p, sizeof (page_t), addr) == -1)
memory.c
145 page_t page;
161 if (mdb_vread(&page, sizeof (page_t), pp) == -1) {
162 mdb_warn("unable to read page_t at %#lx", pp);
207 if (mdb_vread(&page, sizeof (page_t), pp) == -1) {
208 mdb_warn("unable to read page_t at %#lx", pp);
251 wsp->walk_data = mdb_alloc(sizeof (page_t) * PAGE_BUFFER, UM_SLEEP);
259 page_t *buf = wsp->walk_data;
262 const page_t *pg_addr = msp->pages;
267 if (mdb_vread(buf, pg_read * sizeof (page_t),
269 mdb_warn("can't read page_t'
[all...]
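The genunix module hits (bio.c and memory.c above) all use the same mdb idiom: mdb_vread() copies a page_t out of the target at a given address and returns -1 on failure, at which point mdb_warn() reports the problem. Below is a minimal dcmd sketch in that style; it only builds inside an mdb module, and the toy_page_t layout and the ::toypage name are invented here, since the real page_t comes from kernel headers not shown in these results.

#include <sys/mdb_modapi.h>

/*
 * Placeholder for the real page_t, which the genunix module gets from the
 * kernel's vm headers; the layout here is purely illustrative.
 */
typedef struct toy_page {
        uintptr_t       p_vnode;        /* stand-in for the page's vnode pointer */
        uint64_t        p_offset;       /* stand-in for the page's offset */
} toy_page_t;

/* ::toypage <addr> -- read a toy_page_t from the target and print it. */
static int
toypage_dcmd(uintptr_t addr, uint_t flags, int argc, const mdb_arg_t *argv)
{
        toy_page_t pg;

        if (!(flags & DCMD_ADDRSPEC) || argc != 0)
                return (DCMD_USAGE);

        /* Same failure pattern as memory.c above: -1 means the read failed. */
        if (mdb_vread(&pg, sizeof (pg), addr) == -1) {
                mdb_warn("unable to read toy_page_t at %#lx", addr);
                return (DCMD_ERR);
        }

        mdb_printf("vnode=%p offset=%llx\n", (void *)pg.p_vnode, pg.p_offset);
        return (DCMD_OK);
}

static const mdb_dcmd_t dcmds[] = {
        { "toypage", ":", "print a toy page structure", toypage_dcmd },
        { NULL }
};

static const mdb_modinfo_t modinfo = { MDB_API_VERSION, dcmds, NULL };

const mdb_modinfo_t *
_mdb_init(void)
{
        return (&modinfo);
}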
/illumos-gate/usr/src/uts/sun4u/cpu/
mach_cpu_module.c
243 * whenever the sun4u page_t grows beyond 128
251 volatile int garbage[ECACHE_PAGE_BYTE_MAX - sizeof (page_t)];
265 * whenever the sun4u page_t grows beyond 128
273 volatile int garbage[ECACHE_PAGE_BYTE_MAX - sizeof (page_t)];
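The mach_cpu_module.c hits above are a compile-time guard: the array bound ECACHE_PAGE_BYTE_MAX - sizeof (page_t) stops being a valid size the moment page_t outgrows its 128-byte budget, so the build fails instead of silently exceeding it. A stand-alone sketch of the same trick follows; the constant, struct, and guard names are invented for illustration.

#include <stddef.h>

#define BYTE_BUDGET     128     /* illustrative analogue of ECACHE_PAGE_BYTE_MAX */

typedef struct toy_page {
        char    pad[120];       /* grow this past BYTE_BUDGET and the build breaks */
} toy_page_t;

/*
 * If sizeof (toy_page_t) ever exceeds BYTE_BUDGET, this bound is no longer a
 * small non-negative value (with size_t arithmetic it wraps to an enormous
 * number), and the compiler rejects the declaration -- the same effect the
 * sun4u code gets from its 'volatile int garbage[...]' member.
 */
static volatile char size_guard[BYTE_BUDGET - sizeof (toy_page_t)];

int
main(void)
{
        (void) size_guard;
        return (0);
}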
/illumos-gate/usr/src/uts/i86pc/sys/
machsystm.h
176 extern page_t *page_get_physical(uintptr_t seed);
230 extern page_t *page_get_high_mfn(mfn_t);
/illumos-gate/usr/src/uts/common/sys/
ramdisk.h
199 page_t **rd_ppa;
/illumos-gate/usr/src/uts/common/sys/fs/
hsfs_impl.h
42 extern int hsfs_putapage(vnode_t *, page_t *, u_offset_t *, size_t *, int,
/illumos-gate/usr/src/uts/sun4/vm/
sfmmu.c
260 page_t *pp;
383 if ((pp != NULL) && PP_ISFREE((page_t *)pp)) {
1092 static page_t *
1116 uint_t attr, page_t *(*page_create_func)(void *, size_t, int, void *),
1119 page_t *ppl;
1120 page_t *rootpp;
1123 page_t **ppa;
1151 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1153 page_t *pp = ppl;
1172 kmem_free(ppa, npages * sizeof (page_t *));
[all...]
/illumos-gate/usr/src/uts/common/io/
physmem.c
97 uint_t *protp, page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
459 page_t *pp;
528 map_page_proc(page_t *pp, void *arg, uint_t flags)
644 page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr, enum seg_rw rw,
647 page_t *pp;
707 page_t *pp;
733 page_t *rpp;

Completed in 361 milliseconds
