Lines Matching defs:vml

745 vmap_t vml[],
786 vml[i].vs_data = (void *)&vpm->vpm_pp;
788 vml[i].vs_data = (void *)pp;
792 vml[i].vs_addr = hat_kpm_mapin(pp, 0);
793 vml[i].vs_len = PAGESIZE;
797 vml[i].vs_data = NULL;
798 vml[i].vs_addr = (caddr_t)NULL;
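
A convention repeats through these matches: each mapped page gets one
vmap_t entry, and the scatter/gather list is closed by a sentinel whose
vs_data and vs_addr are NULL (lines 797-798 above, 834-835 and 942-943
below). A minimal sketch of that convention; the struct layout is
inferred from the field uses in this listing (vs_addr, vs_len, vs_data),
not copied from the vpm header, and every _sk name is a stand-in:

    #include <stddef.h>

    #define PAGESIZE_SK 4096            /* stand-in for PAGESIZE */

    typedef struct vmap_sk {            /* sketch of vmap_t */
        char   *vs_addr;                /* kpm mapping of the page */
        size_t  vs_len;                 /* PAGESIZE in every match here */
        void   *vs_data;                /* page_t * or &vpm->vpm_pp */
    } vmap_sk_t;

    /* Close the list the way lines 797-798 and 942-943 do. */
    static void
    vmap_sk_terminate(vmap_sk_t *ent)
    {
        ent->vs_data = NULL;
        ent->vs_addr = NULL;
    }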
806 * the page addresses are returned in the SGL vml (vmap_t) array passed in.
818 vmap_t *vml,
834 vml[0].vs_data = NULL;
835 vml[0].vs_addr = (caddr_t)NULL;
845 * Ensure length fits within the vml[] array. One element of
864 return (vpm_pagecreate(vp, baseoff, len, vml, nseg, newpage));
921 vml[0].vs_addr = NULL;
922 vml[0].vs_data = NULL;
932 vml[i].vs_data = (void *)&(vpm->vpm_pp);
934 vml[i].vs_data = (void *)pplist[i];
938 vml[i].vs_addr = hat_kpm_mapin(pplist[i], 0);
939 vml[i].vs_len = PAGESIZE;
942 vml[i].vs_data = NULL;
943 vml[i].vs_addr = (caddr_t)NULL;
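
Because both the failure paths (lines 834-835, 921-922) and the fill
loops leave vml[] NULL-terminated, a caller never needs a separate
element count; vpm_unmap_pages() walks to the sentinel at line 959. A
caller-side sketch of that walk, reusing vmap_sk_t from the sketch
above:

    /* Count mapped pages by walking to the sentinel (the line 959 test). */
    static size_t
    vmap_sk_count(const vmap_sk_t vml[])
    {
        size_t n = 0;

        while (vml[n].vs_data != NULL)
            n++;
        return (n);
    }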
952 vpm_unmap_pages(vmap_t vml[], enum seg_rw rw)
959 for (i = 0; vml[i].vs_data != NULL; i++) {
960 ASSERT(IS_KPM_ADDR(vml[i].vs_addr));
963 pp = *(((page_t **)vml[i].vs_data));
965 pp = (page_t *)vml[i].vs_data;
980 vpm = (struct vpmap *)((char *)vml[i].vs_data
982 hat_kpm_mapout(pp, 0, vml[i].vs_addr);
992 hat_kpm_mapout(pp, 0, vml[i].vs_addr);
995 vml[i].vs_data = NULL;
996 vml[i].vs_addr = NULL;
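
Lines 963 and 965 show the two things vs_data can hold: in the cached
case it points at the vpm_pp member inside a struct vpmap (stored at
lines 786 and 932), so line 980 can step back from that member to the
containing struct. A self-contained sketch of that offsetof() recovery;
every name except vpm_pp is hypothetical:

    #include <stddef.h>

    struct page_sk;                     /* stand-in for page_t */

    struct vpmap_sk {                   /* sketch of struct vpmap */
        struct vpmap_sk *vpm_next;      /* hypothetical other member */
        struct page_sk  *vpm_pp;        /* what vs_data points at */
    };

    /* The recovery at line 980: step back from &vpm->vpm_pp to vpm. */
    static struct vpmap_sk *
    vpmap_sk_from_vs_data(void *vs_data)
    {
        return ((struct vpmap_sk *)((char *)vs_data -
            offsetof(struct vpmap_sk, vpm_pp)));
    }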
1020 struct vmap vml[MINVMAPS];
1031 fetchpage, vml, MINVMAPS, &npages, rw);
1044 (void) kzero(vml[0].vs_addr, (uint_t)pon);
1049 vml[i].vs_addr != NULL; i++) {
1051 error = uiomove(vml[i].vs_addr + pon,
1068 (void) kzero(vml[i].vs_addr + pon, (uint_t)nzero);
1070 vpm_unmap_pages(vml, rw);
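
The vpm_data_copy() matches trace a complete copy cycle: map at most
MINVMAPS pages (line 1031), zero any unwritten head of the first page
(line 1044), move data page by page (line 1051), zero the tail past the
written region (line 1068), and unmap (line 1070). A user-land analogue
of that loop shape, with memcpy() standing in for uiomove() and the
vmap_sk_t/PAGESIZE_SK stand-ins reused from the first sketch:

    #include <string.h>

    /*
     * Copy len bytes into the mapped pages, starting at offset pon
     * within the first page; only the first page starts mid-page,
     * mirroring the pon handling around lines 1044-1051.
     */
    static int
    sgl_copy_sk(vmap_sk_t vml[], size_t pon, const char *src, size_t len)
    {
        size_t i, n;

        for (i = 0; len != 0 && vml[i].vs_addr != NULL; i++) {
            n = PAGESIZE_SK - pon;
            if (n > len)
                n = len;
            memcpy(vml[i].vs_addr + pon, src, n);   /* uiomove() analogue */
            src += n;
            len -= n;
            pon = 0;
        }
        return (len == 0 ? 0 : -1);     /* -1: ran out of mapped pages */
    }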
1130 vmap_t vml[],
1144 vmap_t vml[],
1168 vpm_unmap_pages(vmap_t vml[], enum seg_rw rw)
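
The trailing matches at lines 1130, 1144, and 1168 repeat the three
signatures, which reads like a second set of definitions built under a
different configuration (for example, without kpm support); the bodies
are not shown in this listing, so that is an inference from the
duplicated signatures alone. As an end-to-end picture of the
caller-visible lifecycle the earlier matches imply, a hedged sketch
with hypothetical wrappers (map_pages_sk and unmap_pages_sk are
stand-ins, not vpm functions):

    #define MINVMAPS_SK 4       /* illustrative; real value is in the vpm header */

    /* Hypothetical wrappers with the same contract as the vpm calls. */
    extern int map_pages_sk(vmap_sk_t vml[], int nseg);
    extern void unmap_pages_sk(vmap_sk_t vml[]);

    static int
    caller_sk(const char *src, size_t len)
    {
        vmap_sk_t vml[MINVMAPS_SK];     /* on-stack SGL, like line 1020 */
        int error;

        if ((error = map_pages_sk(vml, MINVMAPS_SK)) != 0)
            return (error);
        error = sgl_copy_sk(vml, 0, src, len);
        unmap_pages_sk(vml);            /* entries cleared, lines 995-996 */
        return (error);
    }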