Lines Matching defs:vsp

220 #define	VMEM_INSERT(vprev, vsp, type)					\
223 (vsp)->vs_##type##next = (vnext); \
224 (vsp)->vs_##type##prev = (vprev); \
225 (vprev)->vs_##type##next = (vsp); \
226 (vnext)->vs_##type##prev = (vsp); \
229 #define VMEM_DELETE(vsp, type) \
231 vmem_seg_t *vprev = (vsp)->vs_##type##prev; \
232 vmem_seg_t *vnext = (vsp)->vs_##type##next; \
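
The two macros above splice a segment into and out of a doubly-linked list, with the single-letter type token pasted into the field names so the same code maintains both the arena-order (a) links and the freelist/hash (k) links. A minimal standalone sketch of the same token-pasting idiom; node_t and its field names are illustrative stand-ins, not the real vmem_seg_t:

#include <stdio.h>

typedef struct node {
	struct node *n_anext, *n_aprev;		/* arena-order links */
	struct node *n_knext, *n_kprev;		/* freelist/hash links */
	int n_id;
} node_t;

/* Splice nsp in after nprev on the 'type' list ('a' or 'k'). */
#define	NODE_INSERT(nprev, nsp, type)				\
{								\
	node_t *nnext = (nprev)->n_##type##next;		\
	(nsp)->n_##type##next = (nnext);			\
	(nsp)->n_##type##prev = (nprev);			\
	(nprev)->n_##type##next = (nsp);			\
	(nnext)->n_##type##prev = (nsp);			\
}

/* Unlink nsp from the 'type' list. */
#define	NODE_DELETE(nsp, type)					\
{								\
	node_t *nprev = (nsp)->n_##type##prev;			\
	node_t *nnext = (nsp)->n_##type##next;			\
	(nprev)->n_##type##next = (nnext);			\
	(nnext)->n_##type##prev = (nprev);			\
}

int
main(void)
{
	node_t head = { &head, &head, &head, &head, 0 };	/* sentinel */
	node_t a = { 0 }, b = { 0 };

	a.n_id = 1;
	b.n_id = 2;
	NODE_INSERT(&head, &a, a);	/* head -> a on the 'a' list */
	NODE_INSERT(&a, &b, a);		/* head -> a -> b */
	NODE_DELETE(&a, a);		/* head -> b */

	for (node_t *n = head.n_anext; n != &head; n = n->n_anext)
		printf("node %d\n", n->n_id);	/* prints "node 2" */
	return (0);
}
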
243 vmem_seg_t *vsp;
246 if ((vsp = vmem_segfree) != NULL)
247 vmem_segfree = vsp->vs_knext;
250 return (vsp);
257 vmem_putseg_global(vmem_seg_t *vsp)
260 vsp->vs_knext = vmem_segfree;
261 vmem_segfree = vsp;
271 vmem_seg_t *vsp;
275 vsp = vmp->vm_segfree;
276 vmp->vm_segfree = vsp->vs_knext;
279 return (vsp);
286 vmem_putseg(vmem_t *vmp, vmem_seg_t *vsp)
288 vsp->vs_knext = vmp->vm_segfree;
289 vmp->vm_segfree = vsp;
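
vmem_getseg()/vmem_putseg() and their _global counterparts are plain LIFO caches of spare vmem_seg_t structures threaded through vs_knext: each arena keeps its own vm_segfree list, backed by the locked global vmem_segfree list. A compilable sketch of the pattern, with hypothetical seg_t/seg_get/seg_put names and malloc() standing in for the pre-populated pool the real code must use (vmem cannot call malloc, since it is the allocator):

#include <stdlib.h>
#include <assert.h>

typedef struct seg {
	struct seg *s_knext;	/* list linkage, reused as cache link */
} seg_t;

static seg_t *seg_free;		/* LIFO cache of spare segments */

static seg_t *
seg_get(void)
{
	seg_t *sp;

	if ((sp = seg_free) != NULL)		/* pop from the cache */
		seg_free = sp->s_knext;
	else
		sp = malloc(sizeof (seg_t));	/* sketch: no OOM handling */
	return (sp);
}

static void
seg_put(seg_t *sp)
{
	sp->s_knext = seg_free;			/* push back onto the cache */
	seg_free = sp;
}

int
main(void)
{
	seg_t *a = seg_get();

	seg_put(a);
	assert(seg_get() == a);	/* LIFO: we get the same segment back */
	return (0);
}
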
294 * Add vsp to the appropriate freelist.
297 vmem_freelist_insert(vmem_t *vmp, vmem_seg_t *vsp)
301 ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
303 vprev = (vmem_seg_t *)&vmp->vm_freelist[highbit(VS_SIZE(vsp)) - 1];
304 vsp->vs_type = VMEM_FREE;
306 VMEM_INSERT(vprev, vsp, k);
312 * Take vsp from the freelist.
315 vmem_freelist_delete(vmem_t *vmp, vmem_seg_t *vsp)
317 ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
318 ASSERT(vsp->vs_type == VMEM_FREE);
320 if (vsp->vs_knext->vs_start == 0 && vsp->vs_kprev->vs_start == 0) {
322 * The segments on both sides of 'vsp' are freelist heads,
323 * so taking vsp leaves the freelist at vsp->vs_kprev empty.
325 ASSERT(vmp->vm_freemap & VS_SIZE(vsp->vs_kprev));
326 vmp->vm_freemap ^= VS_SIZE(vsp->vs_kprev);
328 VMEM_DELETE(vsp, k);
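
vmem_freelist_insert() and vmem_freelist_delete() maintain size-segregated freelists: highbit(VS_SIZE(vsp)) - 1 selects the list for sizes in [2^n, 2^(n+1)), and the vm_freemap bitmap keeps one bit per nonempty list. List heads have vs_start == 0 and a VS_SIZE equal to their list's bit, so the XOR at line 326 clears exactly that bit once both k-neighbors are heads, i.e. the list just became empty. A small sketch of the indexing and bitmap bookkeeping, assuming GCC/Clang builtins and a 64-bit long:

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

/* Freelist index for sizes in [1UL << n, (1UL << (n + 1)) - 1]. */
static int
freelist_index(size_t size)
{
	return (63 - __builtin_clzl(size));	/* highbit(size) - 1 */
}

int
main(void)
{
	uint64_t freemap = 0;
	int idx = freelist_index(3000);		/* list 11: 2048..4095 */

	assert(idx == 11);
	freemap |= 1ULL << idx;		/* insert: mark the list nonempty */

	/*
	 * Delete: when both k-neighbors are list heads the list is now
	 * empty; vmem clears the bit by XORing VS_SIZE(head), which
	 * encodes exactly this one bit (heads have vs_start == 0).
	 */
	freemap ^= 1ULL << idx;
	assert(freemap == 0);

	printf("freelist for 3000 bytes: %d\n", idx);
	return (0);
}
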
332 * Add vsp to the allocated-segment hash table and update kstats.
335 vmem_hash_insert(vmem_t *vmp, vmem_seg_t *vsp)
339 vsp->vs_type = VMEM_ALLOC;
340 bucket = VMEM_HASH(vmp, vsp->vs_start);
341 vsp->vs_knext = *bucket;
342 *bucket = vsp;
345 vsp->vs_depth = (uint8_t)getpcstack(vsp->vs_stack,
347 vsp->vs_thread = thr_self();
348 vsp->vs_timestamp = gethrtime();
350 vsp->vs_depth = 0;
354 vmp->vm_kstat.vk_mem_inuse += VS_SIZE(vsp);
358 * Remove vsp from the allocated-segment hash table and update kstats.
363 vmem_seg_t *vsp, **prev_vspp;
366 while ((vsp = *prev_vspp) != NULL) {
367 if (vsp->vs_start == addr) {
368 *prev_vspp = vsp->vs_knext;
372 prev_vspp = &vsp->vs_knext;
375 if (vsp == NULL) {
379 if (VS_SIZE(vsp) != size) {
381 "(expect %lu)", vmp, addr, size, VS_SIZE(vsp));
387 return (vsp);
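
vmem_hash_insert() and vmem_hash_delete() keep allocated segments in a chained hash table keyed by vs_start; the delete walks the chain through a pointer-to-pointer (prev_vspp), so unlinking the head of a bucket needs no special case. A self-contained sketch of that idiom (hseg_t, the 64-bucket table, and the >> 3 shift standing in for the quantum shift are illustrative assumptions):

#include <stdio.h>
#include <stdint.h>

#define	NBUCKETS	64

typedef struct hseg {
	struct hseg *h_knext;	/* hash chain link */
	uintptr_t h_start;	/* key: segment start address */
} hseg_t;

static hseg_t *table[NBUCKETS];

static hseg_t **
hash_bucket(uintptr_t addr)
{
	return (&table[(addr >> 3) & (NBUCKETS - 1)]);
}

static void
hash_insert(hseg_t *sp)
{
	hseg_t **bucket = hash_bucket(sp->h_start);

	sp->h_knext = *bucket;		/* push onto the chain head */
	*bucket = sp;
}

static hseg_t *
hash_delete(uintptr_t addr)
{
	hseg_t *sp, **prev = hash_bucket(addr);

	/* prev always points at the link to patch: no head special case. */
	while ((sp = *prev) != NULL) {
		if (sp->h_start == addr) {
			*prev = sp->h_knext;
			return (sp);
		}
		prev = &sp->h_knext;
	}
	return (NULL);
}

int
main(void)
{
	hseg_t a = { NULL, 0x1000 }, b = { NULL, 0x2000 };

	hash_insert(&a);
	hash_insert(&b);
	printf("%s\n", hash_delete(0x1000) == &a ? "found" : "missing");
	return (0);
}
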
409 * Remove segment vsp from the arena.
412 vmem_seg_destroy(vmem_t *vmp, vmem_seg_t *vsp)
414 ASSERT(vsp->vs_type != VMEM_ROTOR);
415 VMEM_DELETE(vsp, a);
417 vmem_putseg(vmp, vsp);
473 * Remove span vsp from vmp and update kstats.
476 vmem_span_destroy(vmem_t *vmp, vmem_seg_t *vsp)
478 vmem_seg_t *span = vsp->vs_aprev;
479 size_t size = VS_SIZE(vsp);
490 vmem_seg_destroy(vmp, vsp);
495 * Allocate the subrange [addr, addr + size) from segment vsp.
500 vmem_seg_alloc(vmem_t *vmp, vmem_seg_t *vsp, uintptr_t addr, size_t size)
502 uintptr_t vs_start = vsp->vs_start;
503 uintptr_t vs_end = vsp->vs_end;
510 ASSERT(vsp->vs_type == VMEM_FREE);
521 vsp->vs_start = addr_end;
522 vsp = vmem_seg_create(vmp, vsp->vs_aprev, addr, addr + size);
523 vmem_hash_insert(vmp, vsp);
524 return (vsp);
527 vmem_freelist_delete(vmp, vsp);
531 vmem_seg_create(vmp, vsp, addr_end, vs_end));
535 vmem_seg_create(vmp, vsp->vs_aprev, vs_start, addr));
537 vsp->vs_start = addr;
538 vsp->vs_end = addr + size;
540 vmem_hash_insert(vmp, vsp);
541 return (vsp);
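
vmem_seg_alloc() carves [addr, addr + size) out of a free segment, creating new segments for any leftover space in front of or behind the cut and turning the remainder into the allocation itself. A simplified, runnable sketch of just the splitting arithmetic; span_t and span_carve are hypothetical, and the real code keeps the pieces on freelists rather than malloc'ing them:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>

typedef struct span {
	uintptr_t sp_start, sp_end;	/* half-open range */
} span_t;

/*
 * Carve [addr, addr + size) out of the free span *fs. Leftover front
 * and back pieces come back through *front and *back (NULL if empty);
 * *fs itself becomes the allocated piece. vmem_seg_alloc does the
 * same split, but hashes the result and freelists the leftovers.
 */
static void
span_carve(span_t *fs, uintptr_t addr, size_t size,
    span_t **front, span_t **back)
{
	uintptr_t addr_end = addr + size;

	assert(fs->sp_start <= addr && addr_end <= fs->sp_end);

	*front = *back = NULL;
	if (addr > fs->sp_start) {		/* leftover before the cut */
		*front = malloc(sizeof (span_t));	/* sketch: unchecked */
		(*front)->sp_start = fs->sp_start;
		(*front)->sp_end = addr;
	}
	if (addr_end < fs->sp_end) {		/* leftover after the cut */
		*back = malloc(sizeof (span_t));
		(*back)->sp_start = addr_end;
		(*back)->sp_end = fs->sp_end;
	}
	fs->sp_start = addr;
	fs->sp_end = addr_end;
}

int
main(void)
{
	span_t fs = { 0x1000, 0x5000 };
	span_t *front, *back;

	span_carve(&fs, 0x2000, 0x1000, &front, &back);
	printf("front [%lx,%lx) alloc [%lx,%lx) back [%lx,%lx)\n",
	    (unsigned long)front->sp_start, (unsigned long)front->sp_end,
	    (unsigned long)fs.sp_start, (unsigned long)fs.sp_end,
	    (unsigned long)back->sp_start, (unsigned long)back->sp_end);
	return (0);
}
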
561 vmem_seg_t *vsp;
568 (vsp = vmem_getseg_global()) != NULL)
569 vmem_putseg(vmp, vsp);
658 vmem_seg_t *vsp = NULL;
678 vsp = vprev;
680 vsp = vnext;
684 * vsp could represent a complete imported span,
687 if (vsp != NULL && vsp->vs_aprev->vs_import &&
689 vsp->vs_aprev->vs_type == VMEM_SPAN &&
690 vsp->vs_anext->vs_type == VMEM_SPAN) {
691 void *vaddr = (void *)vsp->vs_start;
692 size_t size = VS_SIZE(vsp);
693 ASSERT(size == VS_SIZE(vsp->vs_aprev));
694 vmem_freelist_delete(vmp, vsp);
695 vmem_span_destroy(vmp, vsp);
712 vmem_seg_t *vsp, *rotor;
735 vsp = rotor->vs_anext;
736 if (vsp->vs_type == VMEM_FREE && (vs_size = VS_SIZE(vsp)) > realsize &&
739 addr = vsp->vs_start;
740 vsp->vs_start = addr + realsize;
753 if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
755 vsp = vsp->vs_anext;
756 if (vsp == rotor) {
768 vsp = rotor->vs_aprev;
769 if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
787 vsp = rotor->vs_anext;
794 addr = vsp->vs_start;
795 vsp = vmem_seg_alloc(vmp, vsp, addr, size);
796 ASSERT(vsp->vs_type == VMEM_ALLOC &&
797 vsp->vs_start == addr && vsp->vs_end == addr + size);
803 vmem_advance(vmp, rotor, vsp);
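
VM_NEXTFIT allocates by scanning forward in arena order from a persistent rotor segment, taking the first free segment big enough and then advancing the rotor past the allocation, so successive requests march around the arena. A compilable sketch of just the scan (the split-and-advance step is elided; seg_t and nextfit are illustrative):

#include <stdio.h>
#include <stddef.h>

typedef enum { S_FREE, S_ALLOC, S_ROTOR } stype_t;

typedef struct seg {
	struct seg *s_anext;	/* circular arena-order list */
	stype_t s_type;
	size_t s_size;
} seg_t;

/*
 * Next fit: scan forward from the rotor and take the first free
 * segment big enough; NULL if we wrap all the way around. The real
 * vmem_nextfit_alloc then splits the segment and re-inserts the
 * rotor just after it.
 */
static seg_t *
nextfit(seg_t *rotor, size_t size)
{
	seg_t *sp;

	for (sp = rotor->s_anext; sp != rotor; sp = sp->s_anext)
		if (sp->s_type == S_FREE && sp->s_size >= size)
			return (sp);
	return (NULL);
}

int
main(void)
{
	seg_t r = { NULL, S_ROTOR, 0 };
	seg_t a = { NULL, S_ALLOC, 4096 };
	seg_t f = { NULL, S_FREE, 8192 };

	r.s_anext = &a;
	a.s_anext = &f;
	f.s_anext = &r;		/* r -> a -> f -> r */
	printf("fit: %zu bytes\n", nextfit(&r, 4096)->s_size);	/* 8192 */
	return (0);
}
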
817 vmem_seg_t *vsp;
889 for (vbest = NULL, vsp = (flist == 0) ? NULL :
891 vsp != NULL; vsp = vsp->vs_knext) {
893 if (vsp->vs_start == 0) {
905 VS_SIZE(vsp)));
908 vsp = (vmem_seg_t *)&vmp->vm_freelist[flist];
909 ASSERT(vsp->vs_knext->vs_type == VMEM_FREE);
912 if (vsp->vs_end - 1 < (uintptr_t)minaddr)
914 if (vsp->vs_start > (uintptr_t)maxaddr - 1)
916 start = MAX(vsp->vs_start, (uintptr_t)minaddr);
917 end = MIN(vsp->vs_end - 1, (uintptr_t)maxaddr - 1) + 1;
923 (vbest != NULL && VS_SIZE(vsp) >= VS_SIZE(vbest)))
925 vbest = vsp;
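
The best-fit loop above clips each candidate segment to the caller's [minaddr, maxaddr) window before testing whether it can supply size bytes; the "end - 1 ... + 1" arithmetic keeps the comparisons overflow-safe when maxaddr is 0, meaning no upper bound. A sketch of just that clipping test:

#include <stdio.h>
#include <stdint.h>

#define	MAX(a, b)	((a) > (b) ? (a) : (b))
#define	MIN(a, b)	((a) < (b) ? (a) : (b))

/*
 * Can a segment [start, end) supply 'size' bytes inside the window
 * [minaddr, maxaddr)? The end - 1 comparisons let maxaddr == 0 mean
 * "no upper bound" without overflowing, as in vmem_xalloc.
 */
static int
seg_fits(uintptr_t start, uintptr_t end, size_t size,
    uintptr_t minaddr, uintptr_t maxaddr)
{
	uintptr_t lo, hi;

	if (end - 1 < minaddr)		/* entirely below the window */
		return (0);
	if (start > maxaddr - 1)	/* entirely above the window */
		return (0);
	lo = MAX(start, minaddr);
	hi = MIN(end - 1, maxaddr - 1) + 1;
	return (hi - lo >= size);
}

int
main(void)
{
	/* 0x3000..0x7000 clipped to [0x4000, 0x6000) leaves 0x2000 bytes. */
	printf("%d\n", seg_fits(0x3000, 0x7000, 0x2000, 0x4000, 0x6000));
	printf("%d\n", seg_fits(0x3000, 0x7000, 0x3000, 0x4000, 0x6000));
	return (0);
}
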
1009 vmem_seg_t *vsp, *vnext, *vprev;
1013 vsp = vmem_hash_delete(vmp, (uintptr_t)vaddr, size);
1014 vsp->vs_end = P2ROUNDUP(vsp->vs_end, vmp->vm_quantum);
1019 vnext = vsp->vs_anext;
1021 ASSERT(vsp->vs_end == vnext->vs_start);
1023 vsp->vs_end = vnext->vs_end;
1030 vprev = vsp->vs_aprev;
1032 ASSERT(vprev->vs_end == vsp->vs_start);
1034 vprev->vs_end = vsp->vs_end;
1035 vmem_seg_destroy(vmp, vsp);
1036 vsp = vprev;
1042 if (vsp->vs_aprev->vs_import && vmp->vm_source_free != NULL &&
1043 vsp->vs_aprev->vs_type == VMEM_SPAN &&
1044 vsp->vs_anext->vs_type == VMEM_SPAN) {
1045 vaddr = (void *)vsp->vs_start;
1046 size = VS_SIZE(vsp);
1047 ASSERT(size == VS_SIZE(vsp->vs_aprev));
1048 vmem_span_destroy(vmp, vsp);
1052 vmem_freelist_insert(vmp, vsp);
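
vmem_free coalesces the freed segment with free arena-order neighbors in both directions, and if the survivor then sits directly between the two VMEM_SPAN markers of an imported span, the whole span is handed back to the source arena. A runnable sketch of the coalescing step (iseg_t and coalesce are illustrative; the real code also fixes up freelists and kstats):

#include <stdio.h>
#include <stdint.h>

typedef enum { T_FREE, T_ALLOC, T_SPAN } ttype_t;

typedef struct iseg {
	struct iseg *i_anext, *i_aprev;	/* arena-order neighbors */
	ttype_t i_type;
	uintptr_t i_start, i_end;
} iseg_t;

/*
 * Merge a just-freed segment with free arena-order neighbors, as in
 * vmem_free: absorb a free successor, then let a free predecessor
 * absorb us. Returns the surviving segment.
 */
static iseg_t *
coalesce(iseg_t *sp)
{
	iseg_t *next = sp->i_anext, *prev = sp->i_aprev;

	if (next->i_type == T_FREE) {		/* absorb the successor */
		sp->i_end = next->i_end;
		sp->i_anext = next->i_anext;	/* unlink next */
		next->i_anext->i_aprev = sp;
	}
	if (prev->i_type == T_FREE) {		/* predecessor absorbs us */
		prev->i_end = sp->i_end;
		prev->i_anext = sp->i_anext;	/* unlink sp */
		sp->i_anext->i_aprev = prev;
		sp = prev;
	}
	return (sp);
}

int
main(void)
{
	iseg_t span = { NULL, NULL, T_SPAN, 0x1000, 0x4000 };
	iseg_t f1 = { NULL, NULL, T_FREE, 0x1000, 0x2000 };
	iseg_t mid = { NULL, NULL, T_FREE, 0x2000, 0x3000 };	/* just freed */
	iseg_t f2 = { NULL, NULL, T_FREE, 0x3000, 0x4000 };
	iseg_t *sp;

	span.i_anext = &f1;
	f1.i_aprev = &span;
	f1.i_anext = &mid;
	mid.i_aprev = &f1;
	mid.i_anext = &f2;
	f2.i_aprev = &mid;
	f2.i_anext = &span;
	span.i_aprev = &f2;

	sp = coalesce(&mid);
	/* Both neighbors are the span now: a whole import can go home. */
	printf("[%lx,%lx) span-bounded: %d\n",
	    (unsigned long)sp->i_start, (unsigned long)sp->i_end,
	    sp->i_aprev == &span && sp->i_anext == &span);
	return (0);
}
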
1067 vmem_seg_t *vsp;
1109 vsp = vmp->vm_freelist[flist].vs_knext;
1110 addr = vsp->vs_start;
1111 (void) vmem_seg_alloc(vmp, vsp, addr, size);
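
The fast path above pops the head of a freelist chosen by instant fit: for a size in [2^n, 2^(n+1)), every segment on list n + 1 or above is guaranteed large enough, so a fitting list falls out of one mask of vm_freemap plus one bit scan, with no list search. A sketch assuming GCC/Clang builtins (vmem's VM_INSTANTFIT also lets an exact power of two be satisfied from list n itself; that case is omitted here):

#include <stdio.h>
#include <stdint.h>

/*
 * Instant fit: mask off freemap bits below n + 1, then take the
 * lowest remaining bit. Returns the list index, or -1 if no list
 * is guaranteed to fit.
 */
static int
instant_fit(uint64_t freemap, size_t size)
{
	uint64_t above = freemap & -(2ULL << (63 - __builtin_clzll(size)));

	return (above == 0 ? -1 : __builtin_ctzll(above));
}

int
main(void)
{
	uint64_t freemap = (1ULL << 9) | (1ULL << 13);	/* lists 9, 13 */

	printf("%d\n", instant_fit(freemap, 3000));	/* needs >= 12: 13 */
	printf("%d\n", instant_fit(freemap, 300));	/* needs >= 9: 9 */
	return (0);
}
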
1137 vmem_seg_t *vsp;
1142 for (vsp = seg0->vs_knext; vsp != seg0; vsp = vsp->vs_knext) {
1144 ASSERT(vsp->vs_type == VMEM_SPAN);
1145 if (start >= vsp->vs_start && end - 1 <= vsp->vs_end - 1)
1149 return (vsp != seg0);
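
vmem_contains() walks the circular k-list of spans hanging off vm_seg0 and tests containment with end - 1 comparisons, which stay correct even for a range that runs to the very top of the address space. The test in isolation:

#include <stdio.h>
#include <stdint.h>

/*
 * Overflow-safe test that [start, start + size) lies inside [s, e):
 * comparing end - 1 keeps the test correct when a range ends at the
 * top of the address space (end wraps to 0), as in vmem_contains.
 */
static int
range_inside(uintptr_t start, size_t size, uintptr_t s, uintptr_t e)
{
	uintptr_t end = start + size;

	return (start >= s && end - 1 <= e - 1);
}

int
main(void)
{
	printf("%d\n", range_inside(0x2000, 0x1000, 0x1000, 0x4000)); /* 1 */
	printf("%d\n", range_inside(0x3800, 0x1000, 0x1000, 0x4000)); /* 0 */
	return (0);
}
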
1194 vmem_seg_t *vsp;
1243 vsp = prevseg;
1256 vsp = prevseg;
1269 vsp = nextseg;
1278 vsp = span;
1289 vsp = oldseg;
1291 vsp = vmem_seg_create(vmp, oldseg, addr, endaddr);
1302 vsp = oldseg;
1304 vsp = vmem_seg_create(vmp, span, addr, endaddr);
1306 vmem_freelist_insert(vmp, vsp);
1308 return (vsp);
1322 vmem_seg_t *vsp;
1339 vsp = vmem_span_create(vmp, vaddr, size, 0);
1341 vsp = vmem_extend_unlocked(vmp, addr, endaddr);
1343 ASSERT(VS_SIZE(vsp) >= alloc);
1345 addr = vsp->vs_start;
1346 (void) vmem_seg_alloc(vmp, vsp, addr, alloc);
1367 vmem_seg_t *vsp;
1379 for (vsp = seg0->vs_anext; vsp != seg0; vsp = vsp->vs_anext) {
1380 if (vsp->vs_type & typemask) {
1381 void *start = (void *)vsp->vs_start;
1382 size_t size = VS_SIZE(vsp);
1384 vmem_advance(vmp, &walker, vsp);
1388 vsp = &walker;
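
vmem_walk() threads a temporary walker segment through the arena list as its cursor, so the arena lock can be dropped around each callback and vmem_advance() can nudge the walker past segments that disappear in the meantime. A single-threaded sketch of the cursor idiom (wseg_t and friends are illustrative):

#include <stdio.h>

typedef enum { W_FREE, W_ALLOC, W_WALKER } wtype_t;

typedef struct wseg {
	struct wseg *w_next, *w_prev;
	wtype_t w_type;
	int w_id;
} wseg_t;

static void
winsert(wseg_t *p, wseg_t *c)	/* insert c after p */
{
	c->w_next = p->w_next;
	c->w_prev = p;
	p->w_next->w_prev = c;
	p->w_next = c;
}

static void
wremove(wseg_t *c)		/* unlink c */
{
	c->w_prev->w_next = c->w_next;
	c->w_next->w_prev = c->w_prev;
}

/*
 * Visit every segment, keeping a walker node as the cursor so that
 * the list may change between callbacks; moving the walker here is
 * the winsert + wremove pair (vmem_advance in the real code).
 */
static void
walk(wseg_t *head, void (*func)(wseg_t *))
{
	wseg_t walker = { NULL, NULL, W_WALKER, -1 };
	wseg_t *sp;

	winsert(head, &walker);
	while ((sp = walker.w_next) != head) {
		wremove(&walker);	/* advance the cursor past sp */
		winsert(sp, &walker);
		if (sp->w_type != W_WALKER)
			func(sp);	/* lock could be dropped here */
	}
	wremove(&walker);
}

static void
show(wseg_t *sp)
{
	printf("seg %d\n", sp->w_id);
}

int
main(void)
{
	wseg_t head = { &head, &head, W_WALKER, 0 };
	wseg_t a = { NULL, NULL, W_ALLOC, 1 };
	wseg_t b = { NULL, NULL, W_FREE, 2 };

	winsert(&head, &a);
	winsert(&a, &b);
	walk(&head, show);	/* prints seg 1, seg 2 */
	return (0);
}
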
1435 vmem_seg_t *vsp;
1475 vsp = &vmp->vm_seg0;
1476 vsp->vs_anext = vsp;
1477 vsp->vs_aprev = vsp;
1478 vsp->vs_knext = vsp;
1479 vsp->vs_kprev = vsp;
1480 vsp->vs_type = VMEM_SPAN;
1482 vsp = &vmp->vm_rotor;
1483 vsp->vs_type = VMEM_ROTOR;
1484 VMEM_INSERT(&vmp->vm_seg0, vsp, a);
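
vm_seg0 is initialized self-linked on both the a and k lists: a circular list with a sentinel never has an empty-list special case, and the rotor is then spliced in with the same VMEM_INSERT used everywhere else. The sentinel trick in miniature:

#include <stdio.h>

typedef struct ring {
	struct ring *r_next, *r_prev;
	int r_id;
} ring_t;

int
main(void)
{
	/* An "empty" list is the sentinel pointing at itself. */
	ring_t seg0 = { &seg0, &seg0, 0 };
	ring_t rotor = { NULL, NULL, 1 };

	/* Insert rotor after seg0: what VMEM_INSERT(&vm_seg0, vsp, a) does. */
	rotor.r_next = seg0.r_next;
	rotor.r_prev = &seg0;
	seg0.r_next->r_prev = &rotor;
	seg0.r_next = &rotor;

	printf("empty ring? %d\n", seg0.r_next == &seg0);	/* 0 now */
	return (0);
}
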
1541 vmem_seg_t *vsp;
1570 for (vsp = seg0->vs_anext; vsp != seg0; vsp = vsp->vs_anext)
1571 vmem_putseg_global(vsp);
1587 vmem_seg_t **old_table, **new_table, *vsp;
1614 vsp = old_table[h];
1615 while (vsp != NULL) {
1616 uintptr_t addr = vsp->vs_start;
1617 vmem_seg_t *next_vsp = vsp->vs_knext;
1619 vsp->vs_knext = *hash_bucket;
1620 *hash_bucket = vsp;
1621 vsp = next_vsp;
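
The rescale loop above (from vmem_hash_rescale) moves every chain of the old table into the new one; next_vsp is saved before the segment is pushed onto its new bucket, because the push overwrites vs_knext. A standalone sketch of the same save-then-relink step (hnode_t and rehash are illustrative; bucket counts are assumed to be powers of two):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef struct hnode {
	struct hnode *h_next;
	uintptr_t h_key;
} hnode_t;

static void
rehash(hnode_t **old_table, size_t old_size,
    hnode_t **new_table, size_t new_size)
{
	for (size_t h = 0; h < old_size; h++) {
		hnode_t *np = old_table[h];

		while (np != NULL) {
			hnode_t *next = np->h_next;	/* save before push */
			hnode_t **bucket =
			    &new_table[np->h_key & (new_size - 1)];

			np->h_next = *bucket;		/* relink into new chain */
			*bucket = np;
			np = next;
		}
	}
}

int
main(void)
{
	hnode_t a = { NULL, 5 }, b = { NULL, 9 };
	hnode_t *old_table[4] = { NULL };
	hnode_t *new_table[8] = { NULL };

	a.h_next = &b;			/* 5 and 9 share old bucket 1 */
	old_table[5 & 3] = &a;
	rehash(old_table, 4, new_table, 8);
	printf("%d %d\n", new_table[5] == &a, new_table[1] == &b); /* 1 1 */
	return (0);
}
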