Lines Matching defs:lists

127  * The logical page free list is maintained as two lists, the 'free'
128 * and the 'cache' lists.
131 * The implementation of the lists is machine dependent.
147 * page free and page cache lists. If there is just one
162 * lists are searched again. If a page is freed while pcf_block is
540 * Add a physical chunk of memory to the system free lists during startup.
1726 * Called from page_create_va() when both the cache and free lists
1801 * free and cache lists.
1804 * First try both lists without worrying about color.
1808 * stop deletions from the lists. This will help because
1891 * locked pages on the lists free up,
2387 * and cache lists we try for the correct color.
2433 * just waiting to stuff it on the appropriate lists.
2616 * The free list is really two lists maintained by
3672 * Break page list cppp into two lists with npages in the first list.
3695 /* Fix head and tail of new lists */
3752 * The caller is responsible for protecting the lists.
3772 * The caller is responsible for protecting the lists.
4502 * Replace the page "old" with the page "new" on the page hash and vnode lists
6203 * Each hash bucket will have its own mutex and two lists which are:
6213 * outstanding for a given page, it will always be in one of the two lists.
6223 page_capture_hash_bucket_t lists[2]; /* sentinel nodes */
6277 * No more requests can get added to the hash lists for this consumer
6283 bp1 = page_capture_hash[i].lists[j].next;
6285 while (bp1 != &page_capture_hash[i].lists[j]) {
6333 bp = page_capture_hash[index].lists[0].next;
6334 while (bp != &page_capture_hash[index].lists[0]) {
6341 bp->next = page_capture_hash[index].lists[1].next;
6342 bp->prev = &page_capture_hash[index].lists[1];
6343 page_capture_hash[index].lists[1].next = bp;
6435 tp1 = page_capture_hash[index].lists[l].next;
6436 while (tp1 != &page_capture_hash[index].lists[l]) {
6450 bp1->next = page_capture_hash[index].lists[0].next;
6451 bp1->prev = &page_capture_hash[index].lists[0];
6453 page_capture_hash[index].lists[0].next = bp1;
6477 bp2 = page_capture_hash[index].lists[i].next;
6478 while (bp2 != &page_capture_hash[index].lists[i]) {
6777 bp1 = page_capture_hash[index].lists[i].next;
6778 while (bp1 != &page_capture_hash[index].lists[i]) {
6861 bp1->next = page_capture_hash[index].lists[1].next;
6862 bp1->prev = &page_capture_hash[index].lists[1];
6865 page_capture_hash[index].lists[1].next = bp1;
6877 bp2 = page_capture_hash[index].lists[i].next;
6878 while (bp2 != &page_capture_hash[index].lists[i]) {
7073 bp = page_capture_hash[index].lists[i].next;
7074 while (bp != &page_capture_hash[index].lists[i]) {
7099 page_capture_hash[i].lists[0].next =
7100 &page_capture_hash[i].lists[0];
7101 page_capture_hash[i].lists[0].prev =
7102 &page_capture_hash[i].lists[0];
7103 page_capture_hash[i].lists[1].next =
7104 &page_capture_hash[i].lists[1];
7105 page_capture_hash[i].lists[1].prev =
7106 &page_capture_hash[i].lists[1];
7130 /* walk lists looking for pages to scrub */
7143 bp = page_capture_hash[i].lists[j].next;
7144 while (bp != &page_capture_hash[i].lists[j]) {
7187 bp1 = &page_capture_hash[i].lists[1];
7190 bp1->prev->next = page_capture_hash[i].lists[0].next;
7191 bp2->prev = &page_capture_hash[i].lists[0];
7192 page_capture_hash[i].lists[0].next->prev = bp1->prev;
7193 page_capture_hash[i].lists[0].next = bp2;
7200 bp1 = page_capture_hash[i].lists[0].next;
7201 while (bp1 != &page_capture_hash[i].lists[0]) {
7206 page_capture_hash[i].lists[0].next = bp1->next;
7208 &page_capture_hash[i].lists[0];
7222 bp1 = page_capture_hash[i].lists[0].next;
7242 bp1 = page_capture_hash[i].lists[0].next;