Lines Matching defs:slab

117 * Default number of packet descriptors per descriptor slab.  Making
118 * this too small will trigger more descriptor slab allocation; making
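
The matches below touch only a few pdesc_slab_t fields. For orientation, here is the slab layout those accesses imply; this is an inference from the listing, not the actual declaration (uint_t is the illumos typedef for unsigned int, and multidata_t is left opaque):

typedef struct pdesc_slab_s pdesc_slab_t;
typedef struct multidata_s multidata_t;		/* opaque here */

typedef struct pdesc_s {
	pdesc_slab_t	*pd_slab;	/* back-pointer to owning slab (line 769) */
	/* remaining descriptor fields are not visible in these matches */
} pdesc_t;

struct pdesc_slab_s {
	pdesc_slab_t	*pds_next;	/* queue link pair; insque()/remque() */
	pdesc_slab_t	*pds_prev;	/*   operate on these two pointers */
	multidata_t	*pds_mmd;	/* multidata this slab belongs to */
	uint_t		pds_used;	/* descriptors handed out so far */
	uint_t		pds_sz;		/* capacity (cnt in the constructor) */
	pdesc_t		pds_free_desc[1];	/* pds_sz descriptors follow */
};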
329 pdesc_slab_t *slab;
356 slab = Q2PDSLAB(mmd->mmd_pd_slab_q.ql_next);
357 while (slab != Q2PDSLAB(&(mmd->mmd_pd_slab_q))) {
358 pdesc_slab_t *slab_next = Q2PDSLAB(slab->pds_next);
360 remque(&(slab->pds_next));
361 slab->pds_next = NULL;
362 slab->pds_prev = NULL;
363 slab->pds_mmd = NULL;
364 slab->pds_used = 0;
365 kmem_cache_free(pd_slab_cache, slab);
369 slab = slab_next;
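
Lines 356-369 are the teardown walk: unlink each slab from the per-multidata circular queue with remque(), scrub its linkage and ownership fields so the cache destructor's ASSERTs hold, and return the buffer to pd_slab_cache. The following is a minimal, self-contained userland rendering of the same walk, with glibc's insque()/remque() and malloc()/free() standing in for the kmem cache; the types are simplified stand-ins, not the kernel ones:

#include <search.h>	/* insque(), remque() */
#include <stdio.h>
#include <stdlib.h>

typedef struct slab_s {
	struct slab_s	*pds_next;	/* first two members form the link */
	struct slab_s	*pds_prev;	/*   pair insque()/remque() expect */
	unsigned	pds_used;
} slab_t;

int
main(void)
{
	slab_t head;			/* circular queue head, like mmd_pd_slab_q */
	slab_t *slab, *slab_next;
	int i;

	head.pds_next = head.pds_prev = &head;	/* empty queue */

	for (i = 0; i < 3; i++) {	/* append a few slabs at the tail */
		slab_t *s = calloc(1, sizeof (*s));
		if (s != NULL)
			insque(s, head.pds_prev);
	}

	/* The walk from lines 356-369. */
	slab = head.pds_next;
	while (slab != &head) {
		slab_next = slab->pds_next;
		remque(slab);
		slab->pds_next = NULL;	/* scrub, as the original does, so */
		slab->pds_prev = NULL;	/*   the destructor ASSERTs hold */
		slab->pds_used = 0;
		free(slab);		/* kmem_cache_free() in the kernel */
		slab = slab_next;
	}
	(void) printf("queue empty: %d\n", head.pds_next == &head);
	return (0);
}

Because the link pair sits at offset zero here, passing the slab itself to remque() is equivalent to the original's remque(&(slab->pds_next)).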
727 pdesc_slab_t *slab, *slab_last;
739 * Is the slab list empty, or is the last-added slab full? If so,
740 * allocate a new slab for the descriptor; otherwise, use the
741 * last-added slab instead.
746 slab = kmem_cache_alloc(pd_slab_cache, kmflags);
747 if (slab == NULL) {
753 slab->pds_mmd = mmd;
755 ASSERT(slab->pds_used == 0);
756 ASSERT(slab->pds_next == NULL && slab->pds_prev == NULL);
758 /* insert slab at end of list */
759 insque(&(slab->pds_next), mmd->mmd_pd_slab_q.ql_prev);
762 slab = slab_last;
764 ASSERT(slab->pds_used < slab->pds_sz);
765 pd = &(slab->pds_free_desc[slab->pds_used++]);
769 pd->pd_slab = slab;
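
Lines 739-769 are the allocation path: if the slab queue is empty or the last-added slab is exhausted, a fresh slab comes from pd_slab_cache and is appended at the tail with insque(); either way, the descriptor handed out is the next unused slot of that slab's pds_free_desc[] array, stamped with a back-pointer to its slab. A hedged sketch of that shape, assuming the includes and inferred types from the sketches above (slab_get() is a hypothetical stand-in for the kmem_cache_alloc() step, and the pds_mmd ownership assignment from line 753 is elided):

/*
 * qhead must be an initialized circular queue head, and slab_get()
 * must return a constructed slab (pds_used == 0) or NULL.
 */
static pdesc_t *
pdesc_alloc(pdesc_slab_t *qhead, pdesc_slab_t *(*slab_get)(void))
{
	pdesc_slab_t *slab = qhead->pds_prev;	/* last-added slab */
	pdesc_t *pd;

	if (slab == qhead || slab->pds_used == slab->pds_sz) {
		/* queue empty or tail slab full: append a fresh one */
		if ((slab = slab_get()) == NULL)
			return (NULL);
		insque(&(slab->pds_next), qhead->pds_prev);
	}

	pd = &(slab->pds_free_desc[slab->pds_used++]);	/* bump allocation */
	pd->pd_slab = slab;	/* back-pointer, as at line 769 */
	return (pd);
}

The matches show no per-descriptor free path: pds_used only grows, and descriptors are reclaimed in bulk when the whole multidata is torn down, which is what keeps the allocator a single array-index bump.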
789 * Packet descriptor slab kmem cache constructor routine.
795 pdesc_slab_t *slab;
799 ASSERT(cnt > 0); /* slab size can't be zero */
801 slab = (pdesc_slab_t *)buf;
802 slab->pds_next = NULL;
803 slab->pds_prev = NULL;
804 slab->pds_mmd = NULL;
805 slab->pds_used = 0;
806 slab->pds_sz = cnt;
809 pdesc_t *pd = &(slab->pds_free_desc[i]);
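
Lines 795-809 show the constructor body; the matches elide its signature, where cnt comes from, and the rest of the per-descriptor loop. Illumos kmem cache constructors have the form int (*)(void *buf, void *cdrarg, int kmflags), so a hedged reconstruction looks like the following (the routine name, the pdesc_slab_sz tunable, and the zeroing in the loop are all assumptions):

/* ARGSUSED */
static int
pdslab_constructor(void *buf, void *cdrarg, int kmflags)
{
	pdesc_slab_t *slab;
	uint_t cnt, i;

	cnt = pdesc_slab_sz;	/* hypothetical tunable; the real source
				 * of cnt is not visible in the matches */
	ASSERT(cnt > 0);	/* slab size can't be zero */

	slab = (pdesc_slab_t *)buf;
	slab->pds_next = NULL;
	slab->pds_prev = NULL;
	slab->pds_mmd = NULL;
	slab->pds_used = 0;
	slab->pds_sz = cnt;

	for (i = 0; i < cnt; i++) {
		pdesc_t *pd = &(slab->pds_free_desc[i]);
		bzero(pd, sizeof (*pd));	/* assumed; only line 809 of
						 * the loop body is shown */
	}
	return (0);
}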
816 * Packet descriptor slab kmem cache destructor routine.
822 pdesc_slab_t *slab;
824 slab = (pdesc_slab_t *)buf;
825 ASSERT(slab->pds_next == NULL);
826 ASSERT(slab->pds_prev == NULL);
827 ASSERT(slab->pds_mmd == NULL);
828 ASSERT(slab->pds_used == 0);
829 ASSERT(slab->pds_sz > 0);
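
The destructor is pure verification: every ASSERT checks that the teardown path (lines 360-364) scrubbed the slab back to its constructed state, which is the kmem cache contract that buffers look freshly constructed when they return to the cache. For context, a sketch of how a cache would tie the two routines together; only the kmem_cache_create() signature is standard here, while the cache name, buffer-size macro, and routine names are assumptions:

pd_slab_cache = kmem_cache_create("pd_slab_cache",
    PDESC_SLAB_BUFSIZE,		/* hypothetical: header plus descriptors */
    0,				/* default alignment */
    pdslab_constructor,		/* lines 795-809 */
    pdslab_destructor,		/* lines 822-829 */
    NULL,			/* no reclaim callback */
    NULL, NULL, 0);		/* cdrarg, vmem source, cflags */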