Lines Matching refs:cb (cross-reference listing: every line in the file that references a local variable named cb, used both for cloned buf_t I/O buffers and for cirbuf_t circular log buffers; the number leading each entry is the source line number)

75 ldl_strategy_done(buf_t *cb)
81 ASSERT(SEMA_HELD(&cb->b_sem));
82 ASSERT((cb->b_flags & B_DONE) == 0);
87 lbp = (lufs_buf_t *)cb;
90 if (cb->b_flags & B_ERROR)
97 if (atomic_add_long_nv(&sv->sv_nb_left, -cb->b_bcount)) {
166 buf_t *cb;
194 cb = bioclone(pb, offset, pbcount, dev,
202 cb->b_flags |= B_ERROR;
203 cb->b_resid = cb->b_bcount;
204 biodone(cb);
226 fssnap_strategy(&ufsvfsp->vfs_snapshot, cb);
231 (void) bdev_strategy(cb);
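
The two groups of hits above are the two halves of a split-I/O scheme: the issuing side clones the parent request with bioclone(), fails a clone immediately on a setup error (B_ERROR, full b_resid, biodone()), and otherwise hands it to fssnap_strategy() or bdev_strategy(); the completion side, ldl_strategy_done(), runs once per clone, notes any B_ERROR, and subtracts the clone's byte count from a shared sv_nb_left with atomic_add_long_nv(), so only the call that drives the count to zero goes on to finish the parent. Below is a minimal user-space sketch of that counting half using C11 atomics; parent_req_t, child_done() and split_io() are invented names for illustration, not the kernel interfaces.

    #include <stdatomic.h>
    #include <stdio.h>

    /* Invented analogue of the shared per-request state (the sv_nb_left idea). */
    typedef struct parent_req {
        atomic_long pr_nb_left;    /* bytes still outstanding */
        int         pr_error;      /* sticky error flag */
    } parent_req_t;

    /*
     * Completion side: called once per child.  The call that brings the byte
     * count to zero is the one that finishes the parent, which is the same
     * test ldl_strategy_done() makes on atomic_add_long_nv()'s return value.
     */
    static void
    child_done(parent_req_t *pr, long nbytes, int error)
    {
        if (error)
            pr->pr_error = 1;
        if (atomic_fetch_sub(&pr->pr_nb_left, nbytes) - nbytes == 0)
            printf("parent request done, error=%d\n", pr->pr_error);
    }

    /* Issue side: carve nbytes into chunks and pretend each child completed. */
    static void
    split_io(long nbytes, long chunk)
    {
        parent_req_t pr = { .pr_nb_left = nbytes, .pr_error = 0 };
        long off, n;

        for (off = 0; off < nbytes; off += chunk) {
            n = (nbytes - off < chunk) ? nbytes - off : chunk;
            child_done(&pr, n, 0);
        }
    }

    int
    main(void)
    {
        split_io(64 * 1024, 8 * 1024);
        return (0);
    }
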
292 * This means we can safely reference and change the cb and bp fields
300 cirbuf_t *cb = &ul->un_wrbuf;
302 ASSERT(bp == cb->cb_bp && bp == cb->cb_dirty);
313 cb->cb_dirty = NULL;
325 if ((newbp = cb->cb_free) != NULL) {
326 cb->cb_free = newbp->b_forw;
343 rw_enter(&cb->cb_rwlock, RW_WRITER);
348 rw_exit(&cb->cb_rwlock);
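
The un_wrbuf hits above clear cb_dirty and pop a spare header off cb_free, which is just a singly linked stack threaded through b_forw; the comment says the cb and bp fields can be referenced and changed safely here (its justification lies in context the listing does not capture), and cb_rwlock is taken as writer around the ring update. A toy user-space analogue follows; tcirbuf_t, tbuf_t and every tc_*/tb_* field are invented stand-ins for cirbuf_t and buf_t, and the sketch simply holds the writer lock across the whole swap rather than reproducing the kernel's finer locking protocol.

    #include <pthread.h>
    #include <stddef.h>

    /* Invented stand-ins for buf_t and cirbuf_t; none of these names are the kernel's. */
    typedef struct tbuf {
        struct tbuf *tb_forw;   /* ring link / free-list link (b_forw) */
        struct tbuf *tb_back;   /* ring link (b_back) */
        char        *tb_addr;   /* data this header maps (b_un.b_addr) */
        size_t       tb_count;  /* valid bytes (b_bcount) */
        long long    tb_lof;    /* log offset of the first byte (assumed) */
    } tbuf_t;

    typedef struct tcirbuf {
        pthread_rwlock_t tc_rwlock;  /* cb_rwlock */
        tbuf_t          *tc_bp;      /* current ring entry (cb_bp) */
        tbuf_t          *tc_dirty;   /* entry with unwritten data (cb_dirty) */
        tbuf_t          *tc_free;    /* spare headers, linked by tb_forw (cb_free) */
        char            *tc_va;      /* backing store (cb_va) */
        size_t           tc_nb;      /* backing store size (cb_nb) */
    } tcirbuf_t;

    /*
     * Retire the dirty entry and pop a spare header to replace it: the
     * cb_dirty = NULL / cb_free pop seen in the fragments above.
     */
    static tbuf_t *
    tc_swap_dirty(tcirbuf_t *tc)
    {
        tbuf_t *newbp;

        pthread_rwlock_wrlock(&tc->tc_rwlock);
        tc->tc_dirty = NULL;
        if ((newbp = tc->tc_free) != NULL)
            tc->tc_free = newbp->tb_forw;
        pthread_rwlock_unlock(&tc->tc_rwlock);
        return (newbp);
    }
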
352 inval_range(ml_unit_t *ul, cirbuf_t *cb, off_t lof, off_t nb)
362 rw_enter(&cb->cb_rwlock, RW_WRITER);
363 bp = cb->cb_bp;
365 if (bp == cb->cb_dirty || bp->b_bcount == 0) {
381 } while (bp != cb->cb_bp);
382 rw_exit(&cb->cb_rwlock);
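
inval_range() makes exactly one lap of the ring under the writer lock: start at cb_bp, advance through b_forw, stop when the walk returns to cb_bp, and skip the in-progress dirty entry and entries with no data (b_bcount == 0). Continuing with the toy types from the previous sketch; the overlap test and the idea that invalidating means zeroing the byte count are assumptions for illustration, since the listing does not show the body of the loop.

    /*
     * One lap of the ring under the writer lock, the same do/while shape as
     * inval_range(): skip the dirty entry and empty entries, drop anything
     * overlapping [lof, lof + nb).
     */
    static void
    tc_inval_range(tcirbuf_t *tc, long long lof, long long nb)
    {
        tbuf_t *bp;

        pthread_rwlock_wrlock(&tc->tc_rwlock);
        bp = tc->tc_bp;
        do {
            if (bp != tc->tc_dirty && bp->tb_count != 0 &&
                bp->tb_lof < lof + nb &&
                lof < bp->tb_lof + (long long)bp->tb_count)
                bp->tb_count = 0;   /* "invalidate" = forget the cached bytes */
            bp = bp->tb_forw;
        } while (bp != tc->tc_bp);
        pthread_rwlock_unlock(&tc->tc_rwlock);
    }
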
387 * This means we can safely reference and change the cb and bp fields
394 cirbuf_t *cb = &ul->un_wrbuf;
400 if ((bp = cb->cb_dirty) != NULL) {
408 inval_range(ul, cb, ul->un_tail_lof, 1);
413 rw_enter(&cb->cb_rwlock, RW_WRITER);
414 bp = cb->cb_bp->b_forw;
417 cb->cb_dirty = bp;
418 cb->cb_bp = bp;
424 rw_exit(&cb->cb_rwlock);
436 alloc_wrbuf(cirbuf_t *cb, size_t bufsize)
444 if (cb->cb_nb)
445 free_cirbuf(cb);
447 bzero(cb, sizeof (*cb));
448 rw_init(&cb->cb_rwlock, NULL, RW_DRIVER, NULL);
450 rw_enter(&cb->cb_rwlock, RW_WRITER);
460 bp->b_forw = cb->cb_free;
461 cb->cb_free = bp;
464 cb->cb_va = kmem_alloc(bufsize, KM_SLEEP);
465 cb->cb_nb = bufsize;
470 bp = cb->cb_free;
471 cb->cb_free = bp->b_forw;
475 cb->cb_bp = bp;
476 bp->b_un.b_addr = cb->cb_va;
477 bp->b_bufsize = cb->cb_nb;
479 rw_exit(&cb->cb_rwlock);
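
alloc_wrbuf() rebuilds the write-side cirbuf from scratch: free any previous incarnation, bzero the structure, initialize cb_rwlock, push spare buf headers onto cb_free, allocate one contiguous region cb_va of bufsize bytes, then pop a header off the free list, make it cb_bp, and point it at the whole region. A user-space analogue with malloc and pthread rwlocks; TC_SPARE_HDRS is an invented constant (the fragments do not show how many spare headers the kernel keeps), and tc_free_cirbuf() is sketched after the free_cirbuf() fragments further down.

    #include <stdlib.h>
    #include <string.h>

    #define TC_SPARE_HDRS 4    /* invented; the kernel's spare count is not in the listing */

    static void tc_free_cirbuf(tcirbuf_t *tc);   /* sketched further down */

    /*
     * (Re)build a write-side ring: spare headers on tc_free, one contiguous
     * backing region, and a single ring entry covering all of it.
     */
    static void
    tc_alloc_wrbuf(tcirbuf_t *tc, size_t bufsize)
    {
        tbuf_t *bp;
        int i;

        if (tc->tc_nb)
            tc_free_cirbuf(tc);    /* throw away any previous incarnation */

        memset(tc, 0, sizeof (*tc));
        pthread_rwlock_init(&tc->tc_rwlock, NULL);
        pthread_rwlock_wrlock(&tc->tc_rwlock);

        /* stash spare headers on the free list (singly linked via tb_forw) */
        for (i = 0; i < TC_SPARE_HDRS; i++) {
            bp = calloc(1, sizeof (*bp));
            bp->tb_forw = tc->tc_free;
            tc->tc_free = bp;
        }

        /* one backing region for the whole write buffer */
        tc->tc_va = malloc(bufsize);
        tc->tc_nb = bufsize;

        /* pop a header and make it the only ring entry, covering everything */
        bp = tc->tc_free;
        tc->tc_free = bp->tb_forw;
        bp->tb_forw = bp->tb_back = bp;
        bp->tb_addr = tc->tc_va;
        bp->tb_count = 0;
        tc->tc_bp = bp;

        pthread_rwlock_unlock(&tc->tc_rwlock);
    }
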
483 alloc_rdbuf(cirbuf_t *cb, size_t bufsize, size_t blksize)
492 if (cb->cb_nb)
493 free_cirbuf(cb);
495 bzero(cb, sizeof (*cb));
496 rw_init(&cb->cb_rwlock, NULL, RW_DRIVER, NULL);
498 rw_enter(&cb->cb_rwlock, RW_WRITER);
500 cb->cb_va = kmem_alloc(bufsize, KM_SLEEP);
501 cb->cb_nb = bufsize;
508 va = cb->cb_va;
518 if (cb->cb_bp) {
519 bp->b_forw = cb->cb_bp->b_forw;
520 bp->b_back = cb->cb_bp;
521 cb->cb_bp->b_forw->b_back = bp;
522 cb->cb_bp->b_forw = bp;
525 cb->cb_bp = bp;
530 rw_exit(&cb->cb_rwlock);
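
alloc_rdbuf() takes the same single allocation but carves it into blksize pieces, one header per piece, and splices each new header into a circular doubly linked ring right after cb_bp with the four pointer updates shown above; the first header simply becomes cb_bp. Same toy types; the assumption that bufsize is a whole number of blksize chunks is mine.

    /*
     * (Re)build a read-side ring: one header per blksize slice of the backing
     * store, each spliced into the circular doubly linked list just after the
     * current head, mirroring the four pointer updates in the fragments above.
     */
    static void
    tc_alloc_rdbuf(tcirbuf_t *tc, size_t bufsize, size_t blksize)
    {
        tbuf_t *bp;
        size_t off;

        if (tc->tc_nb)
            tc_free_cirbuf(tc);

        memset(tc, 0, sizeof (*tc));
        pthread_rwlock_init(&tc->tc_rwlock, NULL);
        pthread_rwlock_wrlock(&tc->tc_rwlock);

        tc->tc_va = malloc(bufsize);
        tc->tc_nb = bufsize;

        for (off = 0; off + blksize <= bufsize; off += blksize) {
            bp = calloc(1, sizeof (*bp));
            bp->tb_addr = tc->tc_va + off;

            if (tc->tc_bp != NULL) {
                /* splice bp in right after the head */
                bp->tb_forw = tc->tc_bp->tb_forw;
                bp->tb_back = tc->tc_bp;
                tc->tc_bp->tb_forw->tb_back = bp;
                tc->tc_bp->tb_forw = bp;
            } else {
                bp->tb_forw = bp->tb_back = bp;   /* first entry: ring of one */
                tc->tc_bp = bp;
            }
        }

        pthread_rwlock_unlock(&tc->tc_rwlock);
    }
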
534 free_cirbuf(cirbuf_t *cb)
538 if (cb->cb_nb == 0)
541 rw_enter(&cb->cb_rwlock, RW_WRITER);
542 ASSERT(cb->cb_dirty == NULL);
547 while ((bp = cb->cb_bp) != NULL) {
549 cb->cb_bp = NULL;
551 cb->cb_bp = bp->b_forw;
562 while ((bp = cb->cb_free) != NULL) {
563 cb->cb_free = bp->b_forw;
568 kmem_free(cb->cb_va, cb->cb_nb);
569 cb->cb_va = NULL;
570 cb->cb_nb = 0;
571 rw_exit(&cb->cb_rwlock);
572 rw_destroy(&cb->cb_rwlock);
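
free_cirbuf() is the teardown: nothing may be dirty, every ring header is freed (the single-entry case is handled specially so the loop can terminate), the spare list is drained, the backing store is released, and the lock is dropped and destroyed last. A matching teardown for the toy types; the listing only shows the loop heads, so the tb_back/tb_forw unlinking below is inferred from the ring shape rather than copied from the kernel.

    /*
     * Teardown: free every ring header (single-entry case handled so the loop
     * terminates), drain the spare list, release the backing store, and only
     * then drop and destroy the lock.
     */
    static void
    tc_free_cirbuf(tcirbuf_t *tc)
    {
        tbuf_t *bp;

        if (tc->tc_nb == 0)
            return;

        pthread_rwlock_wrlock(&tc->tc_rwlock);

        while ((bp = tc->tc_bp) != NULL) {
            if (bp->tb_forw == bp)
                tc->tc_bp = NULL;            /* that was the last entry */
            else
                tc->tc_bp = bp->tb_forw;
            bp->tb_back->tb_forw = bp->tb_forw;   /* unlink (inferred) */
            bp->tb_forw->tb_back = bp->tb_back;
            free(bp);
        }

        while ((bp = tc->tc_free) != NULL) {
            tc->tc_free = bp->tb_forw;
            free(bp);
        }

        free(tc->tc_va);
        tc->tc_va = NULL;
        tc->tc_nb = 0;

        pthread_rwlock_unlock(&tc->tc_rwlock);
        pthread_rwlock_destroy(&tc->tc_rwlock);
    }
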
584 find_bp(ml_unit_t *ul, cirbuf_t *cb, off_t lof)
591 rw_enter(&cb->cb_rwlock, RW_READER);
592 bp = cb->cb_bp;
597 rw_exit(&cb->cb_rwlock);
601 } while (bp != cb->cb_bp);
602 rw_exit(&cb->cb_rwlock);
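
find_bp() only searches, so a reader lock is enough; it walks the ring once and returns as soon as it finds the entry covering lof, dropping the lock on both the early-return and the not-found paths. Sketch below with the toy types; the containment test on tb_lof/tb_count is an assumption, and a real caller would also need some way to pin the returned buffer before the lock is released, a detail these fragments do not show.

    /*
     * Reader-locked search: one lap of the ring, early return (dropping the
     * lock) as soon as an entry covering lof is found, NULL otherwise.
     */
    static tbuf_t *
    tc_find_bp(tcirbuf_t *tc, long long lof)
    {
        tbuf_t *bp;

        pthread_rwlock_rdlock(&tc->tc_rwlock);
        bp = tc->tc_bp;
        do {
            if (bp->tb_count != 0 &&
                lof >= bp->tb_lof &&
                lof < bp->tb_lof + (long long)bp->tb_count) {
                pthread_rwlock_unlock(&tc->tc_rwlock);
                return (bp);
            }
            bp = bp->tb_forw;
        } while (bp != tc->tc_bp);
        pthread_rwlock_unlock(&tc->tc_rwlock);
        return (NULL);
    }
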
608 find_read_lof(ml_unit_t *ul, cirbuf_t *cb, off_t lof)
619 rw_enter(&cb->cb_rwlock, RW_READER);
620 bpend = bp = cb->cb_bp->b_forw;
629 rw_exit(&cb->cb_rwlock);
642 cirbuf_t *cb;
661 cb = &ul->un_rdbuf;
662 rw_enter(&cb->cb_rwlock, RW_WRITER);
663 bp = cb->cb_bp->b_forw;
667 cb->cb_bp = bp;
668 rw_exit(&cb->cb_rwlock);
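
On the read side cb_bp->b_forw is the oldest entry: find_read_lof() starts its scan there, and when a block has to be brought in fresh the un_rdbuf hits above recycle that same oldest entry and promote it to cb_bp, which amounts to round-robin replacement. A sketch of the rotation; refilling the buffer's contents is left to the caller, and the tb_lof/tb_count bookkeeping is assumed.

    /*
     * Round-robin reuse on the read side: the entry right after tc_bp is the
     * oldest one, so recycle it for the new offset and promote it to tc_bp.
     */
    static tbuf_t *
    tc_rotate_rdbuf(tcirbuf_t *tc, long long new_lof, size_t blksize)
    {
        tbuf_t *bp;

        pthread_rwlock_wrlock(&tc->tc_rwlock);
        bp = tc->tc_bp->tb_forw;     /* oldest entry */
        bp->tb_lof = new_lof;        /* caller refills bp->tb_addr afterwards */
        bp->tb_count = blksize;
        tc->tc_bp = bp;              /* now the newest */
        pthread_rwlock_unlock(&tc->tc_rwlock);
        return (bp);
    }
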
683 * This means we can safely reference and change the cb and bp fields
688 extend_write_bp(ml_unit_t *ul, cirbuf_t *cb, buf_t *bp)
692 ASSERT(bp == cb->cb_bp && bp == cb->cb_dirty);
716 rw_enter(&cb->cb_rwlock, RW_WRITER);
730 bpforw->b_forw = cb->cb_free;
731 cb->cb_free = bpforw;
733 rw_exit(&cb->cb_rwlock);
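
extend_write_bp() tries to grow the current dirty buffer by absorbing its ring neighbour when the two cover adjacent memory; on success the neighbour's header is unlinked and parked on cb_free (the two assignments just before the rw_exit above), and the caller at line 778 takes a fallback path when it returns 0. A sketch of that absorb-and-unlink step; the adjacency test and the way the caller accounts for the extra capacity are assumptions, since the listing shows only the free-list push.

    /*
     * Grow the current dirty buffer by absorbing its ring neighbour when the
     * two map adjacent memory: unlink the neighbour and park its header on the
     * free list.  Returns 1 on success, 0 if the caller must do something else.
     */
    static int
    tc_extend_write_bp(tcirbuf_t *tc, tbuf_t *bp, size_t bp_size)
    {
        tbuf_t *bpforw = bp->tb_forw;

        /* nothing to absorb: bp is alone, or the memory is not adjacent */
        if (bpforw == bp || bp->tb_addr + bp_size != bpforw->tb_addr)
            return (0);

        pthread_rwlock_wrlock(&tc->tc_rwlock);

        /* unlink the neighbour from the ring ... */
        bp->tb_forw = bpforw->tb_forw;
        bpforw->tb_forw->tb_back = bp;

        /* ... and park its header on the free list; the caller now treats
         * bp as bp_size plus the neighbour's capacity. */
        bpforw->tb_forw = tc->tc_free;
        tc->tc_free = bpforw;

        pthread_rwlock_unlock(&tc->tc_rwlock);
        return (1);
    }
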
745 cirbuf_t *cb = &ul->un_wrbuf;
778 if (!extend_write_bp(ul, cb, bp)) {
897 cirbuf_t *cb = &ul->un_wrbuf;
902 if ((bp = cb->cb_dirty) == NULL)
947 cirbuf_t *cb = &ul->un_wrbuf;
952 if ((bp = cb->cb_dirty) == NULL)
1064 cirbuf_t *cb = &ul->un_wrbuf;
1066 rw_enter(&cb->cb_rwlock, RW_WRITER);
1070 bp = cb->cb_bp;
1077 } while (bp != cb->cb_bp);
1078 rw_exit(&cb->cb_rwlock);
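
The remaining hits repeat two habits visible throughout the file: the write-path routines first look at cb_dirty and do nothing when there is nothing buffered (lines 902 and 952), and bulk operations take cb_rwlock as writer and make one do/while lap of the ring (lines 1066 to 1078). A last sketch of that lap over the toy ring, here just totalling the cached bytes; what the kernel's final loop actually does to each entry is not visible in this listing.

    /*
     * One writer-locked lap of the ring, the same do/while shape as the final
     * fragment; here it just totals the cached bytes, since the listing does
     * not show what the kernel's loop does to each entry.
     */
    static size_t
    tc_cached_bytes(tcirbuf_t *tc)
    {
        size_t total = 0;
        tbuf_t *bp;

        pthread_rwlock_wrlock(&tc->tc_rwlock);
        bp = tc->tc_bp;
        do {
            total += bp->tb_count;
            bp = bp->tb_forw;
        } while (bp != tc->tc_bp);
        pthread_rwlock_unlock(&tc->tc_rwlock);
        return (total);
    }
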