Lines Matching refs:buf

459 struct vioif_rx_buf *buf = (void *) free_arg;
460 struct vioif_softc *sc = buf->rb_sc;
462 kmem_cache_free(sc->sc_rxbuf_cache, buf);
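
Lines 459-462 are the body of the receive buffer's free routine: the frtn_t callback that STREAMS invokes once an mblk loaned upstream by desballoc(9F) is freed. Rather than tearing down DMA state, it recycles the still-mapped buffer into the receive kmem cache. A minimal sketch of the full callback, with the function name and signature inferred from the matches; this and the later sketches assume the usual kernel headers (<sys/ddi.h>, <sys/sunddi.h>, <sys/stream.h>, <sys/strsun.h>) and the driver's own structure definitions.

static void
vioif_rx_free(caddr_t free_arg)
{
        struct vioif_rx_buf *buf = (void *)free_arg;
        struct vioif_softc *sc = buf->rb_sc;

        /* The buffer stays DMA-bound; just return it to the cache. */
        kmem_cache_free(sc->sc_rxbuf_cache, buf);
}
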
471 struct vioif_rx_buf *buf = buffer;
475 DDI_DMA_SLEEP, NULL, &buf->rb_mapping.vbm_dmah)) {
481 if (ddi_dma_mem_alloc(buf->rb_mapping.vbm_dmah,
484 NULL, &buf->rb_mapping.vbm_buf, &len, &buf->rb_mapping.vbm_acch)) {
491 if (ddi_dma_addr_bind_handle(buf->rb_mapping.vbm_dmah, NULL,
492 buf->rb_mapping.vbm_buf, len, DDI_DMA_READ | DDI_DMA_STREAMING,
493 DDI_DMA_SLEEP, NULL, &buf->rb_mapping.vbm_dmac,
494 &buf->rb_mapping.vbm_ncookies)) {
500 ASSERT(buf->rb_mapping.vbm_ncookies <= VIOIF_INDIRECT_MAX);
502 buf->rb_sc = sc;
503 buf->rb_frtn.free_arg = (void *) buf;
504 buf->rb_frtn.free_func = vioif_rx_free;
508 ddi_dma_mem_free(&buf->rb_mapping.vbm_acch);
510 ddi_dma_free_handle(&buf->rb_mapping.vbm_dmah);
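
Lines 471-504 are the receive buffer kmem cache constructor, performing the canonical three-step DDI DMA setup: ddi_dma_alloc_handle(9F), ddi_dma_mem_alloc(9F), ddi_dma_addr_bind_handle(9F). The matches at 508-510 are its error unwind, releasing whichever resources were already acquired. A hedged sketch of the whole pattern; the attribute structures (vioif_buf_dma_attr, vioif_bufattr), the allocation size VIOIF_RX_SIZE, the label names, and the return values are assumptions, not confirmed by the matches.

static int
vioif_rx_construct(void *buffer, void *user_arg, int kmflags)
{
        struct vioif_rx_buf *buf = buffer;
        struct vioif_softc *sc = user_arg;
        size_t len;

        if (ddi_dma_alloc_handle(sc->sc_dev, &vioif_buf_dma_attr,
            DDI_DMA_SLEEP, NULL, &buf->rb_mapping.vbm_dmah) != DDI_SUCCESS)
                goto exit_handle;

        if (ddi_dma_mem_alloc(buf->rb_mapping.vbm_dmah, VIOIF_RX_SIZE,
            &vioif_bufattr, DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
            &buf->rb_mapping.vbm_buf, &len,
            &buf->rb_mapping.vbm_acch) != DDI_SUCCESS)
                goto exit_alloc;

        if (ddi_dma_addr_bind_handle(buf->rb_mapping.vbm_dmah, NULL,
            buf->rb_mapping.vbm_buf, len, DDI_DMA_READ | DDI_DMA_STREAMING,
            DDI_DMA_SLEEP, NULL, &buf->rb_mapping.vbm_dmac,
            &buf->rb_mapping.vbm_ncookies) != DDI_DMA_MAPPED)
                goto exit_bind;

        /* The mapping must fit in one indirect descriptor list. */
        ASSERT(buf->rb_mapping.vbm_ncookies <= VIOIF_INDIRECT_MAX);

        buf->rb_sc = sc;
        buf->rb_frtn.free_arg = (void *)buf;
        buf->rb_frtn.free_func = vioif_rx_free;

        return (0);

exit_bind:
        ddi_dma_mem_free(&buf->rb_mapping.vbm_acch);
exit_alloc:
        ddi_dma_free_handle(&buf->rb_mapping.vbm_dmah);
exit_handle:
        return (ENOMEM);
}
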
520 struct vioif_rx_buf *buf = buffer;
522 ASSERT(buf->rb_mapping.vbm_acch);
523 ASSERT(buf->rb_mapping.vbm_dmah);
525 (void) ddi_dma_unbind_handle(buf->rb_mapping.vbm_dmah);
526 ddi_dma_mem_free(&buf->rb_mapping.vbm_acch);
527 ddi_dma_free_handle(&buf->rb_mapping.vbm_dmah);
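
Lines 520-527 are the matching cache destructor. Teardown runs in the reverse order of construction: unbind the handle, free the DMA memory, then free the handle itself. Reconstructed from the matches, with the function name and signature inferred:

static void
vioif_rx_destruct(void *buffer, void *user_arg)
{
        struct vioif_rx_buf *buf = buffer;

        ASSERT(buf->rb_mapping.vbm_acch);
        ASSERT(buf->rb_mapping.vbm_dmah);

        /* Reverse order of the constructor's setup. */
        (void) ddi_dma_unbind_handle(buf->rb_mapping.vbm_dmah);
        ddi_dma_mem_free(&buf->rb_mapping.vbm_acch);
        ddi_dma_free_handle(&buf->rb_mapping.vbm_dmah);
}
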
536 struct vioif_tx_buf *buf = &sc->sc_txbufs[i];
541 ASSERT(buf->tb_inline_mapping.vbm_acch);
542 ASSERT(buf->tb_inline_mapping.vbm_dmah);
544 (void) ddi_dma_unbind_handle(buf->tb_inline_mapping.vbm_dmah);
545 ddi_dma_mem_free(&buf->tb_inline_mapping.vbm_acch);
546 ddi_dma_free_handle(&buf->tb_inline_mapping.vbm_dmah);
549 ASSERT(!buf->tb_mp);
552 for (j = 0; buf->tb_external_mapping[j].vbm_dmah; j++)
554 &buf->tb_external_mapping[j].vbm_dmah);
556 kmem_free(buf->tb_external_mapping,
564 struct vioif_rx_buf *buf = sc->sc_rxbufs[i];
566 if (buf)
567 kmem_cache_free(sc->sc_rxbuf_cache, buf);
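
Lines 536-556 free each tx descriptor's resources and lines 564-567 drain the rx ring back into the cache. The tx loop must cope with lazily created external handles (see lines 957-965), so it walks tb_external_mapping until the first NULL handle. A sketch of the loop; the queue-size field name and the kmem_free size expression are assumptions chosen to match the allocation at line 647.

for (i = 0; i < sc->sc_tx_qsize; i++) {
        struct vioif_tx_buf *buf = &sc->sc_txbufs[i];
        int j;

        ASSERT(buf->tb_inline_mapping.vbm_acch);
        ASSERT(buf->tb_inline_mapping.vbm_dmah);

        (void) ddi_dma_unbind_handle(buf->tb_inline_mapping.vbm_dmah);
        ddi_dma_mem_free(&buf->tb_inline_mapping.vbm_acch);
        ddi_dma_free_handle(&buf->tb_inline_mapping.vbm_dmah);

        /* No send may still be outstanding at teardown. */
        ASSERT(!buf->tb_mp);

        /* External handles exist only if a send ever needed them. */
        for (j = 0; buf->tb_external_mapping[j].vbm_dmah; j++)
                ddi_dma_free_handle(&buf->tb_external_mapping[j].vbm_dmah);

        kmem_free(buf->tb_external_mapping,
            sizeof (*buf->tb_external_mapping) * (VIOIF_INDIRECT_MAX - 1));
}
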
605 struct vioif_tx_buf *buf = &sc->sc_txbufs[i];
611 DDI_DMA_SLEEP, NULL, &buf->tb_inline_mapping.vbm_dmah)) {
618 if (ddi_dma_mem_alloc(buf->tb_inline_mapping.vbm_dmah,
620 DDI_DMA_SLEEP, NULL, &buf->tb_inline_mapping.vbm_buf,
621 &len, &buf->tb_inline_mapping.vbm_acch)) {
629 if (ddi_dma_addr_bind_handle(buf->tb_inline_mapping.vbm_dmah,
630 NULL, buf->tb_inline_mapping.vbm_buf, len,
632 &buf->tb_inline_mapping.vbm_dmac, &nsegments)) {
647 buf->tb_external_mapping = kmem_zalloc(
661 struct vioif_tx_buf *buf = &sc->sc_txbufs[i];
663 if (buf->tb_inline_mapping.vbm_dmah)
665 buf->tb_inline_mapping.vbm_dmah);
667 if (buf->tb_inline_mapping.vbm_acch)
669 &buf->tb_inline_mapping.vbm_acch);
671 if (buf->tb_inline_mapping.vbm_dmah)
673 &buf->tb_inline_mapping.vbm_dmah);
675 if (buf->tb_external_mapping)
676 kmem_free(buf->tb_external_mapping,
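
Lines 605-647 allocate each tx descriptor's resources: the inline buffer gets the same three-step DDI setup as the rx constructor, while tb_external_mapping is only kmem_zalloc()ed so its DMA handles can be created on demand. Because allocation can fail partway through the ring, the unwind at 661-676 tests every resource before releasing it, unlike the constructor unwind, which knows exactly which steps succeeded. A sketch of that defensive unwind (label name, return value, and size expression assumed):

exit_txalloc:
        for (i = 0; i < sc->sc_tx_qsize; i++) {
                struct vioif_tx_buf *buf = &sc->sc_txbufs[i];

                /*
                 * An allocated-but-unbound handle makes
                 * ddi_dma_unbind_handle() fail harmlessly; the result
                 * is deliberately ignored.
                 */
                if (buf->tb_inline_mapping.vbm_dmah)
                        (void) ddi_dma_unbind_handle(
                            buf->tb_inline_mapping.vbm_dmah);

                if (buf->tb_inline_mapping.vbm_acch)
                        ddi_dma_mem_free(&buf->tb_inline_mapping.vbm_acch);

                if (buf->tb_inline_mapping.vbm_dmah)
                        ddi_dma_free_handle(&buf->tb_inline_mapping.vbm_dmah);

                if (buf->tb_external_mapping)
                        kmem_free(buf->tb_external_mapping,
                            sizeof (*buf->tb_external_mapping) *
                            (VIOIF_INDIRECT_MAX - 1));
        }
        return (ENOMEM);
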
718 struct vioif_rx_buf *buf;
731 buf = sc->sc_rxbufs[ve->qe_index];
733 if (!buf) {
735 buf = kmem_cache_alloc(sc->sc_rxbuf_cache, kmflag);
736 sc->sc_rxbufs[ve->qe_index] = buf;
740 if (!buf) {
748 ASSERT(buf->rb_mapping.vbm_ncookies >= 1);
755 buf->rb_mapping.vbm_dmac.dmac_laddress,
760 buf->rb_mapping.vbm_dmac.dmac_laddress +
762 buf->rb_mapping.vbm_dmac.dmac_size -
770 if (buf->rb_mapping.vbm_ncookies > 1) {
774 vioif_dma_curr_cookie(buf->rb_mapping.vbm_dmah);
776 ddi_dma_nextcookie(buf->rb_mapping.vbm_dmah, &dmac);
777 virtio_ve_add_cookie(ve, buf->rb_mapping.vbm_dmah,
778 dmac, buf->rb_mapping.vbm_ncookies - 1, B_FALSE);
779 vioif_dma_reset_cookie(buf->rb_mapping.vbm_dmah,
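
Lines 718-779 populate a receive descriptor: the ring slot's buffer is allocated from the cache on first use (731-740; the slot is empty after its previous buffer was loaned upstream), the first DMA cookie covers the virtio net header and the start of the frame (748-762), and the remaining cookies of a scatter/gather mapping are chained onto the queue entry (770-779). A sketch of the lazy allocation and the multi-cookie tail; the return type of vioif_dma_curr_cookie() is an assumption.

buf = sc->sc_rxbufs[ve->qe_index];
if (!buf) {
        /* Slot emptied by an upstream loan: map in a fresh buffer. */
        buf = kmem_cache_alloc(sc->sc_rxbuf_cache, kmflag);
        sc->sc_rxbufs[ve->qe_index] = buf;
}
if (!buf)
        return (ENOMEM);        /* error handling assumed */

ASSERT(buf->rb_mapping.vbm_ncookies >= 1);

/* ... first cookie set up here from vbm_dmac (lines 748-762) ... */

if (buf->rb_mapping.vbm_ncookies > 1) {
        ddi_dma_cookie_t dmac;
        /* Save the cookie iterator so it can be rewound afterwards. */
        uint_t saved = vioif_dma_curr_cookie(buf->rb_mapping.vbm_dmah);

        ddi_dma_nextcookie(buf->rb_mapping.vbm_dmah, &dmac);
        virtio_ve_add_cookie(ve, buf->rb_mapping.vbm_dmah, dmac,
            buf->rb_mapping.vbm_ncookies - 1, B_FALSE);
        vioif_dma_reset_cookie(buf->rb_mapping.vbm_dmah, saved);
}
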
805 struct vioif_rx_buf *buf;
812 buf = sc->sc_rxbufs[ve->qe_index];
813 ASSERT(buf);
839 bcopy((char *)buf->rb_mapping.vbm_buf +
845 buf->rb_mapping.vbm_buf +
847 VIOIF_IP_ALIGN, len, 0, &buf->rb_frtn);
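
Lines 805-847 are the receive completion path. For each finished descriptor the driver either bcopy()s a small frame into a fresh mblk or, for larger frames, loans the DMA buffer itself upstream with desballoc(9F), passing the frtn_t prepared at lines 502-504 so the buffer returns to the cache when the stack frees it. A sketch of that copy-versus-loan decision; the threshold name sc_rxcopy_thresh and the exact header offsets are assumptions.

if (len < sc->sc_rxcopy_thresh) {
        /* Small frame: copy out and leave the buffer in the ring. */
        if ((mp = allocb(len, 0)) == NULL)
                goto exit_drop; /* drop accounting assumed */
        bcopy((char *)buf->rb_mapping.vbm_buf +
            sizeof (struct virtio_net_hdr) + VIOIF_IP_ALIGN,
            mp->b_rptr, len);
        mp->b_wptr = mp->b_rptr + len;
} else {
        /*
         * Large frame: loan the mapped buffer; vioif_rx_free()
         * recycles it once the stack is done with the mblk.
         */
        mp = desballoc((unsigned char *)buf->rb_mapping.vbm_buf +
            sizeof (struct virtio_net_hdr) + VIOIF_IP_ALIGN,
            len, 0, &buf->rb_frtn);
        if (mp == NULL)
                goto exit_drop;
        mp->b_wptr = mp->b_rptr + len;
        /* Empty the slot so the refill path allocates a new buffer. */
        sc->sc_rxbufs[ve->qe_index] = NULL;
}
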
901 struct vioif_tx_buf *buf;
910 buf = &sc->sc_txbufs[ve->qe_index];
911 mp = buf->tb_mp;
912 buf->tb_mp = NULL;
915 for (int i = 0; i < buf->tb_external_num; i++)
917 buf->tb_external_mapping[i].vbm_dmah);
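
Lines 901-917 reclaim completed transmit descriptors: the mblk chain held since the send is detached and freed, and every external mapping bound for it is unbound, while the handles themselves are kept for reuse by later sends. Sketch:

buf = &sc->sc_txbufs[ve->qe_index];
mp = buf->tb_mp;
buf->tb_mp = NULL;

/* Unbind the fragments mapped for this send; keep the handles. */
for (int i = 0; i < buf->tb_external_num; i++)
        (void) ddi_dma_unbind_handle(buf->tb_external_mapping[i].vbm_dmah);

/* The chain was only held to keep its pages stable; free it now. */
if (mp != NULL)
        freemsg(mp);
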
942 struct vioif_tx_buf *buf;
943 buf = &sc->sc_txbufs[ve->qe_index];
945 ASSERT(buf);
948 mcopymsg(mp, buf->tb_inline_mapping.vbm_buf +
952 buf->tb_inline_mapping.vbm_dmac.dmac_laddress +
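
Lines 942-952 are the inline (copy) transmit path: mcopymsg(9F) copies the whole chain into the preallocated per-descriptor buffer just past the virtio net header, freeing the mblks as it goes, so nothing needs to be held or unbound at reclaim time. A sketch; virtio_ve_set() and msg_size are assumed from context.

buf = &sc->sc_txbufs[ve->qe_index];

/* Copy the chain into the inline buffer and free the mblks. */
mcopymsg(mp, buf->tb_inline_mapping.vbm_buf +
    sizeof (struct virtio_net_hdr));

/* Point the descriptor just past the header in the same buffer. */
virtio_ve_set(ve, buf->tb_inline_mapping.vbm_dmac.dmac_laddress +
    sizeof (struct virtio_net_hdr), msg_size, B_TRUE);
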
957 vioif_tx_lazy_handle_alloc(struct vioif_softc *sc, struct vioif_tx_buf *buf,
962 if (!buf->tb_external_mapping[i].vbm_dmah) {
965 &buf->tb_external_mapping[i].vbm_dmah);
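
Lines 957-965 show the lazy external handle allocator: a descriptor's i-th external DMA handle is created only the first time a send needs to map that many fragments, sparing ddi_dma_alloc_handle(9F) work on rings that only ever see copied sends. Reconstructed, with the DMA attribute name assumed:

static int
vioif_tx_lazy_handle_alloc(struct vioif_softc *sc, struct vioif_tx_buf *buf,
    int i)
{
        int ret = DDI_SUCCESS;

        if (!buf->tb_external_mapping[i].vbm_dmah) {
                ret = ddi_dma_alloc_handle(sc->sc_dev,
                    &vioif_mapped_buf_dma_attr, DDI_DMA_SLEEP, NULL,
                    &buf->tb_external_mapping[i].vbm_dmah);
        }

        return (ret);
}
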
981 struct vioif_tx_buf *buf;
986 buf = &sc->sc_txbufs[ve->qe_index];
988 ASSERT(buf);
990 buf->tb_external_num = 0;
1009 ret = vioif_tx_lazy_handle_alloc(sc, buf, i);
1016 buf->tb_external_mapping[i].vbm_dmah, NULL,
1040 virtio_ve_add_cookie(ve, buf->tb_external_mapping[i].vbm_dmah,
1047 buf->tb_external_num = i;
1049 buf->tb_mp = mp;
1059 buf->tb_external_mapping[j].vbm_dmah);
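
Lines 981-1059 are the mapped (zero-copy) transmit path: each mblk fragment gets a lazily allocated handle, is bound with ddi_dma_addr_bind_handle(9F), and its cookies are appended to the queue entry; on failure, everything bound so far is unbound (1059). The chain is stashed in tb_mp (1049) so its pages stay alive until reclaim. A hedged sketch; the function signature, the zero-length-fragment handling, and the error propagation are assumptions.

static int
vioif_tx_external(struct vioif_softc *sc, struct vq_entry *ve, mblk_t *mp)
{
        struct vioif_tx_buf *buf = &sc->sc_txbufs[ve->qe_index];
        mblk_t *nmp;
        int i = 0, j;

        buf->tb_external_num = 0;

        for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) {
                size_t chunk = MBLKL(nmp);
                ddi_dma_cookie_t dmac;
                uint_t ncookies;

                if (chunk == 0)
                        continue;

                if (vioif_tx_lazy_handle_alloc(sc, buf, i) != DDI_SUCCESS)
                        goto exit_unbind;

                if (ddi_dma_addr_bind_handle(
                    buf->tb_external_mapping[i].vbm_dmah, NULL,
                    (caddr_t)nmp->b_rptr, chunk,
                    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
                    NULL, &dmac, &ncookies) != DDI_DMA_MAPPED)
                        goto exit_unbind;

                virtio_ve_add_cookie(ve,
                    buf->tb_external_mapping[i].vbm_dmah, dmac, ncookies,
                    B_TRUE);
                i++;
        }

        buf->tb_external_num = i;
        buf->tb_mp = mp;        /* held until the descriptor is reclaimed */

        return (DDI_SUCCESS);

exit_unbind:
        for (j = 0; j < i; j++)
                (void) ddi_dma_unbind_handle(
                    buf->tb_external_mapping[j].vbm_dmah);
        return (DDI_FAILURE);
}
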
1069 struct vioif_tx_buf *buf;
1096 buf = &sc->sc_txbufs[ve->qe_index];
1099 (void) memset(buf->tb_inline_mapping.vbm_buf, 0,
1102 net_header = (struct virtio_net_hdr *)buf->tb_inline_mapping.vbm_buf;
1137 buf->tb_inline_mapping.vbm_dmac.dmac_laddress,
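
Lines 1069-1137 belong to the send routine: the virtio net header is zeroed in place at the head of the inline buffer (1099-1102), its offload fields are filled in, and the header's device address (1137) is queued as the first cookie of the send. The header setup, sketched:

struct virtio_net_hdr *net_header;

buf = &sc->sc_txbufs[ve->qe_index];

/* The header occupies the head of the inline buffer; clear it. */
(void) memset(buf->tb_inline_mapping.vbm_buf, 0,
    sizeof (struct virtio_net_hdr));
net_header = (struct virtio_net_hdr *)buf->tb_inline_mapping.vbm_buf;

/*
 * Checksum and GSO fields (flags, gso_type, csum_start, csum_offset)
 * are set here before buf->tb_inline_mapping.vbm_dmac.dmac_laddress
 * is handed to the queue as the header cookie.
 */
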
1495 char buf[512];
1496 char *bufp = buf;
1497 char *bufend = buf + sizeof (buf);
1507 dev_err(sc->sc_dev, CE_NOTE, "!%s Vioif (%b)", buf, features,
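
Lines 1495-1507 build a human-readable feature list using the bounded-append idiom: bufp walks buf while bufend caps every write, and the result is emitted through dev_err(9F) with a %b bitfield format. A sketch of the idiom; the feature tests shown and the trailing bit-format string are placeholders, and truncation handling (snprintf returning more than the remaining space) is elided.

char buf[512];
char *bufp = buf;
char *bufend = buf + sizeof (buf);

bufp += snprintf(bufp, bufend - bufp, "Host features: ");
if (features & VIRTIO_NET_F_CSUM)
        bufp += snprintf(bufp, bufend - bufp, "CSUM ");
if (features & VIRTIO_NET_F_MAC)
        bufp += snprintf(bufp, bufend - bufp, "MAC ");

dev_err(sc->sc_dev, CE_NOTE, "!%s Vioif (%b)", buf, features,
    VIOIF_FEATURE_BITS);        /* %b bit-format string assumed */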