Lines Matching refs:xreq

237 xdb_get_buf(xdb_t *vdp, blkif_request_t *req, xdb_request_t *xreq)
247 bp = XDB_XREQ2BP(xreq);
248 curseg = xreq->xr_curseg;
255 xreq->xr_vdp = vdp;
256 xreq->xr_op = op;
257 xreq->xr_id = ddi_get64(acchdl, &req->id);
258 segs = xreq->xr_buf_pages = ddi_get8(acchdl, &req->nr_segments);
281 segs = xreq->xr_buf_pages =
289 vdp->xs_iopage_va, xreq->xr_idx, i);
312 xreq->xr_segs[i].fs = fs;
313 xreq->xr_segs[i].ls = ls;
334 xreq->xr_plist[j].p_pagenum);
340 xreq->xr_page_hdls[i] = mapops[i].handle;
357 xreq->xr_pplist[i] = &xreq->xr_plist[i];
358 xreq->xr_plist[i].p_pagenum =
374 vdp->xs_iopage_va, xreq->xr_idx, i);
399 XDB_DBPRINT(XDB_DBG_IO, (CE_NOTE, "reuse buf, xreq is %d!!",
400 xreq->xr_idx));
404 bp->b_un.b_addr = XDB_IOPAGE_VA(vdp->xs_iopage_va, xreq->xr_idx,
405 curseg) + xreq->xr_segs[curseg].fs * DEV_BSIZE;
406 bp->b_shadow = &xreq->xr_pplist[curseg];
424 for (i = curseg; i < xreq->xr_buf_pages; i++) {
425 if ((xreq->xr_segs[i].fs != 0) && (i != curseg)) {
428 sectors += (xreq->xr_segs[i].ls - xreq->xr_segs[i].fs + 1);
429 if ((xreq->xr_segs[i].ls != XB_LAST_SECTOR_IN_SEG) &&
430 (i != (xreq->xr_buf_pages - 1))) {
435 xreq->xr_curseg = i;
538 xdb_request_t *xreq;
575 xreq = xdb_get_req(vdp);
576 ASSERT(xreq);
592 xreq->xr_curseg = 0; /* start from first segment */
593 bp = xdb_get_buf(vdp, reqp, xreq);
596 xdb_free_req(xreq);
636 xdb_request_t *xreq = XDB_BP2XREQ(bp);
637 xdb_t *vdp = xreq->xr_vdp;
646 if ((bioerr == 0) && (xreq->xr_curseg < xreq->xr_buf_pages)) {
647 nbp = xdb_get_buf(vdp, NULL, xreq);
665 segs = xreq->xr_buf_pages;
673 vdp->xs_iopage_va, xreq->xr_idx, i);
680 unmapops[i].handle = xreq->xr_page_hdls[i];
690 if (xreq->xr_op == BLKIF_OP_WRITE_BARRIER ||
691 xreq->xr_op == BLKIF_OP_FLUSH_DISKCACHE) {
708 if (xdb_push_response(vdp, xreq->xr_id, xreq->xr_op, bioerr))
712 (unsigned long long)xreq->xr_id));
716 xdb_free_req(xreq);
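
Taken together, the matches appear to trace one xdb_request_t through the driver: a slot is taken with xdb_get_req() (538-576), xdb_get_buf() turns the ring entry into a buf_t, recording xr_id, xr_op and each page's first/last sectors and advancing xr_curseg when a request spans more than one buf (237-435, 592-593), and the completion path builds the next buf if segments remain (646-647), unmaps the I/O pages (665-680), pushes a response keyed by xr_id/xr_op (708), and returns the slot with xdb_free_req() (716). The stand-alone sketch below only models the segment arithmetic visible at 404-405 and 428; MODEL_IOPAGE_VA, the constants, and the segment layout are assumptions made so it compiles, not the driver's definitions.

/*
 * Hypothetical, self-contained model of the segment arithmetic in the
 * xdb_get_buf() matches above (source lines 404-405 and 428).  All
 * MODEL_* names are assumptions, not the driver's definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_DEV_BSIZE   512     /* assumed sector size (DEV_BSIZE) */
#define MODEL_PAGESIZE    4096    /* assumed size of one mapped I/O page */
#define MODEL_MAXPAGES    11      /* assumed max segments per request */

/* Assumed layout: request xr_idx owns MODEL_MAXPAGES consecutive I/O pages. */
#define MODEL_IOPAGE_VA(base, xr_idx, seg) \
        ((base) + ((size_t)(xr_idx) * MODEL_MAXPAGES + (seg)) * MODEL_PAGESIZE)

typedef struct model_seg {
        uint8_t fs;     /* first sector of the page that carries data */
        uint8_t ls;     /* last sector of the page that carries data */
} model_seg_t;

int
main(void)
{
        /* Stand-in for vdp->xs_iopage_va: room for two requests' pages. */
        static char iopages[2 * MODEL_MAXPAGES * MODEL_PAGESIZE];
        /* Partial first page, full middle page, partial last page. */
        model_seg_t segs[3] = { { 2, 7 }, { 0, 7 }, { 0, 3 } };
        int xr_idx = 1, curseg = 0, buf_pages = 3;
        size_t sectors = 0;
        char *addr;
        int i;

        /* b_un.b_addr: start of the first segment's data (cf. 404-405). */
        addr = MODEL_IOPAGE_VA(iopages, xr_idx, curseg) +
            segs[curseg].fs * MODEL_DEV_BSIZE;

        /* Transfer length: (ls - fs + 1) sectors per segment (cf. 428). */
        for (i = curseg; i < buf_pages; i++)
                sectors += segs[i].ls - segs[i].fs + 1;

        (void) printf("offset %td bytes into the I/O area, %zu-byte transfer\n",
            addr - iopages, sectors * MODEL_DEV_BSIZE);
        return (0);
}

With those example segments the sketch reports a 46080-byte offset (the first I/O page of slot 1 plus two sectors) and a 9216-byte transfer. The partial-first/full-middle/partial-last shape is the one the contiguity checks at 425 and 429-430 allow within a single buf; any other shape ends the current buf and leaves xr_curseg (435) pointing at the segment where the next one should start.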