Lines Matching defs:vdp

108 #define	USE_WRITE_BARRIER(vdp)						\
109 ((vdp)->xdf_feature_barrier && !(vdp)->xdf_flush_supported)
110 #define USE_FLUSH_DISKCACHE(vdp) \
111 ((vdp)->xdf_feature_barrier && (vdp)->xdf_flush_supported)
112 #define IS_WRITE_BARRIER(vdp, bp) \
113 (!IS_READ(bp) && USE_WRITE_BARRIER(vdp) && \
114 ((bp)->b_un.b_addr == (vdp)->xdf_cache_flush_block))
116 (!IS_READ(bp) && USE_FLUSH_DISKCACHE(vdp) && ((bp)->b_bcount == 0))
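Read together, the macros above decide how a write is escalated: when the backend advertises "feature-barrier" but a cache flush is not usable, a write of the dedicated xdf_cache_flush_block is tagged as a barrier request; when flush is usable, a zero-length write becomes a disk-cache flush. Below is a minimal, self-contained sketch of that decision; the fake_ types and OP_ names are illustrative stand-ins, not the driver's (the real code maps the result onto Xen blkif operations).

	/* Illustrative stand-ins only; the driver's xdf_t and buf_t carry many more fields. */
	#include <stdbool.h>
	#include <stddef.h>

	typedef struct {
		bool	feature_barrier;	/* backend advertises "feature-barrier" */
		bool	flush_supported;	/* zero-length flush probe succeeded */
		char	*cache_flush_block;	/* scratch sector used to emulate a flush */
	} fake_vdp_t;

	typedef struct {
		bool	is_read;		/* read vs. write request */
		size_t	bcount;			/* transfer length in bytes */
		char	*addr;			/* kernel virtual address of the data */
	} fake_bp_t;

	typedef enum { OP_READ, OP_WRITE, OP_WRITE_BARRIER, OP_FLUSH_DISKCACHE } fake_op_t;

	/* Combine the USE_ and IS_ macro logic: choose the operation for one buf. */
	static fake_op_t
	pick_op(const fake_vdp_t *vdp, const fake_bp_t *bp)
	{
		if (bp->is_read)
			return (OP_READ);
		if (vdp->feature_barrier && vdp->flush_supported && bp->bcount == 0)
			return (OP_FLUSH_DISKCACHE);	/* IS_FLUSH_DISKCACHE() */
		if (vdp->feature_barrier && !vdp->flush_supported &&
		    bp->addr == vdp->cache_flush_block)
			return (OP_WRITE_BARRIER);	/* IS_WRITE_BARRIER() */
		return (OP_WRITE);
	}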
183 xdf_t *vdp = arg;
185 mutex_enter(&vdp->xdf_dev_lk);
186 vdp->xdf_timeout_id = 0;
187 mutex_exit(&vdp->xdf_dev_lk);
190 xdf_io_start(vdp);
202 xdf_t *vdp = (xdf_t *)arg;
203 ASSERT(vdp != NULL);
206 vdp->xdf_addr));
208 ddi_trigger_softintr(vdp->xdf_softintr_id);
213 gs_get(xdf_t *vdp, int isread)
221 if (vdp->xdf_gnt_callback.next == NULL) {
222 SETDMACBON(vdp);
224 &vdp->xdf_gnt_callback,
226 (void *)vdp,
235 if (vdp->xdf_timeout_id == 0)
237 vdp->xdf_timeout_id =
238 timeout(xdf_timeout_handler, vdp, hz);
243 gs->gs_oeid = vdp->xdf_peer;
282 vreq_get(xdf_t *vdp, buf_t *bp)
290 if (vdp->xdf_timeout_id == 0)
292 vdp->xdf_timeout_id =
293 timeout(xdf_timeout_handler, vdp, hz);
305 list_insert_head(&vdp->xdf_vreq_act, (void *)vreq);
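gs_get() and vreq_get() share the retry idiom visible above: when a grant-table slot or request structure cannot be allocated, the caller registers a DMA callback and/or arms a one-second timeout(9F), and the handler (lines 183-190) clears xdf_timeout_id and re-runs xdf_io_start(). A kernel-style sketch of that re-arm guard follows; it assumes xdf_t, xdf_io_start() and the fields used here come from the driver's private header, and xdf_arm_retry() is a hypothetical helper name.

	#include <sys/types.h>
	#include <sys/conf.h>
	#include <sys/ddi.h>
	#include <sys/sunddi.h>
	#include <sys/debug.h>
	/* xdf_t, xdf_io_start() and friends assumed to come from the driver's xdf.h. */

	/* timeout(9F) callback: clear the pending-timer id, then retry queued I/O. */
	static void
	xdf_timeout_handler(void *arg)
	{
		xdf_t *vdp = arg;

		mutex_enter(&vdp->xdf_dev_lk);
		vdp->xdf_timeout_id = 0;	/* allows the next re-arm */
		mutex_exit(&vdp->xdf_dev_lk);
		xdf_io_start(vdp);
	}

	/* Hypothetical helper: arm at most one retry timer, as lines 235-238 and 290-293 do. */
	static void
	xdf_arm_retry(xdf_t *vdp)
	{
		ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
		if (vdp->xdf_timeout_id == 0)
			vdp->xdf_timeout_id = timeout(xdf_timeout_handler, vdp, hz);
	}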
311 vreq_free(xdf_t *vdp, v_req_t *vreq)
315 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
318 list_remove(&vdp->xdf_vreq_act, vreq);
358 check_fbwrite(xdf_t *vdp, buf_t *bp, daddr_t blkno)
363 if (IS_WRITE_BARRIER(vdp, bp))
376 vdp->xdf_cache_flush_block, DEV_BSIZE);
386 vreq_setup(xdf_t *vdp, v_req_t *vreq)
407 if ((gs = gs_get(vdp, IS_READ(bp))) == NULL) {
409 "get ge_slotfailed\n", vdp->xdf_addr));
421 if (IS_WRITE_BARRIER(vdp, bp))
426 if (!IS_READ(bp) && USE_WRITE_BARRIER(vdp))
427 check_fbwrite(vdp, bp, vreq->v_blkno);
435 rc = ddi_dma_alloc_handle(vdp->xdf_dip, &xb_dma_attr,
436 xdf_dmacallback, (caddr_t)vdp, &dh);
438 SETDMACBON(vdp);
440 vdp->xdf_addr));
460 rc = ddi_dma_alloc_handle(vdp->xdf_dip, &dmaattr,
461 xdf_dmacallback, (caddr_t)vdp, &mdh);
463 SETDMACBON(vdp);
466 vdp->xdf_addr));
483 DDI_DMA_STREAMING, xdf_dmacallback, (caddr_t)vdp,
486 SETDMACBON(vdp);
489 vdp->xdf_addr));
510 dma_flags, xdf_dmacallback, (caddr_t)vdp,
515 xdf_dmacallback, (caddr_t)vdp, &dc, &ndcs);
526 SETDMACBON(vdp);
528 vdp->xdf_addr));
545 if ((gs = gs_get(vdp, IS_READ(bp))) == NULL) {
547 vdp->xdf_addr));
567 if ((gs = gs_get(vdp, IS_READ(bp))) == NULL) {
569 vdp->xdf_addr));
589 xdf_cmlb_attach(xdf_t *vdp)
591 dev_info_t *dip = vdp->xdf_dip;
594 XD_IS_CD(vdp) ? DTYPE_RODIRECT : DTYPE_DIRECT,
595 XD_IS_RM(vdp),
597 XD_IS_CD(vdp) ? DDI_NT_CD_XVMD : DDI_NT_BLOCK_XVMD,
599 (XD_IS_CD(vdp) ? 0 : CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT) |
602 XD_IS_CD(vdp) ? 0 : CMLB_FAKE_LABEL_ONE_PARTITION,
604 vdp->xdf_vd_lbl, NULL));
617 xdf_kstat_enter(xdf_t *vdp, buf_t *bp)
621 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
623 if (vdp->xdf_xdev_iostat == NULL)
626 kstat_runq_enter(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
628 kstat_waitq_enter(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
633 xdf_kstat_exit(xdf_t *vdp, buf_t *bp)
637 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
639 if (vdp->xdf_xdev_iostat == NULL)
642 kstat_runq_exit(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
644 kstat_waitq_exit(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
649 xdf_kstat_waitq_to_runq(xdf_t *vdp, buf_t *bp)
653 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
657 if (vdp->xdf_xdev_iostat == NULL)
659 kstat_waitq_to_runq(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
663 xdf_kstat_runq_to_waitq(xdf_t *vdp, buf_t *bp)
667 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
671 if (vdp->xdf_xdev_iostat == NULL)
673 kstat_runq_back_to_waitq(KSTAT_IO_PTR(vdp->xdf_xdev_iostat));
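The xdf_kstat_* helpers above wrap the standard kstat I/O queue accounting: a buf is counted on the wait queue while it sits in the driver's queue, moved to the run queue once its request is on the ring (line 1374), moved back if the ring is torn down with the request incomplete (line 1129), and retired on completion. A hedged sketch of that lifecycle against a kstat_io_t; kiop stands in for KSTAT_IO_PTR(vdp->xdf_xdev_iostat), and the helpers simply skip the update while the kstat has not been created yet.

	#include <sys/kstat.h>

	/* One buf's trip through the kstat queues (wrapping helpers named in comments). */
	static void
	track_one_buf(kstat_io_t *kiop)
	{
		kstat_waitq_enter(kiop);	/* xdf_kstat_enter(): queued, waiting for a ring slot */
		kstat_waitq_to_runq(kiop);	/* xdf_kstat_waitq_to_runq(): request handed to backend */
		/* on ring teardown: kstat_runq_back_to_waitq(kiop); re-issued later */
		kstat_runq_exit(kiop);		/* xdf_kstat_exit(): response received, I/O done */
	}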
679 xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
689 mutex_enter(&vdp->xdf_iostat_lk);
690 mutex_enter(&vdp->xdf_dev_lk);
693 if (vdp->xdf_xdev_iostat != NULL) {
694 mutex_exit(&vdp->xdf_dev_lk);
695 mutex_exit(&vdp->xdf_iostat_lk);
700 vdp->xdf_xdev_iostat = kstat;
701 vdp->xdf_xdev_iostat->ks_lock = &vdp->xdf_dev_lk;
702 kstat_install(vdp->xdf_xdev_iostat);
719 bp = vdp->xdf_f_act;
721 xdf_kstat_enter(vdp, bp);
724 if (vdp->xdf_ready_tq_bp != NULL)
725 xdf_kstat_enter(vdp, vdp->xdf_ready_tq_bp);
727 mutex_exit(&vdp->xdf_dev_lk);
728 mutex_exit(&vdp->xdf_iostat_lk);
735 xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
748 mutex_enter(&vdp->xdf_iostat_lk);
749 mutex_enter(&vdp->xdf_dev_lk);
751 if (vdp->xdf_xdev_iostat == NULL) {
752 mutex_exit(&vdp->xdf_dev_lk);
753 mutex_exit(&vdp->xdf_iostat_lk);
769 bp = vdp->xdf_f_act;
771 xdf_kstat_exit(vdp, bp);
774 if (vdp->xdf_ready_tq_bp != NULL)
775 xdf_kstat_exit(vdp, vdp->xdf_ready_tq_bp);
777 kstat = vdp->xdf_xdev_iostat;
778 vdp->xdf_xdev_iostat = NULL;
779 mutex_exit(&vdp->xdf_dev_lk);
781 mutex_exit(&vdp->xdf_iostat_lk);
795 xdf_bp_push(xdf_t *vdp, buf_t *bp)
797 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
800 xdf_kstat_enter(vdp, bp);
802 if (curthread == vdp->xdf_ready_tq_thread) {
804 ASSERT(vdp->xdf_ready_tq_bp == NULL);
805 vdp->xdf_ready_tq_bp = bp;
810 ASSERT(bp != vdp->xdf_ready_tq_bp);
812 if (vdp->xdf_f_act == NULL) {
814 ASSERT(vdp->xdf_l_act == NULL);
815 ASSERT(vdp->xdf_i_act == NULL);
816 vdp->xdf_f_act = vdp->xdf_l_act = vdp->xdf_i_act = bp;
821 vdp->xdf_l_act->av_forw = bp;
822 vdp->xdf_l_act = bp;
823 if (vdp->xdf_i_act == NULL)
824 vdp->xdf_i_act = bp;
828 xdf_bp_pop(xdf_t *vdp, buf_t *bp)
832 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
835 if (vdp->xdf_ready_tq_bp == bp) {
838 vdp->xdf_ready_tq_bp = NULL;
843 ASSERT((bp->av_forw != NULL) || (bp == vdp->xdf_l_act));
844 ASSERT((bp->av_forw == NULL) || (bp != vdp->xdf_l_act));
845 ASSERT(VREQ_DONE(BP_VREQ(vdp->xdf_f_act)));
846 ASSERT(vdp->xdf_f_act != vdp->xdf_i_act);
848 if (bp == vdp->xdf_f_act) {
850 vdp->xdf_f_act = bp->av_forw;
851 if (bp == vdp->xdf_l_act)
852 vdp->xdf_l_act = NULL;
855 bp_iter = vdp->xdf_f_act;
859 ASSERT(bp_iter != vdp->xdf_i_act);
862 if (bp == vdp->xdf_l_act)
863 vdp->xdf_l_act = bp_iter;
869 xdf_bp_next(xdf_t *vdp)
874 if (vdp->xdf_state == XD_CONNECTED) {
879 if ((bp = vdp->xdf_ready_tq_bp) == NULL)
887 if (vdp->xdf_state != XD_READY)
890 ASSERT(vdp->xdf_ready_tq_bp == NULL);
892 if ((bp = vdp->xdf_i_act) == NULL)
898 vdp->xdf_i_act = bp->av_forw;
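xdf_bp_push(), xdf_bp_pop() and xdf_bp_next() above maintain a single av_forw-linked FIFO with three cursors: xdf_f_act (head, oldest outstanding buf), xdf_l_act (tail) and xdf_i_act (next buf still to be issued); a buf pushed from the ready-taskq thread bypasses the list via xdf_ready_tq_bp. A small self-contained model of the three-cursor queue, with stand-in names only:

	#include <stddef.h>

	/* Stand-in for buf_t: only the av_forw link matters for the queue shape. */
	typedef struct fake_buf {
		struct fake_buf	*av_forw;
	} fake_buf_t;

	typedef struct {
		fake_buf_t	*f_act;		/* head: oldest buf still outstanding */
		fake_buf_t	*l_act;		/* tail: most recently queued buf     */
		fake_buf_t	*i_act;		/* next buf that still needs issuing  */
	} fake_queue_t;

	/* Mirrors xdf_bp_push() for the non-taskq case (lines 812-824). */
	static void
	queue_push(fake_queue_t *q, fake_buf_t *bp)
	{
		bp->av_forw = NULL;
		if (q->f_act == NULL) {
			q->f_act = q->l_act = q->i_act = bp;
			return;
		}
		q->l_act->av_forw = bp;
		q->l_act = bp;
		if (q->i_act == NULL)	/* everything earlier was already issued */
			q->i_act = bp;
	}

	/* Mirrors xdf_bp_next(): hand out the next unissued buf, if any. */
	static fake_buf_t *
	queue_next(fake_queue_t *q)
	{
		fake_buf_t *bp;

		if ((bp = q->i_act) != NULL)
			q->i_act = bp->av_forw;
		return (bp);
	}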
903 xdf_io_fini(xdf_t *vdp, uint64_t id, int bioerr)
909 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
921 xdf_bp_pop(vdp, bp);
924 xdf_kstat_exit(vdp, bp);
926 vreq_free(vdp, vreq);
942 xdf_intr_locked(xdf_t *vdp)
952 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
954 if ((xbr = vdp->xdf_xb_ring) == NULL)
957 acchdl = vdp->xdf_xb_ring_hdl;
971 vdp->xdf_addr,
978 xdf_io_fini(vdp, id, bioerr);
990 xdf_t *vdp = (xdf_t *)arg;
993 mutex_enter(&vdp->xdf_dev_lk);
994 rv = xdf_intr_locked(vdp);
995 mutex_exit(&vdp->xdf_dev_lk);
998 xdf_io_start(vdp);
1004 xdf_ring_push(xdf_t *vdp)
1006 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
1008 if (vdp->xdf_xb_ring == NULL)
1011 if (xvdi_ring_push_request(vdp->xdf_xb_ring)) {
1014 vdp->xdf_addr));
1017 if (xvdi_get_evtchn(vdp->xdf_dip) != INVALID_EVTCHN)
1018 xvdi_notify_oe(vdp->xdf_dip);
1022 xdf_ring_drain_locked(xdf_t *vdp)
1026 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
1032 if (vdp->xdf_xb_ring == NULL)
1035 if (xvdi_ring_has_unconsumed_responses(vdp->xdf_xb_ring))
1036 (void) xdf_intr_locked(vdp);
1037 if (!xvdi_ring_has_incomp_request(vdp->xdf_xb_ring))
1039 xdf_ring_push(vdp);
1042 mutex_exit(&vdp->xdf_dev_lk);
1047 mutex_enter(&vdp->xdf_dev_lk);
1049 cmn_err(CE_WARN, "xdf@%s: xdf_ring_drain: timeout", vdp->xdf_addr);
1052 if (vdp->xdf_xb_ring != NULL) {
1053 if (xvdi_ring_has_incomp_request(vdp->xdf_xb_ring) ||
1054 xvdi_ring_has_unconsumed_responses(vdp->xdf_xb_ring))
1059 vdp->xdf_addr, rv);
1064 xdf_ring_drain(xdf_t *vdp)
1067 mutex_enter(&vdp->xdf_dev_lk);
1068 rv = xdf_ring_drain_locked(vdp);
1069 mutex_exit(&vdp->xdf_dev_lk);
1077 xdf_ring_destroy(xdf_t *vdp)
1083 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
1084 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
1086 if ((vdp->xdf_state != XD_INIT) &&
1087 (vdp->xdf_state != XD_CONNECTED) &&
1088 (vdp->xdf_state != XD_READY)) {
1089 ASSERT(vdp->xdf_xb_ring == NULL);
1090 ASSERT(vdp->xdf_xb_ring_hdl == NULL);
1091 ASSERT(vdp->xdf_peer == INVALID_DOMID);
1092 ASSERT(vdp->xdf_evtchn == INVALID_EVTCHN);
1093 ASSERT(list_is_empty(&vdp->xdf_vreq_act));
1102 ec_unbind_evtchn(vdp->xdf_evtchn);
1104 (void) ddi_remove_intr(vdp->xdf_dip, 0, NULL);
1113 (void) xdf_ring_drain_locked(vdp);
1116 xvdi_free_evtchn(vdp->xdf_dip);
1117 vdp->xdf_evtchn = INVALID_EVTCHN;
1119 while ((vreq = list_head(&vdp->xdf_vreq_act)) != NULL) {
1129 xdf_kstat_runq_to_waitq(vdp, bp);
1135 vreq_free(vdp, vreq);
1141 vdp->xdf_i_act = vdp->xdf_f_act;
1144 xvdi_free_ring(vdp->xdf_xb_ring);
1145 vdp->xdf_xb_ring = NULL;
1146 vdp->xdf_xb_ring_hdl = NULL;
1147 vdp->xdf_peer = INVALID_DOMID;
1161 xdf_eject_pending(xdf_t *vdp)
1163 dev_info_t *dip = vdp->xdf_dip;
1166 if (!vdp->xdf_media_req_supported)
1185 xdf_media_req(xdf_t *vdp, char *req, boolean_t media_required)
1187 dev_info_t *dip = vdp->xdf_dip;
1196 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
1197 ASSERT(MUTEX_NOT_HELD(&vdp->xdf_dev_lk));
1203 if (!XD_IS_CD(vdp) || !vdp->xdf_media_req_supported)
1207 if (xdf_eject_pending(vdp))
1211 if (media_required && (vdp->xdf_xdev_nblocks == 0))
1215 if (vdp->xdf_state != XD_READY)
1228 xdf_process_rreq(xdf_t *vdp, struct buf *bp, blkif_request_t *rreq)
1235 dev_info_t *dip = vdp->xdf_dip;
1240 ddi_acc_handle_t acchdl = vdp->xdf_xb_ring_hdl;
1245 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
1266 if (!vdp->xdf_wce)
1307 vdp->xdf_addr, seg, vreq->v_dmac.dmac_size, blk_off));
1310 vdp->xdf_addr, seg, fsect, lsect, gr, dma_addr));
1327 vdp->xdf_addr, rreq->id));
1331 xdf_io_start(xdf_t *vdp)
1338 mutex_enter(&vdp->xdf_dev_lk);
1346 if (vdp->xdf_suspending)
1348 if ((bp = xdf_bp_next(vdp)) == NULL)
1353 ((vreq = vreq_get(vdp, bp)) == NULL))
1357 if (vreq_setup(vdp, vreq) != DDI_SUCCESS)
1361 if ((rreq = xvdi_ring_get_request(vdp->xdf_xb_ring)) == NULL)
1367 xdf_process_rreq(vdp, bp, rreq);
1374 xdf_kstat_waitq_to_runq(vdp, bp);
1379 xdf_ring_push(vdp);
1381 mutex_exit(&vdp->xdf_dev_lk);
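The xdf_io_start() fragments (lines 1331-1381) outline the submission loop: while not suspending, take the next buf, bind a v_req_t to it, complete DMA setup, claim a ring slot, fill in the blkif request, account the kstat transition, then push the ring and kick the backend. What follows is a hedged reconstruction of that loop, not the verbatim source; it assumes the driver-internal helpers shown in the listing are declared in xdf.h.

	/* Assumes <sys/ddi.h>, <sys/sunddi.h> and the driver's xdf.h are included. */
	static void
	xdf_io_start_sketch(xdf_t *vdp)
	{
		struct buf		*bp;
		v_req_t			*vreq;
		blkif_request_t		*rreq;

		mutex_enter(&vdp->xdf_dev_lk);
		for (;;) {
			if (vdp->xdf_suspending)
				break;
			if ((bp = xdf_bp_next(vdp)) == NULL)
				break;

			/* Attach (or look up) the virtual request and map its DMA resources. */
			if (((vreq = BP_VREQ(bp)) == NULL) &&
			    ((vreq = vreq_get(vdp, bp)) == NULL))
				break;		/* retried later via timeout/DMA callback */
			if (vreq_setup(vdp, vreq) != DDI_SUCCESS)
				break;

			/* Claim a slot on the shared ring and fill in the blkif request. */
			if ((rreq = xvdi_ring_get_request(vdp->xdf_xb_ring)) == NULL)
				break;
			xdf_process_rreq(vdp, bp, rreq);
			xdf_kstat_waitq_to_runq(vdp, bp);
		}
		xdf_ring_push(vdp);		/* notify the backend if requests were queued */
		mutex_exit(&vdp->xdf_dev_lk);
	}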
1387 xdf_isopen(xdf_t *vdp, int partition)
1402 if (vdp->xdf_vd_open[i] & parbit)
1415 xdf_busy(xdf_t *vdp)
1417 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
1419 if ((vdp->xdf_xb_ring != NULL) &&
1420 xvdi_ring_has_unconsumed_responses(vdp->xdf_xb_ring)) {
1421 ASSERT(vdp->xdf_state != XD_CLOSED);
1425 if (!list_is_empty(&vdp->xdf_vreq_act) || (vdp->xdf_f_act != NULL)) {
1426 ASSERT(vdp->xdf_state != XD_CLOSED);
1430 if (xdf_isopen(vdp, -1)) {
1431 ASSERT(vdp->xdf_state != XD_CLOSED);
1435 if (vdp->xdf_connect_req > 0) {
1436 ASSERT(vdp->xdf_state != XD_CLOSED);
1444 xdf_set_state(xdf_t *vdp, xdf_state_t new_state)
1446 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
1447 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
1449 vdp->xdf_addr, vdp->xdf_state, new_state));
1450 vdp->xdf_state = new_state;
1451 cv_broadcast(&vdp->xdf_dev_cv);
1455 xdf_disconnect(xdf_t *vdp, xdf_state_t new_state, boolean_t quiet)
1457 dev_info_t *dip = vdp->xdf_dip;
1460 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
1461 ASSERT(MUTEX_NOT_HELD(&vdp->xdf_dev_lk));
1465 if (vdp->xdf_state == new_state)
1468 mutex_enter(&vdp->xdf_dev_lk);
1469 busy = xdf_busy(vdp);
1472 if (vdp->xdf_state == XD_CLOSED) {
1474 xdf_set_state(vdp, new_state);
1475 mutex_exit(&vdp->xdf_dev_lk);
1481 if (!quiet && busy && (vdp->xdf_state == XD_READY) &&
1482 (vdp->xdf_xdev_nblocks != 0)) {
1484 vdp->xdf_addr);
1488 xdf_ring_destroy(vdp);
1491 xdf_set_state(vdp, (busy) ? XD_UNKNOWN : new_state);
1492 mutex_exit(&vdp->xdf_dev_lk);
1495 if (vdp->xdf_state == XD_CLOSED)
1507 xdf_setstate_init(xdf_t *vdp)
1509 dev_info_t *dip = vdp->xdf_dip;
1515 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
1516 ASSERT(MUTEX_NOT_HELD(&vdp->xdf_dev_lk));
1517 ASSERT((vdp->xdf_state == XD_UNKNOWN) ||
1518 (vdp->xdf_state == XD_CLOSED));
1521 ("xdf@%s: starting connection process\n", vdp->xdf_addr));
1527 if (xdf_eject_pending(vdp))
1533 if ((vdp->xdf_peer = xvdi_get_oeid(dip)) == INVALID_DOMID)
1550 vdp->xdf_evtchn = xvdi_get_evtchn(dip);
1552 ec_bind_evtchn_to_handler(vdp->xdf_evtchn, IPL_VBD, xdf_intr, vdp);
1554 if (ddi_add_intr(dip, 0, NULL, NULL, xdf_intr, (caddr_t)vdp) !=
1557 "failed to add intr handler", vdp->xdf_addr);
1563 sizeof (union blkif_sring_entry), &gref, &vdp->xdf_xb_ring) !=
1566 vdp->xdf_addr);
1569 vdp->xdf_xb_ring_hdl = vdp->xdf_xb_ring->xr_acc_hdl; /* ugly!! */
1577 vdp->xdf_addr);
1592 XBP_EVENT_CHAN, "%u", vdp->xdf_evtchn)) != 0) ||
1609 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
1610 mutex_enter(&vdp->xdf_dev_lk);
1611 xdf_set_state(vdp, XD_INIT);
1612 mutex_exit(&vdp->xdf_dev_lk);
1617 xvdi_free_ring(vdp->xdf_xb_ring);
1620 ec_unbind_evtchn(vdp->xdf_evtchn);
1622 (void) ddi_remove_intr(vdp->xdf_dip, 0, NULL);
1626 vdp->xdf_evtchn = INVALID_EVTCHN;
1628 xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
1630 vdp->xdf_addr);
1635 xdf_get_flush_block(xdf_t *vdp)
1640 vdp->xdf_flush_mem = kmem_alloc(vdp->xdf_xdev_secsize * 2, KM_SLEEP);
1641 vdp->xdf_cache_flush_block =
1642 (char *)P2ROUNDUP((uintptr_t)(vdp->xdf_flush_mem),
1643 (int)vdp->xdf_xdev_secsize);
1645 if (xdf_lb_rdwr(vdp->xdf_dip, TG_READ, vdp->xdf_cache_flush_block,
1646 xdf_flush_block, vdp->xdf_xdev_secsize, NULL) != 0)
1654 xdf_t *vdp = (xdf_t *)arg;
1656 vdp->xdf_ready_tq_thread = curthread;
1665 mutex_enter(&vdp->xdf_dev_lk);
1666 if (vdp->xdf_cmbl_reattach) {
1667 vdp->xdf_cmbl_reattach = B_FALSE;
1669 mutex_exit(&vdp->xdf_dev_lk);
1670 if (xdf_cmlb_attach(vdp) != 0) {
1671 xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
1674 mutex_enter(&vdp->xdf_dev_lk);
1678 if (vdp->xdf_state != XD_CONNECTED) {
1679 mutex_exit(&vdp->xdf_dev_lk);
1682 mutex_exit(&vdp->xdf_dev_lk);
1688 vdp->xdf_flush_supported = B_FALSE;
1689 if (vdp->xdf_feature_barrier) {
1694 vdp->xdf_flush_supported = B_TRUE;
1695 if (xdf_lb_rdwr(vdp->xdf_dip, TG_WRITE, NULL, 0, 0, 0) == 0) {
1696 vdp->xdf_flush_supported = B_TRUE;
1698 vdp->xdf_flush_supported = B_FALSE;
1710 if (xdf_get_flush_block(vdp) != DDI_SUCCESS) {
1711 xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
1717 mutex_enter(&vdp->xdf_cb_lk);
1718 mutex_enter(&vdp->xdf_dev_lk);
1719 if (vdp->xdf_state == XD_CONNECTED)
1720 xdf_set_state(vdp, XD_READY);
1721 mutex_exit(&vdp->xdf_dev_lk);
1724 xdf_io_start(vdp);
1726 mutex_exit(&vdp->xdf_cb_lk);
1738 xdf_t *vdp;
1741 vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip));
1743 ncyl = vdp->xdf_xdev_nblocks / (XDF_NHEADS * XDF_NSECTS);
1750 geomp->g_secsize = vdp->xdf_xdev_secsize;
1751 geomp->g_capacity = vdp->xdf_xdev_nblocks;
1763 xdf_setstate_connected(xdf_t *vdp)
1765 dev_info_t *dip = vdp->xdf_dip;
1772 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
1773 ASSERT(MUTEX_NOT_HELD(&vdp->xdf_dev_lk));
1774 ASSERT(vdp->xdf_state == XD_INIT);
1785 if (!(vdp->xdf_feature_barrier = xenbus_exists(oename, XBP_FB)))
1787 vdp->xdf_addr);
1802 "cannot read backend info", vdp->xdf_addr);
1807 vdp->xdf_addr);
1816 vdp->xdf_xdev_nblocks = nblocks;
1817 vdp->xdf_xdev_secsize = secsize;
1819 if (vdp->xdf_xdev_nblocks > DK_MAX_BLOCKS) {
1822 " 32-bit kernel", vdp->xdf_addr, vdp->xdf_xdev_nblocks);
1833 if (vdp->xdf_pgeom_fixed &&
1834 (vdp->xdf_pgeom.g_capacity > vdp->xdf_xdev_nblocks)) {
1837 vdp->xdf_addr);
1841 vdp->xdf_media_req_supported = xenbus_exists(oename, XBP_MEDIA_REQ_SUP);
1844 mutex_enter(&vdp->xdf_dev_lk);
1845 xdf_set_state(vdp, XD_CONNECTED);
1849 if ((vdp->xdf_dinfo != dinfo) ||
1850 (!vdp->xdf_pgeom_fixed &&
1851 (memcmp(&vdp->xdf_pgeom, &pgeom, sizeof (pgeom)) != 0))) {
1852 vdp->xdf_cmbl_reattach = B_TRUE;
1854 vdp->xdf_dinfo = dinfo;
1855 if (!vdp->xdf_pgeom_fixed)
1856 vdp->xdf_pgeom = pgeom;
1859 if (XD_IS_CD(vdp) || XD_IS_RM(vdp)) {
1860 if (vdp->xdf_xdev_nblocks == 0) {
1861 vdp->xdf_mstate = DKIO_EJECTED;
1862 cv_broadcast(&vdp->xdf_mstate_cv);
1864 vdp->xdf_mstate = DKIO_INSERTED;
1865 cv_broadcast(&vdp->xdf_mstate_cv);
1868 if (vdp->xdf_mstate != DKIO_NONE) {
1869 vdp->xdf_mstate = DKIO_NONE;
1870 cv_broadcast(&vdp->xdf_mstate_cv);
1874 mutex_exit(&vdp->xdf_dev_lk);
1876 cmn_err(CE_CONT, "?xdf@%s: %"PRIu64" blocks", vdp->xdf_addr,
1877 (uint64_t)vdp->xdf_xdev_nblocks);
1880 xdf_io_start(vdp);
1890 (void) ddi_taskq_dispatch(vdp->xdf_ready_tq, xdf_setstate_ready, vdp,
1902 xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
1905 vdp->xdf_addr, new_state));
1907 mutex_enter(&vdp->xdf_cb_lk);
1910 ASSERT(vdp->xdf_oe_change_thread == NULL);
1911 DEBUG_EVAL(vdp->xdf_oe_change_thread = curthread);
1914 if (vdp->xdf_suspending || (vdp->xdf_state == XD_SUSPEND)) {
1915 DEBUG_EVAL(vdp->xdf_oe_change_thread = NULL);
1916 mutex_exit(&vdp->xdf_cb_lk);
1925 if (vdp->xdf_state == XD_INIT)
1928 xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
1929 if (xdf_setstate_init(vdp) != DDI_SUCCESS)
1931 ASSERT(vdp->xdf_state == XD_INIT);
1935 if ((vdp->xdf_state == XD_CONNECTED) ||
1936 (vdp->xdf_state == XD_READY))
1939 if (vdp->xdf_state != XD_INIT) {
1940 xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
1941 if (xdf_setstate_init(vdp) != DDI_SUCCESS)
1943 ASSERT(vdp->xdf_state == XD_INIT);
1946 if (xdf_setstate_connected(vdp) != DDI_SUCCESS) {
1947 xdf_disconnect(vdp, XD_UNKNOWN, B_FALSE);
1950 ASSERT(vdp->xdf_state == XD_CONNECTED);
1954 if (xdf_isopen(vdp, -1)) {
1957 vdp->xdf_addr);
1962 xdf_disconnect(vdp, XD_CLOSED, B_FALSE);
1967 cv_broadcast(&vdp->xdf_dev_cv);
1968 DEBUG_EVAL(vdp->xdf_oe_change_thread = NULL);
1969 mutex_exit(&vdp->xdf_cb_lk);
1973 xdf_connect_locked(xdf_t *vdp, boolean_t wait)
1977 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
1978 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
1981 if (vdp->xdf_state == XD_CLOSED)
1984 vdp->xdf_connect_req++;
1985 while (vdp->xdf_state != XD_READY) {
1986 mutex_exit(&vdp->xdf_dev_lk);
1989 if (vdp->xdf_connect_thread == NULL)
1990 vdp->xdf_connect_thread = curthread;
1992 if (vdp->xdf_connect_thread == curthread) {
2000 (void) xdf_disconnect(vdp, XD_UNKNOWN, B_TRUE);
2003 if (vdp->xdf_state == XD_UNKNOWN)
2004 (void) xdf_setstate_init(vdp);
2005 if (vdp->xdf_state == XD_INIT)
2006 (void) xdf_setstate_connected(vdp);
2009 mutex_enter(&vdp->xdf_dev_lk);
2010 if (!wait || (vdp->xdf_state == XD_READY))
2013 mutex_exit((&vdp->xdf_cb_lk));
2014 if (vdp->xdf_connect_thread != curthread) {
2015 rv = cv_wait_sig(&vdp->xdf_dev_cv, &vdp->xdf_dev_lk);
2018 rv = cv_reltimedwait_sig(&vdp->xdf_dev_cv,
2019 &vdp->xdf_dev_lk, drv_usectohz(100*1000),
2024 mutex_exit((&vdp->xdf_dev_lk));
2025 mutex_enter((&vdp->xdf_cb_lk));
2026 mutex_enter((&vdp->xdf_dev_lk));
2032 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
2033 ASSERT(MUTEX_HELD(&vdp->xdf_dev_lk));
2035 if (vdp->xdf_connect_thread == curthread) {
2040 cv_signal(&vdp->xdf_dev_cv);
2041 vdp->xdf_connect_thread = NULL;
2045 mutex_exit((&vdp->xdf_dev_lk));
2046 (void) xdf_media_req(vdp, XBV_MEDIA_REQ_LOCK, B_TRUE);
2047 mutex_enter((&vdp->xdf_dev_lk));
2049 vdp->xdf_connect_req--;
2050 return (vdp->xdf_state);
2056 xdf_t *vdp = (xdf_t *)arg;
2058 ASSERT(vdp != NULL);
2060 mutex_enter(&vdp->xdf_dev_lk);
2061 ASSERT(ISDMACBON(vdp));
2062 SETDMACBOFF(vdp);
2063 mutex_exit(&vdp->xdf_dev_lk);
2065 xdf_io_start(vdp);
2177 xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
2181 mutex_enter(&vdp->xdf_cb_lk);
2189 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
2198 mutex_exit(&vdp->xdf_cb_lk);
2211 if (cv_wait_sig(&vdp->xdf_hp_status_cv, &vdp->xdf_cb_lk) == 0) {
2213 mutex_exit(&vdp->xdf_cb_lk);
2219 ASSERT(MUTEX_HELD(&vdp->xdf_cb_lk));
2233 if (XD_IS_CD(vdp) && !xenbus_exists(oename, XBP_MEDIA_REQ_SUP)) {
2234 mutex_exit(&vdp->xdf_cb_lk);
2238 mutex_enter(&vdp->xdf_dev_lk);
2239 rv = xdf_connect_locked(vdp, B_TRUE);
2240 mutex_exit(&vdp->xdf_dev_lk);
2241 mutex_exit(&vdp->xdf_cb_lk);
2249 xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
2252 mutex_enter(&vdp->xdf_dev_lk);
2255 mutex_exit(&vdp->xdf_dev_lk);
2264 if ((vdp->xdf_xdev_nblocks != 0) &&
2265 (geomp->g_capacity > vdp->xdf_xdev_nblocks)) {
2266 mutex_exit(&vdp->xdf_dev_lk);
2270 bzero(&vdp->xdf_pgeom, sizeof (vdp->xdf_pgeom));
2271 vdp->xdf_pgeom.g_ncyl = geomp->g_ncyl;
2272 vdp->xdf_pgeom.g_acyl = geomp->g_acyl;
2273 vdp->xdf_pgeom.g_nhead = geomp->g_nhead;
2274 vdp->xdf_pgeom.g_nsect = geomp->g_nsect;
2275 vdp->xdf_pgeom.g_secsize = geomp->g_secsize;
2276 vdp->xdf_pgeom.g_capacity = geomp->g_capacity;
2277 vdp->xdf_pgeom.g_intrlv = geomp->g_intrlv;
2278 vdp->xdf_pgeom.g_rpm = geomp->g_rpm;
2280 vdp->xdf_pgeom_fixed = B_TRUE;
2281 mutex_exit(&vdp->xdf_dev_lk);
2284 cmlb_invalidate(vdp->xdf_vd_lbl, NULL);
2292 xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
2295 mutex_enter(&vdp->xdf_cb_lk);
2296 rv = XD_IS_CD(vdp);
2297 mutex_exit(&vdp->xdf_cb_lk);
2304 xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
2307 mutex_enter(&vdp->xdf_cb_lk);
2308 rv = XD_IS_RM(vdp);
2309 mutex_exit(&vdp->xdf_cb_lk);
2316 xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
2319 mutex_enter(&vdp->xdf_cb_lk);
2320 rv = vdp->xdf_media_req_supported;
2321 mutex_exit(&vdp->xdf_cb_lk);
2330 xdf_t *vdp;
2331 vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip));
2333 if (vdp == NULL)
2336 mutex_enter(&vdp->xdf_dev_lk);
2337 *capp = vdp->xdf_pgeom.g_capacity;
2338 DPRINTF(LBL_DBG, ("xdf@%s:capacity %llu\n", vdp->xdf_addr, *capp));
2339 mutex_exit(&vdp->xdf_dev_lk);
2346 xdf_t *vdp;
2348 if ((vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip))) == NULL)
2350 *geomp = vdp->xdf_pgeom;
2367 xdf_t *vdp;
2369 if (!(vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip))))
2372 if (XD_IS_RO(vdp))
2384 xdf_t *vdp;
2388 if ((vdp = ddi_get_soft_state(xdf_ssp, instance)) == NULL)
2399 mutex_enter(&vdp->xdf_cb_lk);
2400 *(uint32_t *)arg = vdp->xdf_xdev_secsize;
2401 mutex_exit(&vdp->xdf_cb_lk);
2415 xdf_t *vdp;
2419 vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip));
2422 ASSERT(curthread != vdp->xdf_oe_change_thread);
2424 if ((start + ((reqlen / (vdp->xdf_xdev_secsize / DEV_BSIZE))
2425 >> DEV_BSHIFT)) > vdp->xdf_pgeom.g_capacity)
2436 bp->b_blkno = start * (vdp->xdf_xdev_secsize / DEV_BSIZE);
2439 mutex_enter(&vdp->xdf_dev_lk);
2440 xdf_bp_push(vdp, bp);
2441 mutex_exit(&vdp->xdf_dev_lk);
2442 xdf_io_start(vdp);
2443 if (curthread == vdp->xdf_ready_tq_thread)
2444 (void) xdf_ring_drain(vdp);
2456 xdf_ioctl_mlock(xdf_t *vdp)
2459 mutex_enter(&vdp->xdf_cb_lk);
2460 rv = xdf_media_req(vdp, XBV_MEDIA_REQ_LOCK, B_TRUE);
2461 mutex_exit(&vdp->xdf_cb_lk);
2469 xdf_ioctl_munlock(xdf_t *vdp)
2472 mutex_enter(&vdp->xdf_cb_lk);
2473 rv = xdf_media_req(vdp, XBV_MEDIA_REQ_NONE, B_TRUE);
2474 mutex_exit(&vdp->xdf_cb_lk);
2483 xdf_ioctl_eject(xdf_t *vdp)
2487 mutex_enter(&vdp->xdf_cb_lk);
2488 if ((rv = xdf_media_req(vdp, XBV_MEDIA_REQ_EJECT, B_FALSE)) != 0) {
2489 mutex_exit(&vdp->xdf_cb_lk);
2499 (void) xdf_disconnect(vdp, XD_UNKNOWN, B_TRUE);
2500 mutex_enter(&vdp->xdf_dev_lk);
2501 if (xdf_connect_locked(vdp, B_TRUE) != XD_READY) {
2502 mutex_exit(&vdp->xdf_dev_lk);
2503 mutex_exit(&vdp->xdf_cb_lk);
2506 mutex_exit(&vdp->xdf_dev_lk);
2507 mutex_exit(&vdp->xdf_cb_lk);
2518 xdf_dkstate(xdf_t *vdp, enum dkio_state mstate)
2522 mutex_enter(&vdp->xdf_cb_lk);
2523 prev_state = vdp->xdf_mstate;
2525 if (vdp->xdf_mstate == mstate) {
2526 while (vdp->xdf_mstate == prev_state) {
2527 if (cv_wait_sig(&vdp->xdf_mstate_cv,
2528 &vdp->xdf_cb_lk) == 0) {
2529 mutex_exit(&vdp->xdf_cb_lk);
2536 (vdp->xdf_mstate == DKIO_INSERTED)) {
2537 (void) xdf_media_req(vdp, XBV_MEDIA_REQ_LOCK, B_TRUE);
2538 mutex_exit(&vdp->xdf_cb_lk);
2542 mutex_exit(&vdp->xdf_cb_lk);
2553 xdf_t *vdp;
2556 if (((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL) ||
2557 (!xdf_isopen(vdp, part)))
2561 vdp->xdf_addr, cmd, cmd));
2584 return (cmlb_ioctl(vdp->xdf_vd_lbl, dev, cmd, arg, mode, credp,
2589 return (xdf_ioctl_eject(vdp));
2591 return (xdf_ioctl_mlock(vdp));
2593 return (xdf_ioctl_munlock(vdp));
2596 if (!XD_IS_CD(vdp))
2605 media_info.dki_lbsize = vdp->xdf_xdev_secsize;
2606 media_info.dki_capacity = vdp->xdf_pgeom.g_capacity;
2607 if (XD_IS_CD(vdp))
2621 if (XD_IS_CD(vdp))
2630 info.dki_unit = ddi_get_instance(vdp->xdf_dip);
2650 if ((rv = xdf_dkstate(vdp, mstate)) != 0)
2652 mstate = vdp->xdf_mstate;
2659 int i = BOOLEAN2VOID(XD_IS_RM(vdp));
2665 int i = BOOLEAN2VOID(XD_IS_RM(vdp));
2674 vdp->xdf_wce = VOID2BOOLEAN(i);
2680 if (vdp->xdf_flush_supported) {
2681 rv = xdf_lb_rdwr(vdp->xdf_dip, TG_WRITE,
2683 } else if (vdp->xdf_feature_barrier &&
2685 rv = xdf_lb_rdwr(vdp->xdf_dip, TG_WRITE,
2686 vdp->xdf_cache_flush_block, xdf_flush_block,
2687 vdp->xdf_xdev_secsize, (void *)dev);
2706 xdf_t *vdp;
2715 vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor));
2717 mutex_enter(&vdp->xdf_dev_lk);
2718 if (!xdf_isopen(vdp, part)) {
2719 mutex_exit(&vdp->xdf_dev_lk);
2725 ASSERT(curthread != vdp->xdf_oe_change_thread);
2728 if (!IS_READ(bp) && XD_IS_RO(vdp)) {
2729 mutex_exit(&vdp->xdf_dev_lk);
2737 p_blkct = vdp->xdf_xdev_nblocks;
2741 mutex_exit(&vdp->xdf_dev_lk);
2742 if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkct,
2747 mutex_enter(&vdp->xdf_dev_lk);
2754 blkno = bp->b_blkno / (vdp->xdf_xdev_secsize / XB_BSIZE);
2759 vdp->xdf_addr, (longlong_t)blkno, (uint64_t)p_blkct));
2760 mutex_exit(&vdp->xdf_dev_lk);
2767 mutex_exit(&vdp->xdf_dev_lk);
2779 if (vdp->xdf_xdev_secsize != 0 &&
2780 vdp->xdf_xdev_secsize != XB_BSIZE) {
2781 nblks = bp->b_bcount / vdp->xdf_xdev_secsize;
2787 if (vdp->xdf_xdev_secsize != 0 &&
2788 vdp->xdf_xdev_secsize != XB_BSIZE) {
2791 vdp->xdf_xdev_secsize;
2801 vdp->xdf_addr, (longlong_t)blkno, (ulong_t)bp->b_bcount));
2807 xdf_bp_push(vdp, bp);
2808 mutex_exit(&vdp->xdf_dev_lk);
2809 xdf_io_start(vdp);
2811 (void) xdf_ring_drain(vdp);
2819 xdf_t *vdp;
2825 if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
2829 vdp->xdf_addr, (int64_t)uiop->uio_offset));
2832 if (!xdf_isopen(vdp, part))
2835 if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt,
2839 if (uiop->uio_loffset >= XB_DTOB(p_blkcnt, vdp))
2852 xdf_t *vdp;
2858 if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
2862 vdp->xdf_addr, (int64_t)uiop->uio_offset));
2865 if (!xdf_isopen(vdp, part))
2868 if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt,
2872 if (uiop->uio_loffset >= XB_DTOB(p_blkcnt, vdp))
2885 xdf_t *vdp;
2892 if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
2896 if (!xdf_isopen(vdp, part))
2899 if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt,
2903 if (uiop->uio_loffset >= XB_DTOB(p_blkcnt, vdp))
2916 xdf_t *vdp;
2923 if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
2927 if (!xdf_isopen(vdp, part))
2930 if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt,
2934 if (uiop->uio_loffset >= XB_DTOB(p_blkcnt, vdp))
2947 xdf_t *vdp;
2954 if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
2958 vdp->xdf_addr, (void *)addr, blkno, nblk));
2961 ASSERT(curthread != vdp->xdf_oe_change_thread);
2964 if (!xdf_isopen(vdp, part))
2967 if (cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkcnt, &p_blkst,
2972 (p_blkcnt * (vdp->xdf_xdev_secsize / XB_BSIZE))) {
2974 vdp->xdf_addr, (daddr_t)((blkno + nblk) /
2975 (vdp->xdf_xdev_secsize / XB_BSIZE)), (uint64_t)p_blkcnt);
2987 mutex_enter(&vdp->xdf_dev_lk);
2988 xdf_bp_push(vdp, dbp);
2989 mutex_exit(&vdp->xdf_dev_lk);
2990 xdf_io_start(vdp);
2991 err = xdf_ring_drain(vdp);
3001 xdf_t *vdp;
3006 if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
3009 mutex_enter(&vdp->xdf_dev_lk);
3011 if (!xdf_isopen(vdp, part)) {
3012 mutex_exit(&vdp->xdf_dev_lk);
3017 ASSERT((vdp->xdf_vd_open[otyp] & parbit) != 0);
3019 ASSERT(vdp->xdf_vd_lyropen[part] > 0);
3020 if (--vdp->xdf_vd_lyropen[part] == 0)
3021 vdp->xdf_vd_open[otyp] &= ~parbit;
3023 vdp->xdf_vd_open[otyp] &= ~parbit;
3025 vdp->xdf_vd_exclopen &= ~parbit;
3027 mutex_exit(&vdp->xdf_dev_lk);
3035 xdf_t *vdp;
3043 if ((vdp = ddi_get_soft_state(xdf_ssp, XDF_INST(minor))) == NULL)
3048 DPRINTF(DDI_DBG, ("xdf@%s: opening\n", vdp->xdf_addr));
3051 mutex_enter(&vdp->xdf_cb_lk);
3052 mutex_enter(&vdp->xdf_dev_lk);
3053 if (!nodelay && (xdf_connect_locked(vdp, B_TRUE) != XD_READY)) {
3054 mutex_exit(&vdp->xdf_dev_lk);
3055 mutex_exit(&vdp->xdf_cb_lk);
3058 mutex_exit(&vdp->xdf_cb_lk);
3060 if ((flag & FWRITE) && XD_IS_RO(vdp)) {
3061 mutex_exit(&vdp->xdf_dev_lk);
3067 if ((vdp->xdf_vd_exclopen & parbit) ||
3068 ((flag & FEXCL) && xdf_isopen(vdp, part))) {
3069 mutex_exit(&vdp->xdf_dev_lk);
3074 firstopen = !xdf_isopen(vdp, -1);
3077 vdp->xdf_vd_lyropen[part]++;
3079 vdp->xdf_vd_open[otyp] |= parbit;
3082 vdp->xdf_vd_exclopen |= parbit;
3084 mutex_exit(&vdp->xdf_dev_lk);
3088 cmlb_invalidate(vdp->xdf_vd_lbl, NULL);
3099 if ((cmlb_partinfo(vdp->xdf_vd_lbl, part, &p_blkct,
3112 xdf_t *vdp = (xdf_t *)ddi_get_driver_private(dip);
3113 cv_broadcast(&vdp->xdf_hp_status_cv);
3120 xdf_t *vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip));
3133 if (vdp == NULL)
3137 return (cmlb_prop_op(vdp->xdf_vd_lbl,
3171 xdf_t *vdp;
3174 if ((vdp = ddi_get_soft_state(xdf_ssp, ddi_get_instance(dip))) == NULL)
3178 xen_printf("xdf@%s: xdf_resume\n", vdp->xdf_addr);
3180 mutex_enter(&vdp->xdf_cb_lk);
3183 mutex_exit(&vdp->xdf_cb_lk);
3190 mutex_exit(&vdp->xdf_cb_lk);
3194 mutex_enter(&vdp->xdf_dev_lk);
3195 ASSERT(vdp->xdf_state != XD_READY);
3196 xdf_set_state(vdp, XD_UNKNOWN);
3197 mutex_exit(&vdp->xdf_dev_lk);
3199 if (xdf_setstate_init(vdp) != DDI_SUCCESS) {
3200 mutex_exit(&vdp->xdf_cb_lk);
3204 mutex_exit(&vdp->xdf_cb_lk);
3207 xen_printf("xdf@%s: xdf_resume: done\n", vdp->xdf_addr);
3211 xen_printf("xdf@%s: xdf_resume: fail\n", vdp->xdf_addr);
3221 xdf_t *vdp;
3275 vdp = ddi_get_soft_state(xdf_ssp, instance);
3276 ddi_set_driver_private(dip, vdp);
3277 vdp->xdf_dip = dip;
3278 vdp->xdf_addr = ddi_get_name_addr(dip);
3279 vdp->xdf_suspending = B_FALSE;
3280 vdp->xdf_media_req_supported = B_FALSE;
3281 vdp->xdf_peer = INVALID_DOMID;
3282 vdp->xdf_evtchn = INVALID_EVTCHN;
3283 list_create(&vdp->xdf_vreq_act, sizeof (v_req_t),
3285 cv_init(&vdp->xdf_dev_cv, NULL, CV_DEFAULT, NULL);
3286 cv_init(&vdp->xdf_hp_status_cv, NULL, CV_DEFAULT, NULL);
3287 cv_init(&vdp->xdf_mstate_cv, NULL, CV_DEFAULT, NULL);
3288 mutex_init(&vdp->xdf_dev_lk, NULL, MUTEX_DRIVER, (void *)ibc);
3289 mutex_init(&vdp->xdf_cb_lk, NULL, MUTEX_DRIVER, (void *)ibc);
3290 mutex_init(&vdp->xdf_iostat_lk, NULL, MUTEX_DRIVER, (void *)ibc);
3291 vdp->xdf_cmbl_reattach = B_TRUE;
3293 vdp->xdf_dinfo |= VDISK_CDROM;
3294 vdp->xdf_mstate = DKIO_EJECTED;
3296 vdp->xdf_mstate = DKIO_NONE;
3299 if ((vdp->xdf_ready_tq = ddi_taskq_create(dip, "xdf_ready_tq",
3307 if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &vdp->xdf_softintr_id,
3308 &softibc, NULL, xdf_iorestart, (caddr_t)vdp) != DDI_SUCCESS) {
3321 xdf_synthetic_pgeom(dip, &vdp->xdf_pgeom);
3322 vdp->xdf_pgeom_fixed = B_FALSE;
3328 cmlb_alloc_handle(&vdp->xdf_vd_lbl);
3329 if (xdf_cmlb_attach(vdp) != 0) {
3339 vdp->xdf_wce = B_TRUE;
3341 mutex_enter(&vdp->xdf_cb_lk);
3345 mutex_exit(&vdp->xdf_cb_lk);
3349 if (xdf_setstate_init(vdp) != DDI_SUCCESS) {
3352 mutex_exit(&vdp->xdf_cb_lk);
3355 mutex_exit(&vdp->xdf_cb_lk);
3378 DPRINTF(DDI_DBG, ("xdf@%s: attached\n", vdp->xdf_addr));
3382 (void) xvdi_switch_state(vdp->xdf_dip, XBT_NULL, XenbusStateClosed);
3385 if (vdp->xdf_vd_lbl != NULL) {
3386 cmlb_detach(vdp->xdf_vd_lbl, NULL);
3387 cmlb_free_handle(&vdp->xdf_vd_lbl);
3388 vdp->xdf_vd_lbl = NULL;
3390 if (vdp->xdf_softintr_id != NULL)
3391 ddi_remove_softintr(vdp->xdf_softintr_id);
3393 if (vdp->xdf_ready_tq != NULL)
3394 ddi_taskq_destroy(vdp->xdf_ready_tq);
3395 mutex_destroy(&vdp->xdf_cb_lk);
3396 mutex_destroy(&vdp->xdf_dev_lk);
3397 cv_destroy(&vdp->xdf_dev_cv);
3398 cv_destroy(&vdp->xdf_hp_status_cv);
3410 xdf_t *vdp;
3412 if ((vdp = ddi_get_soft_state(xdf_ssp, instance)) == NULL)
3416 xen_printf("xdf@%s: xdf_suspend\n", vdp->xdf_addr);
3420 mutex_enter(&vdp->xdf_cb_lk);
3421 mutex_enter(&vdp->xdf_dev_lk);
3423 vdp->xdf_suspending = B_TRUE;
3424 xdf_ring_destroy(vdp);
3425 xdf_set_state(vdp, XD_SUSPEND);
3426 vdp->xdf_suspending = B_FALSE;
3428 mutex_exit(&vdp->xdf_dev_lk);
3429 mutex_exit(&vdp->xdf_cb_lk);
3432 xen_printf("xdf@%s: xdf_suspend: done\n", vdp->xdf_addr);
3440 xdf_t *vdp;
3460 vdp = ddi_get_soft_state(xdf_ssp, instance);
3462 if (vdp == NULL)
3465 mutex_enter(&vdp->xdf_cb_lk);
3466 xdf_disconnect(vdp, XD_CLOSED, B_FALSE);
3467 if (vdp->xdf_state != XD_CLOSED) {
3468 mutex_exit(&vdp->xdf_cb_lk);
3471 mutex_exit(&vdp->xdf_cb_lk);
3473 ASSERT(!ISDMACBON(vdp));
3479 if (vdp->xdf_timeout_id != 0)
3480 (void) untimeout(vdp->xdf_timeout_id);
3483 ddi_taskq_destroy(vdp->xdf_ready_tq);
3485 cmlb_detach(vdp->xdf_vd_lbl, NULL);
3486 cmlb_free_handle(&vdp->xdf_vd_lbl);
3493 list_destroy(&vdp->xdf_vreq_act);
3496 ddi_remove_softintr(vdp->xdf_softintr_id);
3499 cv_destroy(&vdp->xdf_dev_cv);
3500 mutex_destroy(&vdp->xdf_cb_lk);
3501 mutex_destroy(&vdp->xdf_dev_lk);
3502 if (vdp->xdf_cache_flush_block != NULL)
3503 kmem_free(vdp->xdf_flush_mem, 2 * vdp->xdf_xdev_secsize);