Lines Matching refs:vd

75 #define	VD_NAME			"vd"
218 (vd->dring + (i)*vd->descriptor_size))
221 #define VD_CLIENT(vd) \
222 (((vd)->xfer_mode == VIO_DESC_MODE) ? "in-band client" : \
223 (((vd)->xfer_mode == VIO_DRING_MODE_V1_0) ? "dring client" : \
224 (((vd)->xfer_mode == 0) ? "null client" : \
228 #define VD_DSKIMG_LABEL_READ(vd, labelp) \
229 vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)labelp, \
233 #define VD_DSKIMG_LABEL_WRITE(vd, labelp) \
234 vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE, (caddr_t)labelp, \
238 #define VD_DSKIMG(vd) ((vd)->vdisk_type == VD_DISK_TYPE_DISK && \
239 ((vd)->file || (vd)->volume))
242 #define VD_WRITE_INDEX_NEXT(vd, id) \
243 ((((id) + 1) >= vd->dring_len)? 0 : (id) + 1)
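The VD_WRITE_INDEX_NEXT() macro above advances a write-queue index with wraparound at vd->dring_len. A minimal standalone sketch of the same arithmetic (illustration only, not part of vds.c):

/*
 * Wraparound advance as performed by VD_WRITE_INDEX_NEXT(): the index
 * walks 0 .. dring_len - 1 and rolls back to 0 once it would reach the
 * ring length.
 */
static int
write_index_next(int id, int dring_len)
{
	return ((id + 1 >= dring_len) ? 0 : id + 1);
}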
424 struct vd *vd; /* vd instance task is for */
440 typedef struct vd {
520 #define VD_LABEL_VTOC(vd) \
521 ((struct dk_label *)(void *)((vd)->flabel))
523 #define VD_LABEL_EFI_GPT(vd, lba) \
524 ((efi_gpt_t *)(void *)((vd)->flabel + (lba)))
525 #define VD_LABEL_EFI_GPE(vd, lba) \
526 ((efi_gpe_t *)(void *)((vd)->flabel + 2 * (lba)))
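The three macros above address directly into the fake label buffer vd->flabel: a VTOC label at byte 0, or for EFI a GPT header one block into the buffer and the GPE array two blocks in (the lba argument receives the block size in bytes, as in vd_setup_partition_efi further down). A sketch of that assumed layout, with illustrative names:

/*
 * Sketch of the flabel layout the macros above encode (the struct and
 * function names here are illustrative, not from vds.c).
 */
#include <stddef.h>
#include <sys/dklabel.h>
#include <sys/efi_partition.h>

struct flabel_view {
	struct dk_label	*vtoc_label;	/* flabel + 0               */
	efi_gpt_t	*gpt;		/* flabel + 1 block (bytes) */
	efi_gpe_t	*gpe;		/* flabel + 2 blocks        */
};

static void
flabel_view_init(char *flabel, size_t bsize, struct flabel_view *view)
{
	view->vtoc_label = (struct dk_label *)(void *)flabel;
	view->gpt = (efi_gpt_t *)(void *)(flabel + bsize);
	view->gpe = (efi_gpe_t *)(void *)(flabel + 2 * bsize);
}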
667 static int vd_setup_vd(vd_t *vd);
668 static int vd_setup_single_slice_disk(vd_t *vd);
669 static int vd_setup_slice_image(vd_t *vd);
670 static int vd_setup_disk_image(vd_t *vd);
671 static int vd_backend_check_size(vd_t *vd);
672 static boolean_t vd_enabled(vd_t *vd);
674 static int vd_dskimg_validate_geometry(vd_t *vd);
675 static boolean_t vd_dskimg_is_iso_image(vd_t *vd);
676 static void vd_set_exported_operations(vd_t *vd);
677 static void vd_reset_access(vd_t *vd);
678 static int vd_backend_ioctl(vd_t *vd, int cmd, caddr_t arg);
686 static boolean_t vd_slice_geom_isvalid(vd_t *vd, struct dk_geom *geom);
687 static boolean_t vd_slice_vtoc_isvalid(vd_t *vd, struct extvtoc *vtoc);
736 * vd - disk on which the operation is performed.
755 vd_dskimg_io_params(vd_t *vd, int slice, size_t *blkp, size_t *lenp)
761 ASSERT(vd->file || VD_DSKIMG(vd));
763 ASSERT(vd->vdisk_bsize == DEV_BSIZE);
771 if (vd->vdisk_type == VD_DISK_TYPE_SLICE || slice == VD_SLICE_NONE) {
774 if (offset >= vd->dskimg_size) {
777 offset, vd->dskimg_size);
780 maxlen = vd->dskimg_size - offset;
790 if (vd->vdisk_label == VD_DISK_LABEL_UNK &&
791 vio_ver_is_supported(vd->version, 1, 1)) {
792 (void) vd_dskimg_validate_geometry(vd);
793 if (vd->vdisk_label == VD_DISK_LABEL_UNK) {
800 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) {
801 ASSERT(vd->vtoc.v_sectorsz == DEV_BSIZE);
803 ASSERT(vd->vdisk_label == VD_DISK_LABEL_EFI);
806 if (blk >= vd->slices[slice].nblocks) {
809 blk, vd->slices[slice].nblocks);
813 offset = (vd->slices[slice].start + blk) * DEV_BSIZE;
814 maxlen = (vd->slices[slice].nblocks - blk) * DEV_BSIZE;
833 if ((offset + len) > vd->dskimg_size) {
835 "dskimg_size (0x%lx)", offset, len, vd->dskimg_size);
856 * vd - disk on which the operation is performed.
872 vd_dskimg_rw(vd_t *vd, int slice, int operation, caddr_t data, size_t offset,
879 ASSERT(vd->file || VD_DSKIMG(vd));
881 ASSERT(vd->vdisk_bsize == DEV_BSIZE);
883 if ((status = vd_dskimg_io_params(vd, slice, &offset, &len)) != 0)
886 if (vd->volume) {
893 buf.b_edev = vd->dev[0];
906 if (ldi_strategy(vd->ldi_handle[0], &buf) != 0) {
923 ASSERT(vd->file);
926 vd->file_vnode, data, len, offset * DEV_BSIZE, UIO_SYSSPACE, FSYNC,
1072 * vd - disk on which the operation is performed.
1080 vd_dskimg_set_vtoc(vd_t *vd, struct dk_label *label)
1084 ASSERT(VD_DSKIMG(vd));
1086 if (VD_DSKIMG_LABEL_WRITE(vd, label) < 0) {
1119 if (vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE,
1142 * vd - disk on which the operation is performed.
1150 vd_dskimg_get_devid_block(vd_t *vd, size_t *blkp)
1154 ASSERT(VD_DSKIMG(vd));
1156 if (vd->vdisk_label == VD_DISK_LABEL_UNK) {
1164 if (vd->vdisk_label == VD_DISK_LABEL_EFI) {
1169 if (vd->efi_reserved == -1) {
1174 *blkp = vd->slices[vd->efi_reserved].start;
1178 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC);
1181 if (vd->dk_geom.dkg_acyl < 2) {
1183 "(acyl=%u)", vd->dk_geom.dkg_acyl);
1188 cyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl - 2;
1189 spc = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect;
1190 head = vd->dk_geom.dkg_nhead - 1;
1192 *blkp = (cyl * (spc - vd->dk_geom.dkg_apc)) +
1193 (head * vd->dk_geom.dkg_nsect) + 1;
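The VTOC branch above places the fabricated device id one sector into the last track of the second-to-last cylinder. A worked example with made-up geometry values, not taken from any real disk:

/*
 * Worked example of the device-id block computation shown above
 * (hypothetical geometry: 1000 data cylinders, 2 alternates, 16 heads,
 * 63 sectors per track, no alternate sectors per cylinder).
 */
#include <stdio.h>

int
main(void)
{
	unsigned ncyl = 1000, acyl = 2, nhead = 16, nsect = 63, apc = 0;

	unsigned cyl  = ncyl + acyl - 2;	/* second-to-last cylinder */
	unsigned spc  = nhead * nsect;		/* sectors per cylinder    */
	unsigned head = nhead - 1;		/* last head               */

	unsigned long blk = (unsigned long)cyl * (spc - apc) +
	    (unsigned long)head * nsect + 1;

	printf("devid block = %lu\n", blk);	/* prints 1008946 */
	return (0);
}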
1223 * vd - disk on which the operation is performed.
1233 vd_dskimg_read_devid(vd_t *vd, ddi_devid_t *devid)
1240 ASSERT(vd->vdisk_bsize == DEV_BSIZE);
1242 if ((status = vd_dskimg_get_devid_block(vd, &blk)) != 0)
1248 if ((vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)dkdevid, blk,
1300 * vd - disk on which the operation is performed.
1309 vd_dskimg_write_devid(vd_t *vd, ddi_devid_t devid)
1316 ASSERT(vd->vdisk_bsize == DEV_BSIZE);
1323 if ((status = vd_dskimg_get_devid_block(vd, &blk)) != 0)
1342 if ((status = vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE,
1363 * vd - disk on which the operation is performed.
1375 vd_do_scsi_rdwr(vd_t *vd, int operation, caddr_t data, size_t blk, size_t len)
1383 ASSERT(!vd->file);
1384 ASSERT(!vd->volume);
1385 ASSERT(vd->vdisk_bsize > 0);
1387 max_sectors = vd->max_xfer_sz;
1388 nblk = (len / vd->vdisk_bsize);
1390 if (len % vd->vdisk_bsize != 0)
1411 if (blk < (2 << 20) && nsectors <= 0xff && !vd->is_atapi_dev) {
1428 ucmd.uscsi_buflen = nsectors * vd->backend_bsize;
1443 status = ldi_ioctl(vd->ldi_handle[VD_ENTIRE_DISK_SLICE],
1444 USCSICMD, (intptr_t)&ucmd, (vd->open_flags | FKIOCTL),
1473 data += nsectors * vd->vdisk_bsize;
1490 * vd - disk on which the operation is performed.
1502 vd_scsi_rdwr(vd_t *vd, int operation, caddr_t data, size_t vblk, size_t vlen)
1512 if (vd->backend_bsize == 0) {
1517 if (vd_backend_check_size(vd) != 0)
1528 if (vd->vdisk_bsize == vd->backend_bsize)
1529 return (vd_do_scsi_rdwr(vd, operation, data, vblk, vlen));
1531 if (vd->vdisk_bsize > vd->backend_bsize)
1554 * --+--+--+--+--+--+--+--+--+--+--+--+--+--+--+- vd->vdisk_bsize
1560 * --+-----+-----+-----+-----+-----+-----+-----+-- vd->backend_bsize
1567 pblk = (vblk * vd->vdisk_bsize) / vd->backend_bsize;
1568 delta = (vblk * vd->vdisk_bsize) - (pblk * vd->backend_bsize);
1569 pnblk = ((delta + vlen - 1) / vd->backend_bsize) + 1;
1570 plen = pnblk * vd->backend_bsize;
1575 rv = vd_do_scsi_rdwr(vd, operation, (caddr_t)buf, pblk, plen);
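The pblk/delta/pnblk/plen arithmetic above maps a request expressed in vdisk blocks onto whole backend blocks before calling vd_do_scsi_rdwr(). A worked example with assumed sizes (512-byte vdisk blocks over a 2048-byte backend):

/*
 * Worked example of the virtual-to-physical block conversion above:
 * reading 3 vdisk blocks starting at vdisk block 7 touches backend
 * blocks 1 and 2 of a 2048-byte-block backend.
 */
#include <stdio.h>

int
main(void)
{
	size_t vdisk_bsize = 512, backend_bsize = 2048;	/* assumed sizes */
	size_t vblk = 7, vlen = 3 * 512;

	size_t pblk  = (vblk * vdisk_bsize) / backend_bsize;		/* 1    */
	size_t delta = (vblk * vdisk_bsize) - (pblk * backend_bsize);	/* 1536 */
	size_t pnblk = ((delta + vlen - 1) / backend_bsize) + 1;	/* 2    */
	size_t plen  = pnblk * backend_bsize;				/* 4096 */

	printf("pblk=%zu delta=%zu pnblk=%zu plen=%zu\n",
	    pblk, delta, pnblk, plen);
	return (0);
}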
1592 * vd - single-slice disk to read from
1602 vd_slice_flabel_read(vd_t *vd, caddr_t data, size_t offset, size_t length)
1605 uint_t limit = vd->flabel_limit * vd->vdisk_bsize;
1607 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE);
1608 ASSERT(vd->flabel != NULL);
1615 if (offset < vd->flabel_size) {
1617 if (offset + length <= vd->flabel_size) {
1618 bcopy(vd->flabel + offset, data, length);
1622 n = vd->flabel_size - offset;
1623 bcopy(vd->flabel + offset, data, n);
1648 * vd - single-slice disk to write to
1658 vd_slice_flabel_write(vd_t *vd, caddr_t data, size_t offset, size_t length)
1660 uint_t limit = vd->flabel_limit * vd->vdisk_bsize;
1665 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE);
1666 ASSERT(vd->flabel != NULL);
1676 if (vd->vdisk_label == VD_DISK_LABEL_VTOC &&
1677 offset == 0 && length == vd->vdisk_bsize) {
1687 if (vd_slice_geom_isvalid(vd, &geom) &&
1688 vd_slice_vtoc_isvalid(vd, &vtoc))
1722 * vd - single-slice disk on which the operation is performed
1744 vd_slice_fake_rdwr(vd_t *vd, int slice, int operation, caddr_t *datap,
1753 size_t bsize = vd->vdisk_bsize;
1755 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE);
1768 vd->vdisk_label != VD_DISK_LABEL_VTOC) &&
1770 vd->vdisk_label != VD_DISK_LABEL_EFI)) {
1779 n = vd_slice_flabel_write(vd, data, blk * bsize, length);
1781 n = vd_slice_flabel_read(vd, data, blk * bsize, length);
1799 if (vd->vdisk_label == VD_DISK_LABEL_VTOC &&
1805 if (vd->vdisk_label == VD_DISK_LABEL_EFI) {
1807 ablk = vd->vdisk_size - asize;
1809 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC);
1810 ASSERT(vd->dk_geom.dkg_apc == 0);
1812 csize = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect;
1813 ablk = vd->dk_geom.dkg_ncyl * csize;
1814 asize = vd->dk_geom.dkg_acyl * csize;
1854 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) {
1856 label = VD_LABEL_VTOC(vd);
1877 ASSERT(length == 0 || blk >= vd->flabel_limit);
1884 *blkp = blk - vd->flabel_limit;
1891 vd_flush_write(vd_t *vd)
1895 if (vd->file) {
1896 status = VOP_FSYNC(vd->file_vnode, FSYNC, kcred, NULL);
1898 status = ldi_ioctl(vd->ldi_handle[0], DKIOCFLUSHWRITECACHE,
1899 NULL, vd->open_flags | FKIOCTL, kcred, &rval);
1910 vd_t *vd = task->vd;
1914 ASSERT(vd->vdisk_bsize == DEV_BSIZE);
1916 if (vd->zvol) {
1918 status = ldi_strategy(vd->ldi_handle[0], buf);
1922 ASSERT(vd->file);
1925 vd->file_vnode, buf->b_un.b_addr, buf->b_bcount,
1971 vd_t *vd = task->vd;
1980 ASSERT(vd != NULL);
1985 ASSERT(slice == VD_SLICE_NONE || slice < vd->nslices);
2003 if (request->operation == VD_OP_BWRITE && !(vd->open_flags & FWRITE)) {
2041 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) {
2045 rv = vd_slice_fake_rdwr(vd, slice, request->operation,
2079 } else if (vd->volume || vd->file) {
2081 rv = vd_dskimg_io_params(vd, slice, &offset, &length);
2101 rv = vd_scsi_rdwr(vd, request->operation, bufaddr, offset,
2118 buf->b_edev = vd->dev[slice];
2122 if (vd->file || vd->zvol) {
2157 task->write_index = vd->write_index;
2158 vd->write_queue[task->write_index] = buf;
2159 vd->write_index =
2160 VD_WRITE_INDEX_NEXT(vd, vd->write_index);
2165 ASSERT(vd->ioq != NULL);
2168 (void) ddi_taskq_dispatch(task->vd->ioq, vd_bio_task, buf,
2180 buf->b_lblkno = offset << vd->vio_bshift;
2182 request->status = ldi_strategy(vd->ldi_handle[slice], buf);
2249 vd_need_reset(vd_t *vd, boolean_t reset_ldc)
2251 mutex_enter(&vd->lock);
2252 vd->reset_state = B_TRUE;
2253 vd->reset_ldc = reset_ldc;
2254 mutex_exit(&vd->lock);
2263 vd_reset_if_needed(vd_t *vd)
2267 mutex_enter(&vd->lock);
2268 if (!vd->reset_state) {
2269 ASSERT(!vd->reset_ldc);
2270 mutex_exit(&vd->lock);
2273 mutex_exit(&vd->lock);
2275 PR0("Resetting connection state with %s", VD_CLIENT(vd));
2279 * out from under it; defer checking vd->reset_ldc, as one of the
2282 if (vd->ioq != NULL)
2283 ddi_taskq_wait(vd->ioq);
2284 ddi_taskq_wait(vd->completionq);
2286 status = vd_flush_write(vd);
2291 if ((vd->initialized & VD_DRING) &&
2292 ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0))
2295 vd_free_dring_task(vd);
2298 if (vd->vio_msgp != NULL) {
2299 kmem_free(vd->vio_msgp, vd->max_msglen);
2300 vd->vio_msgp = NULL;
2304 if (vd->inband_task.msg != NULL) {
2305 kmem_free(vd->inband_task.msg, vd->max_msglen);
2306 vd->inband_task.msg = NULL;
2309 mutex_enter(&vd->lock);
2311 if (vd->reset_ldc)
2313 if (vd->reset_ldc && ((status = ldc_down(vd->ldc_handle)) != 0))
2317 vd_reset_access(vd);
2319 vd->initialized &= ~(VD_SID | VD_SEQ_NUM | VD_DRING);
2320 vd->state = VD_STATE_INIT;
2321 vd->max_msglen = sizeof (vio_msg_t); /* baseline vio message size */
2324 vd->vio_msgp = kmem_alloc(vd->max_msglen, KM_SLEEP);
2327 (void) ldc_up(vd->ldc_handle);
2329 vd->reset_state = B_FALSE;
2330 vd->reset_ldc = B_FALSE;
2332 mutex_exit(&vd->lock);
2338 vd_mark_in_reset(vd_t *vd)
2342 PR0("vd_mark_in_reset: marking vd in reset\n");
2344 vd_need_reset(vd, B_FALSE);
2345 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd, DDI_SLEEP);
2348 vd_need_reset(vd, B_TRUE);
2354 vd_mark_elem_done(vd_t *vd, int idx, int elem_status, int elem_nbytes)
2361 if (vd->reset_state)
2365 if ((status = VIO_DRING_ACQUIRE(&otd, vd->dring_mtype,
2366 vd->dring_handle, idx, idx)) != 0) {
2368 vd_mark_in_reset(vd);
2387 if ((status = VIO_DRING_RELEASE(vd->dring_mtype,
2388 vd->dring_handle, idx, idx)) != 0) {
2390 vd_mark_in_reset(vd);
2415 vd_t *vd = task->vd;
2421 ASSERT(vd != NULL);
2457 if (vd->write_queue[wid] != NULL) {
2459 vd->write_queue[wid] = NULL;
2460 wid = VD_WRITE_INDEX_NEXT(vd, wid);
2469 while (vd->write_queue[wid] != NULL) {
2470 (void) biowait(vd->write_queue[wid]);
2471 vd->write_queue[wid] = NULL;
2472 wid = VD_WRITE_INDEX_NEXT(vd, wid);
2480 request->status = vd_flush_write(vd);
2483 (void (*)(void *))vd_flush_write, vd,
2494 if (!vd->reset_state)
2500 vd_mark_in_reset(vd);
2511 vd_mark_in_reset(vd);
2539 ASSERT(task->vd != NULL);
2549 status = send_msg(task->vd->ldc_handle, task->msg, task->msglen);
2554 vd_mark_in_reset(task->vd);
2558 vd_need_reset(task->vd, B_TRUE);
2580 vd_t *vd = task->vd;
2584 if (!vd->reset_state && (vd->xfer_mode == VIO_DRING_MODE_V1_0)) {
2585 status = vd_mark_elem_done(vd, task->index,
2588 vd_mark_in_reset(vd);
2590 vd_need_reset(vd, B_TRUE);
2615 if (!vd->reset_state)
2963 vd_slice_geom_isvalid(vd_t *vd, struct dk_geom *geom)
2965 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE);
2966 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC);
2968 if (geom->dkg_ncyl != vd->dk_geom.dkg_ncyl ||
2969 geom->dkg_acyl != vd->dk_geom.dkg_acyl ||
2970 geom->dkg_nsect != vd->dk_geom.dkg_nsect ||
2971 geom->dkg_pcyl != vd->dk_geom.dkg_pcyl)
2983 vd_slice_vtoc_isvalid(vd_t *vd, struct extvtoc *vtoc)
2988 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE);
2989 ASSERT(vd->vdisk_label == VD_DISK_LABEL_VTOC);
2991 if (vtoc->v_sanity != vd->vtoc.v_sanity ||
2992 vtoc->v_version != vd->vtoc.v_version ||
2993 vtoc->v_nparts != vd->vtoc.v_nparts ||
2994 strcmp(vtoc->v_volume, vd->vtoc.v_volume) != 0 ||
2995 strcmp(vtoc->v_asciilabel, vd->vtoc.v_asciilabel) != 0)
3000 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_start ||
3002 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_size)
3014 csize = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect;
3020 if (vd->vtoc.v_part[0].p_size >= 4 * csize &&
3021 vtoc->v_part[0].p_size < vd->vtoc.v_part[0].p_size - 4 *csize)
3044 vd_do_slice_ioctl(vd_t *vd, int cmd, void *ioctl_arg)
3051 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE);
3054 return (vd_flush_write(vd));
3056 switch (vd->vdisk_label) {
3065 bcopy(&vd->dk_geom, ioctl_arg, sizeof (vd->dk_geom));
3070 bcopy(&vd->vtoc, ioctl_arg, sizeof (vd->vtoc));
3080 if (!vd_slice_geom_isvalid(vd, geom))
3092 if (!vd_slice_vtoc_isvalid(vd, vtoc))
3120 len = vd_slice_flabel_read(vd,
3122 lba * vd->vdisk_bsize, len);
3143 vds_efi_alloc_and_read(vd_t *vd, efi_gpt_t **gpt, efi_gpe_t **gpe)
3148 VD_EFI_DEV_SET(edev, vd, (vd_efi_ioctl_func)vd_backend_ioctl);
3156 vds_efi_free(vd_t *vd, efi_gpt_t *gpt, efi_gpe_t *gpe)
3160 VD_EFI_DEV_SET(edev, vd, (vd_efi_ioctl_func)vd_backend_ioctl);
3166 vd_dskimg_validate_efi(vd_t *vd)
3173 if ((status = vds_efi_alloc_and_read(vd, &gpt, &gpe)) != 0)
3176 bzero(&vd->vtoc, sizeof (struct extvtoc));
3177 bzero(&vd->dk_geom, sizeof (struct dk_geom));
3178 bzero(vd->slices, sizeof (vd_slice_t) * VD_MAXPART);
3180 vd->efi_reserved = -1;
3191 vd->slices[i].start = gpe[i].efi_gpe_StartingLBA;
3192 vd->slices[i].nblocks = gpe[i].efi_gpe_EndingLBA -
3197 vd->efi_reserved = i;
3201 ASSERT(vd->vdisk_size != 0);
3202 vd->slices[VD_EFI_WD_SLICE].start = 0;
3203 vd->slices[VD_EFI_WD_SLICE].nblocks = vd->vdisk_size;
3205 vds_efi_free(vd, gpt, gpe);
3224 * vd - disk on which the operation is performed.
3233 vd_dskimg_validate_geometry(vd_t *vd)
3236 struct dk_geom *geom = &vd->dk_geom;
3237 struct extvtoc *vtoc = &vd->vtoc;
3241 ASSERT(VD_DSKIMG(vd));
3243 if (VD_DSKIMG_LABEL_READ(vd, &label) < 0)
3252 if (vd_dskimg_validate_efi(vd) == 0) {
3253 vd->vdisk_label = VD_DISK_LABEL_EFI;
3257 vd->vdisk_label = VD_DISK_LABEL_UNK;
3258 vd_build_default_label(vd->dskimg_size, vd->vdisk_bsize,
3262 vd->vdisk_label = VD_DISK_LABEL_VTOC;
3269 bzero(vd->slices, sizeof (vd_slice_t) * VD_MAXPART);
3270 if (vd->vdisk_label != VD_DISK_LABEL_UNK) {
3272 vd->slices[i].start = vtoc->v_part[i].p_start;
3273 vd->slices[i].nblocks = vtoc->v_part[i].p_size;
3288 vd_do_dskimg_ioctl(vd_t *vd, int cmd, void *ioctl_arg)
3296 ASSERT(VD_DSKIMG(vd));
3304 rc = vd_dskimg_validate_geometry(vd);
3307 bcopy(&vd->dk_geom, geom, sizeof (struct dk_geom));
3314 rc = vd_dskimg_validate_geometry(vd);
3317 bcopy(&vd->vtoc, vtoc, sizeof (struct extvtoc));
3333 bcopy(ioctl_arg, &vd->dk_geom, sizeof (vd->dk_geom));
3338 ASSERT(vd->dk_geom.dkg_nhead != 0 &&
3339 vd->dk_geom.dkg_nsect != 0);
3347 vd_vtocgeom_to_label(vtoc, &vd->dk_geom, &label);
3350 if ((rc = vd_dskimg_set_vtoc(vd, &label)) != 0)
3356 return (vd_flush_write(vd));
3362 if (vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD,
3372 if (vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BWRITE,
3386 (void) vd_dskimg_validate_geometry(vd);
3393 if (vd_dskimg_write_devid(vd, vd->dskimg_devid) != 0) {
3401 vd_backend_ioctl(vd_t *vd, int cmd, caddr_t arg)
3410 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) {
3413 status = vd_do_slice_ioctl(vd, cmd, arg);
3415 } else if (VD_DSKIMG(vd)) {
3418 status = vd_do_dskimg_ioctl(vd, cmd, arg);
3423 status = ldi_ioctl(vd->ldi_handle[0], cmd, (intptr_t)arg,
3424 vd->open_flags | FKIOCTL, kcred, &rval);
3437 status = ldi_ioctl(vd->ldi_handle[0], cmd,
3438 (intptr_t)&vtoc, vd->open_flags | FKIOCTL,
3448 status = ldi_ioctl(vd->ldi_handle[0], cmd,
3449 (intptr_t)&vtoc, vd->open_flags | FKIOCTL,
3480 vd_do_ioctl(vd_t *vd, vd_dring_payload_t *request, void* buf, vd_ioctl_t *ioctl)
3486 ASSERT(request->slice < vd->nslices);
3493 if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes,
3520 if (!(vd->open_flags & FWRITE) &&
3531 request->status = vd_backend_ioctl(vd, ioctl->cmd, ioctl->arg);
3559 if ((status = ldc_mem_copy(vd->ldc_handle, buf, 0, &nbytes,
3611 vd_t *vd = task->vd;
3656 ASSERT(vd != NULL);
3658 ASSERT(request->slice < vd->nslices);
3692 if (!(vd->open_flags & FWRITE) && ioctl[i].write) {
3701 status = vd_do_ioctl(vd, request, buf, &ioctl[i]);
3711 vd_t *vd = task->vd;
3720 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) {
3733 if (VD_DSKIMG(vd)) {
3734 if (vd->dskimg_devid == NULL) {
3739 sz = ddi_devid_sizeof(vd->dskimg_devid);
3741 bcopy(vd->dskimg_devid, devid, sz);
3744 if (ddi_lyr_get_devid(vd->dev[request->slice],
3775 if ((status = ldc_mem_copy(vd->ldc_handle, (caddr_t)vd_devid, 0,
3790 vd_scsi_reset(vd_t *vd)
3798 status = ldi_ioctl(vd->ldi_handle[0], USCSICMD, (intptr_t)&uscsi,
3799 (vd->open_flags | FKIOCTL), kcred, &rval);
3807 vd_t *vd = task->vd;
3811 ASSERT(vd->scsi);
3821 request->status = vd_scsi_reset(vd);
3831 vd_t *vd = task->vd;
3852 (void) vd_backend_check_size(vd);
3853 ASSERT(vd->vdisk_size != 0);
3857 vd_cap.vdisk_block_size = vd->vdisk_bsize;
3858 vd_cap.vdisk_size = vd->vdisk_size;
3860 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&vd_cap, 0, &nbytes,
3875 vd_t *vd = task->vd;
3879 ASSERT(vd->scsi);
3891 request->status = ldi_ioctl(vd->ldi_handle[request->slice], MHIOCSTATUS,
3892 NULL, (vd->open_flags | FKIOCTL), kcred, &rval);
3899 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&access, 0, &nbytes,
3914 vd_t *vd = task->vd;
3918 ASSERT(vd->scsi);
3928 if ((rv = ldc_mem_copy(vd->ldc_handle, (char *)&flags, 0, &nbytes,
3936 request->status = ldi_ioctl(vd->ldi_handle[request->slice],
3937 MHIOCRELEASE, NULL, (vd->open_flags | FKIOCTL), kcred,
3940 vd->ownership = B_FALSE;
3963 request->status = ldi_ioctl(vd->ldi_handle[request->slice],
3964 MHIOCTKOWN, NULL, (vd->open_flags | FKIOCTL), kcred, &rval);
3978 request->status = ldi_ioctl(vd->ldi_handle[request->slice],
3979 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred,
3983 request->status = ldi_ioctl(vd->ldi_handle[request->slice],
3984 MHIOCTKOWN, NULL, (vd->open_flags | FKIOCTL), kcred, &rval);
3995 request->status = ldi_ioctl(vd->ldi_handle[request->slice],
3996 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred,
4002 (void) vd_scsi_reset(vd);
4005 request->status = ldi_ioctl(vd->ldi_handle[request->slice],
4006 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred,
4013 request->status = ldi_ioctl(vd->ldi_handle[request->slice],
4014 MHIOCQRESERVE, NULL, (vd->open_flags | FKIOCTL), kcred,
4020 vd->ownership = B_TRUE;
4028 vd_reset_access(vd_t *vd)
4032 if (vd->file || vd->volume || !vd->ownership)
4036 status = ldi_ioctl(vd->ldi_handle[0], MHIOCRELEASE, NULL,
4037 (vd->open_flags | FKIOCTL), kcred, &rval);
4044 vd->ownership = B_FALSE;
4055 status = vd_scsi_reset(vd);
4061 status = ldi_ioctl(vd->ldi_handle[0], MHIOCRELEASE, NULL,
4062 (vd->open_flags | FKIOCTL), kcred, &rval);
4065 vd->ownership = B_FALSE;
4082 ", rebooting the system", vd->device_path);
4085 panic(VD_RESET_ACCESS_FAILURE_MSG, vd->device_path);
4088 cmn_err(CE_WARN, VD_RESET_ACCESS_FAILURE_MSG, vd->device_path);
4136 vd_t *vd = task->vd;
4139 ASSERT(vd != NULL);
4159 if ((VD_OP_SUPPORTED(vd->operations, request->operation) == B_FALSE) ||
4167 if (request->slice >= vd->nslices &&
4168 ((vd->vdisk_type != VD_DISK_TYPE_DISK && vd_slice_single_slice) ||
4171 request->slice, (vd->nslices - 1));
4208 vd_t *vd = task->vd;
4230 (void) ddi_taskq_dispatch(vd->completionq, vd_complete,
4235 if (!vd->reset_state && (vd->xfer_mode == VIO_DRING_MODE_V1_0)) {
4237 status = vd_mark_elem_done(vd, task->index,
4240 vd_mark_in_reset(vd);
4242 vd_need_reset(vd, B_TRUE);
4325 vd_process_ver_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
4370 ASSERT(!(vd->initialized & VD_SID));
4371 vd->sid = ver_msg->tag.vio_sid;
4372 vd->initialized |= VD_SID;
4375 * Store the negotiated major and minor version values in the "vd" data
4379 vd->version.major = ver_msg->ver_major;
4380 vd->version.minor = ver_msg->ver_minor;
4388 vd_set_exported_operations(vd_t *vd)
4390 vd->operations = 0; /* clear field */
4397 if (vio_ver_is_supported(vd->version, 1, 1)) {
4398 ASSERT(vd->open_flags & FREAD);
4399 vd->operations |= VD_OP_MASK_READ | (1 << VD_OP_GET_CAPACITY);
4401 if (vd->open_flags & FWRITE)
4402 vd->operations |= VD_OP_MASK_WRITE;
4404 if (vd->scsi)
4405 vd->operations |= VD_OP_MASK_SCSI;
4407 if (VD_DSKIMG(vd) && vd_dskimg_is_iso_image(vd)) {
4413 vd->operations &= ~VD_OP_MASK_WRITE;
4415 } else if (vio_ver_is_supported(vd->version, 1, 0)) {
4416 vd->operations = VD_OP_MASK_READ | VD_OP_MASK_WRITE;
4420 ASSERT(vd->operations != 0);
4424 vd_process_attr_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
4460 if ((vd->initialized & VD_DISK_READY) == 0) {
4461 PR0("Retry setting up disk (%s)", vd->device_path);
4463 status = vd_setup_vd(vd);
4471 if (!vd_enabled(vd))
4479 vd->initialized |= VD_DISK_READY;
4480 ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR);
4482 ((vd->vdisk_type == VD_DISK_TYPE_DISK) ? "disk" : "slice"),
4483 (vd->volume ? "yes" : "no"),
4484 (vd->file ? "yes" : "no"),
4485 vd->nslices);
4489 vd->xfer_mode = attr_msg->xfer_mode;
4491 if (vd->xfer_mode == VIO_DESC_MODE) {
4515 vd->max_msglen = MAX(vd->max_msglen, max_inband_msglen);
4521 vd->inband_task.vd = vd;
4522 vd->inband_task.msg = kmem_alloc(vd->max_msglen, KM_SLEEP);
4523 vd->inband_task.index = 0;
4524 vd->inband_task.type = VD_FINAL_RANGE_TASK; /* range == 1 */
4528 attr_msg->vdisk_block_size = vd->vdisk_bsize;
4529 attr_msg->max_xfer_sz = vd->max_xfer_sz;
4531 attr_msg->vdisk_size = vd->vdisk_size;
4532 attr_msg->vdisk_type = (vd_slice_single_slice)? vd->vdisk_type :
4534 attr_msg->vdisk_media = vd->vdisk_media;
4537 vd_set_exported_operations(vd);
4538 attr_msg->operations = vd->operations;
4540 PR0("%s", VD_CLIENT(vd));
4542 ASSERT(vd->dring_task == NULL);
4548 vd_process_dring_reg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
4579 if (vd->initialized & VD_DRING) {
4594 * "cookie" requires increasing the value of vd->max_msglen
4613 status = ldc_mem_dring_map(vd->ldc_handle, reg_msg->cookie,
4615 reg_msg->descriptor_size, mtype, &vd->dring_handle);
4629 ldc_mem_dring_info(vd->dring_handle, &dring_minfo)) != 0) {
4631 if ((status = ldc_mem_dring_unmap(vd->dring_handle)) != 0)
4643 vd->initialized |= VD_DRING;
4644 vd->dring_ident = 1; /* "There Can Be Only One" */
4645 vd->dring = dring_minfo.vaddr;
4646 vd->descriptor_size = reg_msg->descriptor_size;
4647 vd->dring_len = reg_msg->num_descriptors;
4648 vd->dring_mtype = dring_minfo.mtype;
4649 reg_msg->dring_ident = vd->dring_ident;
4651 vd->descriptor_size, vd->dring_len);
4657 vd->dring_task =
4658 kmem_zalloc((sizeof (*vd->dring_task)) * vd->dring_len, KM_SLEEP);
4659 for (int i = 0; i < vd->dring_len; i++) {
4660 vd->dring_task[i].vd = vd;
4661 vd->dring_task[i].index = i;
4663 status = ldc_mem_alloc_handle(vd->ldc_handle,
4664 &(vd->dring_task[i].mhdl));
4675 vd->dring_task[i].request = kmem_zalloc((vd->descriptor_size -
4677 vd->dring_task[i].msg = kmem_alloc(vd->max_msglen, KM_SLEEP);
4680 if (vd->file || vd->zvol) {
4681 vd->write_queue =
4682 kmem_zalloc(sizeof (buf_t *) * vd->dring_len, KM_SLEEP);
4689 vd_process_dring_unreg_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
4708 if (unreg_msg->dring_ident != vd->dring_ident) {
4710 vd->dring_ident, unreg_msg->dring_ident);
4738 vd_check_seq_num(vd_t *vd, uint64_t seq_num)
4740 if ((vd->initialized & VD_SEQ_NUM) && (seq_num != vd->seq_num + 1)) {
4742 seq_num, (vd->seq_num + 1));
4744 vd_need_reset(vd, B_FALSE);
4748 vd->seq_num = seq_num;
4749 vd->initialized |= VD_SEQ_NUM; /* superfluous after first time... */
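vd_check_seq_num() above accepts only the immediate successor of the previously recorded sequence number (once one has been seen) and otherwise asks for a connection reset. A simplified standalone sketch of that check, with illustrative names:

/*
 * Sequence-number check in the spirit of vd_check_seq_num(): after the
 * first message, anything other than last + 1 is an error the caller
 * handles by resetting the connection.
 */
#include <stdint.h>
#include <stdbool.h>

struct seq_state {
	bool		seen_first;
	uint64_t	last;
};

static int
check_seq_num(struct seq_state *s, uint64_t seq_num)
{
	if (s->seen_first && seq_num != s->last + 1)
		return (-1);
	s->last = seq_num;
	s->seen_first = true;
	return (0);
}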
4770 vd_process_desc_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
4796 if (vd_check_seq_num(vd, desc_msg->hdr.seq_num) != 0)
4808 ASSERT(vd->inband_task.msg != NULL);
4810 bcopy(msg, vd->inband_task.msg, msglen);
4811 vd->inband_task.msglen = msglen;
4817 desc_msg = (vd_dring_inband_msg_t *)vd->inband_task.msg;
4818 vd->inband_task.request = &desc_msg->payload;
4820 return (vd_process_task(&vd->inband_task));
4824 vd_process_element(vd_t *vd, vd_task_type_t type, uint32_t idx,
4833 if ((status = VIO_DRING_ACQUIRE(&otd, vd->dring_mtype,
4834 vd->dring_handle, idx, idx)) != 0) {
4840 bcopy(&elem->payload, vd->dring_task[idx].request,
4841 (vd->descriptor_size - sizeof (vio_dring_entry_hdr_t)));
4846 if ((status = VIO_DRING_RELEASE(vd->dring_mtype,
4847 vd->dring_handle, idx, idx)) != 0) {
4857 vd->dring_task[idx].type = type;
4860 bcopy(msg, vd->dring_task[idx].msg, msglen);
4862 vd->dring_task[idx].msglen = msglen;
4863 return (vd_process_task(&vd->dring_task[idx]));
4867 vd_process_element_range(vd_t *vd, int start, int end,
4888 nelem = ((end < start) ? end + vd->dring_len : end) - start + 1;
4889 for (i = start, n = nelem; n > 0; i = (i + 1) % vd->dring_len, n--) {
4892 status = vd_process_element(vd, type, i, msg, msglen);
4908 if (vd->ioq != NULL)
4909 ddi_taskq_wait(vd->ioq);
4910 ddi_taskq_wait(vd->completionq);
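In vd_process_element_range() above, both the element count and the loop index wrap around the descriptor ring. A worked example with made-up values:

/*
 * Worked example of the wraparound handling above: with a ring of 8
 * descriptors, the range start = 6 .. end = 1 covers
 * nelem = (1 + 8) - 6 + 1 = 4 elements, visited as 6, 7, 0, 1.
 */
#include <stdio.h>

int
main(void)
{
	int dring_len = 8, start = 6, end = 1;
	int nelem = ((end < start) ? end + dring_len : end) - start + 1;

	printf("nelem = %d, indexes:", nelem);
	for (int i = start, n = nelem; n > 0; i = (i + 1) % dring_len, n--)
		printf(" %d", i);
	printf("\n");
	return (0);
}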
4917 vd_process_dring_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
4936 if (vd_check_seq_num(vd, dring_msg->seq_num) != 0)
4939 if (dring_msg->dring_ident != vd->dring_ident) {
4941 vd->dring_ident, dring_msg->dring_ident);
4945 if (dring_msg->start_idx >= vd->dring_len) {
4947 dring_msg->start_idx, vd->dring_len);
4952 (dring_msg->end_idx >= vd->dring_len)) {
4954 dring_msg->end_idx, vd->dring_len);
4961 return (vd_process_element_range(vd, dring_msg->start_idx,
4995 vd_do_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
5010 if ((msg->tag.vio_sid != vd->sid) && (vd->initialized & VD_SID)) {
5011 PR0("Expected SID %u, received %u", vd->sid,
5016 PR1("\tWhile in state %d (%s)", vd->state, vd_decode_state(vd->state));
5021 switch (vd->state) {
5023 if ((status = vd_process_ver_msg(vd, msg, msglen)) != 0)
5027 vd->state = VD_STATE_VER;
5031 if ((status = vd_process_attr_msg(vd, msg, msglen)) != 0)
5035 vd->state = VD_STATE_ATTR;
5039 switch (vd->xfer_mode) {
5045 vd->state = VD_STATE_DATA;
5050 vd_process_dring_reg_msg(vd, msg, msglen)) != 0)
5054 vd->state = VD_STATE_DRING;
5066 vd->state = VD_STATE_DATA;
5080 vd_process_dring_reg_msg(vd, msg, msglen)) != ENOMSG)
5089 status = vd_process_dring_unreg_msg(vd, msg, msglen);
5093 switch (vd->xfer_mode) {
5095 return (vd_process_desc_msg(vd, msg, msglen));
5102 if ((status = vd_process_dring_msg(vd, msg,
5112 status = vd_process_dring_unreg_msg(vd, msg, msglen);
5129 vd_process_msg(vd_t *vd, vio_msg_t *msg, size_t msglen)
5143 vd_need_reset(vd, B_TRUE);
5150 switch (status = vd_do_process_msg(vd, msg, msglen)) {
5176 PR1("\tResulting in state %d (%s)", vd->state,
5177 vd_decode_state(vd->state));
5180 task.vd = vd;
5190 (void) ddi_taskq_dispatch(vd->completionq, vd_serial_notify,
5198 ddi_taskq_wait(vd->completionq);
5204 vd_need_reset(vd, reset_ldc);
5211 vd_enabled(vd_t *vd)
5215 mutex_enter(&vd->lock);
5216 enabled = vd->enabled;
5217 mutex_exit(&vd->lock);
5224 vd_t *vd = (vd_t *)arg;
5227 ASSERT(vd != NULL);
5232 while (vd_enabled(vd) && status == 0) {
5239 vd_reset_if_needed(vd); /* can change vd->max_msglen */
5244 status = ldc_status(vd->ldc_handle, &lstatus);
5251 ASSERT(vd->max_msglen != 0);
5253 msgsize = vd->max_msglen; /* stable copy for alloc/free */
5256 status = recv_msg(vd->ldc_handle, vd->vio_msgp, &msglen);
5259 rv = vd_process_msg(vd, (void *)vd->vio_msgp, msglen);
5261 if (msgsize != vd->max_msglen) {
5263 msgsize, vd->max_msglen);
5264 kmem_free(vd->vio_msgp, msgsize);
5265 vd->vio_msgp =
5266 kmem_alloc(vd->max_msglen, KM_SLEEP);
5277 vd_need_reset(vd, B_FALSE);
5284 vd_need_reset(vd, B_TRUE);
5295 vd_t *vd = (vd_t *)(void *)arg;
5298 ASSERT(vd != NULL);
5300 if (!vd_enabled(vd))
5306 vd_need_reset(vd, B_TRUE);
5307 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd,
5311 vd_need_reset(vd, B_TRUE);
5318 if (vd->state != VD_STATE_INIT) {
5320 vd_need_reset(vd, B_FALSE);
5321 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg,
5322 vd, DDI_SLEEP);
5325 vd_need_reset(vd, B_TRUE);
5331 (void) ldc_up(vd->ldc_handle);
5340 vd_need_reset(vd, B_FALSE);
5341 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg,
5342 vd, DDI_SLEEP);
5345 vd_need_reset(vd, B_TRUE);
5355 status = ddi_taskq_dispatch(vd->startq, vd_recv_msg, vd,
5360 vd_need_reset(vd, B_TRUE);
5436 * vd - disk on which the operation is performed.
5443 vd_dskimg_is_iso_image(vd_t *vd)
5449 ASSERT(VD_DSKIMG(vd));
5455 if (vd->vdisk_media == VD_MEDIA_DVD || vd->vdisk_media == VD_MEDIA_CD)
5464 sec = (ISO_VOLDESC_SEC * ISO_SECTOR_SIZE) / vd->vdisk_bsize;
5465 rv = vd_dskimg_rw(vd, VD_SLICE_NONE, VD_OP_BREAD, (caddr_t)iso_buf,
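The read above fetches the ISO 9660 volume descriptor so vd_dskimg_is_iso_image() can check whether the disk image is an ISO image. A worked example of the sector computation, assuming the usual ISO 9660 constants (volume descriptor at sector 16, 2048-byte sectors) and a 512-byte vdisk block size:

/*
 * With the assumed constants, the volume descriptor starts at vdisk
 * block (16 * 2048) / 512 = 64.
 */
#include <stdio.h>

int
main(void)
{
	size_t voldesc_sec = 16;	/* assumed ISO_VOLDESC_SEC  */
	size_t iso_sector_size = 2048;	/* assumed ISO_SECTOR_SIZE  */
	size_t vdisk_bsize = 512;	/* assumed vdisk block size */

	size_t sec = (voldesc_sec * iso_sector_size) / vdisk_bsize;

	printf("volume descriptor at vdisk block %zu\n", sec);
	return (0);
}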
5486 * vd - disk on which the operation is performed.
5493 vd_is_atapi_device(vd_t *vd)
5499 ASSERT(vd->ldi_handle[0] != NULL);
5500 ASSERT(!vd->file);
5502 rv = ldi_prop_lookup_string(vd->ldi_handle[0],
5505 PR0("'variant' property exists for %s", vd->device_path);
5511 rv = ldi_prop_exists(vd->ldi_handle[0], LDI_DEV_T_ANY, "atapi");
5513 PR0("'atapi' property exists for %s", vd->device_path);
5521 vd_setup_full_disk(vd_t *vd)
5524 major_t major = getmajor(vd->dev[0]);
5525 minor_t minor = getminor(vd->dev[0]) - VD_ENTIRE_DISK_SLICE;
5527 ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK);
5530 status = vd_backend_check_size(vd);
5533 if (!vd->scsi) {
5536 vd->device_path, status);
5545 vd->vdisk_size = VD_SIZE_UNKNOWN;
5546 vd->vdisk_bsize = 0;
5547 vd->backend_bsize = 0;
5548 vd->vdisk_media = VD_MEDIA_FIXED;
5552 vd->dev[VD_ENTIRE_DISK_SLICE] = vd->dev[0];
5553 vd->dev[0] = 0;
5554 vd->ldi_handle[VD_ENTIRE_DISK_SLICE] = vd->ldi_handle[0];
5555 vd->ldi_handle[0] = NULL;
5558 for (int slice = 0; slice < vd->nslices; slice++) {
5565 ASSERT(vd->dev[slice] == 0);
5566 ASSERT(vd->ldi_handle[slice] == NULL);
5571 vd->dev[slice] = makedevice(major, (minor + slice));
5595 status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK,
5596 vd->open_flags, kcred, &vd->ldi_handle[slice],
5597 vd->vds->ldi_ident);
5600 status = ldi_open_by_dev(&vd->dev[slice], OTYP_BLK,
5601 vd->open_flags | FNDELAY, kcred,
5602 &vd->ldi_handle[slice], vd->vds->ldi_ident);
5609 vd->ldi_handle[slice] = NULL;
5658 vd_setup_partition_vtoc(vd_t *vd)
5660 char *device_path = vd->device_path;
5665 if (vd->dk_geom.dkg_nsect == 0) {
5669 if (vd->dk_geom.dkg_nhead == 0) {
5675 csize = vd->dk_geom.dkg_nhead * vd->dk_geom.dkg_nsect;
5681 vd->dk_geom.dkg_ncyl = vd->vdisk_size / csize + 1;
5684 vd->dk_geom.dkg_acyl = 2;
5685 vd->dk_geom.dkg_pcyl = vd->dk_geom.dkg_ncyl + vd->dk_geom.dkg_acyl;
5689 bzero(vd->vtoc.v_part, sizeof (vd->vtoc.v_part));
5690 vd->vtoc.v_part[0].p_tag = V_UNASSIGNED;
5691 vd->vtoc.v_part[0].p_flag = 0;
5696 vd->vtoc.v_part[0].p_start = csize; /* start on cylinder 1 */
5697 vd->vtoc.v_part[0].p_size = (vd->vdisk_size / csize) * csize;
5700 vd->vtoc.v_nparts = 1;
5701 bcopy(VD_ASCIILABEL, vd->vtoc.v_asciilabel,
5703 sizeof (vd->vtoc.v_asciilabel)));
5704 bcopy(VD_VOLUME_NAME, vd->vtoc.v_volume,
5705 MIN(sizeof (VD_VOLUME_NAME), sizeof (vd->vtoc.v_volume)));
5708 vd->nslices = V_NUMPAR;
5709 vd->vtoc.v_nparts = V_NUMPAR;
5712 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_tag = V_BACKUP;
5713 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_flag = 0;
5714 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_start = 0;
5715 vd->vtoc.v_part[VD_ENTIRE_DISK_SLICE].p_size =
5716 vd->dk_geom.dkg_ncyl * csize;
5718 vd_get_readable_size(vd->vdisk_size * vd->vdisk_bsize,
5726 vd->dk_geom.dkg_bcyl = 0;
5727 vd->dk_geom.dkg_intrlv = 1;
5728 vd->dk_geom.dkg_write_reinstruct = 0;
5729 vd->dk_geom.dkg_read_reinstruct = 0;
5735 (void) snprintf(vd->vtoc.v_asciilabel, LEN_DKL_ASCII,
5738 vd->dk_geom.dkg_ncyl, vd->dk_geom.dkg_acyl,
5739 vd->dk_geom.dkg_nhead, vd->dk_geom.dkg_nsect);
5740 bzero(vd->vtoc.v_volume, sizeof (vd->vtoc.v_volume));
5743 vd->flabel_limit = (uint_t)csize;
5744 vd->flabel_size = VD_LABEL_VTOC_SIZE(vd->vdisk_bsize);
5745 vd->flabel = kmem_zalloc(vd->flabel_size, KM_SLEEP);
5746 vd_vtocgeom_to_label(&vd->vtoc, &vd->dk_geom,
5747 VD_LABEL_VTOC(vd));
5751 vd->vdisk_size += csize * 3;
5801 vd_setup_partition_efi(vd_t *vd)
5811 ASSERT(vd->vdisk_bsize > 0);
5813 bsize = vd->vdisk_bsize;
5819 vd->flabel_limit = (uint_t)first_u_lba;
5820 vd->flabel_size = VD_LABEL_EFI_SIZE(bsize);
5821 vd->flabel = kmem_zalloc(vd->flabel_size, KM_SLEEP);
5822 gpt = VD_LABEL_EFI_GPT(vd, bsize);
5823 gpe = VD_LABEL_EFI_GPE(vd, bsize);
5829 vd->vdisk_size += first_u_lba;
5831 s0_end = vd->vdisk_size - 1;
5849 vd->nslices = V_NUMPAR;
5860 vd->vdisk_size += EFI_MIN_RESV_SIZE;
5863 gpt->efi_gpt_LastUsableLBA = LE_64(vd->vdisk_size - 1);
5866 vd->vdisk_size += (EFI_MIN_ARRAY_SIZE / bsize) + 1;
5867 gpt->efi_gpt_AlternateLBA = LE_64(vd->vdisk_size - 1);
5884 vd_setup_backend_vnode(vd_t *vd)
5888 char *file_path = vd->device_path;
5892 ASSERT(!vd->volume);
5894 if ((status = vn_open(file_path, UIO_SYSSPACE, vd->open_flags | FOFFMAX,
5895 0, &vd->file_vnode, 0, 0)) != 0) {
5897 status == EROFS) && (!(vd->initialized & VD_SETUP_ERROR) &&
5898 !(DEVI_IS_ATTACHING(vd->vds->dip)))) {
5905 * We set vd->file now so that vds_destroy_vd will take care of
5908 vd->file = B_TRUE;
5910 vd->max_xfer_sz = maxphys / DEV_BSIZE; /* default transfer size */
5915 dev = vd->file_vnode->v_vfsp->vfs_dev;
5920 vd->vds->ldi_ident);
5927 (intptr_t)&dk_cinfo, (vd->open_flags | FKIOCTL), kcred,
5936 vd->max_xfer_sz = dk_cinfo.dki_maxtransfer;
5944 file_path, getmajor(dev), getminor(dev), vd->max_xfer_sz);
5946 if (vd->vdisk_type == VD_DISK_TYPE_SLICE)
5947 status = vd_setup_slice_image(vd);
5949 status = vd_setup_disk_image(vd);
5955 vd_setup_slice_image(vd_t *vd)
5960 if ((status = vd_backend_check_size(vd)) != 0) {
5962 vd->device_path, status);
5966 vd->vdisk_media = VD_MEDIA_FIXED;
5967 vd->vdisk_label = (vd_slice_label == VD_DISK_LABEL_UNK)?
5970 if (vd->vdisk_label == VD_DISK_LABEL_EFI ||
5971 vd->dskimg_size >= 2 * ONE_TERABYTE) {
5972 status = vd_setup_partition_efi(vd);
5980 vd_build_default_label(vd->dskimg_size, vd->vdisk_bsize,
5982 vd_label_to_vtocgeom(&label, &vd->vtoc, &vd->dk_geom);
5983 status = vd_setup_partition_vtoc(vd);
5990 vd_setup_disk_image(vd_t *vd)
5993 char *backend_path = vd->device_path;
5995 if ((status = vd_backend_check_size(vd)) != 0) {
6002 if (vd->dskimg_size < sizeof (struct dk_label)) {
6011 status = vd_dskimg_validate_geometry(vd);
6017 if (vd_dskimg_is_iso_image(vd)) {
6023 if ((vd->vdisk_size * vd->vdisk_bsize) > ONE_GIGABYTE)
6024 vd->vdisk_media = VD_MEDIA_DVD;
6026 vd->vdisk_media = VD_MEDIA_CD;
6028 vd->vdisk_media = VD_MEDIA_FIXED;
6033 if (vd->vdisk_label != VD_DISK_LABEL_UNK) {
6035 status = vd_dskimg_read_devid(vd, &vd->dskimg_devid);
6049 vd->dskimg_devid = NULL;
6061 if (ddi_devid_init(vd->vds->dip, DEVID_FAB, NULL, 0,
6062 &vd->dskimg_devid) != DDI_SUCCESS) {
6064 vd->dskimg_devid = NULL;
6073 if (vd->vdisk_label != VD_DISK_LABEL_UNK) {
6074 if (vd_dskimg_write_devid(vd, vd->dskimg_devid) != 0) {
6076 ddi_devid_free(vd->dskimg_devid);
6077 vd->dskimg_devid = NULL;
6090 * vd - pointer to structure containing the vDisk info
6098 vd_open_using_ldi_by_name(vd_t *vd, int flags)
6101 char *device_path = vd->device_path;
6105 &vd->ldi_handle[0], vd->vds->ldi_ident);
6114 kcred, &vd->ldi_handle[0], vd->vds->ldi_ident);
6118 vd->ldi_handle[0] = NULL;
6131 vd_setup_backend_ldi(vd_t *vd)
6135 char *device_path = vd->device_path;
6138 ASSERT(vd->ldi_handle[0] != NULL);
6139 ASSERT(vd->dev[0] != NULL);
6141 vd->file = B_FALSE;
6144 if ((status = ldi_ioctl(vd->ldi_handle[0], DKIOCINFO,
6145 (intptr_t)&dk_cinfo, (vd->open_flags | FKIOCTL), kcred,
6169 vd->open_flags &= ~FWRITE;
6171 } else if (vd->open_flags & FWRITE) {
6173 (void) ldi_close(vd->ldi_handle[0], vd->open_flags & ~FWRITE,
6175 status = vd_open_using_ldi_by_name(vd, vd->open_flags);
6184 vd->max_xfer_sz = dk_cinfo.dki_maxtransfer;
6190 vd->is_atapi_dev = vd_is_atapi_device(vd);
6202 if (vd->vdisk_type == VD_DISK_TYPE_DISK) {
6204 if (vd->volume) {
6206 return (vd_setup_disk_image(vd));
6211 ASSERT(!vd->volume);
6213 vd->scsi = B_TRUE;
6214 return (vd_setup_full_disk(vd));
6227 return (vd_setup_single_slice_disk(vd));
6231 vd_setup_single_slice_disk(vd_t *vd)
6235 char *device_path = vd->device_path;
6238 vd->vdisk_media = VD_MEDIA_FIXED;
6240 if (vd->volume) {
6241 ASSERT(vd->vdisk_type == VD_DISK_TYPE_SLICE);
6248 vd->vdisk_type = VD_DISK_TYPE_SLICE;
6249 vd->nslices = 1;
6252 if ((status = vd_backend_check_size(vd)) != 0) {
6267 vd->vdisk_size >= ONE_TERABYTE / vd->vdisk_bsize) {
6268 vd->vdisk_label = VD_DISK_LABEL_EFI;
6270 status = ldi_ioctl(vd->ldi_handle[0], DKIOCGEXTVTOC,
6271 (intptr_t)&vd->vtoc, (vd->open_flags | FKIOCTL),
6276 status = ldi_ioctl(vd->ldi_handle[0], DKIOCGVTOC,
6277 (intptr_t)&vtoc, (vd->open_flags | FKIOCTL),
6279 vtoctoextvtoc(vtoc, vd->vtoc);
6283 status = ldi_ioctl(vd->ldi_handle[0], DKIOCGGEOM,
6284 (intptr_t)&vd->dk_geom, (vd->open_flags | FKIOCTL),
6292 vd->vdisk_label = VD_DISK_LABEL_VTOC;
6296 vd->vdisk_label = VD_DISK_LABEL_VTOC;
6297 vd_build_default_label(vd->vdisk_size * vd->vdisk_bsize,
6298 vd->vdisk_bsize, &label);
6299 vd_label_to_vtocgeom(&label, &vd->vtoc, &vd->dk_geom);
6302 vd->vdisk_label = VD_DISK_LABEL_EFI;
6306 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) {
6308 status = vd_setup_partition_vtoc(vd);
6312 status = vd_setup_partition_efi(vd);
6321 * following attributes of the vd structure:
6354 vd_backend_check_size(vd_t *vd)
6363 if (vd->file) {
6367 rv = VOP_GETATTR(vd->file_vnode, &vattr, 0, kcred, NULL);
6369 PR0("VOP_GETATTR(%s) = errno %d", vd->device_path, rv);
6376 } else if (vd->volume) {
6379 rv = ldi_get_size(vd->ldi_handle[0], &backend_size);
6381 PR0("ldi_get_size() failed for %s", vd->device_path);
6390 rv = ldi_ioctl(vd->ldi_handle[0], DKIOCGMEDIAINFO,
6391 (intptr_t)&minfo, (vd->open_flags | FKIOCTL),
6395 vd->device_path, rv);
6399 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) {
6400 rv = ldi_get_size(vd->ldi_handle[0], &backend_size);
6403 vd->device_path);
6407 ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK);
6429 old_size = vd->vdisk_size;
6434 vd->vdisk_bsize == vdisk_bsize)
6446 vd->vio_bshift = nshift;
6447 vd->vdisk_size = new_size;
6448 vd->vdisk_bsize = vdisk_bsize;
6449 vd->backend_bsize = backend_bsize;
6451 if (vd->file || vd->volume)
6452 vd->dskimg_size = backend_size;
6460 if (vd->vdisk_type == VD_DISK_TYPE_SLICE) {
6462 if (vd->vdisk_label == VD_DISK_LABEL_VTOC) {
6463 rv = vd_setup_partition_vtoc(vd);
6466 "(err = %d)", vd->device_path, rv);
6470 rv = vd_setup_partition_efi(vd);
6473 "(err = %d)", vd->device_path, rv);
6478 } else if (!vd->file && !vd->volume) {
6480 ASSERT(vd->vdisk_type == VD_DISK_TYPE_DISK);
6481 vd->vdisk_media = media;
6493 * vd - pointer to structure containing the vDisk info
6501 vd_identify_dev(vd_t *vd, int *dtype)
6504 char *device_path = vd->device_path;
6507 vds_t *vds = vd->vds;
6509 status = vd_open_using_ldi_by_name(vd, vd->open_flags & ~FWRITE);
6516 if ((status = ldi_get_dev(vd->ldi_handle[0], &vd->dev[0])) != 0) {
6526 drv_name = ddi_major_to_name(getmajor(vd->dev[0]));
6555 vd->zvol = B_TRUE;
6563 vd_setup_vd(vd_t *vd)
6568 char *path = vd->device_path;
6585 vd->volume = B_FALSE;
6586 status = vd_setup_backend_vnode(vd);
6625 if ((status = vd_identify_dev(vd, &drv_type)) != 0) {
6641 vd->volume = B_TRUE;
6653 if (vd->volume && vd_volume_force_slice) {
6654 vd->vdisk_type = VD_DISK_TYPE_SLICE;
6655 vd->nslices = 1;
6658 status = vd_setup_backend_ldi(vd);
6675 if (!(vd->initialized & VD_SETUP_ERROR) &&
6676 !(DEVI_IS_ATTACHING(vd->vds->dip))) {
6685 vd->initialized |= VD_SETUP_ERROR;
6687 } else if (vd->initialized & VD_SETUP_ERROR) {
6690 vd->initialized &= ~VD_SETUP_ERROR;
6701 if ((vd->file || vd->zvol) && vd->ioq == NULL) {
6702 (void) snprintf(tq_name, sizeof (tq_name), "vd_ioq%lu", vd->id);
6704 if ((vd->ioq = ddi_taskq_create(vd->vds->dip, tq_name,
6722 vd_t *vd;
6730 if ((vd = kmem_zalloc(sizeof (*vd), KM_NOSLEEP)) == NULL) {
6734 *vdp = vd; /* assign here so vds_destroy_vd() can cleanup later */
6735 vd->id = id;
6736 vd->vds = vds;
6737 (void) strncpy(vd->device_path, device_path, MAXPATHLEN);
6740 vd->open_flags = FREAD;
6743 vd->open_flags |= FWRITE;
6746 vd->open_flags |= FEXCL;
6750 vd->vdisk_type = VD_DISK_TYPE_SLICE;
6751 vd->nslices = 1;
6753 vd->vdisk_type = VD_DISK_TYPE_DISK;
6754 vd->nslices = V_NUMPAR;
6758 vd->vdisk_label = VD_DISK_LABEL_UNK;
6761 if ((status = vd_setup_vd(vd)) == 0) {
6762 vd->initialized |= VD_DISK_READY;
6764 ASSERT(vd->nslices > 0 && vd->nslices <= V_NUMPAR);
6766 ((vd->vdisk_type == VD_DISK_TYPE_DISK) ? "disk" : "slice"),
6767 (vd->volume ? "yes" : "no"), (vd->file ? "yes" : "no"),
6768 vd->nslices);
6781 mutex_init(&vd->lock, NULL, MUTEX_DRIVER, iblock);
6782 vd->initialized |= VD_LOCKING;
6788 if ((vd->startq = ddi_taskq_create(vds->dip, tq_name, 1,
6795 if ((vd->completionq = ddi_taskq_create(vds->dip, tq_name, 1,
6802 vd->max_msglen = sizeof (vio_msg_t); /* baseline vio message size */
6803 vd->vio_msgp = kmem_alloc(vd->max_msglen, KM_SLEEP);
6805 vd->enabled = 1; /* before callback can dispatch to startq */
6813 if ((status = ldc_init(ldc_id, &ldc_attr, &vd->ldc_handle)) != 0) {
6818 vd->initialized |= VD_LDC;
6820 if ((status = ldc_reg_callback(vd->ldc_handle, vd_handle_ldc_events,
6821 (caddr_t)vd)) != 0) {
6827 if ((status = ldc_open(vd->ldc_handle)) != 0) {
6833 if ((status = ldc_up(vd->ldc_handle)) != 0) {
6838 status = ldc_mem_alloc_handle(vd->ldc_handle, &(vd->inband_task.mhdl));
6846 if (mod_hash_insert(vds->vd_table, (mod_hash_key_t)id, vd) != 0) {
6852 vd->state = VD_STATE_INIT;
6889 vd_t *vd = (vd_t *)arg;
6892 if (vd == NULL)
6898 if (vd->initialized & VD_LOCKING) {
6899 mutex_enter(&vd->lock);
6900 vd->enabled = 0;
6901 mutex_exit(&vd->lock);
6905 if (vd->startq != NULL)
6906 ddi_taskq_destroy(vd->startq); /* waits for queued tasks */
6909 if (vd->ioq != NULL)
6910 ddi_taskq_destroy(vd->ioq);
6913 if (vd->completionq != NULL)
6914 ddi_taskq_destroy(vd->completionq); /* waits for tasks */
6916 vd_free_dring_task(vd);
6919 (void) ldc_mem_free_handle(vd->inband_task.mhdl);
6922 if (vd->initialized & VD_LDC) {
6924 if (vd->initialized & VD_DRING)
6925 (void) ldc_mem_dring_unmap(vd->dring_handle);
6928 while ((rv = ldc_close(vd->ldc_handle)) == EAGAIN) {
6936 (void) ldc_unreg_callback(vd->ldc_handle);
6937 (void) ldc_fini(vd->ldc_handle);
6946 (void) ldc_set_cb_mode(vd->ldc_handle, LDC_CB_DISABLE);
6947 while (ldc_unreg_callback(vd->ldc_handle) == EAGAIN)
6953 if (vd->vio_msgp != NULL) {
6954 kmem_free(vd->vio_msgp, vd->max_msglen);
6955 vd->vio_msgp = NULL;
6959 if (vd->inband_task.msg != NULL) {
6960 kmem_free(vd->inband_task.msg, vd->max_msglen);
6961 vd->inband_task.msg = NULL;
6964 if (vd->file) {
6966 (void) VOP_CLOSE(vd->file_vnode, vd->open_flags, 1,
6968 VN_RELE(vd->file_vnode);
6972 if (vd->ldi_handle[slice] != NULL) {
6974 (void) ldi_close(vd->ldi_handle[slice],
6975 vd->open_flags, kcred);
6981 if (vd->dskimg_devid != NULL)
6982 ddi_devid_free(vd->dskimg_devid);
6985 if (vd->flabel) {
6986 kmem_free(vd->flabel, vd->flabel_size);
6987 vd->flabel = NULL;
6988 vd->flabel_size = 0;
6992 if (vd->initialized & VD_LOCKING)
6993 mutex_destroy(&vd->lock);
6996 kmem_free(vd, sizeof (*vd));
7004 vd_t *vd = NULL;
7008 ldc_id, &vd)) != 0)
7009 vds_destroy_vd(vd);