/* vioblk.c revision 3e0831a90c729a9b8266ce68233ba63a32ffaa4c */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2014, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, Alexey Zaytsev <alexey.zaytsev@gmail.com>
*/
#include <sys/sysmacros.h>
#include "virtiovar.h"
#include "virtioreg.h"
/* Feature bits */
#define VIRTIO_BLK_F_BARRIER (1<<0)
/* Configuration registers */
#define VIRTIO_BLK_CONFIG_CAPACITY 0 /* 64bit */
/* Command */
#define VIRTIO_BLK_T_IN 0
#define VIRTIO_BLK_T_OUT 1
#define VIRTIO_BLK_T_SCSI_CMD 2
#define VIRTIO_BLK_T_SCSI_CMD_OUT 3
#define VIRTIO_BLK_T_FLUSH 4
#define VIRTIO_BLK_T_FLUSH_OUT 5
#define VIRTIO_BLK_T_GET_ID 8
#define VIRTIO_BLK_T_BARRIER 0x80000000
/* Statuses */
#define VIRTIO_BLK_S_OK 0
#define VIRTIO_BLK_S_IOERR 1
#define VIRTIO_BLK_S_UNSUPP 2
#define DEF_MAXINDIRECT (128)
#define DEF_MAXSECTOR (4096)
#define VIOBLK_POISON 0xdead0001dead0001
/*
* Static Variables.
*/
static char vioblk_ident[] = "VirtIO block driver";
/* Request header structure */
/*
 * Device-visible request header that precedes each virtio block command.
 * NOTE(review): all members have been stripped from this revision; the
 * legacy VirtIO block spec defines type/ioprio/sector fields here --
 * confirm against the original source.
 */
struct vioblk_req_hdr {
};
/* Per-request driver state: the device-visible header plus DMA bookkeeping. */
struct vioblk_req {
struct vioblk_req_hdr hdr; /* header handed to the device */
unsigned int ndmac; /* presumably the DMA cookie count for this request -- confirm */
};
/*
 * Named kstats exported by the driver (see the kstat_named_init calls
 * in attach). Field names mirror the exported stat names.
 */
struct vioblk_stats {
struct kstat_named sts_rw_outofmemory; /* r/w failed: allocation */
struct kstat_named sts_rw_badoffset; /* r/w rejected: bad offset */
struct kstat_named sts_rw_queuemax; /* r/w queue high-watermark */
struct kstat_named sts_rw_cookiesmax; /* max DMA cookies per request */
struct kstat_named sts_rw_cacheflush; /* cache-flush requests issued */
struct kstat_named sts_intr_queuemax; /* interrupt queue high-watermark */
struct kstat_named sts_intr_total; /* total interrupts taken */
struct kstat_named sts_io_errors; /* VIRTIO_BLK_S_IOERR completions */
struct kstat_named sts_unsupp_errors; /* VIRTIO_BLK_S_UNSUPP completions */
struct kstat_named sts_nxio_errors; /* unknown-status completions */
};
/*
 * Local (in-softc) statistics counters, updated on the hot path and
 * copied into the exported kstats.
 */
struct vioblk_lstats {
unsigned int rw_cookiesmax; /* max DMA cookies seen in one request */
unsigned int intr_queuemax; /* max completions drained per interrupt */
unsigned int io_errors; /* device reported I/O error */
unsigned int unsupp_errors; /* device reported unsupported request */
unsigned int nxio_errors; /* device reported unknown status */
};
/* Per-instance soft state. */
struct vioblk_softc {
struct virtio_softc sc_virtio; /* common virtio state; must be first (container_of-style use in int handler) */
struct vioblk_req *sc_reqs; /* array of per-slot request structures */
struct vioblk_stats *ks_data; /* exported kstat data */
struct vioblk_lstats sc_stats; /* local stat counters */
short sc_blkflags;
int sc_blk_size; /* logical block size */
int sc_pblk_size; /* physical block size */
int sc_seg_max; /* max data segments per request */
int sc_seg_size_max; /* max bytes per segment */
};
/*
 * blkdev(9E) entry-point vector.
 * NOTE(review): the initializer is empty in this revision -- the bd_ops
 * members (driveinfo/mediainfo/devid/read/write handlers) appear to
 * have been stripped; confirm against the original source.
 */
static bd_ops_t vioblk_ops = {
};
/* Forward declaration for the quiesce(9E) entry point used below. */
static int vioblk_quiesce(dev_info_t *);
/*
 * Autoconfiguration entry points.
 * NOTE(review): dev_ops(9S) normally starts with DEVO_REV, a refcnt and
 * a getinfo(9E) slot; several initializers appear to be missing from
 * this revision -- confirm against the original source.
 */
static struct dev_ops vioblk_dev_ops = {
0,
nulldev, /* identify */
nulldev, /* probe */
vioblk_attach, /* attach */
vioblk_detach, /* detach */
nodev, /* reset */
NULL, /* cb_ops */
NULL, /* bus_ops */
NULL, /* power */
vioblk_quiesce /* quiesce */
};
/* Standard Module linkage initialization for a Streams driver */
extern struct mod_ops mod_driverops;
/*
 * NOTE(review): the opening `static struct modldrv modldrv = {' line
 * appears to be missing before these initializers; the file cannot
 * compile as-is -- confirm against the original source.
 */
&mod_driverops, /* Type of module. This one is a driver */
vioblk_ident, /* short description */
&vioblk_dev_ops /* driver specific ops */
};
/*
 * Module linkage consumed by _init/_fini/_info.
 * NOTE(review): the leading MODREV_1 revision initializer appears to be
 * missing from this revision -- confirm against the original source.
 */
static struct modlinkage modlinkage = {
{
(void *)&modldrv,
NULL,
},
};
/*
 * NOTE(review): these two lines are the tail of a ddi_device_acc_attr
 * initializer whose opening lines are missing from this revision.
 */
DDI_NEVERSWAP_ACC, /* virtio is always native byte order */
};
/*
 * DMA attributes for the per-request header/status buffers: a single
 * contiguous, byte-aligned segment anywhere in the 64-bit space.
 */
static ddi_dma_attr_t vioblk_req_dma_attr = {
DMA_ATTR_V0, /* dma_attr version */
0, /* dma_attr_addr_lo */
0xFFFFFFFFFFFFFFFFull, /* dma_attr_addr_hi */
0x00000000FFFFFFFFull, /* dma_attr_count_max */
1, /* dma_attr_align */
1, /* dma_attr_burstsizes */
1, /* dma_attr_minxfer */
0xFFFFFFFFull, /* dma_attr_maxxfer */
0xFFFFFFFFFFFFFFFFull, /* dma_attr_seg */
1, /* dma_attr_sgllen */
1, /* dma_attr_granular */
0, /* dma_attr_flags */
};
/*
 * DMA attributes for the data blocks. maxxfer and sgllen are left zero
 * here and filled in during attach from the negotiated device limits.
 */
static ddi_dma_attr_t vioblk_bd_dma_attr = {
DMA_ATTR_V0, /* dma_attr version */
0, /* dma_attr_addr_lo */
0xFFFFFFFFFFFFFFFFull, /* dma_attr_addr_hi */
0x00000000FFFFFFFFull, /* dma_attr_count_max */
1, /* dma_attr_align */
1, /* dma_attr_burstsizes */
1, /* dma_attr_minxfer */
0, /* dma_attr_maxxfer, set in attach */
0xFFFFFFFFFFFFFFFFull, /* dma_attr_seg */
0, /* dma_attr_sgllen, set in attach */
1, /* dma_attr_granular */
0, /* dma_attr_flags */
};
/*
 * Submit a single block request to the device.
 * NOTE(review): the signature line and most of the body are missing
 * from this revision; the surviving skeleton and comments suggest it
 * built a header/payload/status descriptor chain and pushed it to the
 * ring (likely vioblk_rw) -- confirm against the original source.
 */
static int
{
struct vioblk_req *req;
int total_cookies, write;
total_cookies = 2;
return (EINVAL);
}
/* allocate top entry */
if (!ve_hdr) {
return (ENOMEM);
}
/* getting request */
/* Header */
sizeof (struct vioblk_req_hdr), B_TRUE);
/* Payload */
if (len > 0) {
}
/* Status */
/* sending the whole chain to the device */
return (DDI_SUCCESS);
}
/*
* Now in polling mode. Interrupts are off, so we
* 1) poll for the already queued requests to complete.
* 2) push our request.
* 3) wait for our request to complete.
*/
/*
 * Polled-mode request submission (see the comment block above).
 * NOTE(review): signature and most statements are missing from this
 * revision; the surviving skeleton drains completions in 10us steps
 * against a decrementing timeout both before and after the submit,
 * returning ETIMEDOUT on expiry.
 */
static int
{
int ret;
/* Prevent a hard hang. */
/* Poll for an empty queue */
/* Check if any pending requests completed. */
if (ret != DDI_INTR_CLAIMED) {
drv_usecwait(10);
tmout -= 10;
return (ETIMEDOUT);
}
}
if (ret)
return (ret);
/* Poll for an empty queue again. */
/* Check if any pending requests completed. */
if (ret != DDI_INTR_CLAIMED) {
drv_usecwait(10);
tmout -= 10;
return (ETIMEDOUT);
}
}
return (DDI_SUCCESS);
}
/*
 * NOTE(review): signature and most of the body are missing from this
 * revision; this looks like a blkdev read or write entry point that
 * toggles the softc poll-mode flag around submission -- confirm
 * against the original source.
 */
static int
{
int ret;
if (!sc->sc_in_poll_mode) {
}
} else {
if (sc->sc_in_poll_mode) {
sc->sc_in_poll_mode = 0;
}
}
return (ret);
}
/*
 * NOTE(review): signature and most of the body are missing from this
 * revision; structurally identical to the preceding entry point, so
 * presumably the paired blkdev read/write handler -- confirm against
 * the original source.
 */
static int
{
int ret;
if (!sc->sc_in_poll_mode) {
}
} else {
if (sc->sc_in_poll_mode) {
sc->sc_in_poll_mode = 0;
}
}
return (ret);
}
/*
 * NOTE(review): signature and most of the body are missing from this
 * revision; position in the file suggests the cache-flush entry point
 * -- confirm against the original source.
 */
static int
{
int ret;
if (!ret)
return (ret);
}
/*
 * NOTE(review): signature and entire body are missing from this
 * revision; only the empty shell remains -- confirm against the
 * original source.
 */
static void
{
}
/*
 * NOTE(review): signature and body are missing from this revision;
 * only an unconditional success return remains -- confirm against the
 * original source.
 */
static int
{
return (0);
}
/*
 * NOTE(review): signature and most of the body are missing from this
 * revision. The surviving skeleton (DMA alloc/bind, a request, a wait
 * for reply, and the 20-byte hex format string below) suggests this
 * issued a VIRTIO_BLK_T_GET_ID request to build the device ID --
 * confirm against the original source.
 */
static int
{
int ret;
if (ret != DDI_SUCCESS)
goto out_alloc;
if (ret != DDI_DMA_MAPPED) {
ret = DDI_FAILURE;
goto out_map;
}
if (ret) {
goto out_rw;
}
/* wait for reply */
/* timeout */
if (ret < 0) {
return (DDI_FAILURE);
}
if (ret != DDI_SUCCESS) {
return (ret);
}
"devid %x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x",
return (0);
return (ret);
}
/*
 * Format the negotiated feature bits into a human-readable string.
 * NOTE(review): the signature and the sprintf/bufp-advancing statements
 * between the feature checks are missing from this revision; only the
 * per-feature conditionals and the final NUL-termination remain --
 * confirm against the original source.
 */
static void
{
char buf[512];
/* LINTED E_PTRDIFF_OVERFLOW */
/* LINTED E_PTRDIFF_OVERFLOW */
/* LINTED E_PTRDIFF_OVERFLOW */
if (features & VIRTIO_BLK_F_BARRIER)
/* LINTED E_PTRDIFF_OVERFLOW */
if (features & VIRTIO_BLK_F_SIZE_MAX)
/* LINTED E_PTRDIFF_OVERFLOW */
if (features & VIRTIO_BLK_F_SEG_MAX)
/* LINTED E_PTRDIFF_OVERFLOW */
if (features & VIRTIO_BLK_F_GEOMETRY)
/* LINTED E_PTRDIFF_OVERFLOW */
if (features & VIRTIO_BLK_F_RO)
/* LINTED E_PTRDIFF_OVERFLOW */
if (features & VIRTIO_BLK_F_BLK_SIZE)
/* LINTED E_PTRDIFF_OVERFLOW */
if (features & VIRTIO_BLK_F_SCSI)
/* LINTED E_PTRDIFF_OVERFLOW */
if (features & VIRTIO_BLK_F_FLUSH)
/* LINTED E_PTRDIFF_OVERFLOW */
if (features & VIRTIO_BLK_F_TOPOLOGY)
/* LINTED E_PTRDIFF_OVERFLOW */
/* LINTED E_PTRDIFF_OVERFLOW */
*bufp = '\0';
}
/*
 * Feature negotiation with the host.
 * NOTE(review): signature and most of the body are missing from this
 * revision; the surviving fragment shows the driver failing attach if
 * the host lacks RING_INDIRECT_DESC support.
 */
static int
{
"Host does not support RING_INDIRECT_DESC, bye.");
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/* ARGSUSED */
/*
 * Virtqueue interrupt handler: drain completed requests and hand the
 * payloads back to blkdev with the status mapped to an errno.
 * NOTE(review): the signature and most statements are missing from
 * this revision; the surviving skeleton shows the status switch, the
 * special-casing of GET_ID completions (devid_init wakeup), and a
 * per-pass completion counter -- confirm against the original source.
 */
{
struct vioblk_softc, sc_virtio);
int i = 0, error;
return (DDI_INTR_CLAIMED);
}
/* Note: blkdev tears down the payload mapping for us. */
/* returning payload back to blkdev */
switch (status) {
case VIRTIO_BLK_S_OK:
error = 0;
break;
case VIRTIO_BLK_S_IOERR:
break;
case VIRTIO_BLK_S_UNSUPP:
break;
default:
break;
}
if (type == VIRTIO_BLK_T_GET_ID) {
/* notify devid_init */
} else
i++;
}
/* update stats */
return (DDI_INTR_CLAIMED);
}
/* ARGSUSED */
/*
 * Configuration-change interrupt handler.
 * NOTE(review): signature is missing from this revision; only the
 * unconditional DDI_INTR_CLAIMED return remains.
 */
{
return (DDI_INTR_CLAIMED);
}
/*
 * Register the config-change and virtqueue interrupt handlers with the
 * common virtio layer.
 * NOTE(review): the signature and the registration call are missing
 * from this revision; only the handler tables remain.
 */
static int
{
int ret;
struct virtio_int_handler vioblk_conf_h = {
};
struct virtio_int_handler vioblk_vq_h[] = {
{ vioblk_int_handler },
{ NULL },
};
return (ret);
}
/*
 * Tear down the per-slot request resources allocated by the paired
 * alloc routine below.
 * NOTE(review): the signature and per-iteration cleanup statements are
 * missing from this revision; only the qsize loop shell remains.
 */
static void
{
int i, qsize;
for (i = 0; i < qsize; i++) {
}
}
/*
 * Allocate and DMA-bind a request header+status buffer for every queue
 * slot. Returns 0 on success, ENOMEM after cleanup on any failure.
 * NOTE(review): the signature, the allocation calls and the exit-path
 * cleanup are missing from this revision -- confirm against the
 * original source.
 */
static int
{
int i, qsize;
int ret;
for (i = 0; i < qsize; i++) {
if (ret != DDI_SUCCESS) {
"Can't allocate dma handle for req "
"buffer %d", i);
goto exit;
}
sizeof (struct vioblk_req_hdr) + sizeof (uint8_t),
if (ret != DDI_DMA_MAPPED) {
"Can't bind req buffer %d", i);
goto exit;
}
}
return (0);
exit:
return (ENOMEM);
}
/*
 * kstat update callback: the stats are read-only, so writes are
 * rejected with EACCES.
 * NOTE(review): the signature and the copy-out of the local counters
 * are missing from this revision.
 */
static int
{
if (rw == KSTAT_WRITE)
return (EACCES);
return (0);
}
/*
 * attach(9E): soft-state setup, kstat creation, BAR0 mapping, interrupt
 * registration, feature negotiation, queue/request allocation and
 * blkdev registration.
 * NOTE(review): the signature and the majority of the statements are
 * missing from this revision; only the cmd switch, the kstat name
 * initializers, the error-path labels and sparse fragments remain --
 * confirm the full sequence against the original source.
 */
static int
{
int ret = DDI_SUCCESS;
int instance;
struct vioblk_softc *sc;
struct virtio_softc *vsc;
struct vioblk_stats *ks_data;
switch (cmd) {
case DDI_ATTACH:
break;
case DDI_RESUME:
case DDI_PM_RESUME:
/* suspend/resume is not supported: fail the request */
ret = DDI_FAILURE;
goto exit;
default:
ret = DDI_FAILURE;
goto exit;
}
/* Duplicate for faster access / less typing */
/*
 * Initialize interrupt kstat. This should not normally fail, since
 * we don't use a persistent stat. We do it this way to avoid having
 * to test for it at run time on the hot path.
 */
sizeof (struct vioblk_stats) / sizeof (kstat_named_t),
goto exit_intrstat;
}
"total_rw_outofmemory", KSTAT_DATA_UINT64);
"total_rw_badoffset", KSTAT_DATA_UINT64);
"total_intr", KSTAT_DATA_UINT64);
"total_io_errors", KSTAT_DATA_UINT32);
"total_unsupp_errors", KSTAT_DATA_UINT32);
"total_nxio_errors", KSTAT_DATA_UINT32);
"total_rw_cacheflush", KSTAT_DATA_UINT64);
"max_rw_cookies", KSTAT_DATA_UINT32);
"max_intr_queue", KSTAT_DATA_UINT32);
/* map BAR0 */
if (ret != DDI_SUCCESS) {
goto exit_map;
}
if (vioblk_register_ints(sc)) {
goto exit_int;
}
if (ret)
goto exit_features;
else
}
}
/* Flushing is not supported. */
}
/* The max number of segments (cookies) in a request */
/* That's what Linux does. */
if (!sc->sc_seg_max)
/*
 * SEG_MAX corresponds to the number of _data_
 * blocks in a request
 */
}
/* The maximum size for a cookie in a request. */
}
/* The maximum request size */
"seg_size=%d, maxxfer=%" PRIu64,
goto exit_alloc1;
}
if (ret) {
goto exit_alloc2;
}
KM_SLEEP);
if (ret)
goto exit_enable_ints;
if (ret != DDI_SUCCESS) {
goto exit_attach_bd;
}
return (DDI_SUCCESS);
/*
 * There is no virtio_disable_ints(), it's done in virtio_release_ints.
 * If they ever get split, don't forget to add a call here.
 */
exit:
return (ret);
}
/*
 * detach(9E): tear down the instance. Suspend (DDI_PM_SUSPEND) is not
 * supported and fails.
 * NOTE(review): the signature and the resource-release statements are
 * missing from this revision; only the cmd switch remains.
 */
static int
{
switch (cmd) {
case DDI_DETACH:
break;
case DDI_PM_SUSPEND:
return (DDI_FAILURE);
default:
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
 * quiesce(9E) entry point.
 * NOTE(review): the signature and the device-reset statements are
 * missing from this revision; only the success return remains.
 */
static int
{
return (DDI_SUCCESS);
}
/*
 * Loadable-module entry point.
 * NOTE(review): the blkdev registration and mod_install(&modlinkage)
 * calls appear to be missing from this revision; only the shell that
 * returns rv remains.
 */
int
_init(void)
{
int rv;
}
return (rv);
}
/*
 * Loadable-module unload entry point.
 * NOTE(review): the mod_remove(&modlinkage) call and blkdev
 * deregistration appear to be missing from this revision.
 */
int
_fini(void)
{
int rv;
}
return (rv);
}
/*
 * NOTE(review): presumably _info(9E), i.e. `_info(struct modinfo *)'
 * returning mod_info(&modlinkage, ...); the signature line and body
 * are missing from this revision -- confirm against the original
 * source.
 */
int
{
}