/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2013 Nexenta Systems, Inc. All rights reserved.
* Copyright 2012 Alexey Zaytsev <alexey.zaytsev@gmail.com>
*/
/* Based on the NetBSD virtio driver by Minoura Makoto. */
/*
* Copyright (c) 2010 Minoura Makoto.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/modctl.h>
#include <sys/autoconf.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/systm.h>
#include <sys/list.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/sysmacros.h>

#include "virtiovar.h"
#include "virtioreg.h"
/* Round up to a multiple of VIRTIO_PAGE_SIZE, as the ring layout requires. */
#define	VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1)) &	\
				~(VIRTIO_PAGE_SIZE-1))
void
virtio_set_status(struct virtio_softc *sc, unsigned int status)
{
	int old = 0;

	if (status != 0) {
		old = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_DEVICE_STATUS));
	}

	ddi_put8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
	    VIRTIO_CONFIG_DEVICE_STATUS), status | old);
}
/*
 * Negotiate features, save the result in sc->sc_features
 */
uint32_t
virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
{
	uint32_t host_features;
	uint32_t features;

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	host_features = ddi_get32(sc->sc_ioh,
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));

	features = host_features & guest_features;
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ddi_put32(sc->sc_ioh,
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_GUEST_FEATURES),
	    features);

	sc->sc_features = features;

	return (host_features);
}
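/*
 * Usage sketch (illustrative, not part of this module): a client driver
 * ORs the generic feature bits with its device-specific ones and then
 * checks what the host actually accepted.  MYDEV_WANTED_FEATURES and
 * MYDEV_FEATURE_REQUIRED are hypothetical client-side masks:
 *
 *	uint32_t host_features;
 *
 *	host_features = virtio_negotiate_features(sc,
 *	    VIRTIO_F_RING_INDIRECT_DESC | MYDEV_WANTED_FEATURES);
 *	if (!virtio_has_feature(sc, MYDEV_FEATURE_REQUIRED))
 *		return (DDI_FAILURE);
 */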
size_t
virtio_show_features(uint32_t features, char *buf, size_t len)
{
	char *orig_buf = buf;
	char *bufend = buf + len;

	/* LINTED E_PTRDIFF_OVERFLOW */
	buf += snprintf(buf, bufend - buf, "Generic ( ");
	if (features & VIRTIO_F_RING_INDIRECT_DESC)
		/* LINTED E_PTRDIFF_OVERFLOW */
		buf += snprintf(buf, bufend - buf, "INDIRECT_DESC ");

	/* LINTED E_PTRDIFF_OVERFLOW */
	buf += snprintf(buf, bufend - buf, ") ");

	/* LINTED E_PTRDIFF_OVERFLOW */
	return (buf - orig_buf);
}
boolean_t
virtio_has_feature(struct virtio_softc *sc, uint32_t feature)
{
	return ((sc->sc_features & feature) != 0);
}
/*
 * Device configuration registers.
 */
uint8_t
virtio_read_device_config_1(struct virtio_softc *sc, unsigned int index)
{
	ASSERT(sc->sc_config_offset);
	return (ddi_get8(sc->sc_ioh,
	    (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index)));
}

uint16_t
virtio_read_device_config_2(struct virtio_softc *sc, unsigned int index)
{
	ASSERT(sc->sc_config_offset);
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	return (ddi_get16(sc->sc_ioh,
	    (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index)));
}

uint32_t
virtio_read_device_config_4(struct virtio_softc *sc, unsigned int index)
{
	ASSERT(sc->sc_config_offset);
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	return (ddi_get32(sc->sc_ioh,
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index)));
}

uint64_t
virtio_read_device_config_8(struct virtio_softc *sc, unsigned int index)
{
	uint64_t r;

	ASSERT(sc->sc_config_offset);
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	r = ddi_get32(sc->sc_ioh,
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
	    index + sizeof (uint32_t)));

	r <<= 32;

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	r += ddi_get32(sc->sc_ioh,
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
	return (r);
}
void
virtio_write_device_config_1(struct virtio_softc *sc, unsigned int index,
    uint8_t value)
{
	ASSERT(sc->sc_config_offset);
	ddi_put8(sc->sc_ioh,
	    (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
}

void
virtio_write_device_config_2(struct virtio_softc *sc, unsigned int index,
    uint16_t value)
{
	ASSERT(sc->sc_config_offset);
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ddi_put16(sc->sc_ioh,
	    (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index),
	    value);
}

void
virtio_write_device_config_4(struct virtio_softc *sc, unsigned int index,
    uint32_t value)
{
	ASSERT(sc->sc_config_offset);
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ddi_put32(sc->sc_ioh,
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index),
	    value);
}

void
virtio_write_device_config_8(struct virtio_softc *sc, unsigned int index,
    uint64_t value)
{
	ASSERT(sc->sc_config_offset);
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ddi_put32(sc->sc_ioh,
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index),
	    value & 0xFFFFFFFF);
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ddi_put32(sc->sc_ioh,
	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
	    index + sizeof (uint32_t)), value >> 32);
}
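/*
 * Usage sketch (illustrative): device-type drivers use these accessors
 * for their device-specific configuration space.  The offset below is
 * hypothetical; the real value comes from the device's specification:
 *
 *	uint64_t capacity;
 *
 *	capacity = virtio_read_device_config_8(sc,
 *	    MYDEV_CONFIG_CAPACITY_OFFSET);
 */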
/*
 * Start/stop vq interrupt.  No guarantee.
 */
void
virtio_stop_vq_intr(struct virtqueue *vq)
{
	vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

void
virtio_start_vq_intr(struct virtqueue *vq)
{
	vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
}
static ddi_dma_attr_t virtio_vq_dma_attr = {
	DMA_ATTR_V0,		/* Version number */
	0,			/* low address */
	0x00000FFFFFFFFFFF,	/* high address. Has to fit into 32 bits */
				/* after page-shifting */
	0xFFFFFFFF,		/* counter register max */
	VIRTIO_PAGE_SIZE,	/* page alignment required */
	0x3F,			/* burst sizes: 1 - 32 */
	0x1,			/* minimum transfer size */
	0xFFFFFFFF,		/* max transfer size */
	0xFFFFFFFF,		/* address register max */
	1,			/* no scatter-gather */
	1,			/* device operates on bytes */
	0,			/* attr flag: set to 0 */
};
static ddi_dma_attr_t virtio_vq_indirect_dma_attr = {
	DMA_ATTR_V0,		/* Version number */
	0,			/* low address */
	0xFFFFFFFFFFFFFFFF,	/* high address */
	0xFFFFFFFF,		/* counter register max */
	1,			/* No specific alignment */
	0x3F,			/* burst sizes: 1 - 32 */
	0x1,			/* minimum transfer size */
	0xFFFFFFFF,		/* max transfer size */
	0xFFFFFFFF,		/* address register max */
	1,			/* no scatter-gather */
	1,			/* device operates on bytes */
	0,			/* attr flag: set to 0 */
};
/* Same for direct and indirect descriptors. */
static ddi_device_acc_attr_t virtio_vq_devattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STORECACHING_OK_ACC,
	DDI_DEFAULT_ACC
};
static void
virtio_free_indirect(struct vq_entry *entry)
{
	(void) ddi_dma_unbind_handle(entry->qe_indirect_dma_handle);
	ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
	ddi_dma_free_handle(&entry->qe_indirect_dma_handle);

	entry->qe_indirect_descs = NULL;
}
static int
virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
{
	int allocsize, num;
	size_t len;
	unsigned int ncookies;
	int ret;

	num = entry->qe_queue->vq_indirect_num;
	ASSERT(num > 1);

	allocsize = sizeof (struct vring_desc) * num;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
	    DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma handle for indirect descriptors, "
		    "entry %d, vq %d", entry->qe_index,
		    entry->qe_queue->vq_index);
		goto out_alloc_handle;
	}

	ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle, allocsize,
	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    (caddr_t *)&entry->qe_indirect_descs, &len,
	    &entry->qe_indirect_dma_acch);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma memory for indirect descriptors, "
		    "entry %d, vq %d", entry->qe_index,
		    entry->qe_queue->vq_index);
		goto out_alloc;
	}

	ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
	    (caddr_t)entry->qe_indirect_descs, len,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &entry->qe_indirect_dma_cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to bind dma memory for indirect descriptors, "
		    "entry %d, vq %d", entry->qe_index,
		    entry->qe_queue->vq_index);
		goto out_bind;
	}

	/* We asked for a single segment */
	ASSERT(ncookies == 1);

	return (0);

out_bind:
	ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
out_alloc:
	ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
out_alloc_handle:
	return (ret);
}
/*
 * Initialize the vq structure.
 */
static int
virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq)
{
	int ret;
	uint16_t i;
	int vq_size = vq->vq_num;
	int indirect_num = vq->vq_indirect_num;

	/* free slot management */
	list_create(&vq->vq_freelist, sizeof (struct vq_entry),
	    offsetof(struct vq_entry, qe_list));

	for (i = 0; i < vq_size; i++) {
		struct vq_entry *entry = &vq->vq_entries[i];
		list_insert_tail(&vq->vq_freelist, entry);
		entry->qe_index = i;
		entry->qe_desc = &vq->vq_descs[i];
		entry->qe_queue = vq;

		if (indirect_num) {
			ret = virtio_alloc_indirect(sc, entry);
			if (ret)
				goto out_indirect;
		}
	}

	mutex_init(&vq->vq_freelist_lock, "virtio-freelist", MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_prio));
	mutex_init(&vq->vq_avail_lock, "virtio-avail", MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_prio));
	mutex_init(&vq->vq_used_lock, "virtio-used", MUTEX_DRIVER,
	    DDI_INTR_PRI(sc->sc_intr_prio));

	return (0);

out_indirect:
	for (i = 0; i < vq_size; i++) {
		struct vq_entry *entry = &vq->vq_entries[i];
		if (entry->qe_indirect_descs)
			virtio_free_indirect(entry);
	}

	return (ret);
}
/*
 * Allocate/free a vq.
 */
struct virtqueue *
virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
    unsigned int indirect_num, const char *name)
{
	int vq_size, allocsize1, allocsize2, allocsize;
	int ret;
	unsigned int ncookies;
	size_t len;
	struct virtqueue *vq;

	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	vq_size = ddi_get16(sc->sc_ioh,
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
	if (vq_size == 0) {
		dev_err(sc->sc_dev, CE_WARN,
		    "virtqueue doesn't exist, index %d for %s", index, name);
		goto out;
	}

	vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);

	/* size 0 => use native vq size, good for receive queues. */
	if (size)
		vq_size = MIN(vq_size, size);

	/* allocsize1: descriptor table + avail ring + pad */
	allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
	    sizeof (uint16_t) * (2 + vq_size));
	/* allocsize2: used ring + pad */
	allocsize2 = VIRTQUEUE_ALIGN(sizeof (uint16_t) * 2 +
	    sizeof (struct vring_used_elem) * vq_size);

	allocsize = allocsize1 + allocsize2;

	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma handle for vq %d", index);
		goto out_alloc_handle;
	}

	ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate dma memory for vq %d", index);
		goto out_alloc;
	}

	ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
	    (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to bind dma memory for vq %d", index);
		goto out_bind;
	}

	/* We asked for a single segment */
	ASSERT(ncookies == 1);
	/* and page-aligned buffers. */
	ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);

	(void) memset(vq->vq_vaddr, 0, allocsize);

	/* Make sure all zeros hit the buffer before we point the host to it */
	membar_producer();

	/* set the vq address */
	ddi_put32(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
	    (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));

	/* remember addresses and offsets for later use */
	vq->vq_owner = sc;
	vq->vq_num = vq_size;
	vq->vq_index = index;
	vq->vq_descs = vq->vq_vaddr;
	vq->vq_availoffset = sizeof (struct vring_desc) * vq_size;
	vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
	vq->vq_usedoffset = allocsize1;
	vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);

	ASSERT(indirect_num == 0 ||
	    virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
	vq->vq_indirect_num = indirect_num;

	/* free slot management */
	vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
	    KM_SLEEP);

	ret = virtio_init_vq(sc, vq);
	if (ret)
		goto out_init;

	dev_debug(sc->sc_dev, CE_NOTE,
	    "Allocated %d entries for vq %d:%s (%d indirect descs)",
	    vq_size, index, name, indirect_num * vq_size);

	return (vq);

out_init:
	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
out_bind:
	ddi_dma_mem_free(&vq->vq_dma_acch);
out_alloc:
	ddi_dma_free_handle(&vq->vq_dma_handle);
out_alloc_handle:
	kmem_free(vq, sizeof (struct virtqueue));
out:
	return (NULL);
}
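/*
 * Usage sketch (illustrative): a driver with a receive and a transmit
 * queue might allocate them as below.  "sc_rx_vq"/"sc_tx_vq" are
 * hypothetical client fields; passing size 0 keeps the device's native
 * ring size, which suits receive queues:
 *
 *	sc->sc_rx_vq = virtio_alloc_vq(sc, 0, 0, MYDEV_INDIRECT_MAX, "rx");
 *	sc->sc_tx_vq = virtio_alloc_vq(sc, 1, 0, 0, "tx");
 *	if (sc->sc_rx_vq == NULL || sc->sc_tx_vq == NULL)
 *		goto exit_alloc;	(virtio_free_vq() whatever succeeded)
 */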
void
virtio_free_vq(struct virtqueue *vq)
{
	struct virtio_softc *sc = vq->vq_owner;
	int i;

	/* tell device that there's no virtqueue any longer */
	ddi_put16(sc->sc_ioh,
	    /* LINTED E_BAD_PTR_CAST_ALIGN */
	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
	    vq->vq_index);
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ddi_put32(sc->sc_ioh,
	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);

	/* Free the indirect descriptors, if any. */
	for (i = 0; i < vq->vq_num; i++) {
		struct vq_entry *entry = &vq->vq_entries[i];
		if (entry->qe_indirect_descs)
			virtio_free_indirect(entry);
	}

	kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);

	(void) ddi_dma_unbind_handle(vq->vq_dma_handle);
	ddi_dma_mem_free(&vq->vq_dma_acch);
	ddi_dma_free_handle(&vq->vq_dma_handle);

	mutex_destroy(&vq->vq_used_lock);
	mutex_destroy(&vq->vq_avail_lock);
	mutex_destroy(&vq->vq_freelist_lock);

	kmem_free(vq, sizeof (struct virtqueue));
}
/*
 * Free descriptor management.
 */
struct vq_entry *
vq_alloc_entry(struct virtqueue *vq)
{
	struct vq_entry *qe;

	mutex_enter(&vq->vq_freelist_lock);
	if (list_is_empty(&vq->vq_freelist)) {
		mutex_exit(&vq->vq_freelist_lock);
		return (NULL);
	}
	qe = list_remove_head(&vq->vq_freelist);

	ASSERT(vq->vq_used_entries >= 0);
	vq->vq_used_entries++;

	mutex_exit(&vq->vq_freelist_lock);

	qe->qe_next = NULL;
	qe->qe_indirect_next = 0;
	(void) memset(qe->qe_desc, 0, sizeof (struct vring_desc));

	return (qe);
}

void
vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
{
	mutex_enter(&vq->vq_freelist_lock);

	list_insert_head(&vq->vq_freelist, qe);
	vq->vq_used_entries--;
	ASSERT(vq->vq_used_entries >= 0);

	mutex_exit(&vq->vq_freelist_lock);
}
/*
 * We (intentionally) don't have a global vq mutex, so you are
 * responsible for external locking to avoid allocating/freeing any
 * entries before using the returned value. Have fun.
 */
uint_t
vq_num_used(struct virtqueue *vq)
{
	/* vq->vq_freelist_lock would not help here. */
	return (vq->vq_used_entries);
}
static inline void
virtio_ve_set_desc(struct vring_desc *desc, uint64_t paddr, uint32_t len,
    boolean_t write)
{
	desc->addr = paddr;
	desc->len = len;
	desc->next = 0;
	desc->flags = 0;

	/* 'write' - from the driver's point of view */
	if (!write)
		desc->flags = VRING_DESC_F_WRITE;
}
void
virtio_ve_set(struct vq_entry *qe, uint64_t paddr, uint32_t len,
    boolean_t write)
{
	virtio_ve_set_desc(qe->qe_desc, paddr, len, write);
}
unsigned int
virtio_ve_indirect_available(struct vq_entry *qe)
{
	return (qe->qe_queue->vq_indirect_num - qe->qe_indirect_next);
}
void
virtio_ve_add_indirect_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
    boolean_t write)
{
	struct vring_desc *indirect_desc;

	ASSERT(qe->qe_queue->vq_indirect_num > 1);
	ASSERT(qe->qe_indirect_next < qe->qe_queue->vq_indirect_num);

	indirect_desc = &qe->qe_indirect_descs[qe->qe_indirect_next];
	virtio_ve_set_desc(indirect_desc, paddr, len, write);
	qe->qe_indirect_next++;
}
void
virtio_ve_add_cookie(struct vq_entry *qe, ddi_dma_handle_t dma_handle,
    ddi_dma_cookie_t dma_cookie, unsigned int ncookies, boolean_t write)
{
	int i;

	for (i = 0; i < ncookies; i++) {
		virtio_ve_add_indirect_buf(qe, dma_cookie.dmac_laddress,
		    dma_cookie.dmac_size, write);
		ddi_dma_nextcookie(dma_handle, &dma_cookie);
	}
}
void
virtio_sync_vq(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;

	/* Make sure the avail ring update hit the buffer */
	membar_producer();

	vq->vq_avail->idx = vq->vq_avail_idx;

	/* Make sure the avail idx update hits the buffer */
	membar_producer();

	/* Make sure we see the flags update */
	membar_consumer();

	if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY)) {
		ddi_put16(vsc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(vsc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_NOTIFY),
		    vq->vq_index);
	}
}
void
virtio_push_chain(struct vq_entry *qe, boolean_t sync)
{
	struct virtqueue *vq = qe->qe_queue;
	struct vq_entry *head = qe;
	struct vring_desc *desc;
	int idx;

	ASSERT(qe != NULL);

	/*
	 * Bind the descs together, paddr and len should be already
	 * set with virtio_ve_set
	 */
	do {
		/* Bind the indirect descriptors */
		if (qe->qe_indirect_next > 1) {
			uint16_t i = 0;

			/*
			 * Set the pointer/flags to the
			 * first indirect descriptor
			 */
			virtio_ve_set_desc(qe->qe_desc,
			    qe->qe_indirect_dma_cookie.dmac_laddress,
			    sizeof (struct vring_desc) * qe->qe_indirect_next,
			    B_FALSE);
			qe->qe_desc->flags |= VRING_DESC_F_INDIRECT;

			/* For all but the last one, add the next index/flag */
			do {
				desc = &qe->qe_indirect_descs[i];
				i++;

				if (i < qe->qe_indirect_next) {
					desc->next = i;
					desc->flags |= VRING_DESC_F_NEXT;
				}
			} while (i < qe->qe_indirect_next);
		}

		if (qe->qe_next) {
			qe->qe_desc->flags |= VRING_DESC_F_NEXT;
			qe->qe_desc->next = qe->qe_next->qe_index;
		}

		qe = qe->qe_next;
	} while (qe);

	mutex_enter(&vq->vq_avail_lock);
	idx = vq->vq_avail_idx & (vq->vq_num - 1);
	vq->vq_avail->ring[idx] = head->qe_index;
	vq->vq_avail_idx++;

	/* Make sure the bits hit the descriptor(s) */
	membar_producer();

	/* Notify the device, if needed. */
	if (sync)
		virtio_sync_vq(vq);

	mutex_exit(&vq->vq_avail_lock);
}
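/*
 * Usage sketch (illustrative): queueing a single driver-written
 * ("device reads") buffer that is already DMA-mapped; "paddr" and "len"
 * would come from a DMA cookie owned by the client driver:
 *
 *	struct vq_entry *ve = vq_alloc_entry(vq);
 *	if (ve == NULL)
 *		return (EBUSY);		(the ring is exhausted)
 *	virtio_ve_set(ve, paddr, len, B_TRUE);
 *	virtio_push_chain(ve, B_TRUE);
 */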
/*
 * Get a chain of descriptors from the used ring, if one is available.
 */
struct vq_entry *
virtio_pull_chain(struct virtqueue *vq, uint32_t *len)
{
	struct vq_entry *head;
	int slot;
	int usedidx;

	mutex_enter(&vq->vq_used_lock);

	/* No used entries? Bye. */
	if (vq->vq_used_idx == vq->vq_used->idx) {
		mutex_exit(&vq->vq_used_lock);
		return (NULL);
	}

	usedidx = vq->vq_used_idx;
	vq->vq_used_idx++;
	mutex_exit(&vq->vq_used_lock);

	usedidx %= vq->vq_num;

	/* Make sure we do the next step _after_ checking the idx. */
	membar_consumer();

	slot = vq->vq_used->ring[usedidx].id;
	*len = vq->vq_used->ring[usedidx].len;

	head = &vq->vq_entries[slot];

	return (head);
}
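/*
 * Usage sketch (illustrative): a vq interrupt handler normally drains
 * everything the device has completed:
 *
 *	uint32_t len;
 *	struct vq_entry *ve;
 *
 *	while ((ve = virtio_pull_chain(vq, &len)) != NULL) {
 *		... reclaim the buffer(s) tied to ve; "len" is the
 *		    number of bytes the device wrote ...
 *		virtio_free_chain(ve);
 *	}
 */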
void
virtio_free_chain(struct vq_entry *qe)
{
	struct vq_entry *tmp;
	struct virtqueue *vq = qe->qe_queue;

	ASSERT(qe != NULL);

	do {
		ASSERT(qe->qe_queue == vq);
		tmp = qe->qe_next;
		vq_free_entry(vq, qe);
		qe = tmp;
	} while (tmp != NULL);
}
void
virtio_ventry_stick(struct vq_entry *first, struct vq_entry *second)
{
	first->qe_next = second;
}
static int
virtio_register_msi(struct virtio_softc *sc,
    struct virtio_int_handler *config_handler,
    struct virtio_int_handler vq_handlers[], int intr_types)
{
	int count, actual;
	int int_type;
	int i;
	int handler_count;
	int ret;

	/* If both MSI and MSI-x are reported, prefer MSI-x. */
	int_type = DDI_INTR_TYPE_MSI;
	if (intr_types & DDI_INTR_TYPE_MSIX)
		int_type = DDI_INTR_TYPE_MSIX;

	/* Walk the handler table to get the number of handlers. */
	for (handler_count = 0;
	    vq_handlers && vq_handlers[handler_count].vh_func;
	    handler_count++)
		;

	/* +1 if there is a config change handler. */
	if (config_handler != NULL)
		handler_count++;

	/* Number of MSIs supported by the device. */
	ret = ddi_intr_get_nintrs(sc->sc_dev, int_type, &count);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_nintrs failed");
		return (ret);
	}

	/*
	 * Those who try to register more handlers than the device
	 * supports shall suffer.
	 */
	ASSERT(handler_count <= count);

	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t) *
	    handler_count, KM_SLEEP);

	ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
	    handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d",
		    ret);
		goto out_msi_alloc;
	}

	if (actual != handler_count) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Not enough MSI available: need %d, available %d",
		    handler_count, actual);
		goto out_msi_available;
	}

	sc->sc_intr_num = handler_count;
	sc->sc_intr_config = B_FALSE;
	if (config_handler != NULL) {
		sc->sc_intr_config = B_TRUE;
	}

	/* Assume they are all same priority */
	ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
		goto out_msi_prio;
	}

	/* Add the vq handlers */
	for (i = 0; vq_handlers[i].vh_func; i++) {
		ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
		    vq_handlers[i].vh_func, sc, vq_handlers[i].vh_priv);
		if (ret != DDI_SUCCESS) {
			dev_err(sc->sc_dev, CE_WARN,
			    "ddi_intr_add_handler failed");
			/* Remove the handlers that succeeded. */
			while (--i >= 0) {
				(void) ddi_intr_remove_handler(
				    sc->sc_intr_htable[i]);
			}
			goto out_add_handlers;
		}
	}

	/* Don't forget the config handler */
	if (config_handler != NULL) {
		ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
		    config_handler->vh_func, sc, config_handler->vh_priv);
		if (ret != DDI_SUCCESS) {
			dev_err(sc->sc_dev, CE_WARN,
			    "ddi_intr_add_handler failed");
			/* Remove the handlers that succeeded. */
			while (--i >= 0) {
				(void) ddi_intr_remove_handler(
				    sc->sc_intr_htable[i]);
			}
			goto out_add_handlers;
		}
	}

	/* Cache the interrupt capabilities for enable/disable. */
	ret = ddi_intr_get_cap(sc->sc_intr_htable[0], &sc->sc_intr_cap);
	if (ret == DDI_SUCCESS) {
		sc->sc_int_type = int_type;
		return (DDI_SUCCESS);
	}

out_add_handlers:
out_msi_prio:
out_msi_available:
	for (i = 0; i < actual; i++)
		(void) ddi_intr_free(sc->sc_intr_htable[i]);
out_msi_alloc:
	kmem_free(sc->sc_intr_htable,
	    sizeof (ddi_intr_handle_t) * handler_count);

	return (ret);
}
struct virtio_handler_container {
	int nhandlers;
	struct virtio_int_handler config_handler;
	struct virtio_int_handler vq_handlers[];
};

static uint_t
virtio_intx_dispatch(caddr_t arg1, caddr_t arg2)
{
	struct virtio_softc *sc = (void *)arg1;
	struct virtio_handler_container *vhc = (void *)arg2;
	uint8_t isr_status;
	int i;

	isr_status = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
	    VIRTIO_CONFIG_ISR_STATUS));

	if (!isr_status)
		return (DDI_INTR_UNCLAIMED);

	if ((isr_status & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
	    vhc->config_handler.vh_func) {
		vhc->config_handler.vh_func((void *)sc,
		    vhc->config_handler.vh_priv);
	}

	/* Notify all handlers */
	for (i = 0; i < vhc->nhandlers; i++) {
		vhc->vq_handlers[i].vh_func((void *)sc,
		    vhc->vq_handlers[i].vh_priv);
	}

	return (DDI_INTR_CLAIMED);
}
/*
 * config_handler and vq_handlers may be allocated on stack.
 * Take precautions not to lose them.
 */
static int
virtio_register_intx(struct virtio_softc *sc,
    struct virtio_int_handler *config_handler,
    struct virtio_int_handler vq_handlers[])
{
	int vq_handler_count;
	int config_handler_count = 0;
	int actual;
	struct virtio_handler_container *vhc;
	int ret;

	/* Walk the handler table to get the number of handlers. */
	for (vq_handler_count = 0;
	    vq_handlers && vq_handlers[vq_handler_count].vh_func;
	    vq_handler_count++)
		;

	if (config_handler != NULL)
		config_handler_count = 1;

	vhc = kmem_zalloc(sizeof (struct virtio_handler_container) +
	    sizeof (struct virtio_int_handler) * vq_handler_count, KM_SLEEP);

	vhc->nhandlers = vq_handler_count;
	(void) memcpy(vhc->vq_handlers, vq_handlers,
	    sizeof (struct virtio_int_handler) * vq_handler_count);

	if (config_handler != NULL) {
		(void) memcpy(&vhc->config_handler, config_handler,
		    sizeof (struct virtio_int_handler));
	}

	/* Just a single entry for a single interrupt. */
	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);

	ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable,
	    DDI_INTR_TYPE_FIXED, 0, 1, &actual, DDI_INTR_ALLOC_NORMAL);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to allocate a fixed interrupt: %d", ret);
		goto out_int_alloc;
	}

	ASSERT(actual == 1);
	sc->sc_intr_num = 1;

	ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
		goto out_prio;
	}

	ret = ddi_intr_add_handler(sc->sc_intr_htable[0],
	    virtio_intx_dispatch, sc, vhc);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN, "ddi_intr_add_handler failed");
		goto out_add_handlers;
	}

	sc->sc_int_type = DDI_INTR_TYPE_FIXED;

	return (DDI_SUCCESS);

out_add_handlers:
out_prio:
	(void) ddi_intr_free(sc->sc_intr_htable[0]);
out_int_alloc:
	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
	kmem_free(vhc, sizeof (struct virtio_handler_container) +
	    sizeof (struct virtio_int_handler) * vq_handler_count);

	return (ret);
}
/*
 * We find out if we support MSI during this, and the register layout
 * depends on the MSI (doh). Don't access the device-specific bits in
 * BAR 0 before calling it!
 */
int
virtio_register_ints(struct virtio_softc *sc,
    struct virtio_int_handler *config_handler,
    struct virtio_int_handler vq_handlers[])
{
	int ret;
	int intr_types;

	/* Default offset until MSI-X is enabled, if ever. */
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;

	/* Determine which types of interrupts are supported */
	ret = ddi_intr_get_supported_types(sc->sc_dev, &intr_types);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN, "Can't get supported int types");
		goto out_inttype;
	}

	/* If we have msi, let's use them. */
	if (intr_types & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
		ret = virtio_register_msi(sc, config_handler,
		    vq_handlers, intr_types);
		if (!ret)
			return (0);
	}

	/* Fall back to old-fashioned interrupts. */
	if (intr_types & DDI_INTR_TYPE_FIXED) {
		dev_debug(sc->sc_dev, CE_WARN,
		    "Using legacy interrupts");

		return (virtio_register_intx(sc, config_handler, vq_handlers));
	}

	dev_err(sc->sc_dev, CE_WARN,
	    "MSI failed and fixed interrupts not supported. Giving up.");
	ret = DDI_FAILURE;

out_inttype:
	return (ret);
}
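/*
 * Usage sketch (illustrative): the vq handler table is terminated by a
 * null vh_func.  "mydev_rx_intr", "mydev_tx_intr" and
 * "mydev_config_change" are hypothetical client functions with the
 * ddi_intr_handler_t signature, and the vh_priv values are whatever the
 * client wants passed back to them:
 *
 *	struct virtio_int_handler mydev_vq_h[] = {
 *		{ mydev_rx_intr, sc->sc_rx_vq },
 *		{ mydev_tx_intr, sc->sc_tx_vq },
 *		{ NULL },
 *	};
 *	struct virtio_int_handler mydev_config_h = {
 *		mydev_config_change, sc
 *	};
 *
 *	ret = virtio_register_ints(sc, &mydev_config_h, mydev_vq_h);
 */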
static int
virtio_enable_msi(struct virtio_softc *sc)
{
	int ret, i;
	int vq_handler_count = sc->sc_intr_num;

	/* Number of handlers, not counting the config. */
	if (sc->sc_intr_config)
		vq_handler_count--;

	/* Enable the interrupts. Either the whole block, or one by one. */
	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
		ret = ddi_intr_block_enable(sc->sc_intr_htable,
		    sc->sc_intr_num);
		if (ret != DDI_SUCCESS) {
			dev_err(sc->sc_dev, CE_WARN,
			    "Failed to enable MSI, falling back to INTx");
			goto out_enable;
		}
	} else {
		for (i = 0; i < sc->sc_intr_num; i++) {
			ret = ddi_intr_enable(sc->sc_intr_htable[i]);
			if (ret != DDI_SUCCESS) {
				dev_err(sc->sc_dev, CE_WARN,
				    "Failed to enable MSI %d, "
				    "falling back to INTx", i);

				while (--i >= 0) {
					(void) ddi_intr_disable(
					    sc->sc_intr_htable[i]);
				}
				goto out_enable;
			}
		}
	}

	/* Bind the allocated MSI to the queues and config */
	for (i = 0; i < vq_handler_count; i++) {
		int check;

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_SELECT), i);

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_VECTOR), i);

		check = ddi_get16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_VECTOR));
		if (check != i) {
			dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
			    "for VQ %d, MSI %d. Check = %x", i, i, check);
			ret = DDI_FAILURE;
			goto out_bind;
		}
	}

	if (sc->sc_intr_config) {
		int check;

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_CONFIG_VECTOR), i);

		check = ddi_get16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_CONFIG_VECTOR));
		if (check != i) {
			dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
			    "for Config updates, MSI %d", i);
			ret = DDI_FAILURE;
			goto out_bind;
		}
	}

	/* Configuration offset depends on whether MSI-X is used. */
	if (sc->sc_int_type == DDI_INTR_TYPE_MSIX)
		sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
	else
		sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;

	return (DDI_SUCCESS);

out_bind:
	/* Unbind the vqs */
	for (i = 0; i < vq_handler_count - 1; i++) {
		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_SELECT), i);

		ddi_put16(sc->sc_ioh,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_QUEUE_VECTOR),
		    VIRTIO_MSI_NO_VECTOR);
	}
	/* And the config */
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
	    VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR);

	/* Disable the interrupts. Either the whole block, or one by one. */
	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
		ret = ddi_intr_block_disable(sc->sc_intr_htable,
		    sc->sc_intr_num);
		if (ret != DDI_SUCCESS) {
			dev_err(sc->sc_dev, CE_WARN,
			    "Failed to disable MSIs, won't be able to "
			    "reuse next time");
		}
	} else {
		for (i = 0; i < sc->sc_intr_num; i++) {
			ret = ddi_intr_disable(sc->sc_intr_htable[i]);
			if (ret != DDI_SUCCESS) {
				dev_err(sc->sc_dev, CE_WARN,
				    "Failed to disable interrupt %d, "
				    "won't be able to reuse", i);
			}
		}
	}

	ret = DDI_FAILURE;

out_enable:
	return (ret);
}
static int
virtio_enable_intx(struct virtio_softc *sc)
{
	int ret;

	ret = ddi_intr_enable(sc->sc_intr_htable[0]);
	if (ret != DDI_SUCCESS) {
		dev_err(sc->sc_dev, CE_WARN,
		    "Failed to enable interrupt: %d", ret);
	}

	return (ret);
}
/*
 * We can't enable/disable individual handlers in the INTx case so do
 * the whole bunch even in the msi case.
 */
int
virtio_enable_ints(struct virtio_softc *sc)
{
	/* See if we are using MSI. */
	if (sc->sc_int_type == DDI_INTR_TYPE_MSIX ||
	    sc->sc_int_type == DDI_INTR_TYPE_MSI)
		return (virtio_enable_msi(sc));

	ASSERT(sc->sc_int_type == DDI_INTR_TYPE_FIXED);
	return (virtio_enable_intx(sc));
}
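/*
 * Usage sketch (illustrative): typical attach-time ordering in a client
 * driver.  Interrupts are registered first, because the config offset
 * depends on the interrupt type; the status constants come from
 * virtioreg.h, and "mydev_config_h"/"mydev_vq_h" are the hypothetical
 * handler tables from the example above virtio_register_ints():
 *
 *	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_RESET);
 *	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
 *	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
 *	(void) virtio_register_ints(sc, &mydev_config_h, mydev_vq_h);
 *	... negotiate features, allocate the vqs ...
 *	(void) virtio_enable_ints(sc);
 *	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
 */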
void
virtio_release_ints(struct virtio_softc *sc)
{
	int i;
	int ret;

	/* We were running with MSI, unbind them. */
	if (sc->sc_int_type == DDI_INTR_TYPE_MSIX ||
	    sc->sc_int_type == DDI_INTR_TYPE_MSI) {
		/* Unbind all vqs */
		for (i = 0; i < sc->sc_nvqs; i++) {
			ddi_put16(sc->sc_ioh,
			    /* LINTED E_BAD_PTR_CAST_ALIGN */
			    (uint16_t *)(sc->sc_io_addr +
			    VIRTIO_CONFIG_QUEUE_SELECT), i);

			ddi_put16(sc->sc_ioh,
			    /* LINTED E_BAD_PTR_CAST_ALIGN */
			    (uint16_t *)(sc->sc_io_addr +
			    VIRTIO_CONFIG_QUEUE_VECTOR),
			    VIRTIO_MSI_NO_VECTOR);
		}
		/* And the config */
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
		    VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR);
	}

	/* Disable the interrupts. Either the whole block, or one by one. */
	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
		ret = ddi_intr_block_disable(sc->sc_intr_htable,
		    sc->sc_intr_num);
		if (ret != DDI_SUCCESS) {
			dev_err(sc->sc_dev, CE_WARN,
			    "Failed to disable MSIs, won't be able to "
			    "reuse next time");
		}
	} else {
		for (i = 0; i < sc->sc_intr_num; i++) {
			ret = ddi_intr_disable(sc->sc_intr_htable[i]);
			if (ret != DDI_SUCCESS) {
				dev_err(sc->sc_dev, CE_WARN,
				    "Failed to disable interrupt %d, "
				    "won't be able to reuse", i);
			}
		}
	}

	for (i = 0; i < sc->sc_intr_num; i++) {
		(void) ddi_intr_remove_handler(sc->sc_intr_htable[i]);
	}

	for (i = 0; i < sc->sc_intr_num; i++)
		(void) ddi_intr_free(sc->sc_intr_htable[i]);

	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t) *
	    sc->sc_intr_num);

	/* After disabling interrupts, the config offset is non-MSI-X. */
	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
}
/*
 * Module linkage information for the kernel.
 */
static struct modlmisc modlmisc = {
	&mod_miscops,		/* Type of module */
	"VirtIO common library module",
};

static struct modlinkage modlinkage = {
	MODREV_1,
	{
		(void *)&modlmisc,
		NULL
	}
};
int
_init(void)
{
return (mod_install(&modlinkage));
}
int
_fini(void)
{
return (mod_remove(&modlinkage));
}
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}