vnet.c revision 07d06da50d310a325b457d6330165aebab1e0064
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/ethernet.h>
#include <sys/mac_provider.h>
#include <sys/mac_client.h>
#include <sys/mac_client_priv.h>
#include <sys/mac_ether.h>
#include <sys/vnet_mailbox.h>
#include <sys/vnet_common.h>
/*
* Function prototypes.
*/
/* DDI entrypoints */
/* MAC entrypoints */
static int vnet_m_start(void *);
static void vnet_m_stop(void *);
static int vnet_m_promisc(void *, boolean_t);
static int vnet_m_unicst(void *, const uint8_t *);
#ifdef VNET_IOC_DEBUG
#endif
static int vnet_ring_enable_intr(void *arg);
static int vnet_ring_disable_intr(void *arg);
/* vnet internal functions */
static int vnet_mac_register(vnet_t *);
static int vnet_hio_start(void *);
static void vnet_hio_stop(void *);
/* Forwarding database (FDB) routines */
static void vnet_res_start_task(void *arg);
static void vnet_res_start_task(void *arg);
/* Exported to vnet_gen */
vnet_res_t *vresp);
/* Exported to vnet_dds */
/* Externs that are imported from vnet_gen */
extern int vgen_init_mdeg(void *arg);
extern void vgen_uninit(void *arg);
extern void vgen_mod_init(void);
extern int vgen_mod_cleanup(void);
extern void vgen_mod_fini(void);
extern int vgen_enable_intr(void *arg);
extern int vgen_disable_intr(void *arg);
/* Externs that are imported from vnet_dds */
extern void vdds_mod_init(void);
extern void vdds_mod_fini(void);
extern void vdds_cleanup_hybrid_res(void *arg);
/* Externs imported from mac_impl */
#define DRV_NAME "vnet"
/*
 * Atomically take a reference on a forwarding database (fdb) entry.
 * Wrapped in do { } while (0) so the expansion is a single statement:
 * the bare { } form leaves a stray ';' after the block at the call
 * site, which breaks unbraced if/else usage (CERT PRE10-C).
 */
#define	VNET_FDBE_REFHOLD(p)			\
	do {					\
		atomic_inc_32(&(p)->refcnt);	\
	} while (0)
/*
 * Atomically drop a reference on a forwarding database (fdb) entry.
 * do { } while (0) keeps the macro a single statement so it composes
 * safely with unbraced if/else (CERT PRE10-C); the brace-only form
 * leaves a dangling ';' after expansion.
 */
#define	VNET_FDBE_REFRELE(p)			\
	do {					\
		atomic_dec_32(&(p)->refcnt);	\
	} while (0)
#ifdef VNET_IOC_DEBUG
#else
#define VNET_M_CALLBACK_FLAGS (MC_GETCAPAB)
#endif
static mac_callbacks_t vnet_m_callbacks = {
NULL, /* m_unicst entry must be NULL while rx rings are exposed */
NULL, /* m_tx entry must be NULL while tx rings are exposed */
};
/*
 * MAC callbacks table for the Hybrid I/O resource.
 * NOTE(review): the initializers visible here are all 0/NULL; the real
 * callback entries (mc_start/mc_stop etc.) appear to have been elided in
 * this copy — confirm against the complete source before relying on it.
 */
static mac_callbacks_t vnet_hio_res_callbacks = {
	0,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
};
/*
* Linked list of "vnet_t" structures - one per instance.
*/
/* Tunables */
/* Configure tx serialization in mac layer for the vnet device */
/*
* Set this to non-zero to enable additional internal receive buffer pools
* based on the MTU of the device for better performance at the cost of more
* memory consumption. This is turned off by default, to use allocb(9F) for
* receive buffer allocations of sizes > 2K.
*/
/* # of chains in fdb hash table */
/* Internal tunables */
/*
* Default vlan id. This is only used internally when the "default-vlan-id"
* property is not present in the MD device node. Therefore, this should not be
* used as a tunable; if this value is changed, the corresponding variable
* should be updated to the same value in vsw and also other vnets connected to
* the same vsw.
*/
/* delay in usec to wait for all references on a fdb entry to be dropped */
/* All-ones Ethernet broadcast address (ff:ff:ff:ff:ff:ff). */
static struct ether_addr etherbroadcastaddr = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
/* mac_open() retry delay in usec */
/* max # of mac_open() retries */
/*
* Property names
*/
static char macaddr_propname[] = "local-mac-address";
/*
* This is the string displayed by modinfo(1m).
*/
static char vnet_ident[] = "vnet driver";
extern struct mod_ops mod_driverops;
/*
 * Character/block driver entry points. A GLDv3 network driver performs
 * all real I/O through the MAC layer, so every entry here is a stub
 * (nulldev/nodev); only cb_prop_op and the D_MP flag carry meaning.
 * NOTE(review): some trailing cb_ops fields (e.g. cb_rev) are not
 * visible in this copy — confirm against the complete source.
 */
static struct cb_ops cb_vnetops = {
	nulldev, /* cb_open */
	nulldev, /* cb_close */
	nodev, /* cb_strategy */
	nodev, /* cb_print */
	nodev, /* cb_dump */
	nodev, /* cb_read */
	nodev, /* cb_write */
	nodev, /* cb_ioctl */
	nodev, /* cb_devmap */
	nodev, /* cb_mmap */
	nodev, /* cb_segmap */
	nochpoll, /* cb_chpoll */
	ddi_prop_op, /* cb_prop_op */
	NULL, /* cb_stream */
	(int)(D_MP) /* cb_flag */
};
DEVO_REV, /* devo_rev */
0, /* devo_refcnt */
NULL, /* devo_getinfo */
nulldev, /* devo_identify */
nulldev, /* devo_probe */
vnetattach, /* devo_attach */
vnetdetach, /* devo_detach */
nodev, /* devo_reset */
&cb_vnetops, /* devo_cb_ops */
NULL, /* devo_power */
ddi_quiesce_not_supported, /* devo_quiesce */
};
&mod_driverops, /* Type of module. This one is a driver */
vnet_ident, /* ID string */
&vnetops /* driver specific ops */
};
static struct modlinkage modlinkage = {
};
#ifdef DEBUG
/*
* Print debug messages - set to 0xf to enable all msgs
*/
int vnet_dbglevel = 0x8;
static void
{
char buf[512];
} else {
}
}
#endif
/* _init(9E): initialize the loadable module */
int
_init(void)
{
	int status;
	/*
	 * NOTE(review): 'status' is tested here with no visible prior
	 * assignment — the module-installation call that sets it appears
	 * to have been elided from this copy. As written this reads an
	 * uninitialized variable (undefined behavior); confirm against
	 * the complete source.
	 */
	if (status != 0) {
	}
	return (status);
}
/* _fini(9E): prepare the module for unloading. */
int
_fini(void)
{
int status;
status = vgen_mod_cleanup();
if (status != 0)
return (status);
if (status != 0)
return (status);
return (status);
}
/* _info(9E): return information about the loadable module */
int
{
}
/*
* attach(9E): attach a device to the system.
* called once for each instance of the device on the system.
*/
static int
{
int status;
int instance;
char qname[TASKQ_NAMELEN];
switch (cmd) {
case DDI_ATTACH:
break;
case DDI_RESUME:
case DDI_PM_RESUME:
default:
goto vnet_attach_fail;
}
/* allocate vnet_t and mac_t structures */
if (status != 0) {
goto vnet_attach_fail;
}
/* setup links to vnet_t from both devinfo and mac_t */
/* read the mac address */
if (status != DDI_SUCCESS) {
goto vnet_attach_fail;
}
if (reg == -1) {
goto vnet_attach_fail;
}
TASKQ_DEFAULTPRI, 0)) == NULL) {
instance);
goto vnet_attach_fail;
}
/* add to the list of vnet devices */
vnet_headp = vnetp;
/*
* Initialize the generic vnet plugin which provides communication via
* sun4v LDC (logical domain channel) based resources. This involves 2
* steps; first, vgen_init() is invoked to read the various properties
* of the vnet device from its MD node (including its mtu which is
* needed to mac_register()) and obtain a handle to the vgen layer.
* After mac_register() is done and we have a mac handle, we then
 * invoke vgen_init_mdeg() which registers with the MD event
* generator (mdeg) framework to allow LDC resource notifications.
* Note: this sequence also allows us to report the correct default #
* of pseudo rings (2TX and 3RX) in vnet_m_capab() which gets invoked
* in the context of mac_register(); and avoids conflicting with
* events in vgen.
*/
if (status != DDI_SUCCESS) {
goto vnet_attach_fail;
}
if (status != DDI_SUCCESS) {
goto vnet_attach_fail;
}
if (status != DDI_SUCCESS) {
goto vnet_attach_fail;
}
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
/*
* detach(9E): detach a device from the system.
*/
static int
{
int instance;
goto vnet_detach_fail;
}
switch (cmd) {
case DDI_DETACH:
break;
case DDI_SUSPEND:
case DDI_PM_SUSPEND:
default:
goto vnet_detach_fail;
}
if (vnet_unattach(vnetp) != 0) {
goto vnet_detach_fail;
}
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
/*
* Common routine to handle vnetattach() failure and vnetdetach(). Note that
* the only reason this function could fail is if mac_unregister() fails.
* Otherwise, this function must ensure that all resources are freed and return
* success.
*/
static int
{
/*
* Disable the mac device in the gldv3 subsystem. This can fail, in
* particular if there are still any open references to this mac
* device; in which case we just return failure without continuing to
* detach further.
* If it succeeds, we then invoke vgen_uninit() which should unregister
* any pseudo rings registered with the mac layer. Note we keep the
* AST_macreg flag on, so we can unregister with the mac layer at
* the end of this routine.
*/
if (attach_progress & AST_macreg) {
return (1);
}
}
/*
* Now that we have disabled the device, we must finish all other steps
* and successfully return from this function; otherwise we will end up
*
* First, release any hybrid resources assigned to this vnet device.
*/
if (attach_progress & AST_vdds_init) {
}
/*
* Uninit vgen. This stops further mdeg callbacks to this vnet
*/
}
/* Destroy the taskq. */
if (attach_progress & AST_taskq_create) {
}
/* Destroy fdb. */
if (attach_progress & AST_fdbh_alloc) {
}
/* Remove from the device list */
if (attach_progress & AST_vnet_list) {
/* unlink from instance(vnet_t) list */
break;
}
}
}
if (attach_progress & AST_ring_init) {
}
if (attach_progress & AST_macreg) {
}
if (attach_progress & AST_vnet_alloc) {
}
return (0);
}
/*
 * mc_start entry point: called by the MAC layer to activate the device.
 * NOTE(review): only the success return is visible here; the
 * resource-start logic appears to have been elided from this copy —
 * confirm against the complete source.
 */
static int
vnet_m_start(void *arg)
{
	return (VNET_SUCCESS);
}
static void
vnet_m_stop(void *arg)
{
/*
* Set the flags appropriately; this should prevent starting of
* any new resources that are added(see vnet_res_start_task()),
* while we release the vrwlock in vnet_stop_resources() before
* stopping each resource.
*/
}
}
/* set the unicast mac address of the device */
static int
{
/*
* NOTE: setting mac address dynamically is not supported.
*/
return (VNET_FAILURE);
}
static int
{
int rv = VNET_SUCCESS;
}
}
return (rv);
}
/* set or clear promiscuous mode on the device */
static int
{
/*
* NOTE: setting promiscuous mode is not supported, just return success.
*/
return (VNET_SUCCESS);
}
/*
* Transmit a chain of packets. This function provides switching functionality
* based on the destination mac address to reach other guests (within ldoms) or
* external hosts.
*/
mblk_t *
{
struct ether_header *ehp;
void *tx_arg;
/*
* Find fdb entry for the destination
* and hold a reference to it.
*/
/*
* Destination found in FDB.
* The destination is a vnet device within ldoms
* and directly reachable, invoke the tx function
* in the fdb entry.
*/
/* tx done; now release ref on fdb entry */
/* m_tx failed */
break;
}
} else {
(IS_MULTICAST(ehp)));
/*
* Destination is not in FDB.
* If the destination is broadcast or multicast,
* then forward the packet to vswitch.
 * If a Hybrid resource is available, then send the
* unicast packet via hybrid resource, otherwise
* forward it to vswitch.
*/
} else {
}
/*
* no fdb entry to vsw? drop the packet.
*/
continue;
}
/* ref hold the fdb entry to vsw */
/*
* In the case of a hybrid resource we need to insert
* the tag for the pvid case here; unlike packets that
* layer does the tagging before sending it over ldc.
*/
/*
* Determine if the frame being transmitted
* over the hybrid resource is untagged. If so,
* insert the tag before transmitting.
*/
continue;
}
}
} else {
}
/* tx done; now release ref on fdb entry */
/* m_tx failed */
break;
}
}
}
return (mp);
}
/* get statistics from the device */
int
{
/*
* get the specified statistic from each transport and return the
* aggregate val. This obviously only works for counters.
*/
return (ENOTSUP);
}
}
return (0);
}
static void
{
int i;
for (i = 0; i < VNET_NUM_PSEUDO_TXRINGS; i++) {
}
/*
* Setup the first 3 Pseudo RX Rings that are reserved;
* 1 for LDC resource to vswitch + 2 for RX rings of Hybrid resource.
*/
for (i = VNET_NUM_PSEUDO_RXRINGS_DEFAULT;
i < rx_grp->max_ring_cnt; i++) {
}
}
static void
{
}
}
}
static vnet_pseudo_rx_ring_t *
{
int index;
/* no rings available */
return (NULL);
}
for (index = VNET_NUM_PSEUDO_RXRINGS_DEFAULT;
break;
}
}
return (rx_ringp);
}
static void
{
}
}
/* wrapper function for mac_register() */
static int
{
int err;
return (DDI_FAILURE);
/*
* MAC_VIRT_SERIALIZE flag is needed while hybridIO is enabled to
* workaround tx lock contention issues in nxge.
*/
if (vnet_mac_tx_serialize == B_TRUE) {
}
/*
* Finally, we're ready to register ourselves with the MAC layer
* interface; if this succeeds, we're all ready to start()
*/
}
/* read the mac address of the device */
static int
{
int rv;
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static void
{
char hashname[MAXNAMELEN];
mod_hash_null_valdtor, sizeof (void *));
}
static void
{
/* destroy fdb-hash-table */
vnetp->fdb_nchains = 0;
}
}
/*
* Add an entry into the fdb.
*/
void
{
int rv;
/*
* If the entry being added corresponds to LDC_SERVICE resource,
* that is, vswitch connection, it is added to the hash and also
* the entry is cached, an additional reference count reflects
* this. The HYBRID resource is not added to the hash, but only
* cached, as it is only used for sending out packets for unknown
* unicast destinations.
*/
/*
* Note: duplicate keys will be rejected by mod_hash.
*/
if (rv != 0) {
return;
}
}
/* Cache the fdb entry to vsw-port */
/* Cache the fdb entry to hybrid resource */
}
}
/*
* Remove an entry from fdb.
*/
static void
{
int rv;
/*
* Remove the entry from fdb hash table.
* This prevents further references to this fdb entry.
*/
(mod_hash_val_t *)&tmp);
if (rv != 0) {
/*
* As the resources are added to the hash only
* after they are started, this can occur if
* a resource unregisters before it is ever started.
*/
return;
}
}
}
/*
* If there are threads already ref holding before the entry was
* removed from hash table, then wait for ref count to drop to zero.
*/
}
}
/*
* Search fdb for a given mac address. If an entry is found, hold
* a reference to it and return the entry; else returns NULL.
*/
static vnet_res_t *
{
int rv;
if (rv != 0)
return (NULL);
return (vresp);
}
/*
* Callback function provided to mod_hash_find_cb(). After finding the fdb
* entry corresponding to the key (macaddr), this callback will be invoked by
* mod_hash_find_cb() to atomically increment the reference count on the fdb
* entry before returning the found entry.
*/
static void
{
}
/*
* Frames received that are tagged with the pvid of the vnet device must be
* untagged before sending up the stack. This function walks the chain of rx
* frames, untags any such frames and returns the updated chain.
*
* Arguments:
* pvid: pvid of the vnet device for which packets are being received
* mp: head of pkt chain to be validated and untagged
*
* Returns:
* mp: head of updated chain of packets
*/
static void
{
struct ether_vlan_header *evhp;
continue;
}
}
/* build a chain of processed packets */
} else {
}
}
}
static void
{
return;
}
}
void
{
int i;
return;
}
/*
* Currently, the tx hwring API (used to access rings that belong to
* a Hybrid IO resource) does not provide us a per ring flow ctrl
* vgen layer. Thus we can't figure out which pseudo ring is being
* re-enabled for transmits. To work around this, when we get a tx
* restart notification from below, we simply propagate that to all
* the tx pseudo rings registered with the mac layer above.
*
* There are a couple of side effects with this approach, but they are
* not harmful, as outlined below:
*
* A) We might send an invalid ring_update() for a ring that is not
* really flow controlled. This will not have any effect in the mac
* layer and packets will continue to be transmitted on that ring.
*
* B) We might end up clearing the flow control in the mac layer for
* a ring that is still flow controlled in the underlying resource.
* This will result in the mac layer restarting transmit, only to be
* flow controlled again on that ring.
*/
}
}
/*
* Update the new mtu of vnet into the mac layer. First check if the device has
* been plumbed and if so fail the mtu update. Returns 0 on success.
*/
int
{
int rv;
return (EINVAL);
}
"update as the device is plumbed\n",
return (EBUSY);
}
/* update mtu in the mac layer */
if (rv != 0) {
"!vnet%d: Unable to update mtu with mac layer\n",
return (EIO);
}
return (0);
}
/*
* Update the link state of vnet to the mac layer.
*/
void
{
return;
}
return;
}
}
/*
* vio_net_resource_reg -- An interface called to register a resource
* with vnet.
* macp -- a GLDv3 mac_register that has all the details of
* a resource and its callbacks etc.
* type -- resource type.
* local_macaddr -- resource's MAC address. This is used to
* associate a resource with a corresponding vnet.
* remote_macaddr -- remote side MAC address. This is ignored for
* the Hybrid resources.
* vhp -- A handle returned to the caller.
* vcb -- A set of callbacks provided to the callers.
*/
{
vnetp = vnet_headp;
/* Setup kstats for hio resource */
"hio", vresp);
"create kstats for hio resource",
}
}
break;
}
}
return (ENXIO);
}
/* Bind the resource to pseudo ring(s) */
if (vnet_bind_rings(vresp) != 0) {
return (1);
}
/* Dispatch a task to start resources */
return (0);
}
/*
* vio_net_resource_unreg -- An interface to unregister a resource.
*/
void
{
/*
* Remove the resource from fdb; this ensures
* there are no references to the resource.
*/
/* Now remove the resource from the list */
}
static void
{
}
static vnet_res_t *
{
} else {
break;
}
}
}
return (vresp);
}
/*
* vnet_dds_rx -- an interface called by vgen to DDS messages.
*/
void
{
}
/*
* vnet_send_dds_msg -- An interface provided to DDS to send
 * DDS messages. This simply sends messages via vgen.
*/
int
{
int rv;
}
return (rv);
}
/*
* vnet_cleanup_hio -- an interface called by vgen to cleanup hio resources.
*/
void
{
}
/*
* vnet_handle_res_err -- A callback function called by a resource
* to report an error. For example, vgen can call to report
* Hybrid resource.
*/
/* ARGSUSED */
static void
{
return;
}
return;
}
}
/*
* vnet_dispatch_res_task -- A function to dispatch tasks start resources.
*/
static void
{
int rv;
/*
* Dispatch the task. It could be the case that vnetp->flags does
* not have VNET_STARTED set. This is ok as vnet_rest_start_task()
* can abort the task when the task is started. See related comments
* in vnet_m_stop() and vnet_stop_resources().
*/
vnetp, DDI_NOSLEEP);
if (rv != DDI_SUCCESS) {
"vnet%d:Can't dispatch start resource task",
}
}
/*
* vnet_res_start_task -- A taskq callback function that starts a resource.
*/
static void
vnet_res_start_task(void *arg)
{
}
}
/*
* vnet_start_resources -- starts all resources associated with
* a vnet.
*/
static void
{
int rv;
/* skip if it is already started */
continue;
}
if (rv == 0) {
/*
* Successfully started the resource, so now
* add it to the fdb.
*/
}
}
}
/*
* vnet_stop_resources -- stop all resources associated with a vnet.
*/
static void
{
/*
* Release the lock while invoking mc_stop() of the
* underlying resource. We hold a reference to this
* resource to prevent being removed from the list in
* vio_net_resource_unreg(). Note that new resources
* can be added to the head of the list while the lock
* is released, but they won't be started, as
* VNET_STARTED flag has been cleared for the vnet
* device in vnet_m_stop(). Also, while the lock is
* released a resource could be removed from the list
* in vio_net_resource_unreg(); but that is ok, as we
* re-acquire the lock and only then access the forward
* link (vresp->nextp) to continue with the next
* resource.
*/
}
}
}
/*
* Setup kstats for the HIO statistics.
* NOTE: the synchronization for the statistics is the
* responsibility of the caller.
*/
kstat_t *
{
KSTAT_TYPE_NAMED, size, 0);
return (NULL);
}
/* MIB II kstat variables */
return (ksp);
}
/*
* Destroy kstats.
*/
static void
{
}
/*
* Update the kstats.
*/
static int
{
/* not using hio resources, just return */
return (0);
}
if (rw == KSTAT_READ) {
/* MIB II kstat variables */
} else {
return (EACCES);
}
return (0);
}
static void
{
int stat;
/*
* get the specified statistics from the underlying nxge.
*/
switch (stat) {
case MAC_STAT_IPACKETS:
break;
case MAC_STAT_IERRORS:
break;
case MAC_STAT_OPACKETS:
break;
case MAC_STAT_OERRORS:
break;
case MAC_STAT_RBYTES:
break;
case MAC_STAT_OBYTES:
break;
case MAC_STAT_MULTIRCV:
break;
case MAC_STAT_MULTIXMT:
break;
case MAC_STAT_BRDCSTRCV:
break;
case MAC_STAT_BRDCSTXMT:
break;
case MAC_STAT_NOXMTBUF:
break;
case MAC_STAT_NORCVBUF:
break;
default:
/*
* parameters not interested.
*/
break;
}
}
}
}
static boolean_t
{
return (0);
}
switch (cap) {
case MAC_CAPAB_RINGS: {
/*
* Rings Capability Notes:
* We advertise rings to make use of the rings framework in
* gldv3 mac layer, to improve the performance. This is
* specifically needed when a Hybrid resource (with multiple
* leverage this for the normal case when no Hybrid resource is
* assigned.
*
* Ring Allocation:
* - TX path:
* We expose a pseudo ring group with 2 pseudo tx rings (as
* currently HybridIO exports only 2 rings) In the normal case,
* transmit traffic that comes down to the driver through the
* mri_tx (vnet_tx_ring_send()) entry point goes through the
* distributed switching algorithm in vnet and gets transmitted
* peer vnet. If and when a Hybrid resource is assigned to the
* vnet, we obtain the tx ring information of the Hybrid device
* (nxge) and map the pseudo rings 1:1 to the 2 hw tx rings.
* Traffic being sent over the Hybrid resource by the mac layer
* gets spread across both hw rings, as they are mapped to the
* 2 pseudo tx rings in vnet.
*
* - RX path:
* We expose a pseudo ring group with 3 pseudo rx rings (static
* rings) initially. The first (default) pseudo rx ring is
* reserved for the resource that connects to the vswitch
* service. The next 2 rings are reserved for a Hybrid resource
* that may be assigned to the vnet device. If and when a
* Hybrid resource is assigned to the vnet, we obtain the rx
* ring information of the Hybrid device (nxge) and map these
* pseudo rings 1:1 to the 2 hw rx rings. For each additional
* resource that connects to a peer vnet, we dynamically
* allocate a pseudo rx ring and map it to that resource, when
* the resource gets added; and the pseudo rx ring is
* dynamically registered with the upper mac layer. We do the
* reverse and unregister the ring with the mac layer when
* the resource gets removed.
*
* Synchronization notes:
* We don't need any lock to protect members of ring structure,
* specifically ringp->hw_rh, in either the TX or the RX ring,
* as explained below.
* - TX ring:
* ring->hw_rh is initialized only when a Hybrid resource is
* associated; and gets referenced only in vnet_hio_tx(). The
* Hybrid resource itself is available in fdb only after tx
* hwrings are found and mapped; i.e, in vio_net_resource_reg()
* we call vnet_bind_rings() first and then call
* vnet_start_resources() which adds an entry to fdb. For
* traffic going over LDC resources, we don't reference
* ring->hw_rh at all.
* - RX ring:
* For rings mapped to Hybrid resource ring->hw_rh is
* initialized and only then do we add the rx callback for
* the underlying Hybrid resource; we disable callbacks before
* we unmap ring->hw_rh. For rings mapped to LDC resources, we
* stop the rx callbacks (in vgen) before we remove ring->hw_rh
* (vio_net_resource_unreg()).
*/
/*
* The ring_cnt for rx grp is initialized in
* vnet_ring_grp_init(). Later, the ring_cnt gets
* updated dynamically whenever LDC resources are added
* or removed.
*/
} else {
/*
* The ring_cnt for tx grp is initialized in
* vnet_ring_grp_init() and remains constant, as we
 * do not support dynamic tx rings for now.
*/
/*
* Transmit rings are not grouped; i.e, the number of
* transmit ring groups advertised should be set to 0.
*/
}
return (B_TRUE);
}
default:
break;
}
return (B_FALSE);
}
/*
 * Callback function for MAC layer to get ring information.
*/
static void
{
switch (rtype) {
case MAC_RING_TYPE_RX: {
/* We advertised only one RX group */
/* Check the current # of rings in the rx group */
/* Get the ring based on the index */
/*
* Note: we don't need to save the incoming r_index in rx_ring,
* as vnet_ring_grp_init() would have initialized the index for
* each ring in the array.
*/
/* Set the poll function, as this is an rx ring */
break;
}
case MAC_RING_TYPE_TX: {
/*
* No need to check grp index; mac layer passes -1 for it.
*/
/* Check the # of rings in the tx group */
/* Get the ring based on the index */
/* Set the transmit function, as this is a tx ring */
break;
}
default:
break;
}
}
/*
 * Callback function for MAC layer to get group information.
*/
static void
{
switch (type) {
case MAC_RING_TYPE_RX:
{
/* We advertised only one RX group */
break;
}
case MAC_RING_TYPE_TX:
{
/* We advertised only one TX group */
break;
}
default:
break;
}
}
static int
{
int err;
/*
* If this ring is mapped to a LDC resource, simply mark the state to
* indicate the ring is started and return.
*/
(VNET_RXRING_LDC_SERVICE|VNET_RXRING_LDC_GUEST)) != 0) {
return (0);
}
/*
* This must be a ring reserved for a hwring. If the hwring is not
* bound yet, simply mark the state to indicate the ring is started and
* return. If and when a hybrid resource is activated for this vnet
* device, we will bind the hwring and start it then. If a hwring is
* already bound, start it now.
*/
return (0);
}
if (err == 0) {
} else {
}
return (err);
}
static void
{
/*
* If this ring is mapped to a LDC resource, simply mark the state to
* indicate the ring is now stopped and return.
*/
(VNET_RXRING_LDC_SERVICE|VNET_RXRING_LDC_GUEST)) != 0) {
return;
}
/*
* This must be a ring reserved for a hwring. If the hwring is not
* bound yet, simply mark the state to indicate the ring is stopped and
* return. If a hwring is already bound, stop it now.
*/
return;
}
}
/* ARGSUSED */
static int
{
return (0);
}
static void
{
}
/*
* Disable polling for a ring and enable its interrupt.
*/
static int
vnet_ring_enable_intr(void *arg)
{
/*
* Ring enable intr func is being invoked, but the ring is
* not bound to any underlying resource ? This must be a ring
* reserved for Hybrid resource and no such resource has been
* assigned to this vnet device yet. We simply return success.
*/
return (0);
}
/*
* The rx ring has been bound to either a LDC or a Hybrid resource.
* Call the appropriate function to enable interrupts for the ring.
*/
} else {
}
}
/*
* Enable polling for a ring and disable its interrupt.
*/
static int
vnet_ring_disable_intr(void *arg)
{
/*
* Ring disable intr func is being invoked, but the ring is
* not bound to any underlying resource ? This must be a ring
* reserved for Hybrid resource and no such resource has been
* assigned to this vnet device yet. We simply return success.
*/
return (0);
}
/*
* The rx ring has been bound to either a LDC or a Hybrid resource.
* Call the appropriate function to disable interrupts for the ring.
*/
} else {
}
}
/*
* Poll 'bytes_to_pickup' bytes of message from the rx ring.
*/
static mblk_t *
{
return (NULL);
}
/*
* Packets received over a hybrid resource need additional
* processing to remove the tag, for the pvid case. The
* underlying resource is not aware of the vnet's pvid and thus
* packets are received with the vlan tag in the header; unlike
* packets that are received over a ldc channel in which case
*/
}
} else {
}
return (mp);
}
/* ARGSUSED */
void
{
/*
* Packets received over a hybrid resource need additional processing
* to remove the tag, for the pvid case. The underlying resource is
* not aware of the vnet's pvid and thus packets are received with the
* vlan tag in the header; unlike packets that are received over a ldc
* the tag.
*/
return;
}
}
}
static int
{
return (0);
}
return (EINVAL);
}
static int
{
return (0);
}
return (EINVAL);
}
int
{
char client_name[MAXNAMELEN];
int rv;
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
return (EAGAIN);
}
do {
if (rv == 0) {
break;
}
return (rv);
}
ifname);
if (rv != 0) {
goto fail;
}
&diag);
if (rv != 0) {
goto fail;
}
if (rv != 0) {
goto fail;
}
/* add the recv callback */
/* add the notify callback - only tx updates for now */
vnetp);
return (0);
fail:
return (1);
}
void
{
}
}
}
}
}
}
/* Bind pseudo rings to hwrings */
static int
{
int hw_ring_cnt;
int i;
int rv;
/* Get the list of the underlying RX rings. */
/* We expect the the # of hw rx rings to match VNET_NUM_HYBRID_RINGS */
if (hw_ring_cnt != VNET_NUM_HYBRID_RINGS) {
"!vnet%d: vnet_bind_hwrings: bad rx hw_ring_cnt(%d)\n",
goto fail;
}
/*
* Quiesce the HW ring and the mac srs on the ring. Note
* that the HW ring will be restarted when the pseudo ring
* is started. At that time all the packets will be
* directly passed up to the pseudo RX ring and handled
* by mac srs created over the pseudo RX ring.
*/
}
/*
* Bind the pseudo rings to the hwrings and start the hwrings.
* Note we don't need to register these with the upper mac, as we have
* statically exported these pseudo rxrings which are reserved for
* rxrings of Hybrid resource.
*/
for (i = 0; i < VNET_NUM_HYBRID_RINGS; i++) {
/* Pick the rxrings reserved for Hybrid resource */
/* Store the hw ring handle */
/* Bind the pseudo ring to the underlying hwring */
/* Start the hwring if needed */
if (rv != 0) {
goto fail;
}
}
}
/* Get the list of the underlying TX rings. */
/* We expect the # of hw tx rings to match VNET_NUM_HYBRID_RINGS */
if (hw_ring_cnt != VNET_NUM_HYBRID_RINGS) {
"!vnet%d: vnet_bind_hwrings: bad tx hw_ring_cnt(%d)\n",
goto fail;
}
/*
* Now map the pseudo txrings to the hw txrings. Note we don't need
* to register these with the upper mac, as we have statically exported
* these rings. Note that these rings will continue to be used for LDC
* resources to peer vnets and vswitch (shared ring).
*/
}
return (0);
fail:
return (1);
}
/* Unbind pseudo rings from hwrings */
static void
{
int i;
for (i = 0; i < VNET_NUM_HYBRID_RINGS; i++) {
}
}
for (i = 0; i < VNET_NUM_HYBRID_RINGS; i++) {
/* Stop the hwring */
/* Teardown the hwring */
}
}
/*
* First clear the permanent-quiesced flag of the RX srs then
* restart the HW ring and the mac srs on the ring.
*/
}
}
/* Bind pseudo ring to a LDC resource */
static int
{
int rv;
int type;
if (type == VIO_NET_RES_LDC_SERVICE) {
/*
* Ring Index 0 is the default ring in the group and is
* reserved for LDC_SERVICE in vnet_ring_grp_init(). This ring
* is allocated statically and is reported to the mac layer
* in vnet_m_capab(). So, all we need to do here, is save a
* reference to the associated vresp.
*/
return (0);
}
goto fail;
}
/* Store the LDC resource itself as the ring handle */
/*
* Save a reference to the ring in the resource for lookup during
* unbind. Note this is only done for LDC resources. We don't need this
* in the case of a Hybrid resource (see vnet_bind_hwrings()), as its
* rx rings are mapped to reserved pseudo rx rings (index 1 and 2).
*/
/* Register the pseudo ring with upper-mac */
if (rv != 0) {
goto fail;
}
return (0);
fail:
return (1);
}
/* Unbind pseudo ring from a LDC resource */
static void
{
int type;
return;
}
if (type == VIO_NET_RES_LDC_SERVICE) {
/*
* Ring Index 0 is the default ring in the group and is
* reserved for LDC_SERVICE in vnet_ring_grp_init(). This ring
* is allocated statically and is reported to the mac layer
* in vnet_m_capab(). So, all we need to do here, is remove its
* reference to the associated vresp.
*/
return;
}
/* Unregister the pseudo ring with upper-mac */
/* Free the pseudo rx ring */
}
}
static void
{
case VIO_NET_RES_LDC_SERVICE:
case VIO_NET_RES_LDC_GUEST:
break;
case VIO_NET_RES_HYBRID:
break;
default:
break;
}
}
static int
{
int rv;
case VIO_NET_RES_LDC_SERVICE:
case VIO_NET_RES_LDC_GUEST:
break;
case VIO_NET_RES_HYBRID:
break;
default:
rv = 1;
break;
}
return (rv);
}
/* ARGSUSED */
int
{
return (0);
}
/*
* The start() and stop() routines for the Hybrid resource below, are just
* dummy functions. This is provided to avoid resource type specific code in
* vnet_start_resources() and vnet_stop_resources(). The starting and stopping
* of the Hybrid resource happens in the context of the mac_client interfaces
* that are invoked in vnet_hio_mac_init() and vnet_hio_mac_cleanup().
*/
/* ARGSUSED */
/*
 * Dummy mc_start for the Hybrid resource: always succeeds. Per the
 * comment above, actual start/stop of the Hybrid resource happens via
 * the mac_client interfaces; this stub only keeps the resource
 * start/stop paths free of type-specific code.
 */
static int
vnet_hio_start(void *arg)
{
	return (0);
}
/* ARGSUSED */
/*
 * Dummy mc_stop for the Hybrid resource: intentionally a no-op (see the
 * block comment above vnet_hio_start()).
 */
static void
vnet_hio_stop(void *arg)
{
}
mblk_t *
{
for (;;) {
break;
}
break;
}
return (mp);
}
static void
{
switch (type) {
case MAC_NOTE_TX:
break;
default:
break;
}
}
#ifdef VNET_IOC_DEBUG
/*
* The ioctl entry point is used only for debugging for now. The ioctl commands
* can be used to force the link state of the channel connected to vsw.
*/
static void
{
return;
}
case VNET_FORCE_LINK_DOWN:
case VNET_FORCE_LINK_UP:
break;
default:
break;
}
}
static void
{
return;
}
}
#else
static void
{
return;
}
/* ioctl support only for debugging */
}
#endif