/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2013 Nexenta Systems, Inc. All rights reserved.
*/
/*
* Copyright (c) 2007, The Ohio State University. All rights reserved.
*
 * Portions of this source code are developed by the team members of
* The Ohio State University's Network-Based Computing Laboratory (NBCL),
* headed by Professor Dhabaleswar K. (DK) Panda.
*
 * Acknowledgements to contributions from developers:
* Ranjit Noronha: noronha@cse.ohio-state.edu
* Lei Chai : chail@cse.ohio-state.edu
* Weikuan Yu : yuw@cse.ohio-state.edu
*
*/
/*
* The rpcib plugin. Implements the interface for RDMATF's
* interaction with IBTF.
*/
#include <sys/sysmacros.h>
#include <sys/pathname.h>
#include <sys/isa_defs.h>
#include <net/if_types.h>
#include <rpc/rpc_rdma.h>
/*
* Convenience structures for connection management
*/
typedef struct rpcib_ipaddrs {
	void	*ri_list;	/* pointer to list of addresses */
	uint_t	ri_count;	/* number of addresses in list */
} rpcib_ipaddrs_t;
typedef struct rpcib_ping {
	rib_hca_t	*hca;	/* HCA with a path to the peer */
} rpcib_ping_t;
/*
* Prototype declarations for driver ops
*/
static int rpcib_getinfo(dev_info_t *, ddi_info_cmd_t,
    void *, void **);
static int rpcib_do_ip_ioctl(int, int, void *);
static int rpcib_cache_kstat_update(kstat_t *, int);
static void rib_force_cleanup(void *);
static void rib_stop_hca_services(rib_hca_t *);
static void rib_attach_hca(void);
struct {
	kstat_named_t	cache_limit;
	kstat_named_t	cache_allocation;
	kstat_named_t	cache_hits;
	kstat_named_t	cache_misses;
	kstat_named_t	cache_misses_above_the_limit;
} rpcib_kstat = {
{"cache_limit", KSTAT_DATA_UINT64 },
{"cache_allocation", KSTAT_DATA_UINT64 },
{"cache_hits", KSTAT_DATA_UINT64 },
{"cache_misses", KSTAT_DATA_UINT64 },
{"cache_misses_above_the_limit", KSTAT_DATA_UINT64 },
};
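/*
 * Illustrative sketch only (not in the original source): one way a
 * named-kstat template like rpcib_kstat is typically published. The
 * module/class strings and the use of kstat_create() here are
 * assumptions, not the driver's actual registration code.
 */
#if 0
	kstat_t *ksp;

	ksp = kstat_create("unix", 0, "rpcib_cache", "rpc",
	    KSTAT_TYPE_NAMED,
	    sizeof (rpcib_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (ksp != NULL) {
		ksp->ks_data = (void *)&rpcib_kstat;	/* virtual kstat */
		ksp->ks_update = rpcib_cache_kstat_update;
		kstat_install(ksp);
	}
#endif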
/* rpcib cb_ops */
static struct cb_ops rpcib_cbops = {
	nulldev,		/* open */
nulldev, /* close */
nodev, /* strategy */
nodev, /* print */
nodev, /* dump */
nodev, /* read */
nodev, /* write */
nodev, /* ioctl */
nodev, /* devmap */
nodev, /* mmap */
nodev, /* segmap */
nochpoll, /* poll */
ddi_prop_op, /* prop_op */
NULL, /* stream */
D_MP, /* cb_flag */
CB_REV, /* rev */
nodev, /* int (*cb_aread)() */
nodev /* int (*cb_awrite)() */
};
/*
* Device options
*/
static struct dev_ops rpcib_ops = {
	DEVO_REV,		/* devo_rev, */
0, /* refcnt */
rpcib_getinfo, /* info */
nulldev, /* identify */
nulldev, /* probe */
rpcib_attach, /* attach */
rpcib_detach, /* detach */
nodev, /* reset */
&rpcib_cbops, /* driver ops - devctl interfaces */
NULL, /* bus operations */
NULL, /* power */
ddi_quiesce_not_needed, /* quiesce */
};
/*
* Module linkage information.
*/
static struct modldrv rib_modldrv = {
	&mod_driverops,		/* Driver module */
"RPCIB plugin driver", /* Driver name and version */
&rpcib_ops, /* Driver ops */
};
static struct modlinkage rib_modlinkage = {
	MODREV_1,
	(void *)&rib_modldrv,
	NULL
};
typedef struct rib_lrc_entry {
	char	*lrc_buf;	/* registered long reply buffer */
	void	*avl_node;	/* owning cache avl entry */
	bool_t	registered;
} rib_lrc_entry_t;
typedef struct cache_struct {
	rib_lrc_entry_t	r;
	avl_node_t	avl_link;
} cache_avl_struct_t;
/*
* rib_stat: private data pointer used when registering
* with the IBTF. It is returned to the consumer
* in all callbacks.
*/
/*
 * Old cards with the Tavor driver have a limited memory footprint
 * when booted in 32-bit mode. The rib_max_rbufs tunable can be
 * raised for more buffers if needed.
 */
#if !defined(_ELF64) && !defined(__sparc)
int rib_max_rbufs = MAX_BUFS;
#else
int rib_max_rbufs = 10 * MAX_BUFS;
#endif /* !(_ELF64) && !(__sparc) */
/*
* State of the plugin.
* ACCEPT = accepting new connections and requests.
 * NO_ACCEPT = not accepting new connections and requests.
 * This should eventually move to the rpcib_state_t structure, since it
 * should record the plugin's state per service type, such as
 * NFS, NLM or the NFSv4 callback daemon. The plugin might be in the
 * accept state for one and in the no_accept state for the other.
*/
int plugin_state;
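/*
 * Illustrative sketch only: how a service path might consult
 * plugin_state before queueing new work. The mutex name
 * plugin_state_lock is an assumption about the lock protecting
 * this variable.
 */
#if 0
static boolean_t
rpcib_accepting(void)
{
	boolean_t ok;

	mutex_enter(&plugin_state_lock);
	ok = (plugin_state == ACCEPT);
	mutex_exit(&plugin_state_lock);
	return (ok);
}
#endif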
/*
* RPCIB RDMATF operations
*/
struct mrc buf_handle);
struct mrc buf_handle);
void *lrc);
rpcib_ping_t *, CONN **);
static void rib_server_side_cache_reclaim(void *argp);
static void rib_stop_services(rib_hca_t *);
static void rib_close_channels(rib_conn_list_t *);
static void rib_conn_close(void *);
static void rib_recv_rele(rib_qp_t *);
/*
* RPCIB addressing operations
*/
/*
* RDMA operations the RPCIB module exports
*/
};
/*
* RDMATF RPCIB plugin details
*/
"ibtf", /* api name */
0,
&rib_ops, /* rdma op vector for ibtf */
};
static void rib_svc_scq_handler(ibt_cq_hdl_t, void *);
static void rib_clnt_scq_handler(ibt_cq_hdl_t, void *);
static void rib_clnt_rcq_handler(ibt_cq_hdl_t, void *);
static void rib_svc_rcq_handler(ibt_cq_hdl_t, void *);
rib_qp_t **);
rib_qp_t **);
static int rib_free_sendwait(struct send_wid *);
static void rdma_done_rem_list(rib_qp_t *);
static void rib_async_handler(void *,
static int rib_free_svc_recv(struct svc_recv *);
static void rib_free_wid(struct recv_wid *);
static void rib_detach_hca(ibt_hca_hdl_t);
static void rib_close_a_channel(CONN *);
static void rib_send_hold(rib_qp_t *);
static void rib_send_rele(rib_qp_t *);
/*
* Registration with IBTF as a consumer
*/
rib_async_handler, /* async event handler */
NULL, /* Memory Region Handler */
};
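/*
 * Illustrative sketch only: registering the consumer modinfo above
 * with IBTF from attach. The names rib_modinfo and
 * rib_stat->ibt_clnt_hdl are assumptions.
 */
#if 0
	ibt_status = ibt_attach(&rib_modinfo, dip, (void *)rib_stat,
	    &rib_stat->ibt_clnt_hdl);
#endif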
/*
 * Global structure
*/
typedef struct rpcib_s {
} rpcib_t;
/*
 * Controls debugging in the rpcib kernel module.
 * Set it to values greater than 1 to increase
 * the amount of debugging messages printed.
*/
int rib_debug = 0;
int
_init(void)
{
	int error;

	error = mod_install(&rib_modlinkage);
if (error != 0) {
/*
* Could not load module
*/
return (error);
}
return (0);
}
int
_fini()
{
int status;
/*
* Remove module
	 */
	if ((status = mod_remove(&rib_modlinkage)) != 0) {
		return (status);
}
return (0);
}
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&rib_modlinkage, modinfop));
}
/*
* rpcib_getinfo()
* Given the device number, return the devinfo pointer or the
* instance number.
* Note: always succeed DDI_INFO_DEVT2INSTANCE, even before attach.
*/
/*ARGSUSED*/
static int
{
switch (cmd) {
case DDI_INFO_DEVT2DEVINFO:
else {
ret = DDI_FAILURE;
}
break;
case DDI_INFO_DEVT2INSTANCE:
break;
default:
ret = DDI_FAILURE;
}
return (ret);
}
static void
{
rib_stat->nhca_inited--;
}
}
static rdma_stat
{
if (ret != IBT_SUCCESS) {
#ifdef DEBUG
"ibt_unbind_all_services failed (%d)\n", (int)ret);
#endif
return (RDMA_FAILED);
}
if (ret != IBT_SUCCESS) {
#ifdef DEBUG
"ibt_deregister_service failed (%d)\n", (int)ret);
#endif
return (RDMA_FAILED);
}
}
return (RDMA_SUCCESS);
}
static int
{
switch (cmd) {
case DDI_ATTACH:
break;
case DDI_RESUME:
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
/*
* Create the "rpcib" minor-node.
*/
if (ddi_create_minor_node(dip,
		/* Error message, no cmn_err as they print on the console */
return (DDI_FAILURE);
}
}
return (DDI_FAILURE);
}
if (ibt_status != IBT_SUCCESS) {
return (DDI_FAILURE);
}
goto open_fail;
}
"failed.");
goto register_fail;
}
/*
* Register with rdmatf
*/
"status = %d", r_status);
goto register_fail;
}
return (DDI_SUCCESS);
(void) rpcib_free_service_list();
return (DDI_FAILURE);
}
/*ARGSUSED*/
static int
{
switch (cmd) {
case DDI_DETACH:
break;
case DDI_SUSPEND:
default:
return (DDI_FAILURE);
}
/*
* Detach the hca and free resources
*/
if (rpcib_free_service_list() != RDMA_SUCCESS)
return (DDI_FAILURE);
return (DDI_SUCCESS);
}
static void rib_rbufpool_free(rib_hca_t *, int);
static void rib_rbufpool_deregister(rib_hca_t *, int);
/*
* One CQ pair per HCA
*/
static rdma_stat
{
&real_size);
if (status != IBT_SUCCESS) {
" status=%d", status);
error = RDMA_FAILED;
goto fail;
}
/*
* Enable CQ callbacks. CQ Callbacks are single shot
	 * (i.e. you have to call ibt_enable_cq_notify()
* after each callback to get another one).
*/
if (status != IBT_SUCCESS) {
"enable_cq_notify failed, status %d", status);
error = RDMA_FAILED;
goto fail;
}
return (error);
fail:
if (cq->rib_cq_hdl)
if (cq)
return (error);
}
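/*
 * Illustrative sketch only of the single-shot notification pattern
 * noted in the CQ-create routine above: re-arm the CQ with
 * ibt_enable_cq_notify() before draining it, so a completion that
 * arrives mid-drain still generates a callback.
 */
#if 0
static void
example_cq_handler(ibt_cq_hdl_t cq_hdl, void *arg)
{
	ibt_wc_t wc;

	(void) ibt_enable_cq_notify(cq_hdl, IBT_NEXT_COMPLETION);
	while (ibt_poll_cq(cq_hdl, &wc, 1, NULL) == IBT_SUCCESS) {
		/* process the work completion here ... */
	}
}
#endif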
/*
* rpcib_find_hca
*
* Caller should have already locked the hcas_lock before calling
* this function.
*/
static rib_hca_t *
{
return (hca);
}
static rdma_stat
{
int i;
return (RDMA_FAILED);
/*
* Open a hca and setup for RDMA
*/
continue;
if (ibt_status != IBT_SUCCESS) {
continue;
}
/*
* query HCA info
*/
if (ibt_status != IBT_SUCCESS) {
goto fail1;
}
/*
* One PD (Protection Domain) per HCA.
* A qp is allowed to access a memory region
* only when it's in the same PD as that of
* the memory region.
*/
if (ibt_status != IBT_SUCCESS) {
goto fail1;
}
/*
* query HCA ports
*/
if (ibt_status != IBT_SUCCESS) {
goto fail2;
}
/*
* Create 2 pairs of cq's (1 pair for client
* and the other pair for server) on this hca.
	 * If the number of qp's gets too large, then several
* cq's will be needed.
*/
if (status != RDMA_SUCCESS) {
goto fail3;
}
if (status != RDMA_SUCCESS) {
goto fail3;
}
if (status != RDMA_SUCCESS) {
goto fail3;
}
if (status != RDMA_SUCCESS) {
goto fail3;
}
/*
* Create buffer pools.
* Note rib_rbuf_create also allocates memory windows.
*/
goto fail3;
}
goto fail3;
}
"rib_srvr_cache_%llx",
sizeof (cache_avl_struct_t), 0,
NULL,
NULL,
}
sizeof (cache_avl_struct_t),
/* Create kstats for the cache */
if (!stats_enabled) {
sizeof (rpcib_kstat) / sizeof (kstat_named_t),
if (ksp) {
}
}
}
ribstat->nhca_inited++;
continue;
}
/*
* return success if at least one new hca has been configured.
*/
return (RDMA_SUCCESS);
else
return (RDMA_FAILED);
}
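/*
 * Illustrative sketch only of the one-PD-per-HCA rule described in
 * the HCA open path above; the hca field names are assumptions.
 */
#if 0
	ibt_status = ibt_alloc_pd(hca->hca_hdl, IBT_PD_NO_FLAGS,
	    &hca->pd_hdl);
#endif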
/*
* Callback routines
*/
/*
* SCQ handlers
*/
/* ARGSUSED */
static void
{
int i;
/*
* Re-enable cq notify here to avoid missing any
* completion queue notification.
*/
while (ibt_status != IBT_CQ_EMPTY) {
if (ibt_status != IBT_SUCCESS)
return;
/*
* Got a send completion
*/
case IBT_WC_SUCCESS:
break;
default:
/*
* RC Send Q Error Code Local state Remote State
* ==================== =========== ============
* IBT_WC_BAD_RESPONSE_ERR ERROR None
* IBT_WC_LOCAL_LEN_ERR ERROR None
* IBT_WC_LOCAL_CHAN_OP_ERR ERROR None
* IBT_WC_LOCAL_PROTECT_ERR ERROR None
* IBT_WC_MEM_WIN_BIND_ERR ERROR None
* IBT_WC_REMOTE_INVALID_REQ_ERR ERROR ERROR
* IBT_WC_REMOTE_ACCESS_ERR ERROR ERROR
* IBT_WC_REMOTE_OP_ERR ERROR ERROR
* IBT_WC_RNR_NAK_TIMEOUT_ERR ERROR None
* IBT_WC_TRANS_TIMEOUT_ERR ERROR None
* IBT_WC_WR_FLUSHED_ERR ERROR None
*/
/*
* Channel in error state. Set connection to
* ERROR and cleanup will happen either from
* conn_release or from rib_conn_get
*/
break;
}
/*
* Notify poster
*/
} else {
/*
* Poster not waiting for notification.
* Free the send buffers and send_wid
*/
}
/* decrement the send ref count */
(void) rib_free_sendwait(wd);
}
}
}
}
/* ARGSUSED */
static void
{
int i;
/*
* Re-enable cq notify here to avoid missing any
* completion queue notification.
*/
while (ibt_status != IBT_CQ_EMPTY) {
if (ibt_status != IBT_SUCCESS)
return;
/*
* Got a send completion
*/
case IBT_WC_SUCCESS:
break;
default:
/*
* Channel in error state. Set connection to
* ERROR and cleanup will happen either from
* conn_release or conn timeout.
*/
break;
}
/*
* Update completion status and notify poster
*/
} else {
/*
* Poster not waiting for notification.
* Free the send buffers and send_wid
*/
}
/* decrement the send ref count */
(void) rib_free_sendwait(wd);
}
}
}
}
/*
* RCQ handler
*/
/* ARGSUSED */
static void
{
/*
* Re-enable cq notify here to avoid missing any
* completion queue notification.
*/
while (ibt_status != IBT_CQ_EMPTY) {
if (ibt_status != IBT_SUCCESS)
return;
struct reply *r;
/*
* Treat xid as opaque (xid is the first entity
* in the rpc rdma message).
*/
/* Skip xid and set the xdr position accordingly. */
if (vers != RPCRDMA_VERS) {
			/*
			 * Invalid RPC/RDMA version; cannot
			 * interoperate. Set connection to
* ERROR state and bail out.
*/
continue;
}
find_xid = 1;
switch (op) {
case RDMA_MSG:
case RDMA_NOMSG:
case RDMA_MSGP:
r->status = RDMA_SUCCESS;
r->bytes_xfer =
break;
default:
(void *)(uintptr_t)
break;
}
break;
}
}
if (find_xid == 0) {
/* RPC caller not waiting for reply */
int, xid);
}
/*
* Connection being flushed. Just free
* the posted buffer
*/
} else {
/*
* RC Recv Q Error Code Local state Remote State
* ==================== =========== ============
* IBT_WC_LOCAL_ACCESS_ERR ERROR ERROR when NAK recvd
* IBT_WC_LOCAL_LEN_ERR ERROR ERROR when NAK recvd
* IBT_WC_LOCAL_PROTECT_ERR ERROR ERROR when NAK recvd
* IBT_WC_LOCAL_CHAN_OP_ERR ERROR ERROR when NAK recvd
* IBT_WC_REMOTE_INVALID_REQ_ERR ERROR ERROR when NAK recvd
* IBT_WC_WR_FLUSHED_ERR None None
*/
/*
* Channel in error state. Set connection
* in ERROR state.
*/
}
}
}
/* Server side */
/* ARGSUSED */
static void
{
/*
* Re-enable cq notify here to avoid missing any
* completion queue notification.
*/
while (ibt_status != IBT_CQ_EMPTY) {
if (ibt_status != IBT_SUCCESS)
return;
/* s_recvp->vaddr stores data */
/*
* Treat xid as opaque (xid is the first entity
* in the rpc rdma message).
*/
/* Skip xid and set the xdr position accordingly. */
(void) rib_free_svc_recv(s_recvp);
continue;
}
if (vers != RPCRDMA_VERS) {
/*
* Drop rpc rdma message.
*/
(void) rib_free_svc_recv(s_recvp);
continue;
}
/*
* Is this for RDMA_DONE?
*/
/*
* Wake up the thread waiting on
* a RDMA_DONE for xid
*/
(void) rib_free_svc_recv(s_recvp);
continue;
}
if ((plugin_state == ACCEPT) &&
== NULL)
(void) strwaitbuf(
/*
* Plugin is in accept state, hence the master
* transport queue for this is still accepting
			 * requests. We can therefore call svc_queuereq to
			 * queue this received msg.
*/
FALSE);
} else {
/*
* The master transport for this is going
* away and the queue is not accepting anymore
* requests for krpc, so don't do anything, just
* free the msg.
*/
}
} else {
}
(void) rib_free_svc_recv(s_recvp);
}
}
static void
{
(void) rpcib_open_hcas(rib_stat);
}
/*
* Handles DR event of IBT_HCA_DETACH_EVENT.
*/
/* ARGSUSED */
static void
{
switch (code) {
case IBT_HCA_ATTACH_EVENT:
break;
case IBT_HCA_DETACH_EVENT:
#ifdef DEBUG
#endif
break;
case IBT_EVENT_PORT_UP:
/*
* A port is up. We should call rib_listen() since there is
* a chance that rib_listen() may have failed during
* rib_attach_hca() because the port had not been up yet.
*/
#ifdef DEBUG
#endif
break;
#ifdef DEBUG
case IBT_EVENT_PATH_MIGRATED:
"IBT_EVENT_PATH_MIGRATED\n");
break;
case IBT_EVENT_SQD:
break;
case IBT_EVENT_COM_EST:
break;
"IBT_ERROR_CATASTROPHIC_CHAN\n");
break;
"IBT_ERROR_INVALID_REQUEST_CHAN\n");
break;
"IBT_ERROR_ACCESS_VIOLATION_CHAN\n");
break;
"IBT_ERROR_PATH_MIGRATE_REQ\n");
break;
case IBT_ERROR_CQ:
break;
case IBT_ERROR_PORT_DOWN:
break;
case IBT_ASYNC_OPAQUE1:
break;
case IBT_ASYNC_OPAQUE2:
break;
case IBT_ASYNC_OPAQUE3:
break;
case IBT_ASYNC_OPAQUE4:
break;
#endif
default:
break;
}
}
/*
* Client's reachable function.
*/
static rdma_stat
{
if (status == RDMA_SUCCESS) {
/* release the reference */
(void) rib_conn_release(conn);
return (RDMA_SUCCESS);
} else {
return (RDMA_FAILED);
}
}
/* Client side qp creation */
static rdma_stat
{
/*
* Initialize
*/
/*
* Initialize the client credit control
* portion of the rdmaconn struct.
*/
cc_info->clnt_cc_granted_ops = 0;
cc_info->clnt_cc_in_flight_ops = 0;
return (RDMA_SUCCESS);
}
/* Server side qp creation */
static rdma_stat
{
/*
* Create the qp handle
*/
&chan_sizes);
} else {
goto fail;
}
if (ibt_status != IBT_SUCCESS) {
int, ibt_status);
goto fail;
}
kqp->q = q; /* server ONLY */
/*
* Set the private data area to qp to be used in callbacks
*/
/*
* Initialize the server credit control
* portion of the rdmaconn struct.
*/
return (RDMA_SUCCESS);
fail:
if (kqp)
return (RDMA_FAILED);
}
/* ARGSUSED */
{
/* got a connection close event */
case IBT_CM_EVENT_CONN_CLOSED:
{
/* check reason why connection was closed */
case IBT_CM_CLOSED_DREP_RCVD:
case IBT_CM_CLOSED_DUP:
case IBT_CM_CLOSED_ABORT:
case IBT_CM_CLOSED_ALREADY:
/*
* These cases indicate the local end initiated
* the closing of the channel. Nothing to do here.
*/
break;
default:
/*
* Reason for CONN_CLOSED event must be one of
* IBT_CM_CLOSED_DREQ_RCVD or IBT_CM_CLOSED_REJ_RCVD
		 * or IBT_CM_CLOSED_STALE. These indicate cases where
* the remote end is closing the channel. In these
* cases free the channel and transition to error
* state
*/
break;
}
/*
* Free the conn if c_ref is down to 0 already
*/
/*
* Remove from list and free conn
*/
(void) rib_disconnect_channel(conn,
&hca->cl_conn_list);
} else {
/*
* conn will be freed when c_ref goes to 0.
* Indicate to cleaning thread not to close
* the connection, but just free the channel.
*/
}
#ifdef DEBUG
if (rib_debug)
"(CONN_CLOSED) channel disconnected");
#endif
break;
}
break;
}
default:
break;
}
return (IBT_CM_ACCEPT);
}
/*
* Connect to the server.
*/
{
case AF_INET:
break;
case AF_INET6:
break;
}
case AF_INET:
break;
case AF_INET6:
break;
}
if (ibt_status != IBT_SUCCESS) {
return (-1);
}
/* Alloc a RC channel */
&chan_sizes);
} else {
return (RDMA_FAILED);
}
if (ibt_status != IBT_SUCCESS) {
int, ibt_status);
return (RDMA_FAILED);
}
/* Connect to the Server */
if (ibt_status != IBT_SUCCESS) {
/*
* Got IBT_CM_CONN_STALE probably because of stale
* data on the passive end of a channel that existed
* prior to reboot. Retry establishing a channel
* REFRESH_ATTEMPTS times, during which time the
* stale conditions on the server might clear up.
*/
goto refresh;
}
return (RDMA_FAILED);
}
/*
* Set the private data area to qp to be used in callbacks
*/
return (RDMA_SUCCESS);
}
{
return (RDMA_INVAL);
goto done2;
}
} else {
}
continue;
}
for (i = 0; i < addr_count; i++) {
num_paths_p = 0;
} else {
}
&num_paths_p, &srcip);
if (ibt_status == IBT_SUCCESS &&
num_paths_p != 0 &&
} else {
}
goto done1;
}
}
}
return (retval);
}
/*
* Close channel, remove from connection list and
* free up resources allocated for that channel.
*/
{
}
}
/*
* c_ref == 0 and connection is in C_DISCONN_PEND
*/
/*
* There is only one case where we get here with
* qp_hdl = NULL, which is during connection setup on
	 * the client. In such a case there are no posted
	 * send/recv buffers to wait on.
	 */
while (qp->n_posted_rbufs)
while (qp->n_send_rbufs)
}
(void) rib_rem_replylist(qp);
}
}
}
}
}
/*
* Credit control cleanup.
*/
}
/*
* If HCA has been DETACHED and the srv/clnt_conn_list is NULL,
* then the hca is no longer being used.
*/
}
}
}
}
return (RDMA_SUCCESS);
}
/*
* All sends are done under the protection of
* the wdesc->sendwait_lock. n_send_rbufs count
* is protected using the send_rbufs_lock.
* lock ordering is:
* sendwait_lock -> send_rbufs_lock
*/
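/*
 * Illustrative sketch only of the lock order documented above:
 * sendwait_lock is always taken before send_rbufs_lock. The
 * structure names here are assumptions.
 */
#if 0
	mutex_enter(&wdesc->sendwait_lock);	/* first */
	mutex_enter(&qp->send_rbufs_lock);	/* second */
	qp->n_send_rbufs--;
	mutex_exit(&qp->send_rbufs_lock);
	mutex_exit(&wdesc->sendwait_lock);
#endif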
void
{
qp->n_send_rbufs++;
}
void
{
qp->n_send_rbufs--;
if (qp->n_send_rbufs == 0)
}
void
{
qp->n_posted_rbufs--;
if (qp->n_posted_rbufs == 0)
}
/*
* Wait for send completion notification. Only on receiving a
* notification be it a successful or error completion, free the
* send_wid.
*/
static rdma_stat
{
int i;
/*
* Wait for send to complete
*/
;
switch (cv_wait_ret) {
case -1: /* timeout */
break;
default: /* got send completion */
break;
}
} else {
;
switch (cv_wait_ret) {
case -1: /* timeout */
break;
case 0: /* interrupted */
break;
default: /* got send completion */
break;
}
}
}
/* got send completion */
case RDMA_CONNLOST:
break;
default:
error = RDMA_FAILED;
break;
}
}
}
(void) rib_free_sendwait(wd);
} else {
}
return (error);
}
static struct send_wid *
{
return (wd);
}
static int
{
return (0);
}
static rdma_stat
{
return (RDMA_SUCCESS);
}
return (RDMA_FAILED);
}
/*
* Send buffers are freed here only in case of error in posting
* on QP. If the post succeeded, the send buffers are freed upon
* send completion in rib_sendwait() or in the scq_handler.
*/
{
int i, nds;
nds = 0;
total_msg_size = 0;
return (RDMA_FAILED);
}
nds++;
}
if (send_sig) {
/* Set SEND_SIGNAL flag. */
for (i = 0; i < nds; i++) {
}
} else {
}
}
ibt_status != IBT_SUCCESS) {
if (send_sig) {
for (i = 0; i < nds; i++) {
}
(void) rib_free_sendwait(wdesc);
}
return (RDMA_CONNLOST);
}
if (send_sig) {
if (cv_sig) {
/*
* cv_wait for send to complete.
* We can fail due to a timeout or signal or
* unsuccessful send.
*/
return (ret);
}
}
return (RDMA_SUCCESS);
}
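/*
 * Illustrative sketch only of the buffer-ownership rule stated before
 * the routine above: free the send buffer on a failed post; on
 * success, ownership passes to the completion path. The helper and
 * variable names are assumptions.
 */
#if 0
	if (ibt_post_send(qp->qp_hdl, &wdesc->wr, 1, NULL) != IBT_SUCCESS) {
		rib_rbuf_free(conn, SEND_BUFFER, buf);
		(void) rib_free_sendwait(wdesc);
		return (RDMA_CONNLOST);
	}
	/* success: the scq handler or rib_sendwait() frees the buffer */
#endif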
{
/* send-wait & cv_signal */
return (ret);
}
/*
 * Deprecated/obsolete interface, not currently used,
 * but earlier used for the READ-READ protocol.
* Send RPC reply and wait for RDMA_DONE.
*/
{
/* No cv_signal (whether send-wait or no-send-wait) */
if (ret != RDMA_SUCCESS) {
} else {
/*
* Wait for RDMA_DONE from remote end
*/
if (cv_wait_ret < 0) {
ret = RDMA_TIMEDOUT;
}
}
return (ret);
}
static struct recv_wid *
{
return (rwid);
}
static void
{
}
{
int nds;
/*
* rdma_clnt_postrecv uses RECV_BUFFER.
*/
nds = 0;
ret = RDMA_FAILED;
goto done;
}
nds++;
}
if (nds != 1) {
ret = RDMA_FAILED;
goto done;
}
if (rwid) {
} else {
goto done;
}
if (!rep) {
goto done;
}
}
ibt_status != IBT_SUCCESS) {
ret = RDMA_CONNLOST;
goto done;
}
qp->n_posted_rbufs++;
return (RDMA_SUCCESS);
done:
}
return (ret);
}
{
int nds;
nds = 0;
return (RDMA_FAILED);
}
nds++;
}
if (nds != 1) {
return (RDMA_FAILED);
}
/* Use s_recvp's addr as wr id */
}
ibt_status != IBT_SUCCESS) {
(void) rib_free_svc_recv(s_recvp);
return (RDMA_CONNLOST);
}
return (RDMA_SUCCESS);
}
/* Client */
{
}
/* Client */
{
}
break;
}
}
return (RDMA_SUCCESS);
}
/* Server */
{
qp->n_posted_rbufs++;
return (RDMA_SUCCESS);
}
return (RDMA_FAILED);
}
/*
* Client side only interface to "recv" the rpc reply buf
* posted earlier by rib_post_resp(conn, cl, msgid).
*/
{
/*
* Find the reply structure for this msgid
*/
break;
}
/*
* If message not yet received, wait.
*/
timout = ddi_get_lbolt() +
;
switch (cv_wait_ret) {
case -1: /* timeout */
ret = RDMA_TIMEDOUT;
break;
case 0:
break;
default:
break;
}
}
/*
* Got message successfully
*/
} else {
/*
* Got error in reply message. Free
* recv buffer here.
*/
}
}
} else {
/*
* No matching reply structure found for given msgid on the
* reply wait list.
*/
ret = RDMA_INVAL;
}
/*
* Done.
*/
return (ret);
}
/*
* RDMA write a buffer to the remote address.
*/
{
int cv_sig;
return (RDMA_FAILED);
}
if (wait) {
cv_sig = 1;
} else {
if (n_writes > max_unsignaled_rws) {
n_writes = 0;
cv_sig = 1;
} else {
cv_sig = 0;
}
}
if (cv_sig) {
} else {
}
}
ibt_status != IBT_SUCCESS) {
if (cv_sig) {
(void) rib_free_sendwait(wdesc);
}
return (RDMA_CONNLOST);
}
/*
* Wait for send to complete
*/
if (cv_sig) {
if (ret != 0)
return (ret);
}
n_writes ++;
}
}
return (RDMA_SUCCESS);
}
/*
* RDMA Read a buffer from the remote address.
*/
{
int cv_sig = 0;
return (RDMA_FAILED);
}
/*
	 * The remote address is in the head chunk item of the list.
*/
/*
* If there are multiple chunks to be read, and
* wait is set, ask for signal only for the last chunk
* and wait only on the last chunk. The completion of
* RDMA_READ on last chunk ensures that reads on all
* previous chunks are also completed.
*/
cv_sig = 1;
} else {
}
}
ibt_status != IBT_SUCCESS) {
(void) rib_free_sendwait(wdesc);
}
return (RDMA_CONNLOST);
}
/*
* Wait for send to complete if this is the
* last item in the list.
*/
if (ret != 0)
return (ret);
}
}
return (RDMA_SUCCESS);
}
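/*
 * Illustrative sketch only of the last-chunk rule in the read path
 * above: request a signaled completion, and wait, only for the final
 * element of the chunk list. Field and flag usage are assumptions.
 */
#if 0
	for (cl = cl_head; cl != NULL; cl = cl->c_next) {
		wdesc->wr.wr_flags = (cl->c_next == NULL && wait) ?
		    IBT_WR_SEND_SIGNAL : IBT_WR_NO_FLAGS;
		/* ... post the RDMA_READ work request for this chunk ... */
	}
#endif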
/*
* rib_srv_cm_handler()
* Connection Manager callback to handle RC connection requests.
*/
/* ARGSUSED */
static ibt_cm_status_t
{
queue_t *q;
int i;
struct sockaddr_in *s;
/* got a connection request */
case IBT_CM_EVENT_REQ_RCV:
/*
* If the plugin is in the NO_ACCEPT state, bail out.
*/
if (plugin_state == NO_ACCEPT) {
return (IBT_CM_REJECT);
}
/*
* Need to send a MRA MAD to CM so that it does not
		 * time out on us.
*/
q = rib_stat->q;
if (status) {
return (IBT_CM_REJECT);
}
/*
		 * Pre-post RECV buffers
*/
for (i = 0; i < preposted_rbufs; i++) {
/*
* A connection is not established yet.
* Just flush the channel. Buffers
* posted till now will error out with
* IBT_WC_WR_FLUSHED_ERR.
*/
return (IBT_CM_REJECT);
}
if (status != RDMA_SUCCESS) {
/*
* A connection is not established yet.
* Just flush the channel. Buffers
* posted till now will error out with
* IBT_WC_WR_FLUSHED_ERR.
*/
return (IBT_CM_REJECT);
}
}
/*
* Get the address translation
*/
return (IBT_CM_REJECT);
}
&ipinfo) != IBT_SUCCESS) {
return (IBT_CM_REJECT);
}
case AF_INET:
KM_SLEEP);
s->sin_family = AF_INET;
s->sin_family = AF_INET;
sizeof (struct sockaddr_in);
((struct sockaddr_in *)
(uint32_t)~0;
((struct sockaddr_in *)
(sa_family_t)~0;
break;
case AF_INET6:
KM_SLEEP);
sizeof (struct in6_addr));
sizeof (struct in6_addr));
sizeof (struct sockaddr_in6);
(void) memset(&((struct sockaddr_in6 *)
sizeof (struct in6_addr));
((struct sockaddr_in6 *)
(sa_family_t)~0;
break;
default:
return (IBT_CM_REJECT);
}
break;
case IBT_CM_EVENT_CONN_CLOSED:
{
case IBT_CM_CLOSED_DREP_RCVD:
case IBT_CM_CLOSED_DUP:
case IBT_CM_CLOSED_ABORT:
case IBT_CM_CLOSED_ALREADY:
/*
* These cases indicate the local end initiated
* the closing of the channel. Nothing to do here.
*/
break;
default:
/*
* Reason for CONN_CLOSED event must be one of
* IBT_CM_CLOSED_DREQ_RCVD or IBT_CM_CLOSED_REJ_RCVD
		 * or IBT_CM_CLOSED_STALE. These indicate cases where
* the remote end is closing the channel. In these
* cases free the channel and transition to error
* state
*/
break;
}
/*
* Free the conn if c_ref goes down to 0
*/
/*
* Remove from list and free conn
*/
(void) rib_disconnect_channel(conn,
&hca->srv_conn_list);
} else {
/*
* conn will be freed when c_ref goes to 0.
* Indicate to cleaning thread not to close
* the connection, but just free the channel.
*/
}
break;
}
break;
}
case IBT_CM_EVENT_CONN_EST:
/*
* RTU received, hence connection established.
*/
if (rib_debug > 1)
"(CONN_EST) channel established");
break;
default:
if (rib_debug > 2) {
/* Let CM handle the following events. */
"server recv'ed IBT_CM_EVENT_REP_RCV\n");
"server recv'ed IBT_CM_EVENT_LAP_RCV\n");
"server recv'ed IBT_CM_EVENT_MRA_RCV\n");
"server recv'ed IBT_CM_EVENT_APR_RCV\n");
"server recv'ed IBT_CM_EVENT_FAILURE\n");
}
}
return (IBT_CM_DEFAULT);
}
/* accept all other CM messages (i.e. let the CM handle them) */
return (IBT_CM_ACCEPT);
}
static rdma_stat
{
/*
* Query all ports for the given HCA
*/
} else {
return (RDMA_FAILED);
}
if (ibt_status != IBT_SUCCESS) {
return (RDMA_FAILED);
}
int, num_ports);
for (i = 0; i < num_ports; i++) {
int, i+1);
int, i+1);
}
}
/*
* Get all the IP addresses on this system to register the
* given "service type" on all DNS recognized IP addrs.
* Each service type such as NFS will have all the systems
* IP addresses as its different names. For now the only
* type of service we support in RPCIB is NFS.
*/
/*
	 * Start registering and binding the service to
	 * active ports on this HCA.
*/
nbinds = 0;
;
/*
* We use IP addresses as the service names for
* service registration. Register each of them
* with CM to obtain a svc_id and svc_hdl. We do not
		 * register the service with the machine's loopback address.
*/
if ((ibt_status != IBT_SUCCESS) &&
(ibt_status != IBT_CM_SERVICE_EXISTS)) {
int, ibt_status);
return (RDMA_FAILED);
}
/*
* Allocate and prepare a service entry
*/
} else {
}
for (i = 0; i < num_ports; i++) {
continue;
gid.gid_prefix) &&
break;
}
/*
			 * port is already bound to the service
*/
int, i+1);
nbinds++;
continue;
}
(pkey != IB_PKEY_INVALID_FULL)) {
if (ibt_status == IBT_SUCCESS) {
sizeof (rib_hca_service_t),
KM_SLEEP);
nbinds++;
}
int, ibt_status);
}
}
}
if (nbinds == 0) {
return (RDMA_FAILED);
} else {
/*
		 * Put this plugin into accept state, since at least
* one registration was successful.
*/
return (RDMA_SUCCESS);
}
}
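/*
 * Illustrative sketch only of the per-address registration described
 * above: each local IP address is registered as a service name with
 * the CM. The argument names here are assumptions.
 */
#if 0
	ibt_status = ibt_register_service(hca->ibt_clnt_hdl, &srv_desc,
	    srv_id, 1, &srv_hdl, &ret_sid);
#endif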
void
{
int n_listening = 0;
/*
* if rd parameter is NULL then it means that rib_stat->q is
* already initialized by a call from RDMA and we just want to
* add a newly attached HCA to the same listening state as other
* HCAs.
*/
return;
}
} else {
}
/*
* First check if a hca is still attached
*/
continue;
}
/*
* Right now the only service type is NFS. Hence
* force feed this value. Ideally to communicate
* the service type it should be passed down in
* rdma_svc_data.
*/
if (status == RDMA_SUCCESS)
n_listening++;
}
/*
	 * Service is active on an HCA; check rd->err_code for more
	 * detailed errors.
*/
if (rd) {
if (n_listening > 0) {
} else {
}
}
}
/* XXXX */
/* ARGSUSED */
static void
{
/*
	 * KRPC called the RDMATF to stop the listeners. This means we
	 * stop sending incoming or received requests to the KRPC master
	 * transport handle for RDMA-IB. It also means that the
	 * master transport handle, responsible for us, is going away.
*/
/*
* First check if a hca is still attached
*/
continue;
}
}
/*
* Avoid rib_listen() using the stale q field.
* This could happen if a port goes up after all services
* are already unregistered.
*/
}
/*
* Traverse the HCA's service list to unbind and deregister services.
* For each bound service of HCA to be removed, first find the corresponding
* service handle (srv_hdl) and then unbind the service by calling
* ibt_unbind_service().
*/
static void
{
/*
* unbind and deregister the services for this service type.
	 * Right now there is only one service type. In the future it will
* be passed down to this function.
*/
;
/*
* if sc is NULL then the service doesn't exist anymore,
* probably just removed completely through rib_stat.
*/
}
}
static struct svc_recv *
{
recvp->bytes_xfer = 0;
return (recvp);
}
static int
{
return (0);
}
static struct reply *
{
return (NULL);
}
rep->bytes_xfer = 0;
}
qp->rep_list_size++;
int, qp->rep_list_size);
return (rep);
}
static rdma_stat
{
struct reply *r, *n;
n = r->next;
(void) rib_remreply(qp, r);
}
return (RDMA_SUCCESS);
}
static int
{
}
}
qp->rep_list_size--;
int, qp->rep_list_size);
return (0);
}
struct mrc *buf_handle)
{
/*
* Note: ALL buffer pools use the same memory type RDMARW.
*/
if (status == RDMA_SUCCESS) {
} else {
buf_handle->mrc_lmr = 0;
buf_handle->mrc_rmr = 0;
}
return (status);
}
static rdma_stat
{
} else {
return (RDMA_FAILED);
}
if (ibt_status != IBT_SUCCESS) {
return (RDMA_FAILED);
}
return (RDMA_SUCCESS);
}
{
rib_lrc_entry_t *l;
/*
* Non-coherent memory registration.
*/
l = (rib_lrc_entry_t *)lrc;
if (l) {
if (l->registered) {
return (RDMA_SUCCESS);
} else {
/* Always register the whole buffer */
}
}
if (status == RDMA_SUCCESS) {
if (l) {
l->registered = TRUE;
}
} else {
buf_handle->mrc_lmr = 0;
buf_handle->mrc_rmr = 0;
}
return (status);
}
/* ARGSUSED */
{
/*
* Allow memory deregistration even if HCA is
* getting detached. Need all outstanding
* memory registrations to be deregistered
* before HCA_DETACH_EVENT can be accepted.
*/
return (RDMA_SUCCESS);
}
/* ARGSUSED */
{
rib_lrc_entry_t *l;
l = (rib_lrc_entry_t *)lrc;
if (l)
if (l->registered)
return (RDMA_SUCCESS);
return (RDMA_SUCCESS);
}
/* ARGSUSED */
{
if (cpu) {
/* make incoming data visible to memory */
} else {
/* make memory changes visible to IO */
}
} else {
return (RDMA_FAILED);
}
if (status == IBT_SUCCESS)
return (RDMA_SUCCESS);
else {
return (RDMA_FAILED);
}
}
/*
* XXXX ????
*/
static rdma_stat
{
/*
* XXXX Hack!
*/
return (RDMA_SUCCESS);
}
{
int i, j;
switch (ptype) {
case SEND_BUFFER:
break;
case RECV_BUFFER:
break;
default:
goto fail;
}
/*
* Register the pool.
*/
sizeof (ibt_mr_hdl_t), KM_SLEEP);
sizeof (ibt_mr_desc_t), KM_SLEEP);
goto fail;
}
if (ibt_status != IBT_SUCCESS) {
for (j = 0; j < i; j++) {
}
goto fail;
}
}
}
return (rbp);
fail:
if (bp) {
}
if (rbp) {
}
return (NULL);
}
static void
{
int i;
/*
* Obtain pool address based on type of pool
*/
switch (ptype) {
case SEND_BUFFER:
break;
case RECV_BUFFER:
break;
default:
return;
}
return;
/*
* Deregister the pool memory and free it.
*/
}
}
static void
{
/*
* Obtain pool address based on type of pool
*/
switch (ptype) {
case SEND_BUFFER:
break;
case RECV_BUFFER:
break;
default:
return;
}
return;
/*
* Free the pool memory.
*/
}
void
{
/*
* Deregister the pool memory and free it.
*/
}
/*
* Fetch a buffer from the pool of type specified in rdbuf->type.
*/
static rdma_stat
{
return (RDMA_SUCCESS);
}
case SEND_BUFFER:
break;
case RECV_BUFFER:
break;
default:
}
return (RDMA_SUCCESS);
} else
return (RDMA_FAILED);
}
/*
* Fetch a buffer of specified type.
* Note that rdbuf->handle is mw's rkey.
*/
static void *
{
void *buf;
int i;
/*
* Obtain pool address based on type of pool
*/
switch (ptype) {
case SEND_BUFFER:
break;
case RECV_BUFFER:
break;
default:
return (NULL);
}
return (NULL);
return (NULL);
}
/* XXXX put buf, rdbuf->handle.mrc_rmr, ... in one place. */
return (buf);
}
}
return (NULL);
}
static void
{
return;
}
}
static void
{
/*
* Obtain pool address based on type of pool
*/
switch (ptype) {
case SEND_BUFFER:
break;
case RECV_BUFFER:
break;
default:
return;
}
return;
/*
* Should never happen
*/
} else {
}
}
static rdma_stat
{
}
return (RDMA_SUCCESS);
}
static rdma_stat
{
}
}
return (RDMA_SUCCESS);
}
/* ARGSUSED */
static rdma_stat
{
return (status);
}
/*
* rib_find_hca_connection
*
* if there is an existing connection to the specified address then
* it will be returned in conn, otherwise conn will be set to NULL.
* Also cleans up any connection that is in error state.
*/
static int
{
/*
* First, clear up any connection in the ERROR state
*/
/*
* Remove connection from list and destroy it.
*/
rib_conn_close((void *)cn);
goto again;
}
continue;
}
continue;
}
/*
			 * The source address is only checked if there is one;
			 * this is the case for retries.
*/
/*
* Our connection. Give up conn list lock
* as we are done traversing the list.
*/
return (RDMA_SUCCESS);
}
/*
* Hold a reference to this conn before
* we give up the lock.
*/
timout = ddi_get_lbolt() +
;
if (cv_stat == 0) {
(void) rib_conn_release_locked(cn);
return (RDMA_INTR);
}
if (cv_stat < 0) {
(void) rib_conn_release_locked(cn);
return (RDMA_TIMEDOUT);
}
return (RDMA_SUCCESS);
} else {
(void) rib_conn_release_locked(cn);
return (RDMA_TIMEDOUT);
}
}
}
}
return (RDMA_FAILED);
}
/*
* Connection management.
 * IBTF does not support recycling of channels. So connections are only
 * ever in one of four states - C_CONN_PEND, C_CONNECTED, C_ERROR_CONN or
 * C_DISCONN_PEND. There is no C_IDLE state.
* C_CONN_PEND state: Connection establishment in progress to the server.
* C_CONNECTED state: A connection when created is in C_CONNECTED state.
* It has an RC channel associated with it. ibt_post_send/recv are allowed
* only in this state.
* C_ERROR_CONN state: A connection transitions to this state when WRs on the
* channel are completed in error or an IBT_CM_EVENT_CONN_CLOSED event
 * happens on the channel or an IBT_HCA_DETACH_EVENT occurs on the HCA.
* C_DISCONN_PEND state: When a connection is in C_ERROR_CONN state and when
* c_ref drops to 0 (this indicates that RPC has no more references to this
* connection), the connection should be destroyed. A connection transitions
* into this state when it is being destroyed.
*/
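/*
 * Illustrative sketch only: the legal transitions described above
 * collected in one helper. conn_state_t stands in for the driver's
 * actual connection-state type.
 */
#if 0
static conn_state_t
example_next_state(conn_state_t cur, boolean_t error, uint_t c_ref)
{
	switch (cur) {
	case C_CONN_PEND:	/* connect to server in progress */
		return (error ? C_ERROR_CONN : C_CONNECTED);
	case C_CONNECTED:	/* posts allowed only in this state */
		return (error ? C_ERROR_CONN : C_CONNECTED);
	case C_ERROR_CONN:	/* WR error, CONN_CLOSED or HCA detach */
		return ((c_ref == 0) ? C_DISCONN_PEND : C_ERROR_CONN);
	default:		/* C_DISCONN_PEND: being destroyed */
		return (cur);
	}
}
#endif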
/* ARGSUSED */
static rdma_stat
{
int status;
int s_addr_len;
char *s_addr_buf;
return (status);
}
} else
}
/*
* No existing connection found, establish a new connection.
*/
if (status != RDMA_SUCCESS) {
return (RDMA_FAILED);
}
} else {
return (RDMA_FAILED);
}
/*
* Channel to server doesn't exist yet, create one.
*/
return (RDMA_FAILED);
}
sizeof (struct sockaddr_in);
(uint32_t)~0;
(ushort_t)~0;
} else {
sizeof (struct sockaddr_in6);
(void) memset(
(sa_family_t)~0;
}
/*
* Add to conn list.
* We had given up the READER lock. In the time since then,
* another thread might have created the connection we are
	 * trying here. But for now, that is quite all right - there
* might be two connections between a pair of hosts instead
* of one. If we really want to close that window,
* then need to check the list after acquiring the
* WRITER lock.
*/
/*
* This handles a case where the module or
* HCA detached in the time a connection is
* established. In such a case close the
* connection immediately if this is the
* only reference.
*/
rib_conn_close((void *)cn);
return (RDMA_FAILED);
}
/*
* Connection to be closed later when c_ref = 0
*/
}
if (status == RDMA_SUCCESS) {
} else {
}
return (status);
}
static void
{
/*
* Live connection in CONNECTED state.
*/
}
}
(void) rib_disconnect_channel(conn,
else
(void) rib_disconnect_channel(conn,
}
static void
{
int error;
return;
}
if ((idle_time <= rib_conn_timeout) &&
/*
* There was activity after the last timeout.
		 * Extend the conn life, unless the conn is
		 * already in the error state.
*/
return;
}
(void *)conn, DDI_NOSLEEP);
/*
* If taskq dispatch fails above, then reset the timeout
* to try again after 10 secs.
*/
if (error != DDI_SUCCESS) {
return;
}
}
static rdma_stat
{
return (rib_conn_release_locked(conn));
}
/*
* Expects conn->c_lock to be held on entry.
* c_lock released on return
*/
static rdma_stat
{
return (RDMA_SUCCESS);
}
/*
* If a conn is C_ERROR_CONN, close the channel.
*/
rib_conn_close((void *)conn);
return (RDMA_SUCCESS);
}
/*
* c_ref == 0, set a timeout for conn release
*/
}
return (RDMA_SUCCESS);
}
/*
* Add at front of list
*/
static struct rdma_done_list *
{
return (rd);
}
static void
{
struct rdma_done_list *r;
if (r != NULL) {
}
if (r != NULL) {
} else {
}
}
static void
{
struct rdma_done_list *r, *n;
n = r->next;
rdma_done_rm(qp, r);
}
}
static void
{
while (r) {
cv_signal(&r->rdma_done_cv);
return;
} else {
r = r->next;
}
}
int, xid);
}
/*
* Expects conn->c_lock to be held by the caller.
*/
static void
{
/* channel already freed */
return;
}
/*
* Call ibt_close_rc_channel in blocking mode
* with no callbacks.
*/
}
/*
* Goes through all connections and closes the channel
* This will cause all the WRs on those channels to be
* flushed.
*/
static void
{
goto next;
}
/*
* Live connection in CONNECTED state.
*/
/* Signal a pending rib_disconnect_channel() */
}
next:
}
}
/*
* Frees up all connections that are no longer being referenced
*/
static void
{
top:
/*
* At this point connection is either in ERROR
* or DISCONN_PEND state. If in DISCONN_PEND state
* then some other thread is culling that connection.
* If not and if c_ref is 0, then destroy the connection.
*/
/*
* Cull the connection
*/
goto top;
} else {
/*
* conn disconnect already scheduled or will
* happen from conn_release when c_ref drops to 0.
*/
}
}
/*
* At this point, only connections with c_ref != 0 are on the list
*/
}
/*
* Free all the HCA resources and close
* the hca.
*/
static void
{
if (rib_mod.rdma_count == 0)
(void) rdma_unregister_mod(&rib_mod);
}
static void
{
}
/*
* conn_lists are NULL, so destroy
* buffers, close hca and be done.
*/
}
}
}
}
/*
* Cleans and closes up all uses of the HCA
*/
static void
{
/*
* Mark as detached and remove from
* hca list.
*/
rib_stat->nhca_inited--;
break;
}
}
return;
/*
* Stop all services on the HCA
* Go through cl_conn_list and close all rc_channels
* Go through svr_conn_list and close all rc_channels
* Free connections whose c_ref has dropped to 0
* Destroy all CQs
* Deregister and released all buffer pool memory after all
* connections are destroyed
* Free the protection domain
* ibt_close_hca()
*/
}
static void
{
if (rb->registered)
(void) rib_deregistermem_via_hca(hca,
}
}
}
static void
{
return;
}
if (rb->registered)
(void) rib_deregistermem_via_hca(hca,
}
if (hca->server_side_cache) {
}
return;
}
}
}
static int
{
return (0);
return (-1);
return (1);
}
static void
{
rib_server_side_cache_reclaim((void *)hca);
if (hca->server_side_cache) {
}
}
}
static void
{
(void) ddi_taskq_dispatch(
(void *)hca, DDI_NOSLEEP);
}
static rib_lrc_entry_t *
{
goto error_alloc;
/* Am I above the cache limit */
rib_force_cleanup((void *)hca);
/* Allocate and register the buffer directly */
goto error_alloc;
}
/* Recheck to make sure no other thread added the entry in */
/* Allocate an avl tree entry */
rcas = (cache_avl_struct_t *)
}
}
hca->cache_hits++;
} else {
/* Am I above the cache limit */
rib_force_cleanup((void *)hca);
/* Allocate and register the buffer directly */
goto error_alloc;
}
hca->cache_misses++;
/* Allocate a reply_buf entry */
reply_buf = (rib_lrc_entry_t *)
}
return (reply_buf);
reply_buf = (rib_lrc_entry_t *)
return (reply_buf);
}
/*
 * Return a pre-registered buffer back to the cache (without
 * unregistering the buffer).
*/
static void
{
goto error_free;
if ((rcas = (cache_avl_struct_t *)
goto error_free;
} else {
}
return;
if (reg_buf->registered)
(void) rib_deregistermem_via_hca(hca,
}
static rdma_stat
{
/*
* Note: ALL buffer pools use the same memory type RDMARW.
*/
if (status == RDMA_SUCCESS) {
} else {
buf_handle->mrc_lmr = 0;
buf_handle->mrc_rmr = 0;
}
return (status);
}
/* ARGSUSED */
static rdma_stat
{
return (RDMA_SUCCESS);
}
/* ARGSUSED */
static rdma_stat
{
return (RDMA_SUCCESS);
}
/*
* Check if the IP interface named by `lifrp' is RDMA-capable.
*/
static boolean_t
{
char *cp;
return (B_TRUE);
/*
* Strip off the logical interface portion before getting
* intimate with the name.
*/
*cp = '\0';
}
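/*
 * Illustrative sketch only of the stripping described above: cut the
 * ":n" logical-unit suffix from a copy of the lifreq name before
 * examining it. The local buffer name is an assumption.
 */
#if 0
	(void) strlcpy(ifname, lifrp->lifr_name, LIFNAMSIZ);
	if ((cp = strchr(ifname, ':')) != NULL)
		*cp = '\0';
#endif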
static int
{
int err = 0;
} else {
return (EPROTO);
}
} else {
return (EPROTO);
}
return (err);
}
/*
* Issue an SIOCGLIFCONF down to IP and return the result in `lifcp'.
* lifcp->lifc_buf is dynamically allocated to be *bufsizep bytes.
*/
static int
{
int err;
if (err != 0)
return (err);
/*
* Pad the interface count to account for additional interfaces that
* may have been configured between the SIOCGLIFNUM and SIOCGLIFCONF.
*/
if (err != 0) {
return (err);
}
return (0);
}
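/*
 * Illustrative sketch only of the padding noted above: allow a few
 * extra lifreq slots so interfaces plumbed between the SIOCGLIFNUM
 * and SIOCGLIFCONF ioctls still fit. The pad of 4 is an assumption.
 */
#if 0
	lifn.lifn_count += 4;
	*bufsizep = lifn.lifn_count * sizeof (struct lifreq);
	lifcp->lifc_buf = kmem_zalloc(*bufsizep, KM_SLEEP);
#endif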
static boolean_t
{
return (B_FALSE);
return (B_FALSE);
}
/*
* Worst case is that all of the addresses are IB-capable and have
* the same address family, so size our buffers accordingly.
*/
continue;
sizeof (struct sockaddr_in));
sizeof (struct sockaddr_in6));
}
}
return (B_TRUE);
}
/* ARGSUSED */
static int
{
if (KSTAT_WRITE == rw) {
return (EACCES);
}
}
return (0);
}