/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
/* Copyright (c) 2013, OmniTI Computer Consulting, Inc. All rights reserved. */
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/sysmacros.h>
#include <sys/socketvar.h>
#include <sys/isa_defs.h>
#include <sys/inttypes.h>
#include <sys/sendfile.h>
#ifdef SOCK_TEST
#else
#endif /* SOCK_TEST */
extern int xnet_truncate_print;
extern void nl7c_init(void);
extern int sockfs_defer_nl7c_init;
/*
 * Note: there is no formal definition of IOV_MAX, hence the local limit.
*/
/*
* Kernel component of socket creation.
*
* The socket library determines which version number to use.
 * First the library calls this with a NULL devpath. If this fails
 * to find a transport, the library will look in /etc/netconfig
 * for the appropriate transport. If one is found it will pass in the
 * devpath for the kernel to use.
*/
int
int version)
{
int fd;
int error;
int type;
char *buf;
MAXPATHLEN, &kdevpathlen)) != 0) {
}
} else {
}
/* Allocate a file descriptor for the socket */
}
/*
* Now fill in the entries that falloc reserved
*/
if (type_w_flags & SOCK_NDELAY) {
}
if (type_w_flags & SOCK_NONBLOCK) {
}
if ((type_w_flags & SOCK_CLOEXEC) != 0) {
}
return (fd);
}
/*
* Map from a file descriptor to a socket node.
* Returns with the file descriptor held i.e. the caller has to
* use releasef when done with the file descriptor.
*/
struct sonode *
{
eprintline(*errorp);
return (NULL);
}
/* Check if it is a socket */
eprintline(*errorp);
return (NULL);
}
/*
* Use the stream head to find the real socket vnode.
* This is needed when namefs sits above sockfs.
*/
return (NULL);
}
} else {
}
if (fpp)
return (so);
}
/*
* Allocate and copyin a sockaddr.
* Ensures NULL termination for AF_UNIX addresses by extending them
* with one NULL byte if need be. Verifies that the length is not
* excessive to prevent an application from consuming all of kernel
* memory. Returns NULL when an error occurred.
*/
static struct sockaddr *
int *errorp)
{
char *faddr;
if (namelen > SO_MAXARGSIZE) {
return (NULL);
}
return (NULL);
}
/*
* Add space for NULL termination if needed.
* Do a quick check if the last byte is NUL.
*/
/* Check if there is any NULL termination */
size_t i;
int foundnull = 0;
if (faddr[i] == '\0') {
foundnull = 1;
break;
}
}
if (!foundnull) {
/* Add extra byte for NUL padding */
char *nfaddr;
/* NUL terminate */
namelen++;
}
}
}
/*
*/
static int
{
if (ulen != 0) {
return (EFAULT);
}
} else
ulen = 0;
return (EFAULT);
}
return (0);
}
/*
* If klen is greater than ulen it still uses the non-truncated
* klen to update ulenp.
*/
static int
{
else if (ulen != 0 && xnet_truncate_print) {
printf("sockfs: truncating copyout of address using "
"XNET semantics for pid = %d. Lengths %d, %d\n",
}
if (ulen != 0) {
return (EFAULT);
} else
klen = 0;
} else
klen = 0;
return (EFAULT);
}
return (0);
}
/*
 * The socketpair() code in libsocket creates two sockets (using
 * the socket system call) and then calls this routine
 * to connect the two sockets together.
*
* For a SOCK_STREAM socketpair a listener is needed - in that case this
* routine will create a new file descriptor as part of accepting the
* connection. The library socketpair() will check if svs[2] has changed
* in which case it will close the changed fd.
*
* Note that this code could use the TPI feature of accepting the connection
* on the listening endpoint. However, that would require significant changes
* to soaccept.
*/
int
{
int error;
int orig_flags;
if (error && do_useracc)
}
error = EOPNOTSUPP;
goto done;
}
/*
* The code below makes assumptions about the "sockfs" implementation.
* So make sure that the correct implementation is really used.
*/
/*
* Bind both sockets and connect them with each other.
*/
if (error) {
goto done;
}
if (error) {
goto done;
}
namelen = sizeof (struct sockaddr_ux);
0, _SOCONNECT_NOXLATE, CRED());
if (error) {
goto done;
}
0, _SOCONNECT_NOXLATE, CRED());
if (error) {
goto done;
}
} else {
/*
* Bind both sockets, with so1 being a listener.
* Connect so2 to so1 - nonblocking to avoid waiting for
* soaccept to complete.
* Accept a connection on so1. Pass out the new fd as sv[0].
* The library will detect the changed fd and close
* the original one.
*/
int nfd;
/*
* We could simply call socket_listen() here (which would do the
* binding automatically) if the code didn't rely on passing
* _SOBIND_NOXLATE to the TPI implementation of socket_bind().
*/
CRED());
if (error) {
goto done;
}
if (error) {
goto done;
}
namelen = sizeof (struct sockaddr_ux);
if (error) {
if (error != EINPROGRESS) {
}
}
if (error) {
goto done;
}
/* wait for so2 being SS_CONNECTED ignoring signals */
if (error != 0) {
goto done;
}
goto done;
}
/*
* copy over FNONBLOCK and FNDELAY flags should they exist
*/
/*
* fill in the entries that falloc reserved
*/
/*
* get the original flags before we release
*/
/*
* If FD_CLOEXEC was set on the filedescriptor we're
* swapping out, we should set it on the new one too.
*/
if (orig_flags & FD_CLOEXEC) {
}
/*
* The socketpair library routine will close the original
* svs[0] when this code passes out a different file
* descriptor.
*/
}
}
return (0);
done:
}
int
{
int error;
/* Allocate and copyin name */
/*
* namelen.
*/
}
} else {
namelen = 0;
}
switch (version) {
default:
break;
case SOV_XPG4_2:
break;
case SOV_SOCKBSD:
break;
}
done:
if (error)
return (0);
}
/* ARGSUSED2 */
int
{
int error;
if (error)
return (0);
}
/*ARGSUSED3*/
int
int flags)
{
int error;
int nfd;
int ssflags;
}
/* Translate SOCK_ flags to their SS_ variant */
ssflags = 0;
if (flags & SOCK_NONBLOCK)
ssflags |= SS_NONBLOCK;
if (flags & SOCK_NDELAY)
}
if (namelen != 0) {
if (error && do_useracc) {
}
} else
} else {
namelen = 0;
}
/*
* Allocate the user fd before socket_accept() in order to
* catch EMFILE errors before calling socket_accept().
*/
}
if (error) {
}
if (namelen != 0) {
} else {
}
}
if (error) {
}
}
/*
* fill in the entries that falloc reserved
*/
/*
* Act on SOCK_CLOEXEC from flags
*/
if (flags & SOCK_CLOEXEC) {
}
/*
* Copy FNDELAY and FNONBLOCK from listener to acceptor
* and from ssflags
*/
int arg = 0;
/*
* This code is a simplification of the F_SETFL code in fcntl()
* Ignore any errors from VOP_SETFL.
*/
!= 0) {
error = 0;
} else {
}
}
return (nfd);
}
int
{
int error;
/* Allocate and copyin name */
if (namelen != 0) {
}
} else
if (name)
if (error)
return (0);
}
/*ARGSUSED2*/
int
{
int error;
if (error)
return (0);
}
/*
* Common receive routine.
*/
static ssize_t
int flags,
int *flagsp)
{
void *name;
void *control;
int error;
if (error) {
}
if (error)
goto err;
/*
* Clear internal flag.
*/
/*
* Determine MSG_CTRUNC. sorecvmsg sets MSG_CTRUNC only
* when controllen is zero and there is control data to
* copy out.
*/
if (controllen != 0 &&
}
goto err;
}
}
/*
* Note: This MUST be done last. There can be no "goto err" after this
* point since it could make so_closefds run twice on some part
* of the file descriptor array.
*/
if (controllen != 0) {
if (!(flags & MSG_XPG4_2)) {
/*
* Good old msg_accrights can only return a multiple
* of 4 bytes.
*/
}
if (error)
goto err;
controllen = 0;
}
}
if (msg->msg_namelen != 0)
if (msg->msg_controllen != 0)
err:
/*
* If we fail and the control part contains file descriptors
* we have to close the fd's.
*/
if (msg->msg_controllen != 0)
!(flags & MSG_XPG4_2), 0);
if (msg->msg_namelen != 0)
if (msg->msg_controllen != 0)
}
/*
* Native system call
*/
{
}
auio.uio_loffset = 0;
lmsg.msg_namelen = 0;
lmsg.msg_controllen = 0;
}
{
}
auio.uio_loffset = 0;
sizeof (lmsg.msg_namelen)))
} else {
lmsg.msg_namelen = 0;
}
lmsg.msg_controllen = 0;
}
/*
* Uses the MSG_XPG4_2 flag to determine if the caller is using
* struct omsghdr or struct nmsghdr.
*/
{
int iovcnt;
int i;
int *flagsp;
model = get_udatamodel();
if (flags & MSG_XPG4_2) {
} else {
/*
* Assumes that nmsghdr and omsghdr are identically shaped
* except for the added msg_flags field.
*/
}
/*
* Code below us will kmem_alloc memory and hang it
* off msg_control and msg_name fields. This forces
* us to copy the structure to its native form.
*/
}
#ifdef _SYSCALL32_IMPL
/*
* 32-bit callers need to have their iovec expanded, while ensuring
* that they can't move more than 2Gbytes of data in a single call.
*/
if (model == DATAMODEL_ILP32) {
count32 = 0;
for (i = 0; i < iovcnt; i++) {
}
} else
#endif /* _SYSCALL32_IMPL */
}
len = 0;
for (i = 0; i < iovcnt; i++) {
}
}
auio.uio_loffset = 0;
(do_useracc == 0 ||
B_WRITE) != 0)) {
}
}
/*
* Common send function.
*/
static ssize_t
{
void *name;
void *control;
int error;
else
/* Allocate and copyin name and control */
goto done3;
/* copyin_name null terminates addresses for AF_UNIX */
} else {
}
/*
* Verify that the length is not excessive to prevent
* an application from consuming all of kernel memory.
*/
if (controllen > SO_MAXARGSIZE) {
goto done2;
}
goto done1;
}
} else {
}
if (error != 0) {
}
}
/*
* Native system call
*/
{
}
auio.uio_loffset = 0;
if (!(flags & MSG_XPG4_2)) {
/*
* implementation we set EOR for all send* calls.
*/
}
}
/*
* Uses the MSG_XPG4_2 flag to determine if the caller is using
* struct omsghdr or struct nmsghdr.
*/
{
int iovcnt;
int i;
model = get_udatamodel();
if (flags & MSG_XPG4_2) {
} else {
/*
* Assumes that nmsghdr and omsghdr are identically shaped
* except for the added msg_flags field.
*/
/*
* implementation we set EOR for all send* calls.
*/
}
/*
* Code below us will kmem_alloc memory and hang it
* off msg_control and msg_name fields. This forces
* us to copy the structure to its native form.
*/
/*
* Unless this is XPG 4.2 we allow iovcnt == 0 to
* be compatible with SunOS 4.X and 4.4BSD.
*/
}
#ifdef _SYSCALL32_IMPL
/*
* 32-bit callers need to have their iovec expanded, while ensuring
* that they can't move more than 2Gbytes of data in a single call.
*/
if (model == DATAMODEL_ILP32) {
if (iovcnt != 0 &&
count32 = 0;
for (i = 0; i < iovcnt; i++) {
}
} else
#endif /* _SYSCALL32_IMPL */
if (iovcnt != 0 &&
}
len = 0;
for (i = 0; i < iovcnt; i++) {
}
}
auio.uio_loffset = 0;
}
{
}
auio.uio_loffset = 0;
if (!(flags & MSG_XPG4_2)) {
/*
* implementation we set EOR for all send* calls.
*/
}
}
/*ARGSUSED3*/
int
{
int error;
goto bad;
goto rel_out;
}
(void *)sock_addrp, sock_addrlen);
}
}
/*ARGSUSED3*/
int
{
int error;
goto bad;
goto rel_out;
}
CRED())) == 0) {
(void *)sock_addrp, sock_addrlen);
}
}
/*ARGSUSED5*/
int
int level,
int option_name,
void *option_value,
int version)
{
void *optval;
int error;
}
/*
* Verify that the length is not excessive to prevent
* an application from consuming all of kernel memory.
*/
if (optlen > SO_MAXARGSIZE) {
}
optlen_res = optlen;
CRED());
if (error) {
}
optval, optlen_res);
if (error)
return (0);
}
/*ARGSUSED5*/
int
int level,
int option_name,
void *option_value,
int version)
{
int error;
if (option_value != NULL) {
if (option_len != 0) {
/*
* Verify that the length is not excessive to prevent
* an application from consuming all of kernel memory.
*/
if (option_len > SO_MAXARGSIZE) {
goto done2;
}
goto done1;
}
}
} else
option_len = 0;
if (error)
return (0);
}
static int
{
int error = 0;
return (EINVAL);
/*
* Copyin the name.
* This also makes it possible to check for too long pathnames.
* Compress the space needed for the name before passing it
* to soconfig - soconfig will store the string until
* the configuration is removed.
*/
return (error);
}
/* For device */
/*
* Special handling for NCA:
*
* DEV_NCA is never opened even if an application
* requests for AF_NCA. The device opened is instead a
* predefined AF_INET transport (NCA_INET_DEV).
*
* Prior to Volo (PSARC/2007/587) NCA would determine
* the device using a lookup, which worked then because
* all protocols were based on TPI. Since TPI is no
* longer the default, we have to explicitly state
* which device to use.
*/
/* only support entry <28, 2, 0> */
protocol != 0) {
return (EINVAL);
}
} else {
}
} else {
/* For socket module */
pathlen = 0;
}
/* sockparams_create frees mod name and devpath upon failure */
if (error != 0)
}
return (error);
}
static int
{
}
static int
{
int error;
return (error);
return (ENXIO);
if (ent->sofe_refcnt == 0) {
} else {
/* let the last socket free the filter */
}
return (0);
}
static int
{
int error;
&len)) != 0) {
return (error);
}
if (get_udatamodel() == DATAMODEL_NATIVE) {
return (EFAULT);
}
}
#ifdef _SYSCALL32_IMPL
else {
return (EFAULT);
}
}
#endif /* _SYSCALL32_IMPL */
return (error);
}
/*
* A filter must specify at least one socket tuple.
*/
if (filprop.sfp_socktuple_cnt == 0 ||
return (EINVAL);
}
/*
* Verify the hint, and copy in the hint argument, if necessary.
*/
case SOF_HINT_BEFORE:
case SOF_HINT_AFTER:
return (error);
}
/* FALLTHRU */
case SOF_HINT_TOP:
case SOF_HINT_BOTTOM:
/* hints cannot be used with programmatic filters */
return (EINVAL);
}
break;
case SOF_HINT_NONE:
break;
default:
/* bad hint value */
return (EINVAL);
}
if (get_udatamodel() == DATAMODEL_NATIVE) {
tuplesz)) {
return (EFAULT);
}
}
#ifdef _SYSCALL32_IMPL
else {
int i;
return (EFAULT);
}
}
}
#endif /* _SYSCALL32_IMPL */
/* Sockets can start using the filter as soon as the filter is added */
return (error);
}
/*
* Socket configuration system call. It is used to add and remove
* socket types.
*/
int
{
int error = 0;
if (sockfs_defer_nl7c_init) {
nl7c_init();
}
switch (cmd) {
case SOCKCONFIG_ADD_SOCK:
break;
case SOCKCONFIG_REMOVE_SOCK:
break;
case SOCKCONFIG_ADD_FILTER:
break;
case SOCKCONFIG_REMOVE_FILTER:
break;
case SOCKCONFIG_GET_SOCKTABLE:
break;
default:
#ifdef DEBUG
#endif
break;
}
if (error != 0) {
}
return (0);
}
/*
* Sendfile is implemented through two schemes, direct I/O or by
* caching in the filesystem page cache. We cache the input file by
* default and use direct I/O only if sendfile_max_size is set
* appropriately as explained below. Note that this logic is consistent
* with other filesystems where caching is turned on by default
* unless explicitly turned off by using the DIRECTIO ioctl.
*
* We choose a slightly different scheme here. One can turn off
* caching by setting sendfile_max_size to 0. One can also enable
* caching of files <= sendfile_max_size by setting sendfile_max_size
* to an appropriate value. By default sendfile_max_size is set to the
* maximum value so that all files are cached. In future, we may provide
* better interfaces for caching the file.
*
* Sendfile through Direct I/O (Zero copy)
* --------------------------------------
*
* As disks are normally slower than the network, we can't have a
* single thread that reads the disk and writes to the network. We
* need to have parallelism. This is done by having the sendfile
* thread create another thread that reads from the filesystem
* and queues it for network processing. In this scheme, the data
* is never copied anywhere i.e it is zero copy unlike the other
* scheme.
*
* We have a sendfile queue (snfq) where each sendfile
* request (snf_req_t) is queued for processing by a thread. Number
* of threads is dynamically allocated and they exit if they are idling
* beyond a specified amount of time. When each request (snf_req_t) is
* processed by a thread, it produces a number of mblk_t structures to
* be consumed by the sendfile thread. snf_deque and snf_enque are
* used for consuming and producing mblks. Size of the filesystem
* read is determined by the tunable (sendfile_read_size). A single
* mblk holds sendfile_read_size worth of data (except the last
* read of the file) which is sent down as a whole to the network.
* sendfile_read_size is set to 1 MB as this seems to be the optimal
* value for the UFS filesystem backed by a striped storage array.
*
* Synchronisation between read (producer) and write (consumer) threads.
* --------------------------------------------------------------------
*
* sr_lock protects sr_ib_head and sr_ib_tail. The lock is held while
* adding and deleting items in this list. Error can happen anytime
* during read or write. There could be unprocessed mblks in the
* sr_ib_XXX list when a read or write error occurs. Whenever error
* is encountered, we need two things to happen :
*
* a) One of the threads need to clean the mblks.
* b) When one thread encounters an error, the other should stop.
*
* For (a), we don't want to penalize the reader thread as it could do
* some useful work processing other requests. For (b), the error can
* be detected by examining sr_read_error or sr_write_error.
* sr_lock protects sr_read_error and sr_write_error. If both reader and
* writer encounters error, we need to report the write error back to
* the application as that's what would have happened if the operations
* were done sequentially. With this in mind, following should work :
*
* - Check for errors before read or write.
* - If the reader encounters error, set the error in sr_read_error.
* Check sr_write_error, if it is set, send cv_signal as it is
* waiting for reader to complete. If it is not set, the writer
* is either running sinking data to the network or blocked
* because of flow control. For handling the latter case, we
* always send a signal. In any case, it will examine sr_read_error
* and return. sr_read_error is marked with SR_READ_DONE to tell
* the writer that the reader is done in all the cases.
* - If the writer encounters error, set the error in sr_write_error.
* The reader thread is either blocked because of flow control or
* running reading data from the disk. For the former, we need to
* wakeup the thread. Again to keep it simple, we always wake up
* the reader thread. Then, wait for the read thread to complete
* if it is not done yet. Cleanup and return.
*
* High and low water marks for the read thread.
* --------------------------------------------
*
* If sendfile() is used to send data over a slow network, we need to
* make sure that the read thread does not produce data at a faster
* rate than the network. This can happen if the disk is faster than
* the network. In such a case, we don't want to build a very large queue.
* But we would still like to get all of the network throughput possible.
* This implies that network should never block waiting for data.
* As there are lot of disk throughput/network throughput combinations
* possible, it is difficult to come up with an accurate number.
* A typical 10K RPM disk has a max seek latency 17ms and rotational
* latency of 3ms for reading a disk block. Thus, the total latency to
* initiate a new read, transfer data from the disk and queue for
 * transmission would take about a max of 25ms. Todays max transfer rate
 * for a network is on the order of 100MB/sec; if the read thread is blocked
 * by flow control, it would take 25ms to get new data ready for transmission.
 * We have to make sure that network is not idling, while we are initiating
 * new reads; during those 25ms the network could drain roughly
 * 100MB/sec * 25ms = 2.5MB of data. Rounding off, we keep the low water
 * mark to be 3MB of data.
* We need to pick a high water mark so that the woken up thread would
* do considerable work before blocking again to prevent thrashing. Currently,
* we pick this to be 10 times that of the low water mark.
*
* Sendfile with segmap caching (One copy from page cache to mblks).
* ----------------------------------------------------------------
*
* We use the segmap cache for caching the file, if the size of file
* is <= sendfile_max_size. In this case we don't use threads as VM
* is reasonably fast enough to keep up with the network. If the underlying
* transport allows, we call segmap_getmapflt() to map MAXBSIZE (8K) worth
* of data into segmap space, and use the virtual address from segmap
* directly through desballoc() to avoid copy. Once the transport is done
* with the data, the mapping will be released through segmap_release()
* called by the call-back routine.
*
* If zero-copy is not allowed by the transport, we simply call VOP_READ()
* to copy the data from the filesystem into our temporary network buffer.
*
* To disable caching, set sendfile_max_size to 0.
*/
/*
 * One-time initialization for the sendfile subsystem.
 *
 * NOTE(review): the body appears elided in this copy of the file — only the
 * "Cache all files by default" comment survives. The complete source
 * presumably also sets sendfile_max_size (per the comment block above, the
 * default is "maximum value so that all files are cached") and initializes
 * the sendfile request queue (snfq) used by snf_async_thread; confirm
 * against the full file before relying on this stub.
 */
void
sendfile_init(void)
{
/* Cache all files by default. */
}
/*
* Queues a mblk_t for network processing.
*/
static void
{
} else {
}
(sr->sr_write_error == 0)) {
}
}
/*
* De-queues a mblk_t for network processing.
*/
static mblk_t *
{
/*
* If we have encountered an error on read or read is
* completed and no more mblks, return NULL.
* We need to check for NULL sr_mp_head also as
* the reads could have completed and there is
* nothing more to come.
*/
return (NULL);
}
/*
* To start with neither SR_READ_DONE is marked nor
* the error is set. When we wake up from cv_wait,
* following are the possibilities :
*
* a) sr_read_error is zero and mblks are queued.
* b) sr_read_error is set to SR_READ_DONE
* and mblks are queued.
* c) sr_read_error is set to SR_READ_DONE
* and no mblks.
* d) sr_read_error is set to some error other
* than SR_READ_DONE.
*/
}
/* Handle (a) and (b) first - the normal case. */
return (mp);
}
/* Handle (c) and (d). */
return (NULL);
}
/*
* Reads data from the filesystem and queues it for network processing.
*/
void
{
int ret_size;
int error;
int extra = 0;
int maxblk = 0;
int wroff = 0;
/*
* Ignore the error for filesystems that doesn't support DIRECTIO.
*/
/*
* Get the extra space to insert a header and a trailer.
*/
} else {
}
}
/*
* Socket filters can limit the mblk size,
* so limit reads to maxblk if there are
* filters present.
*/
if (is_system_labeled()) {
} else {
}
break;
}
/* Error or Reached EOF ? */
break;
}
}
}
void
snf_async_thread(void)
{
for (;;) {
/*
* If we didn't find a entry, then block until woken up
* again and then look through the queues again.
*/
if (time_left <= 0) {
snfq->snfq_svc_threads--;
thread_exit();
/* NOTREACHED */
}
snfq->snfq_idle_cnt++;
snfq->snfq_idle_cnt--;
}
snfq->snfq_req_cnt--;
}
}
{
/*
* store sd_qn_maxpsz into sr_maxpsz while we have stream head.
* stream might be closed before thread returns from snf_async_read.
*/
} else {
}
/*
* See whether we need another thread for servicing this
* request. If there are already enough requests queued
* for the threads, create one if not exceeding
* snfq_max_threads.
*/
snfq->snfq_svc_threads++;
}
} else {
}
snfq->snfq_req_cnt++;
return (sr);
}
int
{
int iosize;
int error = 0;
short fflag;
int ksize;
ksize = 0;
*count = 0;
return (EAGAIN);
/*
* We check for read error in snf_deque. It has to check
* for successful READ_DONE and return NULL, and we might
* as well make an additional check there.
*/
break;
}
if (error != 0) {
break;
}
}
/* Look at the big comments on why we cv_signal here. */
/* Wait for the reader to complete always. */
}
/* If there is no write error, check for read error. */
if (error == 0)
if (error != 0) {
}
}
return (error);
}
/* Maximum no.of pages allocated by vpm for sendfile at a time */
/*
* Maximum no.of elements in the list returned by vpm, including
* NULL for the last entry
*/
typedef struct {
unsigned int snfv_ref;
typedef struct {
/*
* The callback function used for vpm mapped mblks called when the last ref of
* the mblk is dropped which normally occurs when TCP receives the ack. But it
* can be the driver too due to lazy reclaim.
*/
void
{
}
}
/*
* The callback function used for segmap'ped mblks called when the last ref of
* the mblk is dropped which normally occurs when TCP receives the ack. But it
* can be the driver too due to lazy reclaim.
*/
void
{
/*
* We don't need to call segmap_fault(F_SOFTUNLOCK) for
* segmap_kpm as long as the latter never falls back to
* "use_segmap_range". (See segmap_getmapflt().)
*
* Using S_OTHER saves an redundant hat_setref() in
* segmap_unlock()
*/
}
}
/*
* Use segmap or vpm instead of bcopy to send down a desballoca'ed, mblk.
* When segmap is used, the mblk contains a segmap slot of no more
* than MAXBSIZE.
*
* With vpm, a maximum of SNF_MAXVMAPS page-sized mappings can be obtained
* in each iteration and sent by socket_sendmblk until an error occurs or
* the requested size has been transferred. An mblk is esballoca'ed from
* each mapped page and a chain of these mblk is sent to the transport layer.
* vpm will be called to unmap the pages when all mblks have been freed by
* free_func.
*
* At the end of the whole sendfile() operation, we wait till the data from
* the last mblk is ack'ed by the transport before returning so that the
* caller of sendfile() can safely modify the file content.
*/
int
{
int mapoff;
int chain_size;
int error;
short fflag;
int ksize;
ksize = 0;
for (;;) {
break;
}
if (vpm_enable) {
int mblk_size;
int maxsize;
int i;
KM_SLEEP);
/*
* Get vpm mappings for maxsize with read access.
* If the pages aren't available yet, we get
* DEADLK, so wait and try again a little later using
* an increasing wait. We might be here a long time.
*
* If delay_sig returns EINTR, be sure to exit and
* pass it up to the caller.
*/
deadlk_wait = 0;
break;
}
}
if (error != 0) {
goto out;
}
/* Construct the mblk chain from the page mappings */
chain_size = 0;
total_size > 0; i++) {
mapoff, total_size);
/*
* We return EAGAIN after unmapping the pages
* if we cannot allocate the the head of the
* chain. Otherwise, we continue sending the
* mblks constructed so far.
*/
if (i == 0) {
S_READ);
sizeof (snf_vmap_desbinfo));
goto out;
}
break;
}
/* Mark this dblk with the zero-copy flag */
chain_size += mblk_size;
total_size -= mblk_size;
mapoff = 0;
if (i > 0)
else
}
} else {
/* vpm not supported. fallback to segmap */
if (chain_size > total_size)
/*
* we don't forcefault because we'll call
* segmap_fault(F_SOFTLOCK) next.
*
* S_READ will get the ref bit set (by either
* segmap_getmapflt() or segmap_fault()) and page
* shared locked.
*/
/*
* We must call segmap_fault() even for segmap_kpm
* because that's how error gets returned.
* (segmap_getmapflt() never fails but segmap_fault()
* does.)
*
* If the pages aren't available yet, we get
* DEADLK, so wait and try again a little later using
* an increasing wait. We might be here a long time.
*
* If delay_sig returns EINTR, be sure to exit and
* pass it up to the caller.
*/
deadlk_wait = 0;
break;
}
}
if (error != 0) {
goto out;
}
goto out;
}
/* Mark this dblk with the zero-copy flag */
fileoff += chain_size;
total_size -= chain_size;
}
if (total_size == 0 && !nowait) {
}
if (error != 0) {
/*
* mp contains the mblks that were not sent by
* socket_sendmblk. Use its size to update *count
*/
return (error);
}
ksize += chain_size;
if (total_size == 0)
goto done;
if (error)
break;
/* Read as much as possible. */
break;
}
out:
done:
if (dowait) {
} else {
break;
}
}
}
}
return (error);
}
int
{
int iosize;
int extra = 0;
int error;
short fflag;
int ksize;
int ioflag;
int maxblk = 0;
int wroff = 0;
/*
* Get the extra space to insert a header and a trailer.
*/
} else {
}
}
ksize = 0;
/* If read sync is not asked for, filter sync flags */
for (;;) {
break;
}
/*
* Socket filters can limit the mblk size,
* so limit reads to maxblk if there are
* filters present.
*/
if (is_system_labeled()) {
} else {
}
break;
}
error = 0;
break;
}
if (error != 0) {
return (error);
}
if (size == 0)
goto done;
if (error)
break;
/* Read as much as possible. */
size = 0;
}
done:
return (error);
}
#if defined(_SYSCALL32_IMPL) || defined(_ILP32)
/*
* Largefile support for 32 bit applications only.
*/
int
{
int error = 0;
if (sfv_len < 0) {
goto out;
}
/* Same checks as in pread */
if (sfv_off > MAXOFFSET_T) {
goto out;
}
/*
* There are no more checks on sfv_len. So, we cast it to
* u_offset_t and share the snf_direct_io/snf_cache code between
* 32 bit and 64 bit.
*
* TODO: should do nbl_need_check() like read()?
*/
if (sfv_len > sendfile_max_size) {
&count);
goto out;
}
/*
* Grab the lock as a reader to prevent the file size
* from changing underneath.
*/
goto out;
}
/* Read as much as possible. */
/*
* When the NOWAIT flag is not set, we enable zero-copy only if the
* transfer size is large enough. This prevents performance loss
* when the caller sends the file piece by piece.
*/
} else {
}
}
if (dozcopy) {
} else {
} else {
}
else
}
out:
return (error);
}
#endif
#ifdef _SYSCALL32_IMPL
/*
* recv32(), recvfrom32(), send32(), sendto32(): intentionally return a
* ssize_t rather than ssize32_t; see the comments above read32 for details.
*/
{
}
{
}
{
}
{
}
#endif /* _SYSCALL32_IMPL */
/*
* Function wrappers (mostly around the sonode switch) for
* backward compatibility.
*/
int
{
}
int
{
int error;
return (error);
}
int
{
}
int
{
}
int
{
}
int
{
}
int
{
}
int
{
}
int
{
CRED()));
}
/*
* Because this is backward compatibility interface it only needs to be
* able to handle the creation of TPI sockfs sockets.
*/
struct sonode *
int *errorp)
{
} else {
/* Cannot fail, only bumps so_count */
} else {
}
}
return (so);
}