/* socksyscalls.c revision 740243730195c25d65f2a1987de1b96cc6783fde */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/sysmacros.h>
#include <sys/socketvar.h>
#include <sys/isa_defs.h>
#include <sys/inttypes.h>
#include <sys/sendfile.h>
#ifdef SOCK_TEST
#else
#define do_useracc 1
#endif /* SOCK_TEST */
extern int xnet_truncate_print;
/*
 * There is no formal definition of IOV_MAX for this interface, so a
 * local bound on the number of iovecs is defined here.
*/
#define MSG_MAXIOVLEN 16
/*
* Kernel component of socket creation.
*
* The socket library determines which version number to use.
* First the library calls this with a NULL devpath. If this fails
* for the appropriate transport. If one is found it will pass in the
* devpath for the kernel to use.
*/
int
{
int fd;
int error;
int saved_error = 0;
/*
* The request is for an NCA socket so for NL7C use the
* INET domain instead and mark NL7C_AF_NCA below.
*/
/*
* NL7C is not supported in non-global zones,
* we enforce this restriction here.
*/
if (getzoneid() != GLOBAL_ZONEID) {
}
}
/*
* If there is either an EPROTONOSUPPORT or EPROTOTYPE error
* it makes sense doing the wildcard lookup since the
* protocol might not be in the table.
*/
saved_error = error;
/*
* Try wildcard lookup. Never use devpath for wildcards.
*/
/*
* Can't find in kernel table - have library
* the devpath (The library will do this if it didn't
* already pass in a devpath).
*/
if (saved_error != 0)
error = saved_error;
}
}
/* Check the device policy */
}
if (protocol == IPPROTO_SCTP) {
} else {
}
}
}
if (wildcard) {
/*
* Issue SO_PROTOTYPE setsockopt.
*/
&protocol,
(t_uscalar_t)sizeof (protocol));
if (error) {
/*
* Setsockopt often fails with ENOPROTOOPT but socket()
* should fail with EPROTONOSUPPORT/EPROTOTYPE.
*/
error = saved_error;
else
}
}
}
/*
* Now fill in the entries that falloc reserved
*/
return (fd);
}
/*
* Map from a file descriptor to a socket node.
* Returns with the file descriptor held i.e. the caller has to
* use releasef when done with the file descriptor.
*/
static struct sonode *
{
eprintline(*errorp);
return (NULL);
}
/* Check if it is a socket */
eprintline(*errorp);
return (NULL);
}
/*
* Use the stream head to find the real socket vnode.
* This is needed when namefs sits above sockfs.
*/
return (NULL);
}
} else {
}
if (fpp)
return (so);
}
/*
* Allocate and copyin a sockaddr.
* Ensures NULL termination for AF_UNIX addresses by extending them
* with one NULL byte if need be. Verifies that the length is not
* excessive to prevent an application from consuming all of kernel
* memory. Returns NULL when an error occurred.
*/
static struct sockaddr *
int *errorp)
{
char *faddr;
if (namelen > SO_MAXARGSIZE) {
return (NULL);
}
return (NULL);
}
/*
* Add space for NULL termination if needed.
* Do a quick check if the last byte is NUL.
*/
/* Check if there is any NULL termination */
size_t i;
int foundnull = 0;
if (faddr[i] == '\0') {
foundnull = 1;
break;
}
}
if (!foundnull) {
/* Add extra byte for NUL padding */
char *nfaddr;
/* NUL terminate */
namelen++;
}
}
}
/*
*/
static int
{
if (ulen != 0) {
return (EFAULT);
}
} else
ulen = 0;
return (EFAULT);
}
return (0);
}
/*
* If klen is greater than ulen it still uses the non-truncated
* klen to update ulenp.
*/
static int
{
else if (ulen != 0 && xnet_truncate_print) {
printf("sockfs: truncating copyout of address using "
"XNET semantics for pid = %d. Lengths %d, %d\n",
}
if (ulen != 0) {
return (EFAULT);
} else
klen = 0;
} else
klen = 0;
return (EFAULT);
}
return (0);
}
/*
* The socketpair() code in libsocket creates two sockets (using
* to connect the two sockets together.
*
* For a SOCK_STREAM socketpair a listener is needed - in that case this
* routine will create a new file descriptor as part of accepting the
* connection. The library socketpair() will check if svs[2] has changed
* in which case it will close the changed fd.
*
* Note that this code could use the TPI feature of accepting the connection
* on the listening endpoint. However, that would require significant changes
* to soaccept.
*/
int
{
int svs[2];
int error;
struct sockaddr_ux *name;
if (error && do_useracc)
}
error = EOPNOTSUPP;
goto done;
}
/*
* The code below makes assumptions about the "sockfs" implementation.
* So make sure that the correct implementation is really used.
*/
/*
* Bind both sockets and connect them with each other.
*/
if (error) {
goto done;
}
if (error) {
goto done;
}
namelen = sizeof (struct sockaddr_ux);
0, _SOCONNECT_NOXLATE);
if (error) {
goto done;
}
0, _SOCONNECT_NOXLATE);
if (error) {
goto done;
}
} else {
/*
* Bind both sockets, with so1 being a listener.
* Connect so2 to so1 - nonblocking to avoid waiting for
* soaccept to complete.
* Accept a connection on so1. Pass out the new fd as sv[0].
* The library will detect the changed fd and close
* the original one.
*/
int nfd;
/*
* We could simply call SOP_LISTEN() here (which would do the
* binding automatically) if the code didn't rely on passing
* _SOBIND_NOXLATE to the TPI implementation of SOP_BIND().
*/
if (error) {
goto done;
}
if (error) {
goto done;
}
namelen = sizeof (struct sockaddr_ux);
if (error) {
if (error != EINPROGRESS) {
goto done;
}
}
if (error) {
goto done;
}
/* wait for so2 being SS_CONNECTED ignoring signals */
if (error != 0) {
goto done;
}
goto done;
}
/*
* fill in the entries that falloc reserved
*/
/*
* The socketpair library routine will close the original
* svs[0] when this code passes out a different file
* descriptor.
*/
}
}
return (0);
done:
}
int
{
int error;
/* Allocate and copyin name */
/*
* namelen.
*/
}
} else {
namelen = 0;
}
switch (version) {
default:
break;
case SOV_XPG4_2:
break;
case SOV_SOCKBSD:
break;
}
done:
if (error)
return (0);
}
/* ARGSUSED2 */
int
{
int error;
if (error)
return (0);
}
/*ARGSUSED3*/
int
{
int error;
int nfd;
}
if (namelen != 0) {
if (error && do_useracc) {
}
} else
} else {
namelen = 0;
}
/*
* Allocate the user fd before SOP_ACCEPT() in order to
* catch EMFILE errors before calling SOP_ACCEPT().
*/
}
if (error) {
}
/*
* so_faddr_sa can not go away even though we are not holding so_lock.
* However, in theory its content could change from underneath us.
* But this is not possible in practice since it can only
* change due to either some socket system call
* or due to a T_CONN_CON being received from the stream head.
* can do any system call on nso and T_CONN_CON can not arrive
* on a socket that is already connected.
* Thus there is no reason to hold so_lock here.
*
* SOP_ACCEPT() is required to have set the valid bit for the faddr,
* but it could be instantly cleared by a disconnect from the transport.
* For that reason we ignore it here.
*/
if (error) {
}
}
/*
* fill in the entries that falloc reserved
*/
/*
* Copy FNDELAY and FNONBLOCK from listener to acceptor
*/
int arg = 0;
/*
* This code is a simplification of the F_SETFL code in fcntl()
* Ignore any errors from VOP_SETFL.
*/
error = 0;
} else {
}
}
return (nfd);
}
int
{
int error;
/* Allocate and copyin name */
if (namelen != 0) {
}
} else
if (name)
if (error)
return (0);
}
/*ARGSUSED2*/
int
{
int error;
if (error)
return (0);
}
/*
* Common receive routine.
*/
static ssize_t
int flags,
int *flagsp)
{
void *name;
void *control;
int error;
if (error) {
}
if (error)
goto err;
/*
* Clear internal flag.
*/
/*
* Determine MSG_CTRUNC. sorecvmsg sets MSG_CTRUNC only
* when controllen is zero and there is control data to
* copy out.
*/
if (controllen != 0 &&
}
goto err;
}
}
/*
* Note: This MUST be done last. There can be no "goto err" after this
* point since it could make so_closefds run twice on some part
* of the file descriptor array.
*/
if (controllen != 0) {
if (!(flags & MSG_XPG4_2)) {
/*
* Good old msg_accrights can only return a multiple
* of 4 bytes.
*/
}
if (error)
goto err;
controllen = 0;
}
}
if (msg->msg_namelen != 0)
if (msg->msg_controllen != 0)
err:
/*
* If we fail and the control part contains file descriptors
* we have to close the fd's.
*/
if (msg->msg_controllen != 0)
!(flags & MSG_XPG4_2), 0);
if (msg->msg_namelen != 0)
if (msg->msg_controllen != 0)
}
/*
* Native system call
*/
{
}
auio.uio_loffset = 0;
lmsg.msg_namelen = 0;
lmsg.msg_controllen = 0;
}
{
}
auio.uio_loffset = 0;
sizeof (lmsg.msg_namelen)))
} else {
lmsg.msg_namelen = 0;
}
lmsg.msg_controllen = 0;
}
/*
* Uses the MSG_XPG4_2 flag to determine if the caller is using
* struct omsghdr or struct nmsghdr.
*/
{
int iovcnt;
int i;
int *flagsp;
model = get_udatamodel();
if (flags & MSG_XPG4_2) {
} else {
/*
* Assumes that nmsghdr and omsghdr are identically shaped
* except for the added msg_flags field.
*/
}
/*
* Code below us will kmem_alloc memory and hang it
* off msg_control and msg_name fields. This forces
* us to copy the structure to its native form.
*/
}
#ifdef _SYSCALL32_IMPL
/*
* 32-bit callers need to have their iovec expanded, while ensuring
* that they can't move more than 2Gbytes of data in a single call.
*/
if (model == DATAMODEL_ILP32) {
count32 = 0;
for (i = 0; i < iovcnt; i++) {
}
} else
#endif /* _SYSCALL32_IMPL */
}
len = 0;
for (i = 0; i < iovcnt; i++) {
}
}
auio.uio_loffset = 0;
(do_useracc == 0 ||
B_WRITE) != 0)) {
}
}
/*
* Common send function.
*/
static ssize_t
{
void *name;
void *control;
int error;
else
/* Allocate and copyin name and control */
goto done3;
/* copyin_name null terminates addresses for AF_UNIX */
} else {
}
/*
* Verify that the length is not excessive to prevent
* an application from consuming all of kernel memory.
*/
if (controllen > SO_MAXARGSIZE) {
goto done2;
}
goto done1;
}
} else {
}
if (error != 0) {
}
}
/*
* Native system call
*/
{
}
auio.uio_loffset = 0;
if (!(flags & MSG_XPG4_2)) {
/*
* implementation we set EOR for all send* calls.
*/
}
}
/*
* Uses the MSG_XPG4_2 flag to determine if the caller is using
* struct omsghdr or struct nmsghdr.
*/
{
int iovcnt;
int i;
model = get_udatamodel();
if (flags & MSG_XPG4_2) {
} else {
/*
* Assumes that nmsghdr and omsghdr are identically shaped
* except for the added msg_flags field.
*/
/*
* implementation we set EOR for all send* calls.
*/
}
/*
* Code below us will kmem_alloc memory and hang it
* off msg_control and msg_name fields. This forces
* us to copy the structure to its native form.
*/
/*
* Unless this is XPG 4.2 we allow iovcnt == 0 to
* be compatible with SunOS 4.X and 4.4BSD.
*/
}
#ifdef _SYSCALL32_IMPL
/*
* 32-bit callers need to have their iovec expanded, while ensuring
* that they can't move more than 2Gbytes of data in a single call.
*/
if (model == DATAMODEL_ILP32) {
if (iovcnt != 0 &&
count32 = 0;
for (i = 0; i < iovcnt; i++) {
}
} else
#endif /* _SYSCALL32_IMPL */
if (iovcnt != 0 &&
}
len = 0;
for (i = 0; i < iovcnt; i++) {
}
}
auio.uio_loffset = 0;
}
{
}
auio.uio_loffset = 0;
if (!(flags & MSG_XPG4_2)) {
/*
* implementation we set EOR for all send* calls.
*/
}
}
/*ARGSUSED3*/
int
{
int error;
union {
struct sockaddr_in sin;
struct sockaddr_in6 sin6;
} sin; /* Temporary buffer, common case */
void *addr; /* Temporary buffer, uncommon case */
goto bad;
goto rel_out;
}
/*
* If a connect or accept has been done, unless we're an Xnet socket,
* the remote address has already been updated in so_faddr_sa.
*/
goto rel_out;
}
size = 0;
} else {
/*
* Allocate temporary to avoid holding so_lock across
* copyout
*/
}
/* Prevent so_faddr_sa/len from changing while accessed */
goto free_out;
}
if (size != 0)
}
/*ARGSUSED3*/
int
{
int error;
union {
struct sockaddr_in sin;
struct sockaddr_in6 sin6;
} sin; /* Temporary buffer, common case */
void *addr; /* Temporary buffer, uncommon case */
goto bad;
goto rel_out;
}
/*
* If a bind or accept has been done, unless we're an Xnet endpoint,
* the local address has already been updated in so_laddr_sa.
*/
goto rel_out;
}
size = 0;
} else {
/*
* Allocate temporary to avoid holding so_lock across
* copyout
*/
}
/* Prevent so_laddr_sa/len from changing while accessed */
if (size != 0)
}
/*ARGSUSED5*/
int
getsockopt(int sock,
int level,
int option_name,
void *option_value,
int version)
{
void *optval;
int error;
}
/*
* Verify that the length is not excessive to prevent
* an application from consuming all of kernel memory.
*/
if (optlen > SO_MAXARGSIZE) {
}
optlen_res = optlen;
if (error) {
}
optval, optlen_res);
if (error)
return (0);
}
/*ARGSUSED5*/
int
setsockopt(int sock,
int level,
int option_name,
void *option_value,
int version)
{
int error;
if (option_value != NULL) {
if (option_len != 0) {
/*
* Verify that the length is not excessive to prevent
* an application from consuming all of kernel memory.
*/
if (option_len > SO_MAXARGSIZE) {
goto done2;
}
goto done1;
}
}
} else
option_len = 0;
if (error)
return (0);
}
/*
* Add config info when devpath is non-NULL; delete info when devpath is NULL.
* devpath is a user address.
*/
int
{
char *kdevpath; /* Copied in devpath string */
int error = 0;
/* Deleting an entry */
kdevpathlen = 0;
} else {
/*
* Adding an entry.
* Copyin the devpath.
* This also makes it possible to check for too long pathnames.
* Compress the space needed for the devpath before passing it
* to soconfig - soconfig will store the string until
* the configuration is removed.
*/
char *buf;
&kdevpathlen)) != 0) {
goto done;
}
}
done:
if (error) {
}
return (0);
}
/*
* Sendfile is implemented through two schemes, direct I/O or by
* caching in the filesystem page cache. We cache the input file by
* default and use direct I/O only if sendfile_max_size is set
* appropriately as explained below. Note that this logic is consistent
* with other filesystems where caching is turned on by default
* unless explicitly turned off by using the DIRECTIO ioctl.
*
* We choose a slightly different scheme here. One can turn off
* caching by setting sendfile_max_size to 0. One can also enable
* caching of files <= sendfile_max_size by setting sendfile_max_size
* to an appropriate value. By default sendfile_max_size is set to the
* maximum value so that all files are cached. In future, we may provide
* better interfaces for caching the file.
*
* Sendfile through Direct I/O (Zero copy)
* --------------------------------------
*
* As disks are normally slower than the network, we can't have a
* single thread that reads the disk and writes to the network. We
* need to have parallelism. This is done by having the sendfile
* thread create another thread that reads from the filesystem
* and queues it for network processing. In this scheme, the data
* is never copied anywhere i.e it is zero copy unlike the other
* scheme.
*
* We have a sendfile queue (snfq) where each sendfile
* request (snf_req_t) is queued for processing by a thread. Number
* of threads is dynamically allocated and they exit if they are idling
* beyond a specified amount of time. When each request (snf_req_t) is
* processed by a thread, it produces a number of mblk_t structures to
* be consumed by the sendfile thread. snf_deque and snf_enque are
* used for consuming and producing mblks. Size of the filesystem
* read is determined by the tuneable (sendfile_read_size). A single
* mblk holds sendfile_read_size worth of data (except the last
* read of the file) which is sent down as a whole to the network.
* sendfile_read_size is set to 1 MB as this seems to be the optimal
* value for the UFS filesystem backed by a striped storage array.
*
* Synchronisation between read (producer) and write (consumer) threads.
* --------------------------------------------------------------------
*
* sr_lock protects sr_ib_head and sr_ib_tail. The lock is held while
* adding and deleting items in this list. Error can happen anytime
* during read or write. There could be unprocessed mblks in the
* sr_ib_XXX list when a read or write error occurs. Whenever error
* is encountered, we need two things to happen :
*
* a) One of the threads need to clean the mblks.
* b) When one thread encounters an error, the other should stop.
*
* For (a), we don't want to penalise the reader thread as it could do
* some useful work processing other requests. For (b), the error can
* be detected by examining sr_read_error or sr_write_error.
* sr_lock protects sr_read_error and sr_write_error. If both reader and
* writer encounters error, we need to report the write error back to
* the application as that's what would have happened if the operations
* were done sequentially. With this in mind, following should work :
*
* - Check for errors before read or write.
* - If the reader encounters error, set the error in sr_read_error.
* Check sr_write_error, if it is set, send cv_signal as it is
* waiting for reader to complete. If it is not set, the writer
* is either running sinking data to the network or blocked
* because of flow control. For handling the latter case, we
* always send a signal. In any case, it will examine sr_read_error
* and return. sr_read_error is marked with SR_READ_DONE to tell
* the writer that the reader is done in all the cases.
* - If the writer encounters error, set the error in sr_write_error.
* The reader thread is either blocked because of flow control or
* running reading data from the disk. For the former, we need to
* wakeup the thread. Again to keep it simple, we always wake up
* the reader thread. Then, wait for the read thread to complete
* if it is not done yet. Cleanup and return.
*
* High and low water marks for the read thread.
* --------------------------------------------
*
* If sendfile() is used to send data over a slow network, we need to
* make sure that the read thread does not produce data at a faster
* rate than the network. This can happen if the disk is faster than
* the network. In such a case, we don't want to build a very large queue.
* But we would still like to get all of the network throughput possible.
* This implies that network should never block waiting for data.
* As there are lot of disk throughput/network throughput combinations
* possible, it is difficult to come up with an accurate number.
* A typical 10K RPM disk has a max seek latency 17ms and rotational
* latency of 3ms for reading a disk block. Thus, the total latency to
* initiate a new read, transfer data from the disk and queue for
* transmission would take about a max of 25ms. Todays max transfer rate
* control, it would take 25ms to get new data ready for transmission.
* We have to make sure that network is not idling, while we are initiating
 * 2.5MB of data. Rounding off, we keep the low water mark to be 3MB of data.
* We need to pick a high water mark so that the woken up thread would
* do considerable work before blocking again to prevent thrashing. Currently,
* we pick this to be 10 times that of the low water mark.
*
* Sendfile with segmap caching (One copy from page cache to mblks).
* ----------------------------------------------------------------
*
* We use the segmap cache for caching the file, if the size of file
* is <= sendfile_max_size. In this case we don't use threads as VM
* is reasonably fast enough to keep up with the network. If the underlying
* transport allows, we call segmap_getmapflt() to map MAXBSIZE (8K) worth
* of data into segmap space, and use the virtual address from segmap
* directly through desballoc() to avoid copy. Once the transport is done
* with the data, the mapping will be released through segmap_release()
* called by the call-back routine.
*
* If zero-copy is not allowed by the transport, we simply call VOP_READ()
* to copy the data from the filesystem into our temporary network buffer.
*
* To disable caching, set sendfile_max_size to 0.
*/
struct sendfile_stats sf_stats;
struct sendfile_queue *snfq;
void
sendfile_init(void)
{
/* Cache all files by default. */
}
/*
* Queues a mblk_t for network processing.
*/
static void
{
} else {
}
(sr->sr_write_error == 0)) {
}
}
/*
* De-queues a mblk_t for network processing.
*/
static mblk_t *
{
/*
* If we have encountered an error on read or read is
* completed and no more mblks, return NULL.
* We need to check for NULL sr_mp_head also as
* the reads could have completed and there is
* nothing more to come.
*/
return (NULL);
}
/*
* To start with neither SR_READ_DONE is marked nor
* the error is set. When we wake up from cv_wait,
* following are the possibilities :
*
* a) sr_read_error is zero and mblks are queued.
* b) sr_read_error is set to SR_READ_DONE
* and mblks are queued.
* c) sr_read_error is set to SR_READ_DONE
* and no mblks.
* d) sr_read_error is set to some error other
* than SR_READ_DONE.
*/
}
/* Handle (a) and (b) first - the normal case. */
return (mp);
}
/* Handle (c) and (d). */
return (NULL);
}
/*
* Reads data from the filesystem and queues it for network processing.
*/
void
{
int ret_size;
int error;
/*
 * Ignore the error for filesystems that don't support DIRECTIO.
*/
break;
}
/* Error or Reached EOF ? */
break;
}
}
}
void
snf_async_thread(void)
{
for (;;) {
/*
* If we didn't find a entry, then block until woken up
* again and then look through the queues again.
*/
if (time_left <= 0) {
snfq->snfq_svc_threads--;
thread_exit();
/* NOTREACHED */
}
snfq->snfq_idle_cnt++;
snfq->snfq_idle_cnt--;
}
snfq->snfq_req_cnt--;
}
}
{
/*
* store sd_qn_maxpsz into sr_maxpsz while we have stream head.
* stream might be closed before thread returns from snf_async_read.
*/
if (stp->sd_qn_maxpsz > 0) {
} else {
}
/*
* See whether we need another thread for servicing this
* request. If there are already enough requests queued
* for the threads, create one if not exceeding
* snfq_max_threads.
*/
snfq->snfq_svc_threads++;
}
} else {
}
snfq->snfq_req_cnt++;
return (sr);
}
int
{
int iosize;
int error = 0;
short fflag;
int ksize;
ksize = 0;
*count = 0;
return (EAGAIN);
/*
* We check for read error in snf_deque. It has to check
* for successful READ_DONE and return NULL, and we might
* as well make an additional check there.
*/
break;
}
break;
}
}
/* Look at the big comments on why we cv_signal here. */
/* Wait for the reader to complete always. */
}
/* If there is no write error, check for read error. */
if (error == 0)
if (error != 0) {
}
}
return (error);
}
typedef struct {
/*
* The callback function when the last ref of the mblk is dropped,
* normally occurs when TCP receives the ack. But it can be the driver
* too due to lazy reclaim.
*/
void
{
if (!segmap_kpm) {
/*
* We don't need to call segmap_fault(F_SOFTUNLOCK) for
* segmap_kpm as long as the latter never falls back to
* "use_segmap_range". (See segmap_getmapflt().)
*
 * Using S_OTHER saves a redundant hat_setref() in
* segmap_unlock()
*/
}
}
/*
* Use segmap instead of bcopy to send down a chain of desballoca'ed, mblks.
* Each mblk contains a segmap slot of no more than MAXBSIZE. The total
* length of a chain is no more than sd_qn_maxpsz.
*
* At the end of the whole sendfile() operation, we wait till the data from
* the last mblk is ack'ed by the transport before returning so that the
* caller of sendfile() can safely modify the file content.
*/
int
{
int mapoff;
int error;
short fflag;
int ksize;
ksize = 0;
for (;;) {
break;
}
iosize = 0;
do {
/*
* we don't forcefault because we'll call
* segmap_fault(F_SOFTLOCK) next.
*
* S_READ will get the ref bit set (by either
* segmap_getmapflt() or segmap_fault()) and page
* shared locked.
*/
/*
* We must call segmap_fault() even for segmap_kpm
* because that's how error gets returned.
* (segmap_getmapflt() never fails but segmap_fault()
* does.)
*/
S_READ) != 0) {
goto out;
}
goto out;
}
/* Mark this dblk with the zero-copy flag */
else
}
return (error);
}
if (size == 0)
goto done;
if (error)
break;
/* Read as much as possible. */
break;
}
out:
done:
if (dowait) {
break;
}
}
}
return (error);
}
int
{
int iosize;
int error;
short fflag;
int ksize;
int ioflag;
ksize = 0;
/* If read sync is not asked for, filter sync flags */
for (;;) {
break;
}
break;
}
error = 0;
break;
}
return (error);
}
if (size == 0)
goto done;
if (error)
break;
/* Read as much as possible. */
size = 0;
}
done:
return (error);
}
#if defined(_SYSCALL32_IMPL) || defined(_ILP32)
/*
* Largefile support for 32 bit applications only.
*/
int
{
int error = 0;
if (sfv_len < 0) {
goto out;
}
/* Same checks as in pread */
if (sfv_off > MAXOFFSET_T) {
goto out;
}
/*
* There are no more checks on sfv_len. So, we cast it to
* u_offset_t and share the snf_direct_io/snf_cache code between
* 32 bit and 64 bit.
*
* TODO: should do nbl_need_check() like read()?
*/
if (sfv_len > sendfile_max_size) {
&count);
goto out;
}
/*
* Grab the lock as a reader to prevent the file size
* from changing underneath.
*/
goto out;
}
/* Read as much as possible. */
else
/*
* When the NOWAIT flag is not set, we enable zero-copy only if the
* transfer size is large enough. This prevents performance loss
* when the caller sends the file piece by piece.
*/
int on = 1;
} else {
}
}
if (dozcopy) {
} else {
}
out:
return (error);
}
#endif
#ifdef _SYSCALL32_IMPL
/*
* recv32(), recvfrom32(), send32(), sendto32(): intentionally return a
* ssize_t rather than ssize32_t; see the comments above read32 for details.
*/
{
}
{
}
{
}
{
}
#endif /* _SYSCALL32_IMPL */
/*
 * Function wrappers (mostly around the sonode switch) for
* backward compatibility.
*/
int
{
}
int
{
int error;
return (error);
}
int
{
}
int
{
}
int
{
}
int
{
}
int
{
return (SOP_GETPEERNAME(so));
}
int
{
return (SOP_GETSOCKNAME(so));
}
int
{
}
int
{
flags));
}
int
{
}
/*
* Because this is backward compatibility interface it only needs to be
* able to handle the creation of TPI sockfs sockets.
*/
struct sonode *
{
errorp));
}