/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015, Joyent, Inc.
*/
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs_opreg.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/pathname.h>
#include <sys/bootconf.h>
#include <vm/seg_kmem.h>
#include <sys/fs/udf_inode.h>
/*
 * Forward declarations for the vnode operations implemented below.
 */
static void udf_inactive(struct vnode *,
	struct cred *, caller_context_t *);
/*
 * Structures to control multiple IO operations to get or put pages
 * that are backed by discontiguous blocks. The master struct is
 * a dummy that holds the original bp from pageio_setup. The
 * slave struct holds the working bp's to do the actual IO. Once
 * all the slave IOs complete, the master is processed as if a single
 * IO op has completed.
 */
typedef struct mio_master {
	kmutex_t	mm_mutex;	/* protects the fields below */
	int32_t		mm_size;	/* size of this allocation */
	buf_t		*mm_bp;		/* original bp from pageio_setup */
	int32_t		mm_resid;	/* bytes not yet transferred */
	int32_t		mm_error;	/* last error seen by a slave */
	int32_t		mm_index;	/* number of slaves spawned */
} mio_master_t;

typedef struct mio_slave {
	buf_t		ms_buf;		/* working bp; must stay first */
	mio_master_t	*ms_ptr;	/* back pointer to the master */
} mio_slave_t;
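/*
 * A minimal sketch (hypothetical helper, not in the original source)
 * of how a master would be primed before the slave IOs are cloned
 * off it: mm_resid counts down in the slave completion routine until
 * the original bp can be completed as a single IO.
 */
#if 0
static void
mio_master_init(mio_master_t *mm, buf_t *bp, int32_t alloc_sz)
{
	mutex_init(&mm->mm_mutex, NULL, MUTEX_DEFAULT, NULL);
	mm->mm_size = alloc_sz;			/* remembered for kmem_free() */
	mm->mm_bp = bp;				/* original bp from pageio_setup */
	mm->mm_resid = (int32_t)bp->b_bcount;	/* bytes outstanding across slaves */
	mm->mm_error = 0;
	mm->mm_index = 0;
}
#endif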
/* ARGSUSED */
static int32_t
{
ud_printf("udf_open\n");
return (0);
}
/* ARGSUSED */
static int32_t
{
ud_printf("udf_close\n");
/*
* Push partially filled cluster at last close.
* ``last close'' is approximated because the dnlc
* may have a hold on the vnode.
*/
if (ip->i_delaylen) {
	(void) ud_putpages(vp, ip->i_delayoff,
	    ip->i_delaylen, B_ASYNC | B_FREE, cr);
	ip->i_delaylen = 0;
}
}
return (0);
}
/* ARGSUSED */
static int32_t
{
ud_printf("udf_read\n");
#ifdef __lock_lint
#endif
/*
* udf_getattr ends up being called by chklock
*/
if (error) {
goto end;
}
}
end:
#ifdef __lock_lint
#endif
return (error);
}
/* ARGSUSED */
static int32_t
{
ud_printf("udf_write\n");
#ifdef __lock_lint
#endif
/*
 * udf_getattr ends up being called by chklock
 */
if (error) {
goto end;
}
}
/*
* Throttle writes.
*/
ud_throttles++;
}
}
/*
* Write to the file
*/
/*
* In append mode start at end of file.
*/
}
end:
#ifdef __lock_lint
#endif
return (error);
}
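/*
 * Illustrative sketch of the write throttle referred to in udf_write
 * above: writers sleep while outstanding write bytes exceed the low
 * watermark. Assumes ufs-style accounting (i_writes, i_wrcv, i_tlock)
 * and the ud_WRITES/ud_LW tunables; not the original body.
 */
#if 0
	mutex_enter(&ip->i_tlock);
	while (ud_WRITES && (ip->i_writes > ud_LW)) {
		ud_throttles++;
		cv_wait(&ip->i_wrcv, &ip->i_tlock);
	}
	mutex_exit(&ip->i_tlock);
#endif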
/* ARGSUSED */
static int32_t
{
return (ENOTTY);
}
/* ARGSUSED */
static int32_t
{
ud_printf("udf_getattr\n");
/*
 * For performance, if only the size is requested, don't bother
 * with anything else.
 */
return (0);
}
} else {
}
case VBLK:
break;
case VCHR:
break;
default:
break;
}
return (0);
}
static int
{
}
/*ARGSUSED4*/
static int32_t
{
ud_printf("udf_setattr\n");
/*
 * No updates allowed to files recorded with ICB strategy type 4096.
 */
return (EINVAL);
}
/*
* Cannot set these attributes
*/
return (EINVAL);
}
if (error)
goto update_inode;
/*
* Change file access modes.
*/
}
}
}
}
/*
* Truncate file. Must have write permission and not be a directory.
*/
goto update_inode;
}
goto update_inode;
}
goto update_inode;
}
goto update_inode;
}
}
/*
* Change file access or modified times.
*/
}
gethrestime(&now);
}
}
} else {
}
return (error);
}
/* ARGSUSED */
static int32_t
{
ud_printf("udf_access\n");
return (EIO);
}
}
/* ARGSUSED */
static int32_t
char *nm,
int *direntflags,
{
ud_printf("udf_lookup\n");
/*
* Null component name is a synonym for directory being searched.
*/
if (*nm == '\0') {
error = 0;
goto out;
}
/*
* Fast path: Check the directory name lookup cache.
*/
/*
* Check accessibility of directory.
*/
}
} else {
}
if (error == 0) {
}
/*
* If vnode is a device return special vnode instead.
*/
} else {
}
}
}
out:
return (error);
}
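/*
 * Illustrative sketch of the dnlc fast path used by udf_lookup,
 * built on the standard illumos dnlc interfaces; variable names are
 * assumed, and the access check is omitted.
 */
#if 0
	if ((vp = dnlc_lookup(dvp, nm)) != NULL) {
		/* dnlc_lookup returns a held vnode on a hit */
		*vpp = vp;
		error = 0;
	}
	/* on a miss, fall through to the on-media directory scan */
#endif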
/* ARGSUSED */
static int32_t
char *name,
{
ud_printf("udf_create\n");
if (*name == '\0') {
/*
* Null component name refers to the directory itself.
*/
} else {
}
#ifdef __lock_lint
#else
}
#endif
/*
* If the file already exists and this is a non-exclusive create,
* check permissions and allow access for non-directories.
* Read-only create of an existing directory is also allowed.
* We fail an exclusive create of anything which already exists.
*/
} else if (mode) {
} else {
error = 0;
}
}
if (error) {
goto out;
/*
* Truncate regular files, if requested by caller.
* Grab i_rwlock to make sure no one else is
* currently writing to the file (we promised
* bmap we would do this).
* Must get the locks in the correct order.
*/
} else {
}
}
}
if (error == 0) {
}
#ifdef __lock_lint
#else
}
#endif
if (error) {
goto out;
}
/*
* If vnode is a device return special vnode instead.
*/
goto out;
}
}
out:
return (error);
}
/* ARGSUSED */
static int32_t
char *nm,
int flags)
{
ud_printf("udf_remove\n");
return (error);
}
/* ARGSUSED */
static int32_t
char *tnm,
int flags)
{
ud_printf("udf_link\n");
}
/*
* Do not allow links to directories
*/
return (EPERM);
}
return (EPERM);
if (error == 0) {
}
return (error);
}
/* ARGSUSED */
static int32_t
char *snm,
char *tnm,
int flags)
{
ud_printf("udf_rename\n");
}
/*
* Look up inode of file we're supposed to rename.
*/
return (error);
}
/*
 * Be sure this is not a directory with another file system
 * mounted over it. If it is, just give up the locks and
 * return EBUSY.
 */
goto errout;
}
/*
* Make sure we can delete the source entry. This requires
* write permission on the containing directory. If that
* directory is "sticky" it further requires (except for
* privileged users) that the user own the directory or the
* source entry, or else have permission to write the source
* entry.
*/
goto errout;
}
/*
* Check for renaming '.' or '..' or alias of '.'
*/
goto errout;
}
}
/* Notify the target dir. if not the same as the source dir. */
/*
* Link source to the target.
*/
/*
* ESAME isn't really an error; it indicates that the
* operation should not be done because the source and target
* are the same file, but that no error should be reported.
*/
error = 0;
}
goto errout;
}
/*
* Unlink the source.
* Remove the source entry. ud_dirremove() checks that the entry
* still reflects sip, and returns an error if it doesn't.
* If the entry has changed just forget about it. Release
* the source inode.
*/
error = 0;
}
if (error == 0) {
/*
* vnevent_rename_dest and vnevent_rename_dest_dir are called
* in ud_direnter().
*/
}
return (error);
}
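/*
 * Illustrative sketch of the "sticky" source-directory rule described
 * in udf_rename above: in a sticky directory a non-privileged caller
 * must own the directory or the entry, or hold write permission on
 * the entry. The ud_iaccess() usage and variable names are
 * assumptions; the policy helper is the standard illumos one.
 */
#if 0
	if ((sdp->i_char & ISVTX) &&
	    crgetuid(cr) != sdp->i_uid &&	/* not the dir owner */
	    crgetuid(cr) != sip->i_uid &&	/* not the entry owner */
	    ud_iaccess(sip, IWRITE, cr, 0) != 0 &&
	    secpolicy_vnode_remove(cr) != 0)	/* not privileged */
		error = EPERM;
#endif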
/* ARGSUSED */
static int32_t
char *dirname,
int flags,
{
ud_printf("udf_mkdir\n");
if (error == 0) {
}
return (error);
}
/* ARGSUSED */
static int32_t
char *nm,
int flags)
{
ud_printf("udf_rmdir\n");
return (error);
}
/* ARGSUSED */
static int32_t
int flags)
{
ud_printf("udf_readdir\n");
if (eofp) {
*eofp = 1;
}
return (0);
}
if (offset == 0) {
goto end;
}
outcount++;
} else if (offset == 0x10) {
offset = 0;
}
if (error != 0) {
break;
}
break;
}
} else {
break;
}
if (length == 0) {
continue;
}
if (!outcount) {
}
break;
}
&dummy);
}
outcount++;
}
}
end:
}
/*
* In case of error do not call uiomove.
* Return the error to the caller.
*/
}
}
return (error);
}
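/*
 * Illustrative sketch of emitting the synthetic "." entry at virtual
 * offset 0 ("." is followed by ".." at offset 0x10, matching the
 * offset checks above); buffer handling is simplified and the outbuf
 * name is assumed.
 */
#if 0
	struct dirent64 *nd = (struct dirent64 *)outbuf;

	nd->d_ino = (ino64_t)ip->i_icb_lbano;
	nd->d_off = 0x10;			/* where ".." will sit */
	nd->d_reclen = DIRENT64_RECLEN(1);
	nd->d_name[0] = '.';
	nd->d_name[1] = '\0';
	error = uiomove(nd, nd->d_reclen, UIO_READ, uio);
#endif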
/* ARGSUSED */
static int32_t
char *linkname,
char *target,
int flags)
{
ud_printf("udf_symlink\n");
if (error == 0) {
/*
 * If the first character of the target is "/",
 * skip the leading slashes and create a root
 * path_comp entry for them.
 */
if (*target == '/') {
while (*target == '/') {
target++;
}
}
target++;
}
/*
 * We got the next component of the
 * path name. Create a path_comp of
 * the appropriate type.
 */
/*
* Dot entry.
*/
/*
* DotDot entry.
*/
} else {
/*
 * Convert the user-given name
 * into the appropriate form to be
 * put on the media.
 */
break;
}
/* LINTED */
}
while (*target == '/') {
target++;
}
break;
}
}
if (error == 0) {
}
}
if (error) {
goto update_inode;
}
}
}
}
}
return (error);
}
/* ARGSUSED */
static int32_t
{
ud_printf("udf_readlink\n");
return (EINVAL);
}
return (EIO);
}
if (size == 0) {
return (0);
}
goto end;
}
off = 0;
case 1 :
break;
case 2 :
goto end;
}
uname[0] = '/';
break;
case 3 :
break;
case 4 :
break;
case 5 :
break;
}
break;
default :
goto end;
}
}
if (len == 0) {
/*
* special case link to /
*/
len = 1;
} else {
}
}
end:
}
}
}
return (error);
}
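/*
 * For reference, the ECMA-167 path component types decoded by the
 * switch in udf_readlink above (illustrative summary only):
 */
#if 0
enum ud_pathcomp_type {
	PC_AGREED	= 1,	/* location agreed between originator */
				/* and recipient; skipped here */
	PC_ROOT		= 2,	/* root directory: emit "/" */
	PC_PARENT	= 3,	/* parent directory: emit ".." */
	PC_CURRENT	= 4,	/* current directory: emit "." */
	PC_NAME		= 5	/* named component (CS0 identifier) */
};
#endif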
/* ARGSUSED */
static int32_t
{
ud_printf("udf_fsync\n");
}
if (error == 0) {
}
return (error);
}
/* ARGSUSED */
static void
{
ud_printf("udf_iinactive\n");
}
/* ARGSUSED */
static int32_t
{
ud_printf("udf_fid\n");
return (ENOSPC);
}
return (0);
}
/* ARGSUSED2 */
static int
{
ud_printf("udf_rwlock\n");
if (write_lock) {
} else {
}
#ifdef __lock_lint
#endif
return (write_lock);
}
/* ARGSUSED */
static void
{
ud_printf("udf_rwunlock\n");
#ifdef __lock_lint
#endif
}
/* ARGSUSED */
static int32_t
{
}
static int32_t
struct flk_callback *flk_cbp,
{
ud_printf("udf_frlock\n");
/*
 * If file is being mapped, disallow frlock.
 * XXX I am not holding tlock while checking i_mapcnt because the
 * current locking strategy drops all locks before calling fs_frlock.
 * So, mapcnt could change before we enter fs_frlock, making it
 * meaningless to have held tlock in the first place.
 */
return (EAGAIN);
}
}
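/*
 * Illustrative sketch of the mapcnt check described above; MANDLOCK()
 * is the standard illumos macro, and the i_char argument and variable
 * names are assumptions.
 */
#if 0
	if (ip->i_mapcnt > 0 && MANDLOCK(vp, ip->i_char))
		return (EAGAIN);

	return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
#endif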
/*ARGSUSED6*/
static int32_t
{
ud_printf("udf_space\n");
}
return (error);
}
/* ARGSUSED */
static int32_t
{
ud_printf("udf_getpage\n");
if (protp) {
}
return (ENOSYS);
}
#ifdef __lock_lint
#else
if (dolock) {
}
#endif
/*
* We may be getting called as a side effect of a bmap using
* fbread() when the blocks might be being allocated and the
* size has not yet been up'ed. In this case we want to be
* able to return zero pages if we get back UDF_HOLE from
* calling bmap for a non write case here. We also might have
* to read some frags from the disk into a page if we are
* extending the number of frags for a given lbn in bmap().
*/
#ifdef __lock_lint
#else
if (dolock) {
}
#endif
return (EFAULT);
}
/*
* Must hold i_contents lock throughout the call to pvn_getpages
* since locked pages are returned from each call to ud_getapage.
* Must *not* return locked pages and then try for contents lock
* due to lock ordering requirements (inode > page)
*/
/*
* We must acquire the RW_WRITER lock in order to
* call bmap_write().
*/
goto retrylock;
}
}
/*
* May be allocating disk blocks for holes here as
* a result of mmap faults. write(2) does the bmap_write
* in this case.
*/
/*
 * the variable "bnp" is to simplify the expression for
 * the compiler; just passing in &bn to bmap_write
 * causes a compiler "loop"
 */
} else {
}
if (error) {
goto update_inode;
}
}
}
/*
* Can be a reader from now on.
*/
#ifdef __lock_lint
}
#else
}
#endif
/*
* We remove PROT_WRITE in cases when the file has UDF holes
* because we don't want to call bmap_read() to check each
* page if it is backed with a disk block.
*/
*protp &= ~PROT_WRITE;
}
error = 0;
/*
* The loop looks up pages in the range <off, off + len).
* For each page, we first check if we should initiate an asynchronous
* read ahead before we call page_lookup (we may sleep in page_lookup
* for a previously initiated disk read).
*/
/*
* Handle async getpage (faultahead)
*/
continue;
}
/*
* Check if we should initiate read ahead of next cluster.
* We call page_exists only when we need to confirm that
* we have the current page before we initiate the read ahead.
*/
if (seqmode &&
/*
* We found the page in the page cache.
*/
} else {
/*
* We have to create the page, or read it from disk.
*/
goto error_out;
}
pl++;
}
}
}
/*
* Return pages up to plsz if they are in the page cache.
* We cannot return pages if there is a chance that they are
* backed with a UDF hole and rw is S_WRITE or S_CREATE.
*/
break;
}
}
if (plarr)
/*
* Release any pages we have locked.
*/
page_unlock(*--pl);
}
#ifdef __lock_lint
#else
if (dolock) {
}
#endif
/*
* If the inode is not already marked for IACC (in rwip() for read)
* and the inode is not marked for no access time update (in rwip()
* for write) then update the inode access time and mod time now.
*/
}
}
}
return (error);
}
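/*
 * Illustrative sketch of the reader-to-writer retry discussed in
 * udf_getpage above: bmap_write() requires i_contents held as
 * RW_WRITER, so a reader that discovers it must allocate drops the
 * lock and retries as a writer. The need_alloc predicate is
 * hypothetical shorthand for "page lies beyond the allocated blocks".
 */
#if 0
retrylock:
	if (dolock)
		rw_enter(&ip->i_contents, rwtype);
	if (dolock && rwtype == RW_READER && need_alloc) {
		rw_exit(&ip->i_contents);
		rwtype = RW_WRITER;
		goto retrylock;
	}
#endif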
/* ARGSUSED */
static int32_t
{
ud_printf("udf_putpage\n");
#ifdef __lock_lint
#endif
goto out;
}
goto out;
}
/*
* If nobody stalled, start a new cluster.
*/
if (ip->i_delaylen == 0) {
goto out;
}
/*
* If we have a full cluster or they are not contig,
* then push last cluster and start over.
*/
/* LMXXX - flags are new val, not old */
goto out;
}
/*
* There is something there, it's not full, and
* it is contig.
*/
goto out;
}
/*
* Must have weird flags or we are not clustering.
*/
}
out:
#ifdef __lock_lint
#endif
return (error);
}
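/*
 * Illustrative sketch of the delayed-write clustering laid out in the
 * comments of udf_putpage above, assuming ufs-style fields
 * (i_delayoff, i_delaylen) and a CLUSTSZ() bound; not the original
 * body.
 */
#if 0
	if (ip->i_delaylen == 0) {
		/* nobody stalled: start a new cluster */
		ip->i_delayoff = off;
		ip->i_delaylen = len;
	} else if (ip->i_delaylen + len > CLUSTSZ(ip) ||
	    off != ip->i_delayoff + ip->i_delaylen) {
		/* full or not contiguous: push the old cluster, start over */
		(void) ud_putpages(vp, ip->i_delayoff, ip->i_delaylen,
		    B_ASYNC, cr);
		ip->i_delayoff = off;
		ip->i_delaylen = len;
	} else {
		/* something there, not full, and contiguous: extend it */
		ip->i_delaylen += len;
	}
#endif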
/* ARGSUSED */
static int32_t
{
ud_printf("udf_map\n");
goto end;
}
goto end;
}
goto end;
}
/*
* If file is being locked, disallow mapping.
*/
goto end;
}
if (error != 0) {
goto end;
}
end:
return (error);
}
/* ARGSUSED */
static int32_t
{
ud_printf("udf_addmap\n");
return (ENOSYS);
}
return (0);
}
/* ARGSUSED */
static int32_t
{
ud_printf("udf_delmap\n");
return (ENOSYS);
}
return (0);
}
/* ARGSUSED */
static int32_t
{
ud_printf("udf_l_pathconf\n");
if (cmd == _PC_FILESIZEBITS) {
/*
 * udf supports a 64-bit file size, but there are
 * several other restrictions: it only supports
 * 32-bit block numbers, and daddr32_t is only an
 * int32_t. Taking these into account, we can stay
 * just where ufs is.
 */
*valp = 41;
} else if (cmd == _PC_TIMESTAMP_RESOLUTION) {
/* nanosecond timestamp resolution */
*valp = 1L;
} else {
}
return (error);
}
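/*
 * Where the 41 above comes from (illustrative arithmetic): block
 * numbers live in a daddr32_t (31 usable bits) and are in DEV_BSIZE
 * (2^9 byte) units, so the largest byte offset is 2^31 * 2^9 = 2^40,
 * and representing sizes up to 2^40 takes 41 bits.
 */
#if 0
#define	UDF_BLKNO_BITS		31	/* daddr32_t is an int32_t */
#define	UDF_DEV_BSHIFT		9	/* DEV_BSIZE == 512 == 2^9 */
#define	UDF_FILESIZEBITS	(UDF_BLKNO_BITS + UDF_DEV_BSHIFT + 1)
#endif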
#ifndef __lint
#endif
/*
 * Assumption is that there will not be a pageio request
 * to an embedded file
 */
/* ARGSUSED */
static int32_t
{
return (EINVAL);
}
/*
* We need a better check. Ideally, we would use another
* vnodeops so that hlocked and forcibly unmounted file
* systems would return EIO where appropriate and w/o the
* need for these checks.
*/
return (EIO);
}
#ifdef __lock_lint
#else
if (dolock) {
}
#endif
/*
* Break the io request into chunks, one for each contiguous
* stretch of disk blocks in the target file.
*/
contig = 0;
break;
}
break;
}
/*
* Check if more than one I/O is
* required to complete the given
* I/O operation
*/
multi_io = 0;
} else {
multi_io = 1;
}
}
/*
* ub.ub_pageios.value.ul++;
*/
if (multi_io == 0) {
(void) bdev_strategy(bp);
} else {
if (error != 0) {
break;
}
}
} else {
}
/*
* If the request is not B_ASYNC, wait for i/o to complete
* and re-assemble the page list to return to the caller.
* If it is B_ASYNC we leave the page list in pieces and
* cleanup() will dispose of them.
*/
if (error) {
break;
}
}
}
if (error) {
/* Cleanup unprocessed parts of list */
} else {
}
} else {
/* Re-assemble list and let caller clean up */
}
}
#ifdef __lock_lint
#else
if (dolock) {
}
#endif
return (error);
}
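/*
 * Illustrative sketch of the single-vs-multiple I/O decision made in
 * udf_pageio above: if the contiguous extent reported by bmap is
 * shorter than the chunk being transferred, the request is split
 * across slave buffers. Variable names follow the fragments above;
 * the ud_bmap_read() usage is an assumption.
 */
#if 0
	if ((error = ud_bmap_read(ip, (u_offset_t)off, &bn, &contig)) != 0)
		break;
	if (contig < (ssize_t)len)
		multi_io = 1;	/* extent ends early: clone slave bufs */
	else
		multi_io = 0;	/* one extent covers it: single strategy */
#endif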
/* -------------------- local functions --------------------------- */
{
ud_printf("ud_rdwri\n");
} else {
}
if (aresid) {
}
return (error);
}
/*
* Free behind hacks. The pager is busted.
* XXX - need to pass the information down to writedone() in a flag like B_SEQ
* or B_FREE_IF_TIGHT_ON_MEMORY.
*/
/* ARGSUSED */
{
/*
* Figure out whether the page can be created, or must be
* read from the disk
*/
return (EINVAL);
}
} else {
/*
* Some other thread has entered the page.
* ud_getpage will retry page_lookup.
*/
return (0);
}
/*
* Fill the page with as much data as we can from the file.
*/
if (err) {
return (err);
}
/*
* XXX ??? ufs has io_len instead of pgoff below
*/
/*
* If the file access is sequential, initiate read ahead
* of the next cluster.
*/
}
}
return (err);
}
/* ARGSUSED */
void
{
/*
* Is this test needed?
*/
return;
}
contig = 0;
return;
}
/*
 * Some other thread has entered the page.
 * So no read ahead is done here (i.e. we will have
 * to wait for the read when it is needed).
 */
return;
}
}
int
{
/*
 * Embedded file: read the file_entry
 * from the buffer cache and copy the
 * required portions.
 */
/*
* mapin to kvm
*/
/*
* mapout of kvm
*/
}
} else {
/*
 * Get the contiguous size and block number
 * at offset "off"
 */
goto out;
/*
* Zero part of the page which we are not
* going to read from the disk.
*/
/*
* This is a HOLE. Just zero out
* the page
*/
goto out;
}
}
multi_io = 1;
} else {
}
}
/*
* Get a bp and initialize it
*/
/*
* Start I/O
*/
if (multi_io == 0) {
/*
* Single I/O is sufficient for this page
*/
(void) bdev_strategy(bp);
} else {
/*
 * We need to do the I/O in
 * pieces.
 */
if (error != 0) {
goto out;
}
}
/*
* Wait for i/o to complete.
*/
if (error) {
goto out;
}
}
}
}
out:
return (error);
}
{
ud_printf("ud_putpages\n");
return (EINVAL);
}
/*
 * Acquire the readers/writers inode lock before locking
 * any pages in this inode.
 * The inode lock is held during i/o.
 */
if (len == 0) {
}
#ifdef __lock_lint
#else
if (dolock) {
}
#endif
if (!vn_has_cached_data(vp)) {
#ifdef __lock_lint
#else
if (dolock) {
}
#endif
return (0);
}
if (len == 0) {
/*
* Search the entire vp list for pages >= off.
*/
} else {
/*
* Loop over all offsets in the range looking for
* pages to deal with.
*/
} else {
}
/*
* If we are not invalidating, synchronously
* freeing or writing pages, use the routine
* page_lookup_nowait() to prevent reclaiming
* them from the free list.
*/
} else {
}
} else {
if (err != 0) {
break;
}
/*
* "io_off" and "io_len" are returned as
* the range of pages we actually wrote.
* This allows us to skip ahead more quickly
* since several pages may've been dealt
* with by this iteration of the loop.
*/
}
}
}
/*
* We have just sync'ed back all the pages on
* the inode, turn off the IMODTIME flag.
*/
}
#ifdef __lock_lint
#else
if (dolock) {
}
#endif
return (err);
}
/* ARGSUSED */
{
ud_printf("ud_putapage\n");
/*
 * If the modified time on the inode has not already been
 * set elsewhere (e.g. for write/setattr), set it now.
 * This gives us approximate modified times for mmap'ed files
 * which are modified via stores in the user address space.
 */
}
/*
 * Align the request to a block boundary (for old file systems),
 * and go ask bmap() how contiguous things are for this file.
 */
/* block align it */
if (io_len == 0) {
}
} else {
}
return (error);
}
}
}
} else {
goto out;
}
multi_io = 1;
}
}
}
if (contig & PAGEOFFSET) {
}
}
if (io_len == 0) {
}
/*
* write throttle
*/
if (multi_io == 0) {
(void) bdev_strategy(bp);
} else {
if (error != 0) {
goto out;
}
}
/*
* Wait for i/o to complete.
*/
}
}
}
out:
}
if (offp) {
}
if (lenp) {
}
return (error);
}
{
if (ud_WRITES) {
}
}
} else {
}
return (0);
}
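/*
 * Illustrative sketch of the wakeup half of the write throttle (the
 * sleep half sits in udf_write): as writes complete, outstanding
 * bytes are debited and throttled writers are woken once the count
 * falls to the low watermark. Assumes ufs-style accounting; not the
 * original body.
 */
#if 0
	mutex_enter(&ip->i_tlock);
	if (ip->i_writes >= ud_LW) {
		if ((ip->i_writes -= bp->b_bcount) <= ud_LW) {
			if (ud_WRITES)
				cv_broadcast(&ip->i_wrcv); /* wake all up */
		}
	} else {
		ip->i_writes -= bp->b_bcount;
	}
	mutex_exit(&ip->i_tlock);
#endif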
/* ARGSUSED3 */
{
return (EIO);
}
return (0);
}
return (EINVAL);
}
return (0);
}
do {
error = 0;
goto out;
}
n = (int)diff;
}
dofree = ud_freebehind &&
off > ud_smallfile;
#ifndef __lock_lint
}
#endif
flags = 0;
if (!error) {
/*
 * If we read a whole block, or read to eof,
 * we won't need this buffer again soon.
 */
}
/*
 * In POSIX SYNC (FSYNC and FDSYNC) read mode,
 * we want to make sure that the page which has
 * been read is written to disk if it is dirty,
 * and that the corresponding indirect blocks
 * are also flushed out.
 */
}
} else {
}
#ifndef __lock_lint
}
#endif
out:
/*
* Inode is updated according to this table if FRSYNC is set.
*
* FSYNC FDSYNC(posix.4)
* --------------------------
* always IATTCHG|IBDWRITE
*/
}
}
/*
* If we've already done a partial read, terminate
* the read but return no error.
*/
error = 0;
}
return (error);
}
{
return (EIO);
}
return (EFBIG);
}
/*
* see udf_l_pathconf
*/
}
mutex_enter(&p->p_lock);
(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE], p->p_rctls,
    p, RCA_UNSAFE_SIGINFO);
mutex_exit(&p->p_lock);
return (EFBIG);
}
return (EINVAL);
}
return (0);
}
iupdat_flag = 1;
}
do {
goto out;
}
}
/*
* We are extending the length of the file.
* bmap is used so that we are sure that
* if we need to allocate new blocks, that it
* is done here before we up the file size.
*/
if (error) {
break;
}
i_size_changed = 1;
/*
* If we are writing from the beginning of
* the mapping, we can just create the
* pages without having to read them.
*/
pagecreate = (mapon == 0);
} else if (n == MAXBSIZE) {
/*
* Going to do a whole mappings worth,
* so we can just create the pages w/o
* having to read them in. But before
* we do that, we need to make sure any
* needed blocks are allocated first.
*/
if (error) {
break;
}
pagecreate = 1;
} else {
pagecreate = 0;
}
/*
* Touch the page and fault it in if it is not in
* core before segmap_getmapflt can lock it. This
* is to avoid the deadlock if the buffer is mapped
* to the same file through mmap which we want to
* write to.
*/
uio_prefaultpages((long)n, uio);
/*
* segmap_pagecreate() returns 1 if it calls
* page_create_va() to allocate any pages.
*/
newpage = 0;
if (pagecreate) {
(size_t)n, 0);
}
if (pagecreate &&
/*
* We created pages w/o initializing them completely,
* thus we need to zero the part that wasn't set up.
* This happens on most EOF write cases and if
* we had some sort of error during the uiomove.
*/
}
/*
* Unlock the pages allocated by page_create_va()
* in segmap_pagecreate()
*/
if (newpage) {
}
if (error) {
/*
* If we failed on a write, we may have already
* allocated file blocks as well as pages. It's
* hard to undo the block allocation, but we must
* be sure to invalidate any pages that may have
* been allocated.
*/
} else {
flags = 0;
/*
* Force write back for synchronous write cases.
*/
/*
* If the sticky bit is set but the
* execute bit is not set, we do a
* synchronous write back and free
* the page when done. We set up swap
* files to be handled this way to
* prevent servers from keeping around
* the client's swap pages too long.
* XXX - there ought to be a better way.
*/
iupdat_flag = 0;
} else {
}
/*
* Have written a whole block.
* Start an asynchronous write and
* mark the buffer to indicate that
* it won't be needed again soon.
*/
}
/*
* If the operation failed and is synchronous,
* then we need to unwind what uiomove() last
* did so we can potentially return an error to
* the caller. If this write operation was
* done in two pieces and the first succeeded,
* then we won't return an error for the second
* piece that failed. However, we only want to
* return a resid value that reflects what was
* really done.
*
* Failures for non-synchronous operations can
* be ignored since the page subsystem will
* retry the operation until it succeeds or the
* file system is unmounted.
*/
if (error) {
} else {
error = 0;
}
}
}
/*
* Re-acquire contents lock.
*/
/*
* If the uiomove() failed or if a synchronous
* page push failed, fix up i_size.
*/
if (error) {
if (i_size_changed) {
/*
 * The uiomove failed, and we
 * allocated blocks, so get rid
 * of them.
 */
}
} else {
/*
* XXX - Can this be out of the loop?
*/
if (i_size_changed) {
}
(IEXEC >> 10))) != 0 &&
/*
 * Clear Set-UID & Set-GID bits on
 * successful write if not privileged
 * and at least one of the execute bits
 * is set. If we always clear Set-GID,
 * mandatory file and record locking is
 * unusable.
 */
}
}
out:
/*
* Inode is updated according to this table -
*
* FSYNC FDSYNC(posix.4)
* --------------------------
* always@ IATTCHG|IBDWRITE
*
* @ - If we are doing synchronous write the only time we should
* not be sync'ing the ip here is if we have the stickyhack
* activated, the file is marked with the sticky bit and
* no exec bit, the file length has not been changed and
* no new blocks have been allocated during this write.
*/
/*
* we have eliminated nosync
*/
}
}
/*
* If we've already done a partial-write, terminate
* the write but return no error.
*/
error = 0;
}
return (error);
}
{
/*
* Figure out how many buffers to allocate
*/
io_count = 0;
contig = 0;
goto end;
}
if (contig == 0) {
goto end;
}
io_count ++;
} else {
/*
* HOLE
*/
/*
 * This is a hole; when it is read,
 * it should be filled with 0's.
 */
}
}
}
if (io_count != 0) {
/*
 * Allocate memory for the
 * required number of buffers
 */
alloc_sz = sizeof (mio_master_t) +
(sizeof (mio_slave_t) * io_count);
goto end;
}
/*
* initialize master
*/
/*
* Initialize buffers
*/
io_count = 0;
contig = 0;
goto end;
}
}
/*
* Clone the buffer
* and prepare to start I/O
*/
io_count++;
ms++;
}
}
/*
* Start I/O's
*/
for (i = 0; i < io_count; i++) {
ms++;
}
}
end:
if (error != 0) {
}
}
return (error);
}
{
/*
* Propagate error and byte count info from slave struct to
* the master struct
*/
/*
 * If multiple slave buffers get
 * errors, we forget the old errors;
 * this is OK because we cannot
 * return multiple errors anyway.
 */
}
/*
* free up the resources allocated to cloned buffers.
*/
if (resid == 0) {
/*
 * This is the last I/O operation;
 * clean up and return the original buffer.
 */
}
}
return (0);
}
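/*
 * Illustrative sketch of the completion routine above: each slave
 * debits the master's resid under mm_mutex, and the last slave to
 * finish completes the original buffer and frees the whole
 * master+slaves allocation. The function name is hypothetical; the
 * cast relies on ms_buf being the first member of mio_slave_t.
 */
#if 0
static int32_t
ud_slave_done_sketch(buf_t *bp)
{
	mio_master_t *mm = ((mio_slave_t *)bp)->ms_ptr;
	buf_t *obp;
	int32_t done;

	mutex_enter(&mm->mm_mutex);
	if (geterror(bp) != 0)
		mm->mm_error = geterror(bp);	/* last error wins */
	mm->mm_resid -= bp->b_bcount;
	done = (mm->mm_resid == 0);
	mutex_exit(&mm->mm_mutex);

	if (done) {
		obp = mm->mm_bp;
		if (mm->mm_error != 0) {
			obp->b_flags |= B_ERROR;
			obp->b_error = mm->mm_error;
		}
		kmem_free(mm, mm->mm_size);
		biodone(obp);		/* master completes as one IO */
	}
	return (0);
}
#endif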