lofi.c revision a19609f85693e4e7d7e744d836a4e87193c934e4
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
/*
* lofi (loopback file) driver - allows you to attach a file to a device,
* which can then be accessed through that device. The simple model is that
* you tell lofi to open a file, and then use the block device you get as
* you would any block device. lofi translates access to the block device
* into I/O on the underlying file. This is mostly useful for
* mounting images of filesystems.
*
* lofi is controlled through /dev/lofictl - this is the only device exported
* during attach, and is minor number 0. lofiadm communicates with lofi through
* ioctls on this device. When a file is attached to lofi, block and character
* devices are exported in /dev/lofi and /dev/rlofi. These devices
* are identified by their minor number, and the minor number is also used
* as the name in /dev/lofi. If we ever decide to support more than one
* disk-like partition per file, we'll have to divide the minor number space
* to identify fdisk partitions and slices, and the name will then be the
* minor number shifted down a few bits. Minor devices are tracked with
* state structures handled with ddi_soft_state(9F) for simplicity.
*
* A file attached to lofi is opened when attached and not closed until
* explicitly detached from lofi. This seems more sensible than deferring
* the open until the device is first used, for two reasons.
* One is that any failure is likely to be noticed by the person (or script)
* running lofiadm. Another is that it would be a security problem if the
* file was replaced by another one after being added but before being opened.
*
* The only hard part about lofi is the ioctls. In order to support things
* like 'newfs' on a lofi device, it needs to support certain disk ioctls.
* So it has to fake disk geometry and partition information. More may need
* to be faked if your favorite utility doesn't work and you think it should
* (fdformat doesn't work because it really wants to know the type of floppy
* controller to talk to, and that didn't seem easy to fake. Or possibly even
* necessary, since we have mkfs_pcfs now).
*
* Normally, a lofi device cannot be detached if it is open (i.e. busy). To
* support simulation of hotplug events, an optional force flag is provided.
* If a lofi device is open when a force detach is requested, then the
* underlying file is closed and any subsequent operations return EIO. When the
* device is closed for the last time, it will be cleaned up at that time. In
* addition, the DKIOCSTATE ioctl will return DKIO_DEV_GONE when the device is
* detached but not removed.
*
* Known problems:
*
* UFS logging. Mounting a UFS filesystem image "logging"
* works for basic copy testing but wedges during a build of ON through
* that image. Some deadlock in lufs holding the log mutex and then
* getting stuck on a buf. So for now, don't do that.
*
* Direct I/O. Since the filesystem data is being cached in the buffer
* cache, _and_ again in the underlying filesystem, it's tempting to
* enable direct I/O on the underlying file. Don't, because that deadlocks.
* I think to fix the cache-twice problem we might need filesystem support.
*
* Interesting things to do:
*
* Allow multiple files for each device. A poor-man's metadisk, basically.
*
* Pass-through ioctls on block devices. You can (though it's not
* documented), give lofi a block device as a file name. Then we shouldn't
* need to fake a geometry, however, it may be relevant if you're replacing
* metadisk, or using lofi to get crypto.
* In fact this even makes sense if you have lofi "above" metadisk.
*
* Encryption:
* Each lofi device can have its own symmetric key and cipher.
* They are passed to us by lofiadm(1m) in the correct format for use
* with the kernel crypto framework (misc/kcf) routines.
*
* Each block has its own IV, that is calculated in lofi_blk_mech(), based
* on the "master" key held in the lsp and the block number of the buffer.
*/
#include <sys/sysmacros.h>
#include <sys/pathname.h>
#include <sys/id_space.h>
#include <LzmaDec.h>
/*
* Crypto metadata, if it exists, is located at the end of the boot block
* (BBOFF + BBSIZE, which is SBOFF). The super block and everything after
* is offset by the size of the crypto metadata which is handled by
* lsp->ls_crypto_offset.
*/
#define NBLOCKS_PROP_NAME "Nblocks"
#define SIZE_PROP_NAME "Size"
#define ZONE_PROP_NAME "zone"
return (EINVAL); \
}
static void *lofi_statep = NULL;
static id_space_t *lofi_minor_id;
static zone_key_t lofi_zone_key;
/*
* Because lofi_taskq_nthreads limits the actual swamping of the device, the
* maxalloc parameter (lofi_taskq_maxalloc) should be tuned conservatively
* high. If we want to be assured that the underlying device is always busy,
* we must be sure that the number of bytes enqueued when the number of
* enqueued tasks exceeds maxalloc is sufficient to keep the device busy for
* the duration of the sleep time in taskq_ent_alloc(). That is, lofi should
* set maxalloc to be the maximum throughput (in bytes per second) of the
* underlying device divided by the minimum I/O size. We assume a realistic
* maximum throughput of one hundred megabytes per second; we set maxalloc on
* the lofi task queue to be 104857600 divided by DEV_BSIZE.
*/
/*
* To avoid decompressing data in a compressed segment multiple times
* when accessing small parts of a segment's data, we cache and reuse
* the uncompressed segment's data.
*
* A single cached segment is sufficient to avoid lots of duplicate
* segment decompress operations. A small cache size also reduces the
* memory footprint.
*
* lofi_max_comp_cache is the maximum number of decompressed data segments
* cached for each compressed lofi image. It can be set to 0 to disable
* caching.
*/
};
/*ARGSUSED*/
static void
{
}
/*ARGSUSED*/
static void
{
}
/*
* Free data referenced by the linked list of cached uncompressed
* segments.
*/
static void
{
struct lofi_comp_cache *lc;
}
}
static int
{
}
static int
{
switch (otyp) {
case OTYP_CHR:
break;
case OTYP_BLK:
break;
case OTYP_LYR:
lsp->ls_lyr_open_count++;
break;
default:
return (-1);
}
return (0);
}
static void
{
switch (otyp) {
case OTYP_CHR:
lsp->ls_chr_open = 0;
break;
case OTYP_BLK:
lsp->ls_blk_open = 0;
break;
case OTYP_LYR:
lsp->ls_lyr_open_count--;
break;
default:
break;
}
}
static void
{
if (lsp->ls_crypto_enabled) {
/*
* Clean up the crypto state so that it doesn't hang around
* in memory after we are done with it.
*/
}
}
}
}
}
static void
{
int i;
/*
* Free pre-allocated compressed buffers
*/
for (i = 0; i < lofi_taskq_nthreads; i++) {
}
sizeof (struct compbuf) * lofi_taskq_nthreads);
}
/*
* Free cached decompressed segment data
*/
if (lsp->ls_uncomp_seg_sz > 0) {
lsp->ls_uncomp_seg_sz = 0;
}
}
static void
{
char namebuf[50];
}
/*ARGSUSED*/
static void
{
struct lofi_state *lsp;
struct lofi_state *next;
/* lofi_destroy() frees lsp */
continue;
/*
* No in-zone processes are running, but something has this
* open. It's either a global zone process, or a lofi
* mount. In either case we set ls_cleanup so the last
* user destroys the device.
*/
} else {
}
}
}
/*ARGSUSED*/
static int
{
struct lofi_state *lsp;
/*
*/
return (EINVAL);
/* master control device */
if (minor == 0) {
return (0);
}
/* otherwise, the mapping should already exist */
return (EINVAL);
}
return (ENXIO);
}
return (EINVAL);
}
return (0);
}
/*ARGSUSED*/
static int
{
struct lofi_state *lsp;
return (EINVAL);
}
if (minor == 0) {
return (0);
}
/*
* If we forcibly closed the underlying device (li_force), or
* asked for cleanup (li_cleanup), finish up if we're the last
* out of the door.
*/
}
return (0);
}
/*
* Sets the mechanism's initialization vector (IV) if one is needed.
* The IV is computed from the data block number. lsp->ls_mech is
* altered so that:
* lsp->ls_mech.cm_param_len is set to the IV len.
* lsp->ls_mech.cm_param is set to the IV.
*/
static int
{
int ret;
char *iv;
void *data;
return (CRYPTO_DEVICE_ERROR);
/* lsp->ls_mech.cm_param{_len} has already been set for static iv */
return (CRYPTO_SUCCESS);
}
/*
* if kmem already alloced from previous call and it's the same size
* we need now, just recycle it; allocate new kmem only if we have to
*/
} else {
}
switch (lsp->ls_iv_type) {
case IVM_ENC_BLKNO:
/* iv is not static, lblkno changes each time */
break;
default:
data = 0;
datasz = 0;
break;
}
/*
* write blkno into the iv buffer padded on the left in case
* blkno ever grows bigger than its current longlong_t size
* or a variation other than blkno is used for the iv data
*/
/* encrypt the data in-place to get the IV */
if (ret != CRYPTO_SUCCESS) {
return (ret);
}
/* clean up the iv from the last computation */
return (CRYPTO_SUCCESS);
}
/*
* Performs encryption and decryption of a chunk of data of size "len",
* one DEV_BSIZE block at a time. "len" is assumed to be a multiple of
* DEV_BSIZE.
*/
static int
{
int ret;
/*
* to break it into DEV_BSIZE pieces to capture blkno incrementing
*/
}
do {
if (ret != CRYPTO_SUCCESS)
continue;
if (op_encrypt) {
} else {
}
if (ciphertext != NULL)
lblkno++;
if (ret != CRYPTO_SUCCESS) {
}
return (ret);
}
#define RDWR_RAW 1
#define RDWR_BCOPY 2
static int
{
int isread;
int error;
/*
* Note: offset is already shifted by lsp->ls_crypto_offset
* when it gets here.
*/
if (isread) {
if (method == RDWR_BCOPY) {
/* DO NOT update bp->b_resid for bcopy */
error = 0;
} else { /* RDWR_RAW */
&resid);
}
B_FALSE) != CRYPTO_SUCCESS) {
/*
* XXX: original code didn't set residual
* back to len because no error was expected
* from bcopy() if encryption is not enabled
*/
if (method != RDWR_BCOPY)
}
}
return (error);
} else {
if (lsp->ls_crypto_enabled) {
/* don't do in-place crypto to keep bufaddr intact */
B_TRUE) != CRYPTO_SUCCESS) {
if (method != RDWR_BCOPY)
return (EIO);
}
}
if (method == RDWR_BCOPY) {
/* DO NOT update bp->b_resid for bcopy */
error = 0;
} else { /* RDWR_RAW */
&resid);
}
if (lsp->ls_crypto_enabled) {
}
return (error);
}
}
static int
struct lofi_state *lsp)
{
int error;
int isread;
int smflags;
int save_error;
/*
* Note: offset is already shifted by lsp->ls_crypto_offset
* when it gets here.
*/
if (lsp->ls_crypto_enabled)
/*
* segmap always gives us an 8K (MAXBSIZE) chunk, aligned on
* an 8K boundary, but the buf transfer address may not be
* aligned on more than a 512-byte boundary (we don't enforce
* that even though we could). This matters since the initial
* part of the transfer may not start at offset 0 within the
* segmap'd chunk. So we have to compensate for that with
* 'mapoffset'. Subsequent chunks always start off at the
* beginning, and the last is capped by b_resid
*
* Visually, where "|" represents page map boundaries:
* alignedoffset (mapaddr begins at this segmap boundary)
* | offset (from beginning of file)
* | | len
* v v v
* ===|====X========|====...======|========X====|====
* /-------------...---------------/
* /----/--------/----...------/--------/
* ^ ^ ^ ^ ^
* | | | | nth xfersize (<= MAXBSIZE)
* | | 2nd thru n-1st xfersize (= MAXBSIZE)
* | 1st xfersize (<= MAXBSIZE)
* mapoffset (offset into 1st segmap, non-0 1st time, 0 thereafter)
*
* Notes: "alignedoffset" is "offset" rounded down to nearest
* MAXBSIZE boundary. "len" is next page boundary of size
* PAGESIZE after "alignedoffset".
*/
do {
/*
* Now fault in the pages. This lets us check
* for errors before we reference mapaddr and
* try to resolve the fault in bcopy (which would
* panic instead). And this can easily happen,
* particularly if you've lofi'd a file over NFS
* and someone deletes the file on the server.
*/
if (error) {
else
break;
}
/* error may be non-zero for encrypted lofi */
if (error == 0) {
}
smflags = 0;
if (isread) {
/*
* If we're reading an entire page starting
* at a page boundary, there's a good chance
* we won't need it again. Put it on the
* head of the freelist.
*/
smflags |= SM_DONTNEED;
} else {
/*
* Write back good pages, it is okay to
* always release asynchronous here as we'll
* follow with VOP_FSYNC for B_SYNC buffers.
*/
if (error == 0)
}
if (error == 0)
error = save_error;
/* only the first map may start partial */
mapoffset = 0;
return (error);
}
/*
* Check if segment seg_index is present in the decompressed segment
* data cache.
*
* Returns a pointer to the decompressed segment data cache entry if
* found, and NULL when decompressed data for this segment is not yet
* cached.
*/
static struct lofi_comp_cache *
{
struct lofi_comp_cache *lc;
/*
* Decompressed segment data was found in the
* cache.
*
* The cache uses an LRU replacement strategy;
* move the entry to head of list.
*/
return (lc);
}
}
return (NULL);
}
/*
* Add the data for a decompressed segment at segment index
* seg_index to the cache of the decompressed segments.
*
* Returns a pointer to the cache element structure in case
* the data was added to the cache; returns NULL when the data
* wasn't cached.
*/
static struct lofi_comp_cache *
{
struct lofi_comp_cache *lc;
}
/*
* Do not cache when disabled by tunable variable
*/
if (lofi_max_comp_cache == 0)
return (NULL);
/*
* When the cache has not yet reached the maximum allowed
* number of segments, allocate a new cache element.
* Otherwise the cache is full; reuse the last list element
* (LRU) for caching the decompressed segment data.
*
* The cache element for the new decompressed segment data is
* added to the head of the list.
*/
} else {
return (NULL);
}
/*
* Free old uncompressed segment data when reusing a cache
* entry.
*/
return (lc);
}
/*ARGSUSED*/
static int
{
return (-1);
return (0);
}
/*ARGSUSED*/
static int
{
void *actual_src;
return (-1);
}
return (0);
}
/*
* This is basically what strategy used to be before we found we
* needed task queues.
*/
static void
lofi_strategy_task(void *arg)
{
int error;
int syncflag = 0;
struct lofi_state *lsp;
goto errout;
}
}
if (lsp->ls_crypto_enabled) {
/* encrypted data really begins after crypto header */
}
goto errout;
}
/*
* If we're writing and the buffer was not B_ASYNC
* we'll follow up with a VOP_FSYNC() to force any
* asynchronous I/O to stable storage.
*/
/*
* We used to always use vn_rdwr here, but we cannot do that because
* we might decide to read or write from the the underlying
* file during this call, which would be a deadlock because
* we have the rw_lock. So instead we page, unless it's not
* mapable or it's a character device or it's an encrypted lofi.
*/
lsp->ls_crypto_enabled) {
NULL);
} else if (lsp->ls_uncomp_seg_sz == 0) {
} else {
struct lofi_comp_cache *lc;
uint64_t i;
int j;
/*
* From here on we're dealing primarily with compressed files
*/
/*
* Compressed files can only be read from and
* not written to
*/
goto done;
}
/*
* Compute starting and ending compressed segment numbers
* We use only bitwise operations avoiding division and
* modulus because we enforce the compression segment size
* to a power of 2
*/
/*
* Check the decompressed segment cache.
*
* The cache is used only when the requested data
* is within a segment. Requests that cross
* segment boundaries bypass the cache.
*/
/*
* Request doesn't cross a segment boundary,
* now check the cache.
*/
/*
* We've found the decompressed segment
* data in the cache; reuse it.
*/
error = 0;
goto done;
}
}
/*
* Align start offset to block boundary for segmap
*/
/*
* We're dealing with the last segment of
* the compressed file -- the size of this
* segment *may not* be the same as the
* segment size for the file
*/
} else {
}
/*
* Preserve original request paramaters
*/
/*
* Assign the calculated parameters
*/
/*
* Buffers to hold compressed segments are pre-allocated
* on a per-thread basis. Find a pre-allocated buffer
* that is not currently in use and mark it for use.
*/
for (j = 0; j < lofi_taskq_nthreads; j++) {
break;
}
}
ASSERT(j < lofi_taskq_nthreads);
/*
* If the pre-allocated buffer size does not match
* the size of the I/O request, re-allocate it with
* the appropriate size
*/
KM_SLEEP);
}
/*
* Map in the calculated number of blocks
*/
if (error != 0)
goto done;
/*
* decompress compressed blocks start
*/
/*
* The last segment is special in that it is
* most likely not going to be the same
* (uncompressed) size as the other segments.
*/
} else {
}
/*
* Each of the segment index entries contains
* the starting block number for that segment.
* The number of compressed bytes in a segment
* is thus the difference between the starting
* block number of this segment and the starting
* block number of the next segment.
*/
lsp->ls_comp_seg_index[i];
/*
* The first byte in a compressed segment is a flag
* that indicates whether this segment is compressed
* at all.
*
* The variable 'useg' is used (instead of
* uncompressed_seg) in this loop to keep a
* reference to the uncompressed segment.
*
* N.B. If 'useg' is replaced with uncompressed_seg,
* it leads to memory leaks and heap corruption in
* corner cases where compressed segments lie
* adjacent to uncompressed segments.
*/
if (*cmpbuf == UNCOMPRESSED) {
} else {
if (uncompressed_seg == NULL)
KM_SLEEP);
goto done;
}
}
/*
* Determine how much uncompressed data we
* have to copy and copy it
*/
if (i == eblkno)
sblkoff = 0;
break;
} /* decompress compressed blocks ends */
/*
* Skip to done if there is no uncompressed data to cache
*/
if (uncompressed_seg == NULL)
goto done;
/*
* Add the data for the last decompressed segment to
* the cache.
*
* In case the uncompressed segment data was added to (and
* is referenced by) the cache, make sure we don't free it
* here.
*/
uncompressed_seg)) != NULL) {
}
done:
if (compressed_seg != NULL) {
}
if (uncompressed_seg != NULL)
} /* end of handling compressed files */
} else {
}
}
if (--lsp->ls_vp_iocount == 0)
}
static int
{
struct lofi_state *lsp;
/*
* We cannot just do I/O here, because the current thread
* _might_ end up back in here because the underlying filesystem
* wants a buffer, which eventually gets into bio_recycle and
* might call into lofi to write out a delayed-write buffer.
* This is bad if the filesystem above lofi is the same as below.
*
* We could come up with a complex strategy using threads to
* do the I/O asynchronously, or we could use task queues. task
* queues were incredibly easy so they win.
*/
return (0);
}
return (0);
}
if (lsp->ls_crypto_enabled) {
/* encrypted data really begins after crypto header */
}
/* EOF */
} else {
/* writes should fail */
}
return (0);
}
return (0);
}
lsp->ls_vp_iocount++;
}
return (0);
}
/*ARGSUSED2*/
static int
{
return (EINVAL);
}
/*ARGSUSED2*/
static int
{
return (EINVAL);
}
/*ARGSUSED2*/
static int
{
return (EINVAL);
}
/*ARGSUSED2*/
static int
{
return (EINVAL);
}
/*ARGSUSED*/
static int
{
switch (infocmd) {
case DDI_INFO_DEVT2DEVINFO:
return (DDI_SUCCESS);
case DDI_INFO_DEVT2INSTANCE:
*result = 0;
return (DDI_SUCCESS);
}
return (DDI_FAILURE);
}
static int
{
int error;
if (cmd != DDI_ATTACH)
return (DDI_FAILURE);
if (!lofi_minor_id)
return (DDI_FAILURE);
if (error == DDI_FAILURE) {
return (DDI_FAILURE);
}
DDI_PSEUDO, NULL);
if (error == DDI_FAILURE) {
return (DDI_FAILURE);
}
/* driver handles kernel-issued IOCTLs */
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static int
{
if (cmd != DDI_DETACH)
return (DDI_FAILURE);
if (!list_is_empty(&lofi_list)) {
return (DDI_FAILURE);
}
if (zone_key_delete(lofi_zone_key) != 0)
return (DDI_SUCCESS);
}
/*
* With addition of encryption, be careful that encryption key is wiped before
* kernel memory structures are freed, and also that key is not accidentally
* passed out into userland structures.
*/
static void
{
/* Make sure this encryption key doesn't stick around */
}
/*
* Helpers for the ioctl handlers below: copy in/out
* the lofi_ioctl structure between userland and the kernel.
*/
int
int flag)
{
struct lofi_ioctl *klip;
int error;
if (error)
goto err;
/* ensure NULL termination */
goto err;
}
return (0);
err:
return (error);
}
int
int flag)
{
int error;
/*
* NOTE: Do NOT copy the crypto_key_t "back" to userland.
* This ensures that an attacker can't trivially find the
* key for a mapping just by issuing the ioctl.
*
* It can still be found by poking around in kmem with mdb(1),
* but there is no point in making it easy when the info isn't
* of any use in this direction anyway.
*
* Either way we don't actually have the raw key stored in
* a form that we can get it anyway, since we just used it
* to create a ctx template and didn't keep "the original".
*/
if (error)
return (EFAULT);
return (0);
}
static int
{
return (0);
return (EPERM);
}
/*
* Find the lofi state for the given filename. We compare by vnode to
* allow the global zone visibility into NGZ lofi nodes.
*/
static int
{
struct lofi_state *lsp;
int err = 0;
goto out;
}
}
goto out;
}
}
out:
return (err);
}
/*
* Find the minor for the given filename, checking the zone can access
* it.
*/
static int
{
int err = 0;
return (err);
return (err);
return (0);
}
/*
* Fakes up a disk geometry, and one big partition, based on the size
* of the file. This is needed because we allow newfs'ing the device,
* and newfs will do several disk ioctls to figure out the geometry and
* partition information. It uses that information to determine the parameters
* to pass to mkfs. Geometry is pretty much irrelevant these days, but we
* have to support it.
*/
static void
{
/* dk_geom - see dkio(7I) */
/*
* dkg_ncyl _could_ be set to one here (one big cylinder with gobs
* of sectors), but that breaks programs like fdisk which want to
* partition a disk by cylinder. With one cylinder, you can't create
* an fdisk partition and put pcfs on it for testing (hard to pick
* a number between one and one).
*
* The cheezy floppy test is an attempt to not have too few cylinders
* for a small file, or so many on a big file that you waste space
* for backup superblocks or cylinder group structures.
*/
else
/* in case file file is < 100k */
/* vtoc - see dkio(7I) */
/*
* A compressed file is read-only, other files can
* be read-write
*/
if (lsp->ls_uncomp_seg_sz > 0) {
} else {
}
/*
* The partition size cannot just be the number of sectors, because
* that might not end on a cylinder boundary. And if that's the case,
*/
/* dk_cinfo - see dkio(7I) */
/*
* newfs uses this to set maxcontig. Must not be < 16, or it
* will be 0 when newfs multiplies it by DEV_BSIZE and divides
* it by the block size. Then tunefs doesn't work because
* maxcontig is 0.
*/
}
/*
* map in a compressed file
*
* Read in the header and the index that follows.
*
* The header is as follows -
*
* Signature (name of the compression algorithm)
* Compression segment size (a multiple of 512)
* Number of index entries
* Size of the last block
* The array containing the index entries
*
* The header information is always stored in
* network byte order on disk.
*/
static int
{
int error;
/* The signature has already been read */
/*
* The compressed segment size must be a power of 2
*/
return (EINVAL);
;
lsp->ls_comp_seg_shift = i;
sizeof (lsp->ls_uncomp_last_seg_sz));
/*
* Compute the total size of the uncompressed data
* for use in fake_disk_geometry and other calculations.
* Disk geometry has to be faked with respect to the
* actual uncompressed data size rather than the
* compressed file size.
*/
lsp->ls_vp_size =
/*
* Index size is rounded up to DEV_BSIZE for ease
* of segmapping
*/
sizeof (lsp->ls_uncomp_seg_sz) +
sizeof (lsp->ls_comp_index_sz) +
sizeof (lsp->ls_uncomp_last_seg_sz);
index_sz += header_len;
/*
* Read in the index -- this has a side-effect
* of reading in the header as well
*/
if (error != 0)
return (error);
/* Skip the header, this is where the index really begins */
/*LINTED*/
/*
* Now recompute offsets in the index to account for
* the header length
*/
for (i = 0; i < lsp->ls_comp_index_sz; i++) {
}
return (error);
}
static int
{
struct crypto_meta chead;
char *marker;
int error;
int ret;
int i;
if (!klip->li_crypto_enabled)
return (0);
/*
* All current algorithms have a max of 448 bits.
*/
return (EINVAL);
return (EINVAL);
return (EINVAL);
}
/* this is just initialization here */
return (EINVAL);
}
/* iv mech must itself take a null iv */
/*
* Create ctx using li_cipher & the raw li_key after checking
* that it isn't a weak key.
*/
if (ret != CRYPTO_SUCCESS) {
return (EINVAL);
}
if (error != 0)
return (error);
/*
* This is the case where the header in the lofi image is already
* initialized to indicate it is encrypted.
*/
/*
* The encryption header information is laid out this way:
* 6 bytes: hex "CFLOFI"
* 2 bytes: version = 0 ... for now
* 96 bytes: reserved1 (not implemented yet)
* 4 bytes: data_sector = 2 ... for now
* more... not implemented yet
*/
/* copy the magic */
/* read the encryption version number */
/* read a chunk of reserved data */
/* read block number where encrypted data begins */
/* and ignore the rest until it is implemented */
return (0);
}
/*
* We've requested encryption, but no magic was found, so it must be
* a new image.
*/
for (i = 0; i < sizeof (struct crypto_meta); i++) {
if (buf[i] != '\0')
return (EINVAL);
}
marker += sizeof (lofi_crypto_magic);
/* write the header */
if (error != 0)
return (error);
/* fix things up so it looks like we read this info */
sizeof (lofi_crypto_magic));
return (0);
}
/*
* Check to see if the passed in signature is a valid one. If it is
* valid, return the index into lofi_compress_table.
*
* Return -1 if it is invalid
*/
static int
lofi_compress_select(const char *signature)
{
int i;
for (i = 0; i < LOFI_COMPRESS_FUNCTIONS; i++) {
return (i);
}
return (-1);
}
static int
{
int compress_index;
int error;
if (error != 0)
return (error);
return (0);
/* compression and encryption are mutually exclusive */
if (lsp->ls_crypto_enabled)
return (ENOTSUP);
/* initialize compression info for compressed lofi */
sizeof (lsp->ls_comp_algorithm));
/* Finally setup per-thread pre-allocated buffers */
}
/*
* map a file to a minor number. Return the minor number.
*/
static int
{
struct lofi_ioctl *klip;
int error;
int flag;
char namebuf[50];
if (error != 0)
return (error);
return (error);
}
goto err;
}
if (pickminor) {
goto err;
}
} else {
goto err;
}
}
if (error) {
/* try read-only */
&vp, 0, 0);
if (error)
goto err;
}
goto err;
}
if (error)
goto err;
/* the file needs to be a multiple of the block size */
goto err;
}
/* lsp alloc+init */
if (error == DDI_FAILURE) {
goto err;
}
lsp->ls_uncomp_seg_sz = 0;
lsp->ls_crypto_offset = 0;
/*
* save open mode so file can be closed properly and vnode counts
* updated correctly.
*/
/*
* Try to handle stacked lofs vnodes.
*/
/*
* We need to use the realvp for uniqueness
* checking, but keep the stacked vp for
* LOFI_GET_FILENAME display.
*/
}
}
goto err;
}
goto err;
goto err;
/* create minor nodes */
DDI_PSEUDO, NULL);
if (error != DDI_SUCCESS) {
goto err;
}
DDI_PSEUDO, NULL);
if (error != DDI_SUCCESS) {
/* remove block node */
goto err;
}
/* create DDI properties */
goto nodeerr;
}
!= DDI_PROP_SUCCESS) {
goto nodeerr;
}
goto nodeerr;
}
if (rvalp)
return (0);
err:
} else {
}
}
return (error);
}
/*
* unmap a file.
*/
static int
{
struct lofi_state *lsp;
struct lofi_ioctl *klip;
int err;
if (err != 0)
return (err);
if (byfilename) {
return (err);
}
return (ENXIO);
} else {
}
return (ENXIO);
}
/*
* If it's still held open, we'll do one of three things:
*
* If no flag is set, just return EBUSY.
*
* If the 'cleanup' flag is set, unmap and remove the device when
* the last user finishes.
*
* If the 'force' flag is set, then we forcibly close the underlying
* file. Subsequent operations will fail, and the DKIOCSTATE ioctl
* will return DKIO_DEV_GONE. When the device is last closed, the
* device will be cleaned up appropriately.
*
* This is complicated by the fact that we may have outstanding
* I/O, we keep a count of the number of outstanding I/O requests
* should be dispatched (ls_vp_closereq).
*
* and then close the underlying vnode.
*/
/* wake up any threads waiting on dkiocstate */
while (lsp->ls_vp_iocount > 0)
goto out;
} else if (klip->li_cleanup) {
return (0);
}
return (EBUSY);
}
out:
return (0);
}
/*
* get the filename given the minor number, or the minor number given
* the name.
*/
/*ARGSUSED*/
static int
{
struct lofi_ioctl *klip;
struct lofi_state *lsp;
int error;
if (error != 0)
return (error);
switch (which) {
case LOFI_GET_FILENAME:
return (EINVAL);
}
return (ENXIO);
}
/*
* This may fail if, for example, we're trying to look
* up a zoned NFS path from the global zone.
*/
sizeof (klip->li_filename));
}
sizeof (klip->li_algorithm));
return (error);
case LOFI_GET_MINOR:
if (error == 0)
if (error == 0)
return (error);
case LOFI_CHECK_COMPRESSED:
if (error != 0) {
return (error);
}
sizeof (klip->li_algorithm));
return (error);
default:
return (EINVAL);
}
}
static int
int *rvalp)
{
int error;
enum dkio_state dkstate;
struct lofi_state *lsp;
/* lofi ioctls only apply to the master device */
if (minor == 0) {
/*
* the query command only need read-access - i.e., normal
* users are allowed to do those on the ctl device as
* long as they can open it read-only.
*/
switch (cmd) {
case LOFI_MAP_FILE:
return (EPERM);
case LOFI_MAP_FILE_MINOR:
return (EPERM);
case LOFI_UNMAP_FILE:
return (EPERM);
case LOFI_UNMAP_FILE_MINOR:
return (EPERM);
case LOFI_GET_FILENAME:
case LOFI_GET_MINOR:
/*
* This API made limited sense when this value was fixed
* at LOFI_MAX_FILES. However, its use to iterate
* across all possible devices in lofiadm means we don't
* want to return L_MAXMIN32, but the highest
* *allocated* minor.
*/
case LOFI_GET_MAXMINOR:
minor = 0;
if (lofi_access(lsp) != 0)
continue;
}
if (error)
return (EFAULT);
return (0);
case LOFI_CHECK_COMPRESSED:
default:
return (EINVAL);
}
}
return (ENXIO);
}
/*
* We explicitly allow DKIOCSTATE, but all other ioctls should fail with
* EIO as if the device was no longer present.
*/
return (EIO);
/* these are for faking out utilities like newfs */
switch (cmd) {
case DKIOCGVTOC:
case DDI_MODEL_ILP32: {
return (EFAULT);
break;
}
case DDI_MODEL_NONE:
return (EFAULT);
break;
}
return (0);
case DKIOCINFO:
if (error)
return (EFAULT);
return (0);
case DKIOCG_VIRTGEOM:
case DKIOCG_PHYGEOM:
case DKIOCGGEOM:
if (error)
return (EFAULT);
return (0);
case DKIOCSTATE:
/*
* Normally, lofi devices are always in the INSERTED state. If
* a device is forcefully unmapped, then the device transitions
* to the DKIO_DEV_GONE state.
*/
flag) != 0)
return (EFAULT);
lsp->ls_vp_iocount++;
!lsp->ls_vp_closereq) {
/*
* By virtue of having the device open, we know that
* 'lsp' will remain valid when we return.
*/
&lsp->ls_vp_lock)) {
lsp->ls_vp_iocount--;
return (EINTR);
}
}
lsp->ls_vp_iocount--;
return (EFAULT);
return (0);
default:
return (ENOTTY);
}
}
static struct cb_ops lofi_cb_ops = {
lofi_open, /* open */
lofi_close, /* close */
lofi_strategy, /* strategy */
nodev, /* print */
nodev, /* dump */
lofi_read, /* read */
lofi_write, /* write */
lofi_ioctl, /* ioctl */
nodev, /* devmap */
nodev, /* mmap */
nodev, /* segmap */
nochpoll, /* poll */
ddi_prop_op, /* prop_op */
0, /* streamtab */
};
DEVO_REV, /* devo_rev, */
0, /* refcnt */
lofi_info, /* info */
nulldev, /* identify */
nulldev, /* probe */
lofi_attach, /* attach */
lofi_detach, /* detach */
nodev, /* reset */
&lofi_cb_ops, /* driver operations */
NULL, /* no bus operations */
NULL, /* power */
ddi_quiesce_not_needed, /* quiesce */
};
"loopback file driver",
&lofi_ops,
};
static struct modlinkage modlinkage = {
&modldrv,
};
int
_init(void)
{
int error;
sizeof (struct lofi_state), 0);
if (error)
return (error);
if (error) {
}
return (error);
}
int
_fini(void)
{
int error;
if (!list_is_empty(&lofi_list)) {
return (EBUSY);
}
if (error)
return (error);
return (error);
}
int
{
}