/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* pseudo scsi disk driver
*/
#include <sys/emul64cmd.h>
#include <sys/emul64var.h>
/*
 * Mode sense page control field values (the standard SCSI PC codes).
 */
#define	MODE_SENSE_PC_CURRENT		0
#define	MODE_SENSE_PC_CHANGEABLE	1
#define	MODE_SENSE_PC_DEFAULT		2
#define	MODE_SENSE_PC_SAVED		3
/*
* Byte conversion macros
*/
#if defined(_BIG_ENDIAN)
#define ushort_to_scsi_ushort(n) (n)
#define uint32_to_scsi_uint32(n) (n)
#define uint64_to_scsi_uint64(n) (n)
#elif defined(_LITTLE_ENDIAN)
#define ushort_to_scsi_ushort(n) \
((((n) & 0x00ff) << 8) | \
(((n) & 0xff00) >> 8))
#define uint32_to_scsi_uint32(n) \
((((n) & 0x000000ff) << 24) | \
(((n) & 0x0000ff00) << 8) | \
(((n) & 0x00ff0000) >> 8) | \
(((n) & 0xff000000) >> 24))
#define uint64_to_scsi_uint64(n) \
((((n) & 0x00000000000000ff) << 56) | \
(((n) & 0x000000000000ff00) << 40) | \
(((n) & 0x0000000000ff0000) << 24) | \
(((n) & 0x00000000ff000000) << 8) | \
(((n) & 0x000000ff00000000) >> 8) | \
(((n) & 0x0000ff0000000000) >> 24) | \
(((n) & 0x00ff000000000000) >> 40) | \
(((n) & 0xff00000000000000) >> 56))
#else
#error	"byte order (_BIG_ENDIAN or _LITTLE_ENDIAN) must be defined"
#endif
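/*
 * A minimal illustration (not part of the original driver, guarded out of
 * the build by a hypothetical EMUL64_EXAMPLES define): SCSI data structures
 * are big-endian on the wire, so host-order values are passed through the
 * macros above, which are the identity on big-endian hosts and a byte swap
 * on little-endian hosts.
 */
#ifdef EMUL64_EXAMPLES
static void
emul64_example_fill_capacity(struct scsi_capacity *cap, uint32_t last_lba,
    uint32_t blksize)
{
	/* capacity and lbasize are carried in SCSI (big-endian) order */
	cap->capacity = uint32_to_scsi_uint32(last_lba);
	cap->lbasize = uint32_to_scsi_uint32(blksize);
}
#endif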
/*
* struct prop_map
*
* This structure maps a property name to the place to store its value.
*/
struct prop_map {
	char	*pm_name;	/* property name as it appears in emul64.conf */
	int	*pm_value;	/* where to store the property's value */
};
static int emul64_debug_blklist = 0;
/*
* Some interesting statistics. These are protected by the
* emul64_stats_mutex. It would be nice to have an ioctl to print them out,
* but we don't have the development time for that now. You can at least
* look at them with adb.
*/
/*
 * The counters track: I/O operations and blocks skipped because of
 * EMUL64_WRITE_OFF; total I/O operations and blocks, including skipped and
 * actual; the number of non-zero data blocks currently held in memory; and
 * the maximum length of a list of non-zero blocks.  Work is dispatched
 * through a taskq mechanism; if the number of entries in the queue exceeds
 * the maximum for the queue, a 1 second delay is encountered in
 * taskq_ent_alloc, and a counter counts the number of times that this
 * happens.
 */
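/*
 * For example, a statistic or tunable global from this driver can be
 * printed by name on a live kernel with mdb(1), adb's successor
 * (illustrative session):
 *
 *	# mdb -k
 *	> emul64_collect_stats/D
 */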
/*
* Since emul64 does no physical I/O, operations that would normally be I/O
* intensive become CPU bound. An example of this is RAID 5
* initialization. When the kernel becomes CPU bound, it looks as if the
* machine is hung.
*
 * To avoid this problem, we provide a function, emul64_yield_check, that
 * delays from time to time to yield the CPU.  The following variables
* are tunables for this algorithm.
*
* emul64_num_delay_called Number of times we called delay. This is
* not really a tunable. Rather it is a
* counter that provides useful information
* for adjusting the tunables.
* emul64_yield_length Number of microseconds to yield the CPU.
* emul64_yield_period Number of I/O operations between yields.
 * emul64_yield_enable emul64 will yield the CPU only if this
 * variable contains a non-zero value. This
* allows the yield functionality to be turned
* off for experimentation purposes.
*
* The value of 1000 for emul64_yield_period has been determined by
* experience with running the tests.
*/
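/*
 * A minimal sketch (not part of the original driver, guarded out of the
 * build by the hypothetical EMUL64_EXAMPLES define): emul64_yield_length is
 * expressed in microseconds, so before sleeping it must be converted to
 * clock ticks with the DDI routine drv_usectohz() and clamped to at least
 * one tick, mirroring the clamp in emul64_yield_check() at the end of this
 * file.
 */
#ifdef EMUL64_EXAMPLES
static clock_t
emul64_example_yield_ticks(void)
{
	clock_t	ticks = drv_usectohz(emul64_yield_length);

	if (ticks == 0)
		ticks = 1;	/* never request a zero-tick sleep */
	return (ticks);
}
#endif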
/*
* This array establishes a set of tunable variables that can be set by
* defining properties in the emul64.conf file.
*/
"emul64_collect_stats", &emul64_collect_stats,
"emul64_yield_length", &emul64_yield_length,
"emul64_yield_period", &emul64_yield_period,
"emul64_yield_enable", &emul64_yield_enable,
"emul64_max_task", &emul64_max_task,
"emul64_task_nthreads", &emul64_task_nthreads
};
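/*
 * Illustrative only: given the mapping above, a driver.conf(4) line such as
 * the following in emul64.conf overrides the default yield period when the
 * driver looks its properties up:
 *
 *	emul64_yield_period=500;
 */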
/* ncyl=250000 acyl=2 nhead=24 nsect=357 */
static int bsd_mode_sense_dad_mode_geometry(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_err_recov(struct scsi_pkt *);
static int bsd_mode_sense_modepage_disco_reco(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_format(struct scsi_pkt *);
static int bsd_mode_sense_dad_mode_cache(struct scsi_pkt *);
int, unsigned char *);
int, unsigned char *);
static void emul64_yield_check();
/*
* Initialize globals in this file.
*/
void
{
}
/*
* Clean up globals in this file.
*/
void
{
if (emul64_zeros != NULL) {
emul64_zeros = NULL;
}
}
/*
* Attempt to get the values of the properties that are specified in the
* emul64_properties array. If the property exists, copy its value to the
 * specified location.  All the properties have been assigned default
 * values in this driver, so it is not a problem if we cannot get a
 * property.
*/
void
{
uint_t i;
int *properties;
for (pmp = emul64_properties, i = 0;
i < sizeof (emul64_properties) / sizeof (struct prop_map);
i++, pmp++) {
&count) == DDI_PROP_SUCCESS) {
if (count >= 1) {
}
ddi_prop_free((void *) properties);
}
}
}
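/*
 * A minimal sketch of a single lookup using the same DDI pattern as the
 * function above (not part of the original driver, guarded out of the build
 * by the hypothetical EMUL64_EXAMPLES define): ddi_prop_lookup_int_array()
 * hands back an allocated array that must be released with ddi_prop_free().
 */
#ifdef EMUL64_EXAMPLES
static void
emul64_example_get_prop(dev_info_t *dip, char *name, int *valuep)
{
	int	*properties;
	uint_t	count;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    name, &properties, &count) == DDI_PROP_SUCCESS) {
		if (count >= 1)
			*valuep = *properties;
		ddi_prop_free((void *)properties);
	}
}
#endif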
int
{
return (-1);
return (0);
return (1);
}
/* ARGSUSED 0 */
int
{
return (0);
}
/* ARGSUSED 0 */
int
{
return (0);
}
/* ARGSUSED 0 */
int
{
return (0);
}
int
{
emul64_name, 6);
return (EIO);
}
return (0);
}
int
{
emul64_name, 22);
return (EIO);
}
/* @22: */
/*
 * Instances seem to be assigned sequentially, so it is unlikely that we
* will have more than 65535 of them.
*/
return (0);
}
int
{
emul64_name, (int)sizeof (inq));
return (EIO);
}
return (0);
}
case 0x00:
case 0x83:
default:
"unsupported 0x%x",
return (0);
}
}
/* set up the inquiry data we return */
sizeof (tgt->emul64_tgt_inq));
return (0);
}
/* ARGSUSED 0 */
int
{
return (0);
}
int
{
int nblks;
case SCMD_READ:
if (emul64debug) {
"read g0 blk=%lld (0x%llx) nblks=%d\n",
}
break;
case SCMD_WRITE:
if (emul64debug) {
"write g0 blk=%lld (0x%llx) nblks=%d\n",
}
break;
case SCMD_READ_G1:
if (emul64debug) {
"read g1 blk=%lld (0x%llx) nblks=%d\n",
}
break;
case SCMD_WRITE_G1:
if (emul64debug) {
"write g1 blk=%lld (0x%llx) nblks=%d\n",
}
break;
case SCMD_READ_G4:
lblkno <<= 32;
if (emul64debug) {
"read g4 blk=%lld (0x%llx) nblks=%d\n",
}
break;
case SCMD_WRITE_G4:
lblkno <<= 32;
if (emul64debug) {
"write g4 blk=%lld (0x%llx) nblks=%d\n",
}
break;
default:
break;
}
"pkt_resid: 0x%lx, lblkno %lld, nblks %d",
return (0);
}
int
{
int page_code;
emul64_name, 9);
return (EIO);
}
if (page_code) {
return (0);
}
return (0);
}
int
{
int page_control;
int page_code;
int rval = 0;
case SCMD_MODE_SENSE:
if (emul64debug) {
"page=0x%x control=0x%x nbytes=%d\n",
GETG0COUNT(cdb));
}
break;
case SCMD_MODE_SENSE_G1:
if (emul64debug) {
"page=0x%x control=0x%x nbytes=%d\n",
GETG1COUNT(cdb));
}
break;
default:
return (EIO);
}
switch (page_code) {
case DAD_MODE_GEOMETRY:
break;
case DAD_MODE_ERR_RECOV:
break;
case MODEPAGE_DISCO_RECO:
break;
case DAD_MODE_FORMAT:
break;
case DAD_MODE_CACHE:
break;
default:
break;
}
return (rval);
}
static int
{
int page_control;
int ncyl;
int rval = 0;
if (emul64debug) {
}
"size %d required\n",
return (EIO);
}
header.bdesc_length = 0;
switch (page_control) {
case MODE_SENSE_PC_CURRENT:
case MODE_SENSE_PC_DEFAULT:
case MODE_SENSE_PC_SAVED:
break;
case MODE_SENSE_PC_CHANGEABLE:
break;
}
rval = 0;
return (rval);
}
static int
{
int page_control;
int rval = 0;
if (emul64debug) {
}
"size %d required\n",
return (EIO);
}
header.bdesc_length = 0;
switch (page_control) {
case MODE_SENSE_PC_CURRENT:
case MODE_SENSE_PC_DEFAULT:
case MODE_SENSE_PC_SAVED:
break;
case MODE_SENSE_PC_CHANGEABLE:
break;
}
rval = 0;
return (rval);
}
static int
{
int rval = 0;
int page_control;
if (emul64debug) {
}
"size %d required\n",
return (EIO);
}
header.bdesc_length = 0;
switch (page_control) {
case MODE_SENSE_PC_CURRENT:
case MODE_SENSE_PC_DEFAULT:
case MODE_SENSE_PC_SAVED:
break;
case MODE_SENSE_PC_CHANGEABLE:
break;
}
rval = 0;
return (rval);
}
static int
{
int page_control;
int rval = 0;
if (emul64debug) {
}
"size %d required\n",
return (EIO);
}
header.bdesc_length = 0;
switch (page_control) {
case MODE_SENSE_PC_CURRENT:
case MODE_SENSE_PC_DEFAULT:
case MODE_SENSE_PC_SAVED:
break;
case MODE_SENSE_PC_CHANGEABLE:
break;
}
rval = 0;
return (rval);
}
static int
{
int page_control;
int rval = 0;
if (emul64debug) {
}
"size %d required\n",
return (EIO);
}
header.bdesc_length = 0;
switch (page_control) {
case MODE_SENSE_PC_CURRENT:
case MODE_SENSE_PC_DEFAULT:
case MODE_SENSE_PC_SAVED:
break;
case MODE_SENSE_PC_CHANGEABLE:
break;
}
rval = 0;
return (rval);
}
/* ARGSUSED 0 */
int
{
return (0);
}
int
{
int rval = 0;
else
sizeof (struct scsi_capacity));
return (rval);
}
int
{
int rval = 0;
cap.sc_prot_en = 0;
sizeof (struct scsi_capacity_16));
return (rval);
}
int
{
return (bsd_scsi_read_capacity_8(pkt));
}
/* ARGSUSED 0 */
int
{
return (0);
}
/* ARGSUSED 0 */
int
{
return (0);
}
int
{
return (0);
}
/* ARGSUSED 0 */
int
{
return (0);
}
static int
{
int i = 0;
if (emul64debug) {
"<%d,%d> blk %llu (0x%llx) nblks %d\n",
}
goto unlocked_out;
}
if (emul64_collect_stats) {
}
/*
* Keep the ioctls from changing the nowrite list for the duration
* of this I/O by grabbing emul64_tgt_nw_lock. This will keep the
* results from our call to bsd_tgt_overlap from changing while we
* do the I/O.
*/
switch (overlap) {
case O_SAME:
case O_SUBSET:
case O_OVERLAP:
"read to blocked area %lld,%d\n",
goto errout;
case O_NONE:
break;
}
for (i = 0; i < nblks; i++) {
if (emul64_debug_blklist)
"%d of %d: blkno %lld\n",
break;
if (blk) {
} else {
}
blkno++;
}
}
static int
{
int i = 0;
if (emul64debug) {
"<%d,%d> blk %llu (0x%llx) nblks %d\n",
}
goto unlocked_out;
}
if (emul64_collect_stats) {
}
/*
* Keep the ioctls from changing the nowrite list for the duration
* of this I/O by grabbing emul64_tgt_nw_lock. This will keep the
* results from our call to bsd_tgt_overlap from changing while we
* do the I/O.
*/
switch (overlap) {
case O_SAME:
case O_SUBSET:
if (emul64_collect_stats) {
}
return (0);
case O_OVERLAP:
case O_NONE:
break;
}
for (i = 0; i < nblks; i++) {
/*
* If there was no overlap for the entire I/O range
* or if there is no overlap for this particular
* block, then we need to do the write.
*/
if (emul64_debug_blklist)
"%d of %d: blkno %lld\n",
"blkno %lld, tgt_sectors %lld\n",
break;
}
if (blk) {
}
} else {
if (blk) {
} else {
}
}
}
blkno++;
}
/*
* Now that we're done with our I/O, allow the ioctls to change the
* nowrite list.
*/
}
{
while (tgt) {
break;
}
}
return (tgt);
}
/*
* Free all blocks that are part of the specified range.
*/
int
{
/*
 * We need to get the next block pointer now, because blk
 * will be freed inside the if statement. (See the illustrative
 * sketch that follows this function.)
 */
}
}
return (0);
}
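/*
 * A minimal sketch of the "fetch the next pointer before freeing" pattern
 * used above, shown on a hypothetical singly linked list (not part of the
 * original driver, guarded out of the build by the hypothetical
 * EMUL64_EXAMPLES define).
 */
#ifdef EMUL64_EXAMPLES
struct emul64_example_node {
	struct emul64_example_node	*en_next;
};

static void
emul64_example_free_list(struct emul64_example_node *head)
{
	struct emul64_example_node	*node, *next;

	for (node = head; node != NULL; node = next) {
		next = node->en_next;	/* node is about to be freed */
		kmem_free(node, sizeof (*node));
	}
}
#endif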
static blklist_t *
{
return (blk);
}
static void
{
if (emul64_debug_blklist)
emul64_name, blkno);
if (emul64_collect_stats) {
}
}
}
static void
{
if (emul64_debug_blklist)
if (emul64_collect_stats) {
}
}
/*
* Look for overlap between a nowrite range and a block range.
*
* NOTE: Callers of this function must hold the tgt->emul64_tgt_nw_lock
* lock. For the purposes of this function, a reader lock is
* sufficient.
*/
static emul64_rng_overlap_t
{
}
return (rv);
}
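/*
 * A minimal sketch of the caller-side pattern required by the NOTE above
 * (not part of the original driver, guarded out of the build by the
 * hypothetical EMUL64_EXAMPLES define; the emul64_tgt_t type name and the
 * bsd_tgt_overlap() argument list are assumptions here).  Holding
 * emul64_tgt_nw_lock as reader keeps the nowrite list, and therefore the
 * overlap result, stable while the caller acts on it.
 */
#ifdef EMUL64_EXAMPLES
static void
emul64_example_check_overlap(emul64_tgt_t *tgt, diskaddr_t blkno, int nblks)
{
	emul64_rng_overlap_t	overlap;

	rw_enter(&tgt->emul64_tgt_nw_lock, RW_READER);
	overlap = bsd_tgt_overlap(tgt, blkno, nblks);
	if (overlap == O_NONE) {
		/* ... do the I/O while the nowrite list cannot change ... */
	}
	rw_exit(&tgt->emul64_tgt_nw_lock);
}
#endif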
/*
* Operations that do a lot of I/O, such as RAID 5 initializations, result
* in a CPU bound kernel when the device is an emul64 device. This makes
* the machine look hung. To avoid this problem, give up the CPU from time
* to time.
*/
static void
{
/* cv_timed wait. */
if (emul64_yield_enable == 0)
return;
if (emul64_waiting == TRUE) {
/*
* Another thread has already started the timer. We'll
* just wait here until their time expires, and they
* broadcast to us. When they do that, we'll return and
* let our caller do more I/O.
*/
} else if (emul64_io_count++ > emul64_yield_period) {
/*
* Set emul64_waiting to let other threads know that we
* have started the timer.
*/
if (ticks == 0)
ticks = 1;
emul64_io_count = 0;
/* Broadcast in case others are waiting. */
}
}
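/*
 * Illustrative only (guarded out of the build by the hypothetical
 * EMUL64_EXAMPLES define): a CPU-bound emulation loop simply calls
 * emul64_yield_check() once per operation; the function itself decides,
 * based on emul64_yield_period and emul64_yield_enable, whether a given
 * call actually sleeps.
 */
#ifdef EMUL64_EXAMPLES
static void
emul64_example_io_loop(int nops)
{
	int	i;

	for (i = 0; i < nops; i++) {
		/* ... emulate one I/O operation ... */
		emul64_yield_check();
	}
}
#endif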