nv_sata.c revision a9d5ae2e08c504913c088349c3d4c144f3c92be8
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
*
* Driver for SATA controllers on NVIDIA ck804/mcp5x based chipsets.
*
* NCQ
* ---
*
* A portion of the NCQ is in place, but is incomplete. NCQ is disabled
* and is likely to be revisited in the future.
*
*
* Power Management
* ----------------
*
* Normally power management would be responsible for ensuring the device
* is quiescent and then changing power states to the device, such as
* that it is only available as part of a larger southbridge chipset, so
* removing power to the device isn't possible. Switches to control
* power states appear to be supported, but changes to these states are
* apparently ignored.
* The only further PM that the driver _could_ do is shut down the PHY,
* but in order to deliver the first rev of the driver sooner than later,
* that will be deferred until some future phase.
*
* Since the driver currently will not directly change any power state to
* the device, no power() entry point will be required. However, it is
* possible that in ACPI power state S3, aka suspend to RAM, that power
* can be removed to the device, and the driver cannot rely on BIOS to
* have reset any state. For the time being, there are no known
* non-default configurations that need to be programmed. This judgement
* is based on the port of the legacy ata driver not having any such
* functionality and based on conversations with the PM team. If such a
* restoration is later deemed necessary it can be incorporated into the
* DDI_RESUME processing.
*
*/
#include <sys/byteorder.h>
#ifdef SGPIO_SUPPORT
#endif
/*
* Function prototypes for driver entry points
*/
/*
* Function prototypes for entry points from sata service module
* These functions are distinguished from other local functions
* by the prefix "nv_sata_"
*/
/*
* Local function prototypes
*/
#ifdef NV_MSI_SUPPORTED
#endif
#ifdef NCQ
#endif
int state);
static void nv_timeout(void *);
int flag);
sata_pkt_t *spkt);
#ifdef SGPIO_SUPPORT
static void nv_sgp_activity_led_ctl(void *arg);
#endif
/*
* DMA attributes for the data buffer for x86. dma_attr_burstsizes is unused.
* Verify if needed if ported to other ISA.
*/
/*
 * Default data buffer DMA attributes: 32-bit bus addressing.
 */
static ddi_dma_attr_t buffer_dma_attr = {
DMA_ATTR_V0, /* dma_attr_version */
0, /* dma_attr_addr_lo: lowest usable bus address */
0xffffffffull, /* dma_attr_addr_hi: 32-bit addressing limit */
4, /* dma_attr_align: 4-byte alignment */
1, /* dma_attr_burstsizes: unused on x86 (see note above) */
1, /* dma_attr_minxfer: minimum transfer size */
0xffffffffull, /* dma_attr_maxxfer including all cookies */
0xffffffffull, /* dma_attr_seg: segment boundary */
NV_DMA_NSEGS, /* dma_attr_sgllen: max scatter/gather entries */
512, /* dma_attr_granular: sector-sized granularity */
0, /* dma_attr_flags */
};
/*
 * Data buffer DMA attributes for chips capable of 40-bit physical
 * addressing (selected via nv_sata_40bit_dma); only addr_hi differs
 * from buffer_dma_attr.
 */
static ddi_dma_attr_t buffer_dma_40bit_attr = {
DMA_ATTR_V0, /* dma_attr_version */
0, /* dma_attr_addr_lo: lowest usable bus address */
0xffffffffffull, /* dma_attr_addr_hi: 40-bit addressing limit */
4, /* dma_attr_align: 4-byte alignment */
1, /* dma_attr_burstsizes: unused on x86 (see note above) */
1, /* dma_attr_minxfer: minimum transfer size */
0xffffffffull, /* dma_attr_maxxfer including all cookies */
0xffffffffull, /* dma_attr_seg: segment boundary */
NV_DMA_NSEGS, /* dma_attr_sgllen: max scatter/gather entries */
512, /* dma_attr_granular: sector-sized granularity */
0, /* dma_attr_flags */
};
/*
* DMA attributes for PRD tables
*/
DMA_ATTR_V0, /* dma_attr_version */
0, /* dma_attr_addr_lo */
0xffffffffull, /* dma_attr_addr_hi */
4, /* dma_attr_align */
1, /* dma_attr_burstsizes */
1, /* dma_attr_minxfer */
NV_BM_64K_BOUNDARY, /* dma_attr_maxxfer */
1, /* dma_attr_sgllen */
1, /* dma_attr_granular */
0 /* dma_attr_flags */
};
/*
* Device access attributes
*/
static ddi_device_acc_attr_t accattr = {
};
#ifdef SGPIO_SUPPORT
nv_open, /* open */
nv_close, /* close */
nodev, /* strategy (block) */
nodev, /* print (block) */
nodev, /* dump (block) */
nodev, /* read */
nodev, /* write */
nv_ioctl, /* ioctl */
nodev, /* devmap */
nodev, /* mmap */
nodev, /* segmap */
nochpoll, /* chpoll */
ddi_prop_op, /* prop_op */
NULL, /* streams */
CB_REV /* rev */
};
#endif /* SGPIO_SUPPORT */
/*
 * Autoconfiguration entry points for this driver (dev_ops(9S)).
 * nulldev/nodev mark operations that are unneeded or unsupported.
 * The cb_ops vector is only provided when SGPIO (LED control via
 * open/close/ioctl) support is compiled in.
 */
static struct dev_ops nv_dev_ops = {
DEVO_REV, /* devo_rev */
0, /* refcnt */
nv_getinfo, /* info */
nulldev, /* identify */
nulldev, /* probe */
nv_attach, /* attach */
nv_detach, /* detach */
nodev, /* no reset */
#ifdef SGPIO_SUPPORT
&nv_cb_ops, /* driver operations */
#else
(struct cb_ops *)0, /* driver operations */
#endif
NULL, /* bus operations */
NULL, /* power */
nv_quiesce /* quiesce */
};
/*
* Request Sense CDB for ATAPI
*/
0,
0,
0,
0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* pad out to max CDB length */
};
extern struct mod_ops mod_driverops;
&mod_driverops, /* driverops */
&nv_dev_ops, /* driver ops */
};
static struct modlinkage modlinkage = {
&modldrv,
};
/*
* Wait for a signature.
* If this variable is non-zero, the driver will wait for a device signature
* before reporting a device reset to the sata module.
* Some (most?) drives will not process commands sent to them before D2H FIS
* is sent to a host.
*/
int nv_wait_for_signature = 1;
/*
* Check for a signature availability.
* If this variable is non-zero, the driver will check task file error register
* for indication of a signature availability before reading a signature.
* Task file error register bit 0 set to 1 indicates that the drive
* is ready and it has sent the D2H FIS with a signature.
* This behavior of the error register is not reliable in the mcp5x controller.
*/
int nv_check_tfr_error = 0;
/*
* Max signature acquisition time, in milliseconds.
* The driver will try to acquire a device signature within specified time and
* quit acquisition operation if signature was not acquired.
*/
/*
* If this variable is non-zero, the driver will wait for a signature in the
* nv_monitor_reset function without any time limit.
* Used for debugging and drive evaluation.
*/
int nv_wait_here_forever = 0;
/*
* Reset after hotplug.
* If this variable is non-zero, driver will reset device after hotplug
* (device attached) interrupt.
* If the variable is zero, driver will not reset the new device nor will it
* try to read device signature.
* Chipset is generating a hotplug (device attached) interrupt with a delay, so
* the device should have already sent the D2H FIS with the signature.
*/
int nv_reset_after_hotplug = 1;
/*
* Delay after device hotplug.
* It specifies the time between detecting a hotplugged device and sending
* a notification to the SATA module.
* It is used when the device is not reset after hotplugging and acquiring a signature
* may be unreliable. The delay should be long enough for a device to become
* ready to accept commands.
*/
int nv_hotplug_delay = NV_HOTPLUG_DELAY;
/*
* Maximum number of consecutive interrupts processed in the loop in the
* single invocation of the port interrupt routine.
*/
/*
* wait between checks of reg status
*/
int nv_usec_delay = NV_WAIT_REG_CHECK;
/*
* The following is needed for nv_vcmn_err()
*/
static char nv_log_buf[NV_STRING_512]; /* message formatting buffer for nv_vcmn_err() */
int nv_debug_flags = NVDBG_ALWAYS; /* mask of debug message categories to emit */
int nv_log_to_console = B_FALSE; /* non-zero: also send log output to the console */
int nv_log_delay = 0; /* debug aid: delay associated with logging; 0 = none */
int nv_prom_print = B_FALSE; /* non-zero: log via prom_printf() — presumably; confirm in nv_vcmn_err() */
/*
* for debugging
*/
#ifdef DEBUG
int ncq_commands = 0;
int non_ncq_commands = 0;
#endif
/*
* Opaque state pointer to be initialized by ddi_soft_state_init()
*/
/*
* Map from CBP to shared space
*
* Control Block Pointer (as well as the corresponding Control Block) that
* is shared across all driver instances associated with that part. The
* Control Block is used to update and query the LED state for the devices
* on the controllers associated with those instances. There is also some
* driver state (called the 'common' area here) associated with each SGPIO
* Control Block. The nv_sgp_cpb2cmn is used to map a given CBP to its
* control area.
*
* The driver can also use this mapping array to determine whether the
* common area for a given CBP has been initialized, and, if it isn't
* initialized, initialize it.
*
* When a driver instance with a CBP value that is already in the array is
* initialized, it will use the pointer to the previously initialized common
* area associated with that SGPIO CBP value, rather than initialize it
* itself.
*
* nv_sgp_c2c_mutex is used to synchronize access to this mapping array.
*/
#ifdef SGPIO_SUPPORT
static kmutex_t nv_sgp_c2c_mutex;
#endif
/* We still have problems in 40-bit DMA support, so disable it by default */
int nv_sata_40bit_dma = B_FALSE;
/*
 * Hot plug operations vector registered with the sata module;
 * backs the cfgadm connect/disconnect operations.
 */
static sata_tran_hotplug_ops_t nv_hotplug_ops = {
SATA_TRAN_HOTPLUG_OPS_REV_1, /* structure version */
nv_sata_activate, /* activate port. cfgadm -c connect */
nv_sata_deactivate /* deactivate port. cfgadm -c disconnect */
};
/*
* nv module initialization
*/
int
_init(void)
{
int error;
#ifdef SGPIO_SUPPORT
int i;
#endif
if (error != 0) {
return (error);
}
#ifdef SGPIO_SUPPORT
for (i = 0; i < NV_MAX_CBPS; i++) {
nv_sgp_cbp2cmn[i].c2cm_cbp = 0;
}
#endif
return (error);
}
if (error != 0) {
return (error);
}
return (error);
}
/*
* nv module uninitialize
*/
int
_fini(void)
{
int error;
if (error != 0) {
return (error);
}
/*
* remove the resources allocated in _init()
*/
#ifdef SGPIO_SUPPORT
#endif
return (error);
}
/*
* nv _info entry point
*/
int
{
}
/*
* these wrappers for ddi_{get,put}8 are for observability
* with dtrace
*/
#ifdef DEBUG
static void
{
}
static void
{
}
static uint32_t
{
}
static void
{
}
static uint16_t
{
}
static uint8_t
{
}
#else
#endif
/*
* Driver attach
*/
static int
{
#ifdef SGPIO_SUPPORT
int rlen;
#endif
switch (cmd) {
case DDI_ATTACH:
"nv_attach(): DDI_ATTACH inst %d", inst));
if (status != DDI_SUCCESS) {
break;
}
"inst %d: silicon revid is %x nv_debug_flags=%x",
} else {
break;
}
/*
*/
if (subclass & PCI_MASS_RAID) {
"attach failed: RAID mode not supported");
break;
}
/*
* the 6 bars of the controller are:
* 0: port 0 task file
* 1: port 0 status
* 2: port 1 task file
* 3: port 1 status
* 4: bus master for both ports
* 5: extended registers for SATA features
*/
if (status != DDI_SUCCESS) {
"ddi_regs_map_setup failure for bar"
break;
}
}
/*
* initialize controller structures
*/
if (status == NV_FAILURE) {
break;
}
/*
* initialize mutexes
*/
/*
* get supported interrupt types
*/
DDI_SUCCESS) {
"!ddi_intr_get_supported_types failed");
"interrupt supported types failed"));
break;
}
"ddi_intr_get_supported_types() returned: 0x%x",
intr_types));
#ifdef NV_MSI_SUPPORTED
if (intr_types & DDI_INTR_TYPE_MSI) {
"using MSI interrupt type"));
/*
* Try MSI first, but fall back to legacy if MSI
* attach fails
*/
"MSI interrupt setup done"));
} else {
"!MSI registration failed "
"will try Legacy interrupts");
}
}
#endif
/*
* Either the MSI interrupt setup has failed or only
* the fixed interrupts are available on the system.
*/
if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
(intr_types & DDI_INTR_TYPE_FIXED)) {
"using Legacy interrupt type"));
"Legacy interrupt setup done"));
} else {
"!legacy interrupt setup failed");
"legacy interrupt setup failed"));
break;
}
}
if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
"no interrupts registered"));
break;
}
#ifdef SGPIO_SUPPORT
/*
* save off the controller number
*/
/*
* initialize SGPIO
*/
#endif /* SGPIO_SUPPORT */
/*
* Initiate link processing and device identification
*/
/*
* attach to sata module
*/
DDI_ATTACH) != DDI_SUCCESS) {
break;
}
return (DDI_SUCCESS);
case DDI_RESUME:
"nv_attach(): DDI_RESUME inst %d", inst));
return (DDI_FAILURE);
}
/*
*/
/*
* Need to set bit 2 to 1 at config offset 0x50
* to enable access to the bar5 registers.
*/
}
for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
}
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
/*
* DDI_ATTACH failure path starts here
*/
if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
}
if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
/*
* Remove timers
*/
int port = 0;
if (nvp->nvp_timeout_id != 0) {
}
}
}
if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
}
if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
}
if (attach_state & ATTACH_PROGRESS_BARS) {
while (--bar >= 0) {
}
}
if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
}
if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
}
return (DDI_FAILURE);
}
static int
{
switch (cmd) {
case DDI_DETACH:
/*
* Remove interrupts
*/
/*
* Remove timers
*/
if (nvp->nvp_timeout_id != 0) {
}
}
/*
* Remove maps
*/
for (i = 0; i < 6; i++) {
}
/*
* Destroy mutexes
*/
/*
* Uninitialize the controller structures
*/
#ifdef SGPIO_SUPPORT
/*
* release SGPIO resources
*/
#endif
/*
* unregister from the sata module
*/
/*
* Free soft state
*/
return (DDI_SUCCESS);
case DDI_SUSPEND:
for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
}
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
}
/*ARGSUSED*/
static int
{
int instance;
switch (infocmd) {
case DDI_INFO_DEVT2DEVINFO:
return (DDI_SUCCESS);
} else {
return (DDI_FAILURE);
}
case DDI_INFO_DEVT2INSTANCE:
break;
default:
break;
}
return (DDI_SUCCESS);
}
#ifdef SGPIO_SUPPORT
/* ARGSUSED */
static int
{
return (ENXIO);
}
return (0);
}
/* ARGSUSED */
static int
{
return (0);
}
/* ARGSUSED */
static int
{
int inst;
int status;
int drive;
struct dc_led_ctl led;
if (inst == -1) {
return (EBADF);
}
return (EBADF);
}
return (EIO);
}
switch (cmd) {
case DEVCTL_SET_LED:
sizeof (struct dc_led_ctl), mode);
if (status != 0)
return (EFAULT);
/*
* Since only the first two controllers currently support
* SGPIO (as per NVIDIA docs), this code will as well.
* Note that this validates the port value within led_state
* as well.
*/
return (ENXIO);
return (EINVAL);
}
} else {
return (ENXIO);
}
}
} else {
return (ENXIO);
}
}
break;
case DEVCTL_GET_LED:
sizeof (struct dc_led_ctl), mode);
if (status != 0)
return (EFAULT);
/*
* Since only the first two controllers currently support
* SGPIO (as per NVIDIA docs), this code will as well.
* Note that this validates the port value within led_state
* as well.
*/
return (ENXIO);
else
else
} else {
return (ENXIO);
}
} else {
/*
* Not really off, but never set and no constant for
* tri-state
*/
}
sizeof (struct dc_led_ctl), mode);
if (status != 0)
return (EFAULT);
break;
case DEVCTL_NUM_LEDS:
/*
* According to documentation, NVIDIA SGPIO is supposed to
* support blinking, but it does not seem to work in practice.
*/
sizeof (struct dc_led_ctl), mode);
if (status != 0)
return (EFAULT);
break;
default:
return (EINVAL);
}
return (0);
}
#endif /* SGPIO_SUPPORT */
/*
* Called by sata module to probe a port. Port and device state
* are not changed here... only reported back to the sata module.
*
*/
static int
{
return (SATA_FAILURE);
}
"nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
/*
* This check seems to be done in the SATA module.
* It may not be required here
*/
"port inactive. Use cfgadm to activate");
return (SATA_SUCCESS);
}
"probe: port failed"));
return (SATA_SUCCESS);
}
if (qual == SATA_ADDR_PMPORT) {
"controller does not support port multiplier");
return (SATA_SUCCESS);
}
/*
* We are waiting for reset to complete and to fetch
* a signature.
* Reset will cause the link to go down for a short period of
* time. If reset processing continues for less than
* NV_LINK_DOWN_TIMEOUT, fake the status of the link so that
* we will not report intermittent link down.
* Maybe we should report previous link state?
*/
return (SATA_SUCCESS);
}
}
/*
* Just report the current port state
*/
#ifdef SGPIO_SUPPORT
}
#endif
return (SATA_SUCCESS);
}
/*
* Called by sata module to start a new command.
*/
static int
{
int ret;
"nv_sata_start: port not yet initialized"));
return (SATA_TRAN_PORT_ERROR);
}
"nv_sata_start: NV_PORT_INACTIVE"));
return (SATA_TRAN_PORT_ERROR);
}
"nv_sata_start: NV_PORT_FAILED state"));
return (SATA_TRAN_PORT_ERROR);
}
"still waiting for reset completion"));
/*
* If in panic, timeouts do not occur, so fake one
* so that the signature can be acquired to complete
* the reset handling.
*/
if (ddi_in_panic()) {
}
return (SATA_TRAN_BUSY);
}
"nv_sata_start: SATA_DTYPE_NONE"));
return (SATA_TRAN_PORT_ERROR);
}
"port multipliers not supported by controller");
return (SATA_TRAN_CMD_UNSUPPORTED);
}
/*
* after a device reset, and then when sata module restore processing
* is complete, the sata module will set sata_clear_dev_reset which
* indicates that restore processing has completed and normal
* non-restore related commands should be processed.
*/
"nv_sata_start: clearing NV_PORT_RESTORE"));
}
/*
* if the device was recently reset as indicated by NV_PORT_RESTORE,
* only allow commands which restore device state. The sata module
* marks such commands with with sata_ignore_dev_reset.
*
* during coredump, nv_reset is called but then the restore
* doesn't happen. For now, workaround by ignoring the wait for
* restore if the system is panicking.
*/
(ddi_in_panic() == 0)) {
"nv_sata_start: waiting for restore "));
return (SATA_TRAN_BUSY);
}
"nv_sata_start: NV_PORT_ABORTING"));
return (SATA_TRAN_BUSY);
}
/* Clear SError to be able to check errors after the command failure */
if (spkt->satapkt_op_mode &
return (ret);
}
/*
* start command asynchronous command
*/
return (ret);
}
/*
* SATA_OPMODE_POLLING implies the driver is in a
* synchronous mode, and SATA_OPMODE_SYNCH is also set.
* If only SATA_OPMODE_SYNCH is set, the driver can use
* interrupts and sleep wait on a cv.
*
* If SATA_OPMODE_POLLING is set, the driver can't use
* interrupts and must busy wait and simulate the
* interrupts by waiting for BSY to be cleared.
*
* Synchronous mode has to return BUSY if there are
* any other commands already on the drive.
*/
static int
{
int ret;
"nv_sata_satapkt_sync: device is busy, sync cmd rejected"
"ncq_run: %d non_ncq_run: %d spkt: %p",
return (SATA_TRAN_BUSY);
}
/*
* if SYNC but not POLL, verify that this is not on interrupt thread.
*/
servicing_interrupt()) {
"SYNC mode not allowed during interrupt"));
return (SATA_TRAN_BUSY);
}
/*
* disable interrupt generation if in polled mode
*/
}
}
return (ret);
}
" done % reason %d", ret));
return (ret);
}
/*
* non-polling synchronous mode handling. The interrupt will signal
* when the IO is completed.
*/
}
return (SATA_TRAN_ACCEPTED);
}
static int
{
int ret;
#if ! defined(__lock_lint)
#endif
for (;;) {
NV_DELAY_NSEC(400);
"SATA_STATUS_BSY"));
return (SATA_TRAN_ACCEPTED);
}
/*
* Simulate interrupt.
*/
if (ret != DDI_INTR_CLAIMED) {
" unclaimed -- resetting"));
return (SATA_TRAN_ACCEPTED);
}
#if ! defined(__lock_lint)
/*
* packet is complete
*/
return (SATA_TRAN_ACCEPTED);
}
#endif
}
/*NOTREACHED*/
}
/*
* Called by sata module to abort outstanding packets.
*/
/*ARGSUSED*/
static int
{
"abort request failed: port inactive");
return (SATA_FAILURE);
}
/*
* spkt == NULL then abort all commands
*/
if (c_a) {
"packets aborted running=%d", c_a));
ret = SATA_SUCCESS;
} else {
} else {
"can't find spkt to abort"));
}
ret = SATA_FAILURE;
}
return (ret);
}
/*
* if spkt == NULL abort all pkts running, otherwise
* abort the requested packet. must be called with nv_mutex
* held and returns with it held. Not NCQ aware.
*/
static int
{
/*
* return if the port is not configured
*/
"nv_abort_active: not configured so returning"));
return (0);
}
for (i = 0; i < nvp->nvp_queue_depth; i++) {
/*
* skip if not active command in slot
*/
continue;
}
/*
* if a specific packet was requested, skip if
* this is not a match
*/
continue;
}
/*
* stop the hardware. This could need reworking
* when NCQ is enabled in the driver.
*/
if (reset_once == B_FALSE) {
/*
* stop DMA engine
*/
/*
* Reset only if explicitly specified by the arg flag
*/
reset_once = B_TRUE;
}
}
aborted++;
}
return (aborted);
}
/*
* Called by sata module to reset a port, device, or the controller.
*/
static int
{
int ret = SATA_SUCCESS;
case SATA_ADDR_CPORT:
/*FALLTHROUGH*/
case SATA_ADDR_DCPORT:
break;
case SATA_ADDR_CNTRL:
"nv_sata_reset: constroller reset not supported"));
break;
case SATA_ADDR_PMPORT:
case SATA_ADDR_DPMPORT:
"nv_sata_reset: port multipliers not supported"));
/*FALLTHROUGH*/
default:
/*
* unsupported case
*/
ret = SATA_FAILURE;
break;
}
if (ret == SATA_SUCCESS) {
/*
* If the port is inactive, do a quiet reset and don't attempt
* to wait for reset completion or do any post reset processing
*/
nvp->nvp_reset_time = 0;
}
/*
* clear the port failed flag
*/
}
return (ret);
}
/*
* Sata entry point to handle port activation. cfgadm -c connect
*/
static int
{
/* Initiate link probing and device signature acquisition */
nvp->nvp_signature = 0;
return (SATA_SUCCESS);
}
/*
* Sata entry point to handle port deactivation. cfgadm -c disconnect
*/
static int
{
/*
* make the device inaccessible
*/
/*
* disable the interrupts on port
*/
return (SATA_SUCCESS);
}
/*
* find an empty slot in the driver's queue, increment counters,
* and then invoke the appropriate PIO or DMA start routine.
*/
static int
{
if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
(cmd == SATAC_READ_FPDMA_QUEUED)) {
nvp->nvp_ncq_run++;
/*
* search for an empty NCQ slot. by the time, it's already
* been determined by the caller that there is room on the
* queue.
*/
on_bit <<= 1) {
break;
}
}
/*
* the first empty slot found, should not exceed the queue
* depth of the drive. if it does it's an error.
*/
nvp->nvp_sactive);
on_bit));
ncq = NVSLOT_NCQ;
} else {
nvp->nvp_non_ncq_run++;
slot = 0;
}
/*
* the sata module doesn't indicate which commands utilize the
* DMA engine, so find out using this switch table.
*/
case SATAC_READ_DMA_EXT:
case SATAC_WRITE_DMA_EXT:
case SATAC_WRITE_DMA:
case SATAC_READ_DMA:
case SATAC_READ_DMA_QUEUED:
case SATAC_WRITE_DMA_QUEUED:
case SATAC_READ_FPDMA_QUEUED:
case SATAC_WRITE_FPDMA_QUEUED:
break;
default:
}
if ((direction == SATA_DIR_READ) ||
(direction == SATA_DIR_WRITE)) {
/*
* Freeing DMA resources allocated by the framework
* now to avoid buffer overwrite (dma sync) problems
* when the buffer is released at command completion.
* Primarily an issue on systems with more than
* 4GB of memory.
*/
}
} else if (direction == SATA_DIR_NODATA_XFER) {
} else if (direction == SATA_DIR_READ) {
/*
* Freeing DMA resources allocated by the framework now to
* avoid buffer overwrite (dma sync) problems when the buffer
* is released at command completion. This is not an issue
* for write because write does not update the buffer.
* Primarily an issue on systems with more than 4GB of memory.
*/
} else if (direction == SATA_DIR_WRITE) {
} else {
" %d cookies %d cmd %x",
goto fail;
}
#ifdef SGPIO_SUPPORT
#endif
/*
* start timer if it's not already running and this packet
* is not requesting polled mode.
*/
if ((nvp->nvp_timeout_id == 0) &&
}
return (SATA_TRAN_ACCEPTED);
}
fail:
if (ncq == NVSLOT_NCQ) {
nvp->nvp_ncq_run--;
} else {
nvp->nvp_non_ncq_run--;
}
nv_slotp->nvslot_flags = 0;
return (ret);
}
/*
* Check if the signature is ready and if non-zero translate
* it into a solaris sata defined type.
*/
static void
{
/*
* Task file error register bit 0 set to 1 indicates that the drive
* is ready and has sent the D2H FIS with a signature.
*/
if (nv_check_tfr_error != 0) {
if (!(tfr_error & SATA_ERROR_ILI)) {
"nv_read_signature: signature not ready"));
return;
}
}
switch (nvp->nvp_signature) {
case NV_SIG_DISK:
break;
case NV_SIG_ATAPI:
"drive is an optical device"));
break;
case NV_SIG_PM:
"device is a port multiplier"));
break;
case NV_SIG_NOTREADY:
"signature not ready"));
break;
default:
break;
}
if (nvp->nvp_signature) {
}
}
/*
* Set up a new timeout or complete a timeout.
* Timeout value has to be specified in microseconds. If time is zero, no new
* timeout is scheduled.
* Must be called at the end of the timeout routine.
*/
static void
{
/*
* Since we are dropping the mutex for untimeout,
* the timeout may be executed while we are trying to
* untimeout and setting up a new timeout.
* If nvp_timeout_duration is 0, then this function
* was re-entered. Just exit.
*/
return;
}
nvp->nvp_timeout_duration = 0;
if (nvp->nvp_timeout_id == 0) {
/* Start new timer */
drv_usectohz(time));
} else {
/*
* If the currently running timeout is due later than the
* requested one, restart it with a new expiration.
* Our timeouts do not need to be accurate - we would be just
* checking that the specified time was exceeded.
*/
if (old_duration > time) {
drv_usectohz(time));
}
}
}
int nv_reset_length = NV_RESET_LENGTH;
/*
* Reset the port
*
* Entered with nvp mutex held
*/
static void
{
int i, j, reset = 0;
/*
* stop DMA engine.
*/
/*
* Issue hardware reset; retry if necessary.
*/
for (i = 0; i < NV_RESET_ATTEMPTS; i++) {
/*
* Clear signature registers
*/
/* Clear task file error register */
/*
* assert reset in PHY by writing a 1 to bit 0 scontrol
*/
/* Wait at least 1ms, as required by the spec */
/* Reset all accumulated error bits */
/* de-assert reset in PHY */
/*
* Wait up to 10ms for COMINIT to arrive, indicating that
* the device recognized COMRESET.
*/
for (j = 0; j < 10; j++) {
(SSTATUS_GET_DET(sstatus) ==
reset = 1;
break;
}
}
if (reset == 1)
break;
}
if (reset == 0) {
"(serr 0x%x) after %d attempts", serr, i));
} else {
nvp->nvp_reset_time)));
}
if (servicing_interrupt()) {
}
}
/*
*/
/* ARGSUSED */
static void
{
/*
* clear any previous interrupts asserted
*/
/*
* These are the interrupts to accept for now. The spec
* says these are enable bits, but nvidia has indicated
* these are masking bits. Even though they may be masked
* out to prevent asserting the main interrupt, they can
* still be asserted while reading the interrupt status
* register, so that needs to be considered in the interrupt
* handler.
*/
~(MCP5X_INT_IGNORE));
}
/*
* Allow the driver to program the BM on the first command instead
* of waiting for an interrupt.
*/
#ifdef NCQ
#endif
/*
* mcp55 rev A03 and above supports 40-bit physical addressing.
* Enable DMA to take advantage of that.
*
*/
if (nv_sata_40bit_dma == B_TRUE) {
"rev id is %X and"
" is capable of 40-bit DMA addressing",
reg32 | NV_40BIT_PRD);
} else {
"40-bit DMA disabled by nv_sata_40bit_dma"));
}
} else {
}
}
/*
* Initialize register handling specific to ck804
*/
static void
{
int j;
/*
* delay hotplug interrupts until PHYRDY.
*/
/*
* enable hot plug interrupts for channel x and y
*/
NV_HIRQ_EN | reg16);
NV_HIRQ_EN | reg16);
/*
* clear any existing interrupt pending then enable
*/
for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
}
}
/*
* Initialize the controller and set up driver data structures.
* determine if ck804 or mcp5x class.
*/
static int
{
struct sata_hba_tran stran;
int j, ck804;
#ifdef SGPIO_SUPPORT
#endif
/*
* Need to set bit 2 to 1 at config offset 0x50
* to enable access to the bar5 registers.
*/
if (!(reg32 & NV_BAR5_SPACE_EN)) {
}
/*
* Determine if this is ck804 or mcp5x. ck804 will map in the
* task file registers into bar5 while mcp5x won't. The offset of
* the task file registers in mcp5x's space is unused, so it will
* return zero. So check one of the task file registers to see if it is
* writable and reads back what was written. If it's mcp5x it will
* return back 0xff whereas ck804 will return the value written.
*/
for (j = 1; j < 3; j++) {
if (reg8 != j) {
break;
}
}
} else {
}
KM_SLEEP);
/*
* initialize registers common to all chipsets
*/
for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
/*
* Initialize dma handles, etc.
* If it fails, the port is in inactive state.
*/
(void) nv_init_port(nvp);
}
/*
* initialize register by calling chip specific reg initialization
*/
/* initialize the hba dma attribute */
else
return (NV_SUCCESS);
}
/*
* Initialize data structures with enough slots to handle queuing, if
* enabled. NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
* NCQ support is built into the driver and enabled. It might have been
* better to derive the true size from the drive itself, but the sata
* module only sends down that information on the first NCQ command,
* which means possibly re-sizing the structures on an interrupt stack,
* making error handling more messy. The easy way is to just allocate
* all 32 slots, which is what most drives support anyway.
*/
static int
{
int rc, i;
"nv_init_port previously initialized"));
return (NV_SUCCESS);
} else {
}
KM_SLEEP);
for (i = 0; i < NV_QUEUE_SLOTS; i++) {
if (rc != DDI_SUCCESS) {
return (NV_FAILURE);
}
&(nvp->nvp_sg_acc_hdl[i]));
if (rc != DDI_SUCCESS) {
return (NV_FAILURE);
}
if (rc != DDI_DMA_MAPPED) {
return (NV_FAILURE);
}
}
/*
* nvp_queue_depth represents the actual drive queue depth, not the
* number of slots allocated in the structures (which may be more).
* Actual queue depth is only learned after the first NCQ command, so
* initialize it to 1 for now.
*/
/*
* Port is initialized whether the device is attached or not.
* Link processing and device identification will be started later,
* after interrupts are initialized.
*/
nvp->nvp_signature = 0;
return (NV_SUCCESS);
}
/*
* Establish initial link & device type
* Called only from nv_attach
* Loops up to approximately 210ms; can exit earlier.
* The time includes wait for the link up and completion of the initial
* signature gathering operation.
*/
static void
{
/*
* Initiate device identification, if any is attached
* and reset was not already applied by hot-plug
* event processing.
*/
}
}
}
/*
* Wait up to 10ms for links up.
* Spec says that link should be up in 1ms.
*/
for (i = 0; i < 10; i++) {
links_up = 0;
(SSTATUS_GET_DET(sstatus) ==
}
"nv_init_port_link_processing()"
"link up; time from reset %dms",
nvp->nvp_reset_time)));
links_up++;
}
}
break;
}
}
"%d links up", links_up));
/*
* At this point, if any device is attached, the link is established.
* Wait till devices are ready to be accessed, no more than 200ms.
* 200ms is empirical time in which a signature should be available.
*/
for (i = 0; i < 200; i++) {
ready_ports = 0;
(SSTATUS_GET_DET(sstatus) ==
NV_PORT_RESET_RETRY))) {
/*
* Reset already processed
*/
"nv_init_port_link_processing()"
"device ready; port state %x; "
nvp->nvp_reset_time)));
ready_ports++;
}
}
if (ready_ports == links_up) {
break;
}
}
"%d devices ready", ready_ports));
}
/*
* Free dynamically allocated structures for port.
*/
static void
{
int i;
/*
* It is possible to reach here before a port has been initialized or
* after it has already been uninitialized. Just return in that case.
*/
return;
}
/*
* Mark port unusable now.
*/
"nv_uninit_port uninitializing"));
for (i = 0; i < NV_QUEUE_SLOTS; i++) {
if (nvp->nvp_sg_paddr[i]) {
}
}
}
}
sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
}
/*
* Cache register offsets and access handles to frequently accessed registers
* which are common to either chipset.
*/
static void
{
if (port == 0) {
bm_addr_offset = 0;
} else {
}
(long)bm_addr_offset;
}
}
static void
{
int port;
}
}
/*
* ck804 interrupt. This is a wrapper around ck804_intr_process so
* that interrupts from other devices can be disregarded while dtracing.
*/
/* ARGSUSED */
static uint_t
{
return (DDI_INTR_UNCLAIMED);
if (intr_status == 0) {
return (DDI_INTR_UNCLAIMED);
}
return (DDI_INTR_CLAIMED);
}
/*
* Main interrupt handler for ck804. handles normal device
* interrupts as well as port hot plug and remove interrupts.
*
*/
static void
{
int port, i;
int nvcleared = 0;
int port_mask_hot[] = {
};
int port_mask_pm[] = {
};
"ck804_intr_process entered intr_status=%x", intr_status));
/*
* For command completion interrupt, explicit clear is not required.
* however, for the error cases explicit clear is performed.
*/
continue;
}
"ck804_intr_process interrupt on port %d", port));
/*
* there was a corner case found where an interrupt
* arrived before nvp_slot was set. Should
* probably should track down why that happens and try
* to eliminate that source and then get rid of this
* check.
*/
"received before initialization "
"completed status=%x", status));
/*
* clear interrupt bits
*/
continue;
}
" no command in progress status=%x", status));
/*
* clear interrupt bits
*/
continue;
}
if (!(bmstatus & BMISX_IDEINTS)) {
continue;
}
if (status & SATA_STATUS_BSY) {
continue;
}
continue;
}
}
}
/*
* interrupts. Frequently both the ADD and the REMOVE bits
* are asserted, whether it was a remove or add. Use sstatus
* to distinguish hot add from hot remove.
*/
clear_bits = 0;
"clearing PM interrupt bit: %x",
}
if (clear_bits != 0) {
goto clear;
} else {
continue;
}
}
/*
* reaching here means there was a hot add or remove.
*/
if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
nv_report_add_remove(nvp, 0);
} else {
}
/*
* clear interrupt bits. explicit interrupt clear is
* required for hotplug interrupts.
*/
/*
* make sure it's flushed and cleared. If not try
* again. Sometimes it has been observed to not clear
* on the first try.
*/
/*
* make 10 additional attempts to clear the interrupt
*/
"still not clear try=%d", intr_status,
++nvcleared));
}
/*
* if still not clear, log a message and disable the
* port. highly unlikely that this path is taken, but it
* gives protection against a wedged interrupt.
*/
if (intr_status & clear_bits) {
B_TRUE);
"interrupt. disabling port intr_status=%X",
}
}
}
/*
* Interrupt handler for mcp5x. It is invoked by the wrapper for each port
* on the controller, to handle completion and hot plug and remove events.
*
*/
static uint_t
{
int ret = DDI_INTR_UNCLAIMED;
int loop_cnt = 0;
do {
/*
* read current interrupt status
*/
/*
* MCP5X_INT_IGNORE interrupts will show up in the status,
* but are masked out from causing an interrupt to be generated
* to the processor. Ignore them here by masking them out.
*/
int_status &= ~(MCP5X_INT_IGNORE);
/*
* exit the loop when no more interrupts to process
*/
if (int_status == 0) {
break;
}
if (int_status & MCP5X_INT_COMPLETE) {
"mcp5x_packet_complete_intr"));
/*
* since int_status was set, return DDI_INTR_CLAIMED
* from the DDI's perspective even though the packet
* completion may not have succeeded. If it fails,
* need to manually clear the interrupt, otherwise
* clearing is implicit.
*/
NV_FAILURE) {
} else {
intr_cycles = 0;
}
}
if (int_status & MCP5X_INT_DMA_SETUP) {
/*
* Needs to be cleared before starting the BM, so do it
* now. make sure this is still working.
*/
#ifdef NCQ
#endif
}
if (int_status & MCP5X_INT_REM) {
clear |= MCP5X_INT_REM;
} else if (int_status & MCP5X_INT_ADD) {
clear |= MCP5X_INT_ADD;
nv_report_add_remove(nvp, 0);
}
if (clear) {
clear = 0;
}
/* Protect against a stuck interrupt */
if (intr_cycles++ == NV_MAX_INTR_LOOP) {
"processing. Disabling port int_status=%X"
B_TRUE);
}
} while (loop_cnt++ < nv_max_intr_loops);
"Exiting with multiple intr loop count %d", loop_cnt));
}
(NVDBG_INTR | NVDBG_VERBOSE)) {
if (int_status & MCP5X_INT_COMPLETE) {
"mcp55_intr_port: Exiting with altstatus %x, "
"bmicx %x, int_status2 %X, int_status %X, ret %x,"
}
}
/*
* To facilitate debugging, keep track of the length of time spent in
* the port interrupt routine.
*/
return (ret);
}
/* ARGSUSED */
static uint_t
{
int ret;
return (DDI_INTR_UNCLAIMED);
return (ret);
}
#ifdef NCQ
/*
* with software driven NCQ on mcp5x, an interrupt occurs right
* before the drive is ready to do a DMA transfer. At this point,
* the PRD table needs to be programmed and the DMA engine enabled
* and ready to go.
*
* -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
* -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
* -- clear bit 0 of master command reg
* -- program PRD
* -- clear the interrupt status bit for the DMA Setup FIS
* -- set bit 0 of the bus master command register
*/
static int
{
int slot;
"this is should not be executed at all until NCQ");
/*
* halt the DMA engine. This step is necessary according to
* the mcp5x spec, probably since there may have been a "first" packet
* that already programmed the DMA engine, but may not turn out to
* be the first one processed.
*/
if (bmicx & BMICX_SSBM) {
"another packet. Cancelling and reprogramming"));
}
return (DDI_INTR_CLAIMED);
}
#endif /* NCQ */
/*
* packet completion interrupt. If the packet is complete, invoke
* the packet completion callback.
*/
static int
{
int sactive;
return (NV_FAILURE);
}
/*
* Commands may have been processed by abort or timeout before
* interrupt processing acquired the mutex. So we may be processing
* an interrupt for packets that were already removed.
* For functionning NCQ processing all slots may be checked, but
* with NCQ disabled (current code), relying on *_run flags is OK.
*/
if (nvp->nvp_non_ncq_run) {
/*
* If the just completed item is a non-ncq command, the busy
* bit should not be set
*/
if (status & SATA_STATUS_BSY) {
"unexpected SATA_STATUS_BSY set");
/*
* calling function will clear interrupt. then
* the real interrupt will either arrive or the
* packet timeout handling will take over and
* reset.
*/
return (NV_FAILURE);
}
} else {
/*
* Pre-NCQ code!
* Nothing to do. The packet for the command that just
* completed is already gone. Just clear the interrupt.
*/
(void) nv_bm_status_clear(nvp);
return (NV_SUCCESS);
/*
* NCQ check for BSY here and wait if still bsy before
* continuing. Rather than wait for it to be cleared
* when starting a packet and wasting CPU time, the starting
* thread can exit immediate, but might have to spin here
* for a bit possibly. Needs more work and experimentation.
*
*/
}
/*
* active_pkt_bit will represent the bitmap of the single completed
* packet. Because of the nature of sw assisted NCQ, only one
* command will complete per interrupt.
*/
if (ncq_command == B_FALSE) {
active_pkt = 0;
} else {
/*
* NCQ: determine which command just completed, by examining
* which bit cleared in the register since last written.
*/
/*
* this failure path needs more work to handle the
* error condition and recovery.
*/
if (active_pkt_bit == 0) {
"nvp->nvp_sactive %X", sactive,
return (NV_FAILURE);
}
}
/*
* make sure only one bit is ever turned on
*/
}
}
return (NV_SUCCESS);
}
static void
{
nvp->nvp_ncq_run--;
} else {
nvp->nvp_non_ncq_run--;
}
/*
* mark the packet slot idle so it can be reused. Do this before
* calling satapkt_comp so the slot can be reused.
*/
/*
* If this is not timed polled mode cmd, which has an
* active thread monitoring for completion, then need
* to signal the sleeping thread that the cmd is complete.
*/
}
return;
}
}
}
/*
* check whether packet is ncq command or not. for ncq command,
* start it if there is still room on queue. for non-ncq command only
* start if no other command is running.
*/
static int
{
(cmd == SATAC_READ_FPDMA_QUEUED));
(nvp->nvp_ncq_run > 0)) {
/*
* next command is non-ncq which can't run
* concurrently. exit and return queue full.
*/
return (SATA_TRAN_QUEUE_FULL);
}
}
/*
* ncq == B_TRUE
*/
/*
* cannot start any NCQ commands when there
* is a non-NCQ command running.
*/
return (SATA_TRAN_QUEUE_FULL);
}
#ifdef NCQ
/*
* this is not compiled for now as satapkt_device.satadev_qdepth
* is being pulled out until NCQ support is later addressed
*
* nvp_queue_depth is initialized by the first NCQ command
* received.
*/
"nv_process_queue: nvp_queue_depth set to %d",
nvp->nvp_queue_depth));
}
#endif
/*
* max number of NCQ commands already active
*/
return (SATA_TRAN_QUEUE_FULL);
}
}
/*
* configure INTx and legacy interrupts
*/
static int
{
/*
* get number of interrupts
*/
"ddi_intr_get_nintrs() failed, "
return (DDI_FAILURE);
}
/*
* allocate an array of interrupt handles
*/
/*
* call ddi_intr_alloc()
*/
"ddi_intr_alloc() failed, rc %d", rc);
return (DDI_FAILURE);
}
"ddi_intr_alloc: requested: %d, received: %d",
goto failure;
}
/*
* get intr priority
*/
DDI_SUCCESS) {
goto failure;
}
/*
* Test for high level mutex
*/
"nv_add_legacy_intrs: high level intr not supported");
goto failure;
}
for (x = 0; x < actual; x++) {
"ddi_intr_add_handler() failed");
goto failure;
}
}
/*
* call ddi_intr_enable() for legacy interrupts
*/
for (x = 0; x < nvc->nvc_intr_cnt; x++) {
}
return (DDI_SUCCESS);
/*
* free allocated intr and nvc_htable
*/
for (y = 0; y < actual; y++) {
}
return (DDI_FAILURE);
}
#ifdef NV_MSI_SUPPORTED
/*
* configure MSI interrupts
*/
static int
{
/*
* get number of interrupts
*/
"ddi_intr_get_nintrs() failed, "
return (DDI_FAILURE);
}
/*
* get number of available interrupts
*/
"ddi_intr_get_navail() failed, "
return (DDI_FAILURE);
}
"ddi_intr_get_nvail returned %d ddi_intr_get_nintrs: %d",
}
/*
* allocate an array of interrupt handles
*/
"ddi_intr_alloc() failed, rc %d", rc);
return (DDI_FAILURE);
}
/*
* Use interrupt count returned or abort?
*/
}
/*
* get priority for first msi, assume remaining are all the same
*/
DDI_SUCCESS) {
goto failure;
}
/*
* test for high level mutex
*/
"nv_add_msi_intrs: high level intr not supported");
goto failure;
}
/*
* Call ddi_intr_add_handler()
*/
for (x = 0; x < actual; x++) {
"ddi_intr_add_handler() failed");
goto failure;
}
}
nvc->nvc_intr_cnt);
} else {
/*
* Call ddi_intr_enable() for MSI non block enable
*/
for (x = 0; x < nvc->nvc_intr_cnt; x++) {
}
}
return (DDI_SUCCESS);
/*
* free allocated intr and nvc_htable
*/
for (y = 0; y < actual; y++) {
}
return (DDI_FAILURE);
}
#endif
static void
{
int x, i;
/*
* prevent controller from generating interrupts by
* masking them out. This is an extra precaution.
*/
for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
}
/*
* disable all interrupts
*/
nvc->nvc_intr_cnt);
} else {
for (x = 0; x < nvc->nvc_intr_cnt; x++) {
}
}
for (x = 0; x < nvc->nvc_intr_cnt; x++) {
}
}
/*
* variable argument wrapper for cmn_err. prefixes the instance and port
* number if possible
*/
static void
{
char port[NV_STRING_10];
char inst[NV_STRING_10];
if (nvc) {
} else {
inst[0] = '\0';
}
if (nvp) {
} else {
port[0] = '\0';
}
/*
* normally set to log to console but in some debug situations it
* may be useful to log only to a file.
*/
if (nv_log_to_console) {
if (nv_prom_print) {
} else {
}
} else {
}
}
/*
* wrapper for cmn_err
*/
static void
{
}
#if defined(DEBUG)
/*
* prefixes the instance and port number if possible to the debug message
*/
static void
{
if ((nv_debug_flags & flag) == 0) {
return;
}
/*
* useful for some debugging situations
*/
if (nv_log_delay) {
}
}
#endif /* DEBUG */
/*
* program registers which are common to all commands
*/
static void
{
if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
(cmd == SATAC_READ_FPDMA_QUEUED)) {
}
/*
* select the drive
*/
/*
* make certain the drive selected
*/
return;
}
case ATA_ADDR_LBA:
break;
case ATA_ADDR_LBA28:
"ATA_ADDR_LBA28 mode"));
/*
* NCQ only uses 48-bit addressing
*/
break;
case ATA_ADDR_LBA48:
"ATA_ADDR_LBA48 mode"));
/*
* for NCQ, tag goes into count register and real sector count
* into features register. The sata module does the translation
* in the satacmd.
*/
} else {
}
/*
* send the high-order half first
*/
/*
* Send the low-order half
*/
break;
case 0:
/*
* non-media access commands such as identify and features
* take this path.
*/
break;
default:
break;
}
}
/*
* start a command that involves no media access
*/
static int
{
/*
* This next one sets the controller in motion
*/
return (SATA_TRAN_ACCEPTED);
}
static int
{
/*
* Get the current BM status
*/
/*
* Clear the latches (and preserve the other bits)
*/
return (ret);
}
/*
* program the bus master DMA engine with the PRD address for
* the active slot command, and start the DMA engine.
*/
static void
{
== SATA_DIR_READ) {
} else {
}
"nv_start_dma_engine entered"));
#if NOT_USED
/*
* NOT NEEDED. Left here of historical reason.
* Reset the controller's interrupt and error status bits.
*/
(void) nv_bm_status_clear(nvp);
#endif
/*
* program the PRD table physical start address
*/
/*
* set the direction control and start the DMA controller
*/
}
/*
* start dma command, either in or out
*/
static int
{
#ifdef NCQ
#endif
" satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
return (NV_FAILURE);
}
/*
* start the drive in motion
*/
/*
* the drive starts processing the transaction when the cmd register
* is written. This is done here before programming the DMA engine to
* parallelize and save some time. In the event that the drive is ready
* before DMA, it will wait.
*/
#ifdef NCQ
if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
(cmd == SATAC_READ_FPDMA_QUEUED)) {
}
#endif
/*
* copy the PRD list to PRD table in DMA accessible memory
* so that the controller can access it.
*/
/* Set the number of bytes to transfer, 0 implies 64KB */
if (size == 0x10000)
size = 0;
/*
* If this is a 40-bit address, copy bits 32-40 of the
* physical address to bits 16-24 of the PRD count.
*/
}
/*
* set the end of table flag for the last entry
*/
}
}
#ifdef NCQ
/*
* optimization: for SWNCQ, start DMA engine if this is the only
* command running. Preliminary NCQ efforts indicated this needs
* more debugging.
*
* if (nvp->nvp_ncq_run <= 1)
*/
"NOT NCQ so starting DMA NOW non_ncq_commands=%d"
} else {
}
#endif /* NCQ */
return (SATA_TRAN_ACCEPTED);
}
/*
* start a PIO data-in ATA command
*/
static int
{
/*
* This next one sets the drive in motion
*/
return (SATA_TRAN_ACCEPTED);
}
/*
* start a PIO data-out ATA command
*/
static int
{
/*
* this next one sets the drive in motion
*/
/*
* wait for the busy bit to settle
*/
NV_DELAY_NSEC(400);
/*
* wait for the drive to assert DRQ to send the first chunk
* of data. Have to busy wait because there's no interrupt for
* the first chunk. This is bad... uses a lot of cycles if the
* drive responds too slowly or if the wait loop granularity
* is too large. It's even worse if the drive is defective and
* the loop times out.
*/
4000000, 0) == B_FALSE) {
goto error;
}
/*
* send the first block.
*/
/*
* If nvslot_flags is not set to COMPLETE yet, then processing
* is OK so far, so return. Otherwise, fall into error handling
* below.
*/
return (SATA_TRAN_ACCEPTED);
}
/*
* there was an error so reset the device and complete the packet.
*/
return (SATA_TRAN_PORT_ERROR);
}
/*
* start a ATAPI Packet command (PIO data in or out)
*/
static int
{
"nv_start_pkt_pio: start"));
/*
* Write the PACKET command to the command register. Normally
* this would be done through nv_program_taskfile_regs(). It
* is done here because some values need to be overridden.
*/
/* select the drive */
/* make certain the drive selected */
"nv_start_pkt_pio: drive select failed"));
return (SATA_TRAN_PORT_ERROR);
}
/*
* The command is always sent via PIO, despite whatever the SATA
* framework sets in the command. Overwrite the DMA bit to do this.
* Also, overwrite the overlay bit to be safe (it shouldn't be set).
*/
/* set appropriately by the sata framework */
/* initiate the command by writing the command register last */
/* Give the host controller time to do its thing */
NV_DELAY_NSEC(400);
/*
* Wait for the device to indicate that it is ready for the command
* ATAPI protocol state - HP0: Check_Status_A
*/
4000000, 0) == B_FALSE) {
/*
* Either an error or device fault occurred or the wait
* timed out. According to the ATAPI protocol, command
* completion is also possible. Other implementations of
* this protocol don't handle this last case, so neither
* does this code.
*/
(SATA_STATUS_ERR | SATA_STATUS_DF)) {
"nv_start_pkt_pio: device error (HP0)"));
} else {
"nv_start_pkt_pio: timeout (HP0)"));
}
return (SATA_TRAN_PORT_ERROR);
}
/*
* Put the ATAPI command in the data register
* ATAPI protocol state - HP1: Send_Packet
*/
/*
* See you in nv_intr_pkt_pio.
* ATAPI protocol state - HP3: INTRQ_wait
*/
"nv_start_pkt_pio: exiting into HP3"));
return (SATA_TRAN_ACCEPTED);
}
/*
* Interrupt processing for a non-data ATA command.
*/
static void
{
/*
* check for errors
*/
nvp->nvp_altstatus);
} else {
}
}
/*
* ATA command, PIO data in
*/
static void
{
int count;
if (status & SATA_STATUS_BSY) {
nvp->nvp_altstatus);
return;
}
/*
* check for errors
*/
SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
return;
}
/*
* read the next chunk of data (if any)
*/
/*
* read count bytes
*/
if (nv_slotp->nvslot_byte_count != 0) {
/*
* more to transfer. Wait for next interrupt.
*/
return;
}
/*
* transfer is complete. wait for the busy bit to settle.
*/
NV_DELAY_NSEC(400);
}
/*
* ATA command PIO data out
*/
static void
{
int count;
/*
* clear the IRQ
*/
if (status & SATA_STATUS_BSY) {
/*
* this should not happen
*/
nvp->nvp_altstatus);
return;
}
/*
* check for errors
*/
return;
}
/*
* this is the condition which signals the drive is
* no longer ready to transfer. Likely that the transfer
* completed successfully, but check that byte_count is
* zero.
*/
if ((status & SATA_STATUS_DRQ) == 0) {
if (nv_slotp->nvslot_byte_count == 0) {
/*
* complete; successful transfer
*/
} else {
/*
* error condition, incomplete transfer
*/
}
return;
}
/*
* write the next chunk of data
*/
/*
* read or write count bytes
*/
}
/*
*
* Under normal circumstances, one of four different interrupt scenarios
* will result in this function being called:
*
* 1. Packet command data transfer
* 2. Packet command completion
* 3. Request sense data transfer
* 4. Request sense command completion
*/
static void
{
int count;
/* ATAPI protocol state - HP2: Check_Status_B */
"nv_intr_pkt_pio: status 0x%x", status));
if (status & SATA_STATUS_BSY) {
} else {
}
"nv_intr_pkt_pio: busy - status 0x%x", status));
return;
}
if ((status & SATA_STATUS_DF) != 0) {
/*
* On device fault, just clean up and bail. Request sense
* will just default to its NO SENSE initialized value.
*/
}
nvp->nvp_altstatus);
"nv_intr_pkt_pio: device fault"));
return;
}
if ((status & SATA_STATUS_ERR) != 0) {
/*
* On command error, figure out whether we are processing a
* request sense. If so, clean up and bail. Otherwise,
* do a REQUEST SENSE.
*/
NV_FAILURE) {
spkt);
}
nvp->nvp_altstatus);
} else {
}
"nv_intr_pkt_pio: error (status 0x%x)", status));
return;
}
/*
* REQUEST SENSE command processing
*/
if ((status & (SATA_STATUS_DRQ)) != 0) {
/* ATAPI state - HP4: Transfer_Data */
/* read the byte count from the controller */
"nv_intr_pkt_pio: ctlr byte count - %d",
ctlr_count));
if (ctlr_count == 0) {
/* no data to transfer - some devices do this */
"nv_intr_pkt_pio: done (no data)"));
return;
}
/* transfer the data */
/* consume residual bytes */
ctlr_count -= count;
if (ctlr_count > 0) {
}
"nv_intr_pkt_pio: transition to HP2"));
} else {
/* still in ATAPI state - HP2 */
/*
* In order to avoid clobbering the rqsense data
* set by the SATA framework, the sense data read
* from the device is put in a separate buffer and
* copied into the packet after the request sense
* command successfully completes.
*/
"nv_intr_pkt_pio: request sense done"));
}
return;
}
/*
* Normal command processing
*/
if ((status & (SATA_STATUS_DRQ)) != 0) {
/* ATAPI protocol state - HP4: Transfer_Data */
/* read the byte count from the controller */
if (ctlr_count == 0) {
/* no data to transfer - some devices do this */
"nv_intr_pkt_pio: done (no data)"));
return;
}
"nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count));
"nv_intr_pkt_pio: byte_count 0x%x",
/* transfer the data */
if (direction == SATA_DIR_READ) {
ctlr_count -= count;
if (ctlr_count > 0) {
/* consume remainding bytes */
for (; ctlr_count > 0;
ctlr_count -= 2)
"nv_intr_pkt_pio: bytes remained"));
}
} else {
}
"nv_intr_pkt_pio: transition to HP2"));
} else {
/* still in ATAPI state - HP2 */
"nv_intr_pkt_pio: done"));
}
}
/*
*/
static void
{
/*
* stop DMA engine.
*/
/*
* get the status and clear the IRQ, and check for DMA error
*/
/*
* check for drive errors
*/
(void) nv_bm_status_clear(nvp);
return;
}
/*
* check for bus master errors
*/
if (bm_status & BMISX_IDERR) {
nvp->nvp_altstatus);
return;
}
}
/*
* Wait for a register of a controller to achieve a specific state.
* To return normally, all the bits in the first sub-mask must be ON,
* all the bits in the second sub-mask must be OFF.
* If timeout_usec microseconds pass without the controller achieving
* the desired bit configuration, return TRUE, else FALSE.
*
* hybrid waiting algorithm: if not in interrupt context, busy looping will
* occur for the first 250 us, then switch over to a sleeping wait.
*
*/
int
int type_wait)
{
int first_time = B_TRUE;
for (;;) {
return (B_TRUE);
}
/*
* store the start time and calculate the end
* time. also calculate "start_sleep" which is
* the point after which the driver will stop busy
* waiting and change to sleep waiting.
*/
if (first_time) {
/*
* start and end are in nanoseconds
*/
/*
* add 1 ms to start
*/
if (servicing_interrupt()) {
}
}
break;
}
#if ! defined(__lock_lint)
delay(1);
#endif
} else {
}
}
return (B_FALSE);
}
/*
* This is a slightly more complicated version that checks
* for error conditions and bails-out rather than looping
* until the timeout is exceeded.
*
* hybrid waiting algorithm: if not in interrupt context, busy looping will
* occur for the first 250 us, then switch over to a sleeping wait.
*/
int
int type_wait)
{
int first_time = B_TRUE;
for (;;) {
/*
* check for expected condition
*/
return (B_TRUE);
}
/*
* check for error conditions
*/
(val & failure_offbits2) == 0) {
return (B_FALSE);
}
(val & failure_offbits3) == 0) {
return (B_FALSE);
}
/*
* store the start time and calculate the end
* time. also calculate "start_sleep" which is
* the point after which the driver will stop busy
* waiting and change to sleep waiting.
*/
if (first_time) {
/*
* start and end are in nanoseconds
*/
/*
* add 1 ms to start
*/
if (servicing_interrupt()) {
}
} else {
}
break;
}
#if ! defined(__lock_lint)
delay(1);
#endif
} else {
}
}
return (B_FALSE);
}
/*
* nv_port_state_change() reports the state of the port to the
* sata module by calling sata_hba_event_notify(). This
* function is called any time the state of the port is changed
*/
static void
{
"nv_port_state_change: event 0x%x type 0x%x state 0x%x "
/*
* When NCQ is implemented sactive and snotific field need to be
* updated.
*/
}
/*
* Monitor reset progress and signature gathering.
* This function may loop, so it should not be called from interrupt
* context.
*
* Entered with nvp mutex held.
*/
static void
{
int send_notification = B_FALSE;
/*
* We do not know here the reason for port reset.
* Check the link status. The link needs to be active before
* we can check the link's status.
*/
/*
* Either link is not active or there is no device
* If the link remains down for more than NV_LINK_DOWN_TIMEOUT
* (milliseconds), abort signature acquisition and complete
* reset processing.
* The link will go down when COMRESET is sent by nv_reset(),
* so it is practically nvp_reset_time milliseconds.
*/
"nv_monitor_reset: no link - ending signature "
"acquisition; time after reset %ldms",
nvp->nvp_reset_time)));
}
/*
* Else, if the link was lost (i.e. was present before)
* the controller should generate a 'remove' interrupt
* that will cause the appropriate event notification.
*/
return;
}
"nv_monitor_reset: link up after reset; time %ldms",
if (nvp->nvp_signature != 0) {
/*
* The link is up. The signature was acquired before (device
* was present).
* But we may need to wait for the signature (D2H FIS) before
* accessing the drive.
*/
if (nv_wait_for_signature != 0) {
nvp->nvp_signature = 0;
if (nvp->nvp_signature == 0) {
#ifdef NV_DEBUG
/* FOR DEBUGGING */
if (nv_wait_here_forever) {
drv_usecwait(1000);
goto sig_read;
}
#endif
/*
* Wait, but not endlessly.
*/
if (TICK_TO_MSEC(ddi_get_lbolt() -
nvp->nvp_reset_time) <
drv_usecwait(1000);
goto sig_read;
/*
* Retry reset.
*/
"nv_monitor_reset: retrying reset "
"time after first reset: %ldms",
nvp->nvp_reset_time)));
goto sig_read;
}
"nv_monitor_reset: terminating signature "
"acquisition (1); time after reset: %ldms",
nvp->nvp_reset_time)));
} else {
"nv_monitor_reset: signature acquired; "
"time after reset: %ldms",
nvp->nvp_reset_time)));
}
}
/*
* Clear reset state, set device reset recovery state
*/
/*
* Need to send reset event notification
*/
} else {
/*
* The link is up. The signature was not acquired before.
* We can try to fetch a device signature.
*/
if (nvp->nvp_signature != 0) {
/*
* Got device signature.
*/
"nv_monitor_reset: signature acquired; "
"time after reset: %ldms",
nvp->nvp_reset_time)));
/* Clear internal reset state */
if (dev_type != SATA_DTYPE_NONE) {
/*
* We acquired the signature for a
* pre-existing device that was not identified
* before and and was reset.
* Need to enter the device reset recovery
* state and to send the reset notification.
*/
} else {
/*
* Else, We acquired the signature because a new
* device was attached (the driver attach or
* a hot-plugged device). There is no need to
* enter the device reset recovery state or to
* send the reset notification, but we may need
* to send a device attached notification.
*/
SATA_ADDR_CPORT, 0);
}
}
} else {
if (TICK_TO_MSEC(ddi_get_lbolt() -
drv_usecwait(1000);
goto acquire_signature;
/*
* Some drives may require additional
* reset(s) to get a valid signature
* (indicating that the drive is ready).
* If a drive was not just powered
* up, the signature should be available
* within few hundred milliseconds
* after reset. Therefore, if more than
* NV_SIG_ACQUISITION_TIME has elapsed
* while waiting for a signature, reset
* device again.
*/
"nv_monitor_reset: retrying reset "
"time after first reset: %ldms",
nvp->nvp_reset_time)));
drv_usecwait(1000);
goto acquire_signature;
}
/*
* Terminating signature acquisition.
* Hopefully, the drive is ready.
* The SATA module can deal with this as long as it
* knows that some device is attached and a device
* responds to commands.
*/
}
SATA_ADDR_CPORT, 0);
}
"nv_monitor_reset: terminating signature "
"acquisition (2); time after reset: %ldms",
nvp->nvp_reset_time)));
}
}
if (send_notification) {
}
}
/*
* Send a hotplug (add device) notification at the appropriate time after
* hotplug detection.
* Relies on nvp_reset_time set at a hotplug detection time.
* Called only from nv_timeout when NV_PORT_HOTPLUG_DELAY flag is set in
* the nvp_state.
*/
static void
{
"nv_delay_hotplug_notification: notifying framework after "
nvp->nvp_reset_time)));
SATA_ADDR_CPORT, 0);
}
}
/*
* timeout processing:
*
* Check if any packets have crossed a timeout threshold. If so,
* abort the packet. This function is not NCQ-aware.
*
* If reset was invoked, call reset monitoring function.
*
* Timeout frequency may be lower for checking packet timeout (1s)
* and higher for reset monitoring (1ms)
*
*/
static void
nv_timeout(void *arg)
{
static int intr_warn_once = 0;
nvp->nvp_timeout_id = 0;
/*
* If the port is not in the init state, ignore it.
*/
"nv_timeout: port uninitialized"));
next_timeout = 0;
goto finished;
}
goto finished;
}
goto finished;
}
/*
* Not yet NCQ-aware - there is only one command active.
*/
/*
* perform timeout checking and processing only if there is an
* active packet on the port
*/
#if ! defined(__lock_lint) && defined(DEBUG)
#endif
/*
* timeout not needed if there is a polling thread
*/
next_timeout = 0;
goto finished;
}
spkt->satapkt_time) {
nvp->nvp_serror);
"nv_timeout: aborting: "
"nvslot_stime: %ld max ticks till timeout: "
"%ld cur_time: %ld cmd=%x lba=%d",
"nv_timeout: SError at timeout: 0x%x", serr));
"nv_timeout: previous cmd=%x",
nvp->nvp_previous_cmd));
nvp->nvp_altstatus);
"nv_timeout: altstatus %x, bmicx %x, "
int_status));
if (int_status & MCP5X_INT_COMPLETE) {
/*
* Completion interrupt was missed!
* Issue warning message once
*/
if (!intr_warn_once) {
"nv_sata: missing command "
"completion interrupt(s)!");
intr_warn_once = 1;
}
nvp, "timeout detected with "
"interrupt ready - calling "
"int directly"));
} else {
/*
* True timeout and not a missing
* interrupt.
*/
}
} else {
}
} else {
#ifdef NV_DEBUG
if (nv_debug_flags & NVDBG_VERBOSE) {
"nv_timeout:"
" still in use so restarting timeout"));
}
#endif
}
} else {
/*
* there was no active packet, so do not re-enable timeout
*/
next_timeout = 0;
#ifdef NV_DEBUG
if (nv_debug_flags & NVDBG_VERBOSE) {
"nv_timeout: no active packet so not re-arming "
"timeout"));
}
#endif
}
if (next_timeout != 0) {
}
}
/*
* enable or disable the 3 interrupt types the driver is
* interested in: completion, add and remove.
*/
static void
{
if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
int_en);
return;
}
/*
* controller level lock also required since access to an 8-bit
* interrupt register is shared between both channels.
*/
if (flag & NV_INTR_CLEAR_ALL) {
"ck804_set_intr: NV_INTR_CLEAR_ALL"));
"interrupt bits cleared %x",
}
}
if (flag & NV_INTR_DISABLE) {
"ck804_set_intr: NV_INTR_DISABLE"));
int_en);
}
if (flag & NV_INTR_ENABLE) {
int_en);
}
}
/*
* enable or disable the 3 interrupts the driver is interested in:
* completion interrupt, hot add, and hot remove interrupt.
*/
static void
{
if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
return;
}
if (flag & NV_INTR_CLEAR_ALL) {
"mcp5x_set_intr: NV_INTR_CLEAR_ALL"));
}
if (flag & NV_INTR_ENABLE) {
}
if (flag & NV_INTR_DISABLE) {
"mcp5x_set_intr: NV_INTR_DISABLE"));
}
}
static void
{
return;
}
/* Enable interrupt */
/*
* Power may have been removed to the port and the
* Force a reset which will cause a probe and re-establish
* any state needed on the drive.
*/
#ifdef SGPIO_SUPPORT
#endif
}
static void
{
#ifdef SGPIO_SUPPORT
#endif
return;
}
/*
* Stop the timeout handler.
* (It will be restarted in nv_reset() during nv_resume().)
*/
if (nvp->nvp_timeout_id) {
nvp->nvp_timeout_id = 0;
}
/* Disable interrupt */
}
static void
{
struct sata_cmd_flags flags;
return;
}
/*
* in the error case, implicitly set the return of regs needed
* for error handling.
*/
nvp->nvp_altstatus);
if (status & SATA_STATUS_ERR) {
}
/*
* set HOB so that high byte will be read
*/
/*
* get the requested high bytes
*/
if (flags.sata_copy_out_sec_count_msb) {
}
if (flags.sata_copy_out_lba_low_msb) {
}
if (flags.sata_copy_out_lba_mid_msb) {
}
if (flags.sata_copy_out_lba_high_msb) {
}
}
/*
* disable HOB so that low byte is read
*/
/*
* get the requested low bytes
*/
if (flags.sata_copy_out_sec_count_lsb) {
}
if (flags.sata_copy_out_lba_low_lsb) {
}
if (flags.sata_copy_out_lba_mid_lsb) {
}
if (flags.sata_copy_out_lba_high_lsb) {
}
/*
* get the device register if requested
*/
if (flags.sata_copy_out_device_reg) {
}
/*
* get the error register if requested
*/
if (flags.sata_copy_out_error_reg) {
}
}
/*
* Hot plug and remove interrupts can occur when the device is reset. Just
* masking the interrupt doesn't always work well because if a
* different interrupt arrives on the other port, the driver can still
* end up checking the state of the other port and discover the hot
* interrupt flag is set even though it was masked. Checking for recent
* reset activity and then ignoring turns out to be the easiest way.
*
* Entered with nvp mutex held.
*/
/*
 * nv_report_add_remove() -- per the debug strings below, this routine
 * reports hot plug / hot remove events to the sata framework after waiting
 * for sstatus to settle.  Entered with nvp mutex held (see block comment
 * above).
 *
 * NOTE(review): the name/argument line and many statements are not visible
 * in this view; comments below describe only what is visible.
 */
static void
{
int i;
"time (ticks) %d", nv_lbolt));
/*
* wait up to 1ms for sstatus to settle and reflect the true
* status of the port. Failure to do so can create confusion
* in probe, where the incorrect sstatus value can still
* persist.
*/
for (i = 0; i < 1000; i++) {
if ((flags == NV_PORT_HOTREMOVED) &&
((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) !=
break;
}
if ((flags != NV_PORT_HOTREMOVED) &&
((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
break;
}
/* poll at 1us granularity; loop bound gives the 1ms cap above */
drv_usecwait(1);
}
"sstatus took %d us for DEVPRE_PHYCOM to settle", i));
if (flags == NV_PORT_HOTREMOVED) {
B_FALSE);
/*
* No device, no point of bothering with device reset
*/
nvp->nvp_signature = 0;
"nv_report_add_remove() hot removed"));
SATA_ADDR_CPORT, 0);
} else {
/*
* This is a hot plug or link up indication
* Now, re-check the link state - no link, no device
*/
/*
* Real device attach - there was no device
* attached to this port before this report
*/
"nv_report_add_remove() new device hot"
"plugged"));
(NV_PORT_RESET_RETRY | NV_PORT_RESET))) {
nvp->nvp_signature = 0;
if (nv_reset_after_hotplug != 0) {
/*
* Send reset to obtain a device
* signature
*/
"nv_report_add_remove() "
"resetting device"));
} else {
}
}
if (nv_reset_after_hotplug == 0) {
/*
* In case a hotplug interrupt
* is generated right after a
* link is up, delay reporting
* a hotplug event to let the
* drive to initialize and to
* send a D2H FIS with a
* signature.
* The timeout will issue an
* event notification after
* the NV_HOTPLUG_DELAY
* milliseconds delay.
*/
/*
* Make sure timer is running.
*/
} else {
SATA_ADDR_CPORT, 0);
}
}
return;
}
/*
* Otherwise it is a bogus attach, indicating recovered
* link loss. No real need to report it after-the-fact.
* But we may keep some statistics, or notify the
* sata module by reporting LINK_LOST/LINK_ESTABLISHED
* events to keep track of such occurrences.
* Anyhow, we may want to terminate signature
* acquisition.
*/
"nv_report_add_remove() ignoring plug interrupt "
"- recovered link?"));
(NV_PORT_RESET_RETRY | NV_PORT_RESET)) {
"nv_report_add_remove() - "
"time since last reset %dms",
nvp->nvp_reset_time)));
/*
* If the driver does not have to wait for
* a signature, then terminate reset processing
* now.
*/
if (nv_wait_for_signature == 0) {
nvp, "nv_report_add_remove() - ",
"terminating signature acquisition",
", time after reset: %dms",
nvp->nvp_reset_time)));
/*
* It is not the initial device
* probing, so notify sata
* module that device was
* reset
*/
}
}
}
return;
}
"ignoring add dev interrupt - "
"link is down or no device!"));
}
}
/*
* Get request sense data and stuff it into the command's sense buffer.
* Start a request sense command in order to get sense data to insert
* in the sata packet's rqsense buffer. The command completion
* processing is in nv_intr_pkt_pio.
*
* The sata framework provides a function to allocate and set-up a
* request sense packet command. The reasons it is not being used here is:
* a) it cannot be called in an interrupt context and this function is
* called in an interrupt context.
* b) it allocates DMA resources that are not used here because this is
* implemented using PIO.
*
* If, in the future, this is changed to use DMA, the sata framework should
* be used to allocate and set-up the error retrieval (request sense)
* command.
*/
/*
 * nv_start_rqsense_pio() -- per the debug strings below, starts a PIO
 * request-sense PACKET command (ATAPI states HP0 Check_Status_A and
 * HP1 Send_Packet); completion is handled in nv_intr_pkt_pio (see block
 * comment above).  Returns NV_SUCCESS or NV_FAILURE.
 *
 * NOTE(review): the name/argument line and many statements are not visible
 * in this view; comments below describe only what is visible.
 */
static int
{
"nv_start_rqsense_pio: start"));
/* clear the local request sense buffer before starting the command */
/* Write the request sense PACKET command */
/* select the drive */
/* make certain the drive selected */
"nv_start_rqsense_pio: drive select failed"));
return (NV_FAILURE);
}
/* set up the command */
/* initiate the command by writing the command register last */
NV_DELAY_NSEC(400);
/*
* Wait for the device to indicate that it is ready for the command
* ATAPI protocol state - HP0: Check_Status_A
*/
4000000, 0) == B_FALSE) {
(SATA_STATUS_ERR | SATA_STATUS_DF)) {
"nv_start_rqsense_pio: rqsense dev error (HP0)"));
} else {
"nv_start_rqsense_pio: rqsense timeout (HP0)"));
}
return (NV_FAILURE);
}
/*
* Put the ATAPI command in the data register
* ATAPI protocol state - HP1: Send_Packet
*/
"nv_start_rqsense_pio: exiting into HP3"));
return (NV_SUCCESS);
}
/*
* quiesce(9E) entry point.
*
* This function is called when the system is single-threaded at high
* PIL with preemption disabled. Therefore, this function must not be
* blocked.
*
* This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
* DDI_FAILURE indicates an error condition and should almost never happen.
*/
/*
 * quiesce(9E) implementation (see block comment above): silences the
 * controllers' interrupts, clears signature state, and pulses PHY reset
 * via scontrol for each port.  Must not block -- runs single-threaded at
 * high PIL with preemption disabled.
 *
 * NOTE(review): the name/argument line and several statements are not
 * visible in this view; comments below describe only what is visible.
 */
static int
{
return (DDI_FAILURE);
/*
* Stop the controllers from generating interrupts.
*/
/*
* clear signature registers
*/
nvp->nvp_signature = 0;
/*
* assert reset in PHY by writing a 1 to bit 0 scontrol
*/
/*
* wait 1ms for the PHY reset to be registered
*/
drv_usecwait(1000);
/*
* de-assert reset in PHY
*/
}
return (DDI_SUCCESS);
}
#ifdef SGPIO_SUPPORT
/*
* NVIDIA specific SGPIO LED support
* Please refer to the NVIDIA documentation for additional details
*/
/*
* nv_sgp_led_init
* Detect SGPIO support. If present, initialize.
*/
/*
 * nv_sgp_led_init (see block comment above): detects SGPIO support and, if
 * present and enabled by the "enable-sgpio-leds" property, maps the SGPIO
 * Control Block, initializes the hardware, sets up (or attaches to) the
 * per-CBP shared space, and starts the activity-LED taskq.
 *
 * NOTE(review): the name/argument line and many statements are not visible
 * in this view; comments below describe only what is visible.
 */
static void
{
int i;
char tqname[SGPIO_TQ_NAME_LEN];
/*
* Initialize with appropriately invalid values in case this function
* exits without initializing SGPIO (for example, there is no SGPIO
* support).
*/
nvc->nvc_sgp_csr = 0;
/*
* Only try to initialize SGPIO LED support if this property
* indicates it should be.
*/
"enable-sgpio-leds", 0) != 1)
return;
/*
* CK804 can pass the sgpio_detect test even though it does not support
* SGPIO, so don't even look at a CK804.
*/
return;
/*
* The NVIDIA SGPIO support can nominally handle 6 drives.
* However, the current implementation only supports 4 drives.
* With two drives per controller, that means only look at the
* first two controllers.
*/
return;
/* confirm that the SGPIO registers are there */
"SGPIO registers not detected"));
return;
}
/* save off the SGPIO_CSR I/O address */
/* map in Control Block */
/* initialize the SGPIO h/w */
"!Unable to initialize SGPIO");
}
/*
* Initialize the shared space for this instance. This could
* involve allocating the space, saving a pointer to the space
* and starting the taskq that actually turns the LEDs on and off.
* Or, it could involve just getting the pointer to the already
* allocated space.
*/
/* try and find our CBP in the mapping table */
for (i = 0; i < NV_MAX_CBPS; i++) {
break;
}
if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
break;
}
if (i >= NV_MAX_CBPS) {
/*
* CBP to shared space mapping table is full
*/
"!LED handling not initialized - too many controllers");
/*
* Allocate the shared space, point the SGPIO scratch register
* at it and start the led update taskq.
*/
/* allocate shared space */
KM_SLEEP);
"!Failed to allocate shared data");
return;
}
/* initialize the shared data structure */
cmn->nvs_connected = 0;
cmn->nvs_activity = 0;
/* put the address in the SGPIO scratch register */
#if defined(__amd64)
#else
#endif
/* add an entry to the cbp to cmn mapping table */
/* i should be the next available table position */
/* start the activity LED taskq */
/*
* The taskq name should be unique and the time
*/
TASKQ_DEFAULTPRI, 0);
cmn->nvs_taskq_delay = 0;
"!Failed to start activity LED taskq");
} else {
}
} else {
}
}
/*
* nv_sgp_detect
* Read the SGPIO_CSR and SGPIO_CBP values from PCI config space and
* report back whether both were readable.
*/
/*
 * nv_sgp_detect (see block comment above): reads SGPIO_CSRP and SGPIO_CBP
 * from PCI config space into *csrpp / *cbpp; a zero value for either means
 * SGPIO is not supported.  Returns NV_SUCCESS or NV_FAILURE.
 *
 * NOTE(review): the name/argument line and the config-space reads are not
 * visible in this view.
 */
static int
{
/* get the SGPIO_CSRP */
if (*csrpp == 0) {
return (NV_FAILURE);
}
/* SGPIO_CSRP is good, get the SGPIO_CBP */
if (*cbpp == 0) {
return (NV_FAILURE);
}
/* SGPIO_CBP is good, so we must support SGPIO */
return (NV_SUCCESS);
}
/*
* nv_sgp_init
* Initialize SGPIO.
* The initialization process is described by NVIDIA, but the hardware does
* omitted.
*/
/*
 * nv_sgp_init (see block comment above): issues SGPIO_CMD_READ_PARAMS and
 * polls for completion (up to ~2000ms, checking every 400ns), then sanity
 * checks the reported state and drive count.  Returns NV_SUCCESS or
 * NV_FAILURE.
 *
 * NOTE(review): the name/argument line and many statements are not visible
 * in this view; comments below describe only what is visible.
 */
static int
{
int seq;
int rval = NV_SUCCESS;
int drive_count;
/* SGPIO logic is in reset state and requires initialization */
/* noting the Sequence field value */
/* issue SGPIO_CMD_READ_PARAMS command */
/* poll for command completion */
for (;;) {
/* break on error */
"Command error during initialization"));
rval = NV_FAILURE;
break;
}
/* command processing is taking place */
"Sequence number change error"));
}
break;
}
/* if completion not detected in 2000ms ... */
break;
/* wait 400 ns before checking again */
NV_DELAY_NSEC(400);
}
}
if (rval == NV_FAILURE)
return (rval);
"SGPIO logic not operational after init - state %d",
/*
* Should return (NV_FAILURE) but the hardware can be
* operational even if the SGPIO Status does not indicate
* this.
*/
}
/*
* NVIDIA recommends reading the supported drive count even
* though they also indicate that it is always 4 at this time.
*/
if (drive_count != SGPIO_DRV_CNT_VALUE) {
"SGPIO reported undocumented drive count - %d",
drive_count));
}
"initialized ctlr: %d csr: 0x%08x",
return (rval);
}
/*
 * NOTE(review): the name/argument line and the guarding condition are not
 * visible in this view; the body shows only a failure return followed by a
 * success return (possibly nv_sgp_check_set_cmn -- confirm against the
 * complete source).
 */
static int
{
return (NV_FAILURE);
return (NV_SUCCESS);
}
/*
* nv_sgp_csr_read
* This is just a 32-bit port read from the value that was obtained from the
* PCI config space.
*
* XXX It was advised to use the in[bwl] function for this, even though they
* are obsolete interfaces.
*/
/*
 * nv_sgp_csr_read (see block comment above): 32-bit I/O port read of the
 * SGPIO CSR.  NOTE(review): the name/argument line and the read itself are
 * not visible in this view.
 */
static int
{
}
/*
* nv_sgp_csr_write
* This is just a 32-bit I/O port write. The port number was obtained from
* the PCI config space.
*
* XXX It was advised to use the out[bwl] function for this, even though they
* are obsolete interfaces.
*/
/*
 * nv_sgp_csr_write (see block comment above): 32-bit I/O port write of the
 * SGPIO CSR.  NOTE(review): the name/argument line and the write itself are
 * not visible in this view.
 */
static void
{
}
/*
* nv_sgp_write_data
* Cause SGPIO to send Control Block data
*/
/*
 * nv_sgp_write_data (see block comment above): issues the command that
 * causes SGPIO to send the Control Block data, then polls for completion
 * every 400ns.  Returns NV_SUCCESS or NV_FAILURE.
 *
 * NOTE(review): the name/argument line and the completion tests are not
 * visible in this view; the trailing success/failure returns are presumably
 * selected by an elided status check -- confirm against the complete source.
 */
static int
{
/* issue command */
/* poll for completion */
for (;;) {
/* break on error completion */
break;
/* break on successful completion */
break;
/* Wait 400 ns and try again */
NV_DELAY_NSEC(400);
break;
}
return (NV_SUCCESS);
return (NV_FAILURE);
}
/*
* nv_sgp_activity_led_ctl
* This is run as a taskq. It wakes up at a fixed interval and checks to
* see if any of the activity LEDs need to be changed.
*/
/*
 * nv_sgp_activity_led_ctl (see block comment above): taskq worker that
 * periodically recomputes the per-drive activity LED state from the shared
 * connected/active flags and writes it out via nv_sgp_write_data(), then
 * sleeps for the loop interval; exits when the interval drops to 0.
 *
 * NOTE(review): many statements (including the declarations of the
 * old_leds/new_led_state/drv_leds/ticks locals used below) are not visible
 * in this view; comments below describe only what is visible.
 */
static void
nv_sgp_activity_led_ctl(void *arg)
{
volatile nv_sgp_cb_t *cbp;
int i;
do {
/* save off the old state of all of the LEDs */
int, old_leds);
new_led_state = 0;
/* for each drive */
for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {
/* get the current state of the LEDs for the drive */
/* if not connected, turn off activity */
drv_leds &= ~TR_ACTIVE_MASK;
new_led_state &= SGPIO0_TR_DRV_CLR(i);
continue;
}
/* connected, but not active */
drv_leds &= ~TR_ACTIVE_MASK;
new_led_state &= SGPIO0_TR_DRV_CLR(i);
continue;
}
/* connected and active */
/* was enabled, so disable (makes the LED blink) */
drv_leds &= ~TR_ACTIVE_MASK;
drv_leds |=
new_led_state &= SGPIO0_TR_DRV_CLR(i);
} else {
/* was disabled, so enable */
drv_leds &= ~TR_ACTIVE_MASK;
new_led_state &= SGPIO0_TR_DRV_CLR(i);
}
/*
* clear the activity bit
* if there is drive activity again within the
* loop interval (now 1/16 second), nvs_activity
* will be reset and the "connected and active"
* condition above will cause the LED to blink
* off and on at the loop interval rate. The
* rate may be increased (interval shortened) as
* long as it is not more than 1/30 second.
*/
}
/* write out LED values */
"nv_sgp_write_data failure updating active LED"));
}
/* now rest for the interval */
if (ticks > 0)
ddi_get_lbolt() + ticks);
} while (ticks > 0);
}
/*
* nv_sgp_drive_connect
* Set the flag used to indicate that the drive is attached to the HBA.
* Used to let the taskq know that it should turn the Activity LED on.
*/
/*
 * nv_sgp_drive_connect: sets the drive-attached flag (see block comment
 * above).  NOTE(review): the name/argument line and the flag update are not
 * visible in this view.
 */
static void
{
return;
}
/*
* nv_sgp_drive_disconnect
* Clears the flag used to indicate that the drive is no longer attached
* to the HBA. Used to let the taskq know that it should turn the
* Activity LED off. The flag that indicates that the drive is in use is
* also cleared.
*/
/*
 * nv_sgp_drive_disconnect: clears the drive-attached and in-use flags (see
 * block comment above).  NOTE(review): the name/argument line and the flag
 * updates are not visible in this view.
 */
static void
{
return;
}
/*
* nv_sgp_drive_active
* Sets the flag used to indicate that the drive has been accessed and the
* LED should be flicked off, then on. It is cleared at a fixed time
* interval by the LED taskq and set by the sata command start.
*/
/*
 * nv_sgp_drive_active: sets the drive-activity flag consumed by the LED
 * taskq (see block comment above).  NOTE(review): the name/argument line
 * and the flag update are not visible in this view.
 */
static void
{
return;
}
/*
* nv_sgp_locate
* Turns the Locate LED on or off for a particular drive.  State is
* maintained in the SGPIO Control Block.
*/
/*
 * NOTE(review): the name/argument line, the guard conditions and the LED
 * write-back are not visible in this view; only the Locate-mask clear of
 * the local LED state remains.
 */
static void
{
return;
return;
leds &= ~TR_LOCATE_MASK;
}
}
/*
* nv_sgp_error
* Turns the Error LED on or off for a particular drive.  State is
* maintained in the SGPIO Control Block.
*/
/*
 * NOTE(review): the name/argument line, the guard conditions and the LED
 * write-back are not visible in this view; only the Error-mask clear of
 * the local LED state remains.
 */
static void
{
return;
return;
leds &= ~TR_ERROR_MASK;
}
}
/*
 * SGPIO teardown (presumably nv_sgp_cleanup -- the name/argument line is
 * not visible in this view): turns off this controller's LEDs, clears its
 * in-use bit in the shared data, and when the last controller detaches,
 * stops the activity taskq, clears all LEDs, removes the CBP-to-cmn
 * mapping entry, frees the shared space and unmaps the Control Block.
 *
 * NOTE(review): many statements are not visible in this view; comments
 * below describe only what is visible.
 */
static void
{
int drive, i;
/*
* If the SGPIO Control Block isn't mapped or the shared data
* structure isn't present in this instance, there isn't much that
* can be cleaned up.
*/
return;
/* turn off activity LEDs for this controller */
/* get the existing LED state */
/* turn off port 0 */
/* turn off port 1 */
/* set the new led state, which should turn off this ctrl's LEDs */
(void) nv_sgp_write_data(nvc);
/* clear the controller's in use bit */
if (cmn->nvs_in_use == 0) {
/* if all "in use" bits cleared, take everything down */
/* allow activity taskq to exit */
cmn->nvs_taskq_delay = 0;
/* then destroy it */
}
/* turn off all of the LEDs */
(void) nv_sgp_write_data(nvc);
/* zero out the CBP to cmn mapping */
for (i = 0; i < NV_MAX_CBPS; i++) {
break;
}
if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
break;
}
/* free resources */
}
/* unmap the SGPIO Control Block */
}
#endif /* SGPIO_SUPPORT */