mr_sas_tbolt.c revision 1a5e258f5471356ca102c7176637cdce45bac147
/*
* mr_sas_tbolt.c: source for mr_sas driver for New Generation.
* i.e. Thunderbolt and Invader
*
* Solaris MegaRAID device driver for SAS2.0 controllers
* Copyright (c) 2008-2012, LSI Logic Corporation.
* All rights reserved.
*
* Version:
* Author:
* Swaminathan K S
* Arun Chandrashekhar
* Manju R
* Rasheed
* Shakeel Bukhari
*/
/*
* Copyright 2013 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/byteorder.h>
#include "ld_pd_map.h"
#include "mr_sas.h"
#include "fusion.h"
/*
* FMA header files
*/
/* Pre-TB command size and TB command size. */
extern ddi_dma_attr_t mrsas_generic_dma_attr;
extern uint32_t mrsas_tbolt_max_cap_maxxfer;
extern struct ddi_device_acc_attr endian_attr;
extern int debug_level_g;
extern unsigned int enable_fp;
volatile int dump_io_wait_time = 90;
extern volatile int debug_timeout_g;
extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
extern void push_pending_mfi_pkt(struct mrsas_instance *,
struct mrsas_cmd *);
/* Local static prototypes. */
static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
#ifdef PDSUPPORT
static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
struct mrsas_tbolt_pd_info *, int);
#endif /* PDSUPPORT */
static int debug_tbolt_fw_faults_after_ocr_g = 0;
/*
* destroy_mfi_mpi_frame_pool
*/
void
{
int i;
/* return all mfi frames to pool */
for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
(void) mrsas_free_dma_obj(instance,
cmd->frame_dma_obj);
}
}
}
/*
* destroy_mpi2_frame_pool
*/
void
{
(void) mrsas_free_dma_obj(instance,
}
}
/*
* mrsas_tbolt_free_additional_dma_buffer
*/
void
{
int i;
(void) mrsas_free_dma_obj(instance,
}
(void) mrsas_free_dma_obj(instance,
}
for (i = 0; i < 2; i++) {
(void) mrsas_free_dma_obj(instance,
instance->ld_map_obj[i]);
}
}
}
/*
* free_req_desc_pool
*/
void
{
(void) mrsas_free_dma_obj(instance,
}
(void) mrsas_free_dma_obj(instance,
}
}
/*
* ThunderBolt(TB) Request Message Frame Pool
*/
int
{
int i = 0;
sgl_sz = 1024;
/* Allocating additional 256 bytes to accommodate SMID 0. */
"max_cmd %x", max_cmd));
"request message frame pool size %x", total_size));
/*
* ThunderBolt(TB) We need to create a single chunk of DMA'ble memory
* and then split the memory to 1024 commands. Each command should be
* able to contain a RAID MESSAGE FRAME which will embed a MFI_FRAME
* within it. Further refer the "alloc_req_rep_desc" function where
*/
0xFFFFFFFFU;
0xFFFFFFFFU;
"mr_sas: could not alloc mpi2 frame pool");
return (DDI_FAILURE);
}
(uint32_t)
(void *)instance->io_request_frames));
"io req_base_phys 0x%x", io_req_base_phys));
for (i = 0; i < max_cmd; i++) {
offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
(i * SENSE_LENGTH));
(i * SENSE_LENGTH));
}
return (DDI_SUCCESS);
}
/*
* alloc_additional_dma_buffer for AEN
*/
int
{
int i;
/* Initialize buffer status as free */
0xFFFFFFFFU;
"mr_sas: could not alloc reply queue");
return (DDI_FAILURE);
}
/* allocate evt_detail */
"could not allocate data transfer buffer.");
}
sizeof (struct mrsas_evt_detail));
for (i = 0; i < 2; i++) {
/* allocate the data transfer buffer */
0xFFFFFFFFU;
"could not allocate data transfer buffer.");
}
}
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
{
"Invalid SMID 0x%x request for descriptor", index));
return (NULL);
}
((char *)instance->request_message_pool +
(sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
"request descriptor : 0x%08lx", (unsigned long)req_desc));
"request descriptor base phy : 0x%08lx",
(unsigned long)instance->request_message_pool_phy));
return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
}
/*
* Allocate Request and Reply Queue Descriptors.
*/
int
{
int i, max_reply_q_sz;
/*
* ThunderBolt(TB) There's no longer producer consumer mechanism.
* Once we have an interrupt we are supposed to scan through the list of
* reply descriptors and process them accordingly. We would be needing
* to allocate memory for 1024 reply descriptors
*/
/* Allocate Reply Descriptors */
(uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
/* reply queue size should be multiple of 16 */
(uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
"mr_sas: could not alloc reply queue");
return (DDI_FAILURE);
}
/* virtual address of reply queue */
(void *)instance->reply_frame_pool));
/* initializing reply address to 0xFFFFFFFF */
for (i = 0; i < instance->reply_q_depth; i++) {
reply_desc++;
}
(int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
/* Allocate Request Descriptors */
(int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
request_q_sz = 8 *
(instance->max_fw_cmds);
0xFFFFFFFFU;
"mr_sas: could not alloc request queue desc");
goto fail_undo_reply_queue;
}
/* virtual address of request queue desc */
return (DDI_SUCCESS);
(void) mrsas_free_dma_obj(instance,
}
return (DDI_FAILURE);
}
/*
* mrsas_alloc_cmd_pool_tbolt
*
* TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have single
* routine
*/
int
{
int i;
int count;
"max_cmd %x", max_cmd));
/*
* instance->cmd_list is an array of struct mrsas_cmd pointers.
* Allocate the dynamic array first and then allocate individual
* commands.
*/
/* create a frame pool and assign one frame to each cmd */
}
/* add all the commands to command pool */
/* cmd index 0 reserved for IOC INIT */
for (i = 1; i < reserve_cmd; i++) {
}
for (i = reserve_cmd; i < max_cmd; i++) {
}
return (DDI_SUCCESS);
if (count > 0) {
/* free each cmd */
for (i = 0; i < count; i++) {
sizeof (struct mrsas_cmd));
}
}
}
return (DDI_FAILURE);
}
/*
* free_space_for_mpi2
*/
void
{
/* already freed */
return;
}
/* First free the additional DMA buffer */
/* Free the MPI message pool */
/* Free the MFI frame pool */
/* Free all the commands in the cmd_list */
/* Free the cmd_list buffer itself */
}
/*
*/
int
{
/* Allocate command pool (memory for cmd_list & individual commands) */
if (mrsas_alloc_cmd_pool_tbolt(instance)) {
return (DDI_FAILURE);
}
/* Initialize single reply size and Message size */
(sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
/* Reduce SG count by 1 to take care of group cmds feature in FW */
sizeof (MPI2_SGE_IO_UNION)) / 16;
instance->reply_read_index = 0;
/* Allocate Request and Reply descriptors Array */
if (alloc_req_rep_desc(instance)) {
"Error, allocating memory for descripter-pool");
goto mpi2_undo_cmd_pool;
}
/* Allocate MFI Frame pool - for MPI-MFI passthru commands */
if (create_mfi_frame_pool(instance)) {
"Error, allocating memory for MFI frame-pool");
}
/* Allocate MPI2 Message pool */
/*
* Make sure the buffer is aligned to 256 for raid message packet
* create a io request pool and assign one frame to each cmd
*/
if (create_mpi2_frame_pool(instance)) {
"Error, allocating memory for MPI2 Message-pool");
goto mpi2_undo_mfi_frame_pool;
}
#ifdef DEBUG
#endif
/* Allocate additional dma buffer */
"Error, allocating tbolt additional DMA buffer");
goto mpi2_undo_message_pool;
}
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
/*
* mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
*/
int
{
/*
* Reduce the max supported cmds by 1. This is to ensure that the
* reply_q_sz (1 more than the max cmd that driver may send)
* does not exceed max cmds that the FW can support
*/
}
/* create a pool of commands */
" alloc_space_for_mpi2() failed.");
return (DDI_FAILURE);
}
/* Send ioc init message */
/* NOTE: the issue_init call does FMA checking already. */
" mrsas_issue_init_mpi2() failed.");
goto fail_init_fusion;
}
"mrsas_init_adapter_tbolt: SUCCESSFUL"));
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
/*
* init_mpi2
*/
int
{
int ret_val = DDI_SUCCESS;
/* allocate DMA buffer for IOC INIT message */
"could not allocate data transfer buffer.");
return (DDI_FAILURE);
}
"mrsas_issue_init_mpi2 _phys adr: %x",
/* Initialize and send ioc init message */
if (ret_val == DDI_FAILURE) {
"mrsas_issue_init_mpi2: Failed"));
goto fail_init_mpi2;
}
/* free IOC init DMA buffer */
!= DDI_SUCCESS) {
"mrsas_issue_init_mpi2: Free Failed"));
return (DDI_FAILURE);
}
(void) mrsas_tbolt_sync_map_info(instance);
/* No mrsas_cmd to send, so send NULL. */
goto fail_init_mpi2;
"mrsas_issue_init_mpi2: SUCCESSFUL"));
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
static int
{
int numbytes;
struct mrsas_init_frame2 *mfiFrameInit2;
struct mrsas_header *frame_hdr;
struct mrsas_drv_ver drv_ver_info;
#ifdef DEBUG
(int)sizeof (*mfiFrameInit2)));
(int)sizeof (struct mrsas_init_frame2)));
(int)sizeof (Mpi2IOCInitRequest_t)));
#endif
/* set MsgVersion and HeaderVersion host driver was built with */
0);
/*
* These addresses are set using the DMA cookie addresses from when the
* memory was allocated. Sense buffer hi address should be 0.
* ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
*/
&init->SenseBufferAddressHigh, 0);
&init->ReplyFreeQueueAddress, 0);
return (DDI_FAILURE);
}
cmd->retry_count_for_ocr = 0;
cmd->drv_pkt_time = 0;
/* Init the MFI Header */
sizeof (Mpi2IOCInitRequest_t));
(int)init->ReplyDescriptorPostQueueAddress));
/* fill driver version information */
/* allocate the driver version data transfer buffer */
"fusion init: Could not allocate driver version buffer.");
return (DDI_FAILURE);
}
/* copy driver version to dma buffer */
/* send driver version physical address to firmware */
(int)sizeof (Mpi2IOCInitRequest_t)));
(int)sizeof (struct mrsas_init_frame2)));
/* disable interrupts before sending INIT2 frame */
/* issue the init frame */
frame_hdr->cmd_status));
&mfiFrameInit2->cmd_status) == 0) {
} else {
goto fail_ioc_init;
}
return (DDI_SUCCESS);
return (DDI_FAILURE);
}
int
{
int i;
for (i = 0; i < wait_time; i++) {
/*
* Check For Outstanding poll Commands
* except ldsync command and aen command
*/
break;
}
/* complete commands from reply queue */
}
return (1);
}
return (0);
}
/*
* scsi_pkt handling
*
* Visible to the external world via the transport structure.
*/
int
{
"mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
"for IO, as the HBA doesnt take any more IOs");
if (pkt) {
}
return (TRAN_FATAL_ERROR);
}
if (instance->adapterresetinprogress) {
"returning mfi_pkt and setting TRAN_BUSY\n"));
return (TRAN_BUSY);
}
(void) mrsas_tbolt_prepare_pkt(acmd);
/*
* Check if the command is already completed by the mrsas_build_cmd()
* routine. In which case the busy_flag would be clear and scb will be
* NULL and appropriate reason provided in pkt_reason field
*/
if (cmd_done) {
}
return (TRAN_ACCEPT);
}
return (TRAN_BUSY);
}
"Command Queue Full... Returning BUSY");
return (TRAN_BUSY);
}
/* Synchronize the Cmd frame for the controller */
} else {
(void) wait_for_outstanding_poll_io(instance);
}
return (TRAN_ACCEPT);
}
/*
* prepare the pkt:
* the pkt may have been resubmitted or just reused so
* initialize some fields and do some checks.
*/
static int
{
/*
* Reinitialize some fields that need it; the packet may
* have been resubmitted
*/
pkt->pkt_statistics = 0;
/*
* zero status byte.
*/
return (0);
}
int
{
int sg_to_process;
uint32_t i, j;
"chkpnt: Building Chained SGL :%d", __LINE__));
/* Calculate SGE size in number of Words(32bit) */
/* Clear the datalen before updating it. */
*datalen = 0;
/* set data transfer flag. */
} else {
}
"[Max SGE Count Exceeded]:%x", numElements));
return (numElements);
}
/* set end element in main message frame */
/* prepare the scatter-gather list for the firmware */
}
for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
if (i == (numElements - 1)) {
}
}
#ifdef DEBUG
#endif
}
/* check if chained SGL required */
if (i < numElements) {
if ((ioFlags &
} else {
&scsi_raid_io->ChainOffset, 0);
}
} else {
}
/* prepare physical chain element */
} else {
}
(sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
sg_to_process = numElements - i;
"[Additional SGE Count]:%x", endElement));
/* point to the chained SGL buffer */
/* build rest of the SGL in chained buffer */
for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
if (i == (numElements - 1)) {
}
}
#if DEBUG
"[SGL Address]: %" PRIx64,
#endif
i++;
}
}
return (0);
} /*end of BuildScatterGather */
/*
* build_cmd
*/
static struct mrsas_cmd *
{
uint8_t fp_possible = 0;
uint32_t start_lba_hi = 0;
uint32_t start_lba_lo = 0;
struct IO_REQUEST_INFO io_info;
"chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
/* find out if this is logical or physical drive command. */
*cmd_done = 0;
/* get the command packet */
return (NULL);
}
ReqDescUnion->Words = 0;
/* lets get the command directions */
}
}
} else {
}
/* get SCSI_IO raid message frame pointer */
/* zero out SCSI_IO raid message frame */
/* Set the ldTargetId set by BuildRaidContext() */
/* Copy CDB to scsi_io_request message frame */
/*
* Just the CDB length, rest of the Flags are zero
* This will be modified later.
*/
case SCMD_READ:
case SCMD_WRITE:
case SCMD_READ_G1:
case SCMD_WRITE_G1:
case SCMD_READ_G4:
case SCMD_WRITE_G4:
case SCMD_READ_G5:
case SCMD_WRITE_G5:
/* Initialize sense Information */
"Sense buffer ptr NULL "));
}
/* 6-byte cdb */
<< 16));
/* 10-byte cdb */
/* 12-byte cdb */
lba_count = (
/* 16-byte cdb */
lba_count = (
start_lba_lo = (
start_lba_hi = (
}
"controller limit 0x%x sectors",
}
else
/* Acquire SYNC MAP UPDATE lock */
if ((MR_TargetIdToLdGet(
"targetId >= MAX_LOGICAL_DRIVES || "
"!instance->fast_path_io");
fp_possible = 0;
/* Set Regionlock flags to BYPASS */
/* io_request->RaidContext.regLockFlags = 0; */
} else {
}
if (!enable_fp)
fp_possible = 0;
"instance->fast_path_io %d fp_possible %d",
if (fp_possible) {
/* Check for DIF enabled LD */
/* Prepare 32 Byte CDB for DIF capable Disk */
} else {
(uint8_t *)&pd_cmd_cdblen,
}
&scsi_raid_io->IoFlags);
if (regLockFlags == REGION_TYPE_UNUSED)
IoFlags |=
regLockFlags |=
&scsi_raid_io->ChainOffset, 0);
((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
}
if ((instance->load_balance_info[
&io_info);
cmd->load_balance_flag |=
} else {
cmd->load_balance_flag &=
}
} else {
if (regLockFlags == REGION_TYPE_UNUSED) {
}
regLockFlags |=
((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
}
} /* Not FP */
/* Release SYNC MAP UPDATE lock */
/*
*/
/* Construct SGL */
scsi_raid_io, &datalen);
break;
#ifndef PDSUPPORT /* if PDSUPPORT, skip break and fall through */
} else {
break;
#endif
}
default:
case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
*cmd_done = 1;
return (NULL);
}
case SCMD_MODE_SENSE:
case SCMD_MODE_SENSE_G1: {
switch (page_code) {
case 0x3:
case 0x4:
(void) mrsas_mode_sense_build(pkt);
*cmd_done = 1;
return (NULL);
}
break;
}
default: {
/*
* Here we need to handle PASSTHRU for
* Logical Devices. Like Inquiry etc.
*/
/* Acquire SYNC MAP UPDATE lock */
/* Set regLockFlasgs to REGION_TYPE_BYPASS */
0);
0);
/* Release SYNC MAP UPDATE lock */
} else {
}
/*
* scsi_io_request.
*/
/* Construct SGL */
scsi_raid_io, &datalen);
"tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
"data length = %x\n",
"cdb length = %x\n",
acmd->cmd_cdblen));
}
break;
}
}
return (cmd);
}
{
}
void
{
" [req desc low part] %x \n",
if (pkt) {
"ISSUED CMD TO FW : called : cmd:"
": %p instance : %p pkt : %p pkt_time : %x\n",
if (instance->adapterresetinprogress) {
"TBOLT Reset the scsi_pkt timer"));
} else {
}
} else {
"ISSUED CMD TO FW : called : cmd : %p, instance: %p"
}
/* Issue the command to the FW */
}
/*
* issue_cmd_in_sync_mode
*/
int
{
int i;
struct mrsas_header *hdr;
(CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
if (instance->adapterresetinprogress) {
"RESET-IN-PROGRESS, issue cmd & return."));
return (DDI_SUCCESS);
} else {
"tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
}
"HighQport offset :%p",
"LowQport offset :%p",
}
if (i < (msecs -1)) {
return (DDI_SUCCESS);
} else {
return (DDI_FAILURE);
}
}
/*
* issue_cmd_in_poll_mode
*/
int
{
int i;
struct mrsas_header *frame_hdr;
(CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
/* issue the frame using inbound queue port */
for (i = 0; i < msecs && (
== MFI_CMD_STATUS_POLL_MODE); i++) {
/* wait for cmd_status to change from 0xFF */
}
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
void
{
/* TODO: For Thunderbolt/Invader also clear intr on enable */
/* writel(~0, ®s->outbound_intr_status); */
/* readl(®s->outbound_intr_status); */
/* dummy read to force PCI flush */
(void) RD_OB_INTR_MASK(instance);
}
void
{
/* Dummy readl to force pci flush */
(void) RD_OB_INTR_MASK(instance);
}
int
{
/* check if it is our interrupt */
"chkpnt: Entered tbolt_intr_ack status = %d", status));
if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
return (DDI_INTR_UNCLAIMED);
}
return (DDI_INTR_UNCLAIMED);
}
/* clear the interrupt by writing back the same value */
/* dummy READ */
(void) RD_OB_INTR_STATUS(instance);
}
return (DDI_INTR_CLAIMED);
}
/*
* get_raid_msg_pkt : Get a command from the free pool
* After successful allocation, the caller of this routine
* must clear the frame buffer (memset to zero) before
* using the packet further.
*
* ***** Note *****
* After clearing the frame buffer the context id of the
* frame buffer SHOULD be restored back.
*/
struct mrsas_cmd *
{
if (!mlist_empty(head)) {
}
cmd->retry_count_for_ocr = 0;
cmd->drv_pkt_time = 0;
}
sizeof (Mpi2RaidSCSIIORequest_t));
return (cmd);
}
struct mrsas_cmd *
{
if (!mlist_empty(head)) {
}
cmd->retry_count_for_ocr = 0;
cmd->drv_pkt_time = 0;
}
sizeof (Mpi2RaidSCSIIORequest_t));
}
return (cmd);
}
/*
* return_raid_msg_pkt : Return a cmd to free command pool
*/
void
{
}
void
{
}
void
{
return;
}
if (!ReqDescUnion) {
return;
}
ReqDescUnion->Words = 0;
/* get raid message frame pointer */
}
/* LSI put hardcoded 1024 instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
"[MFI CMD PHY ADDRESS]:%" PRIx64,
}
void
{
struct scsi_arq_status *arqstat;
if (status != MFI_STAT_OK) {
} else {
}
/* regular commands */
case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */
}
"FastPath IO Completion Success "));
/* FALLTHRU */
case MPI2_FUNCTION_LD_IO_REQUEST : { /* Regular Path IO. */
}
}
pkt->pkt_statistics = 0;
struct scsi_inquiry *inq;
if (acmd->cmd_dmacount != 0) {
inq = (struct scsi_inquiry *)
/* don't expose physical drives to OS */
(status == MFI_STAT_OK)) {
#ifdef PDSUPPORT
} else if ((status == MFI_STAT_OK) &&
#endif
} else {
/* for physical disk */
}
}
}
switch (status) {
case MFI_STAT_OK:
break;
break;
break;
case MFI_STAT_SCSI_IO_FAILED:
break;
"tbolt_complete_cmd: scsi_done with error"));
(CE_WARN, "TEST_UNIT_READY fail"));
} else {
arqstat->sts_rqpkt_resid = 0;
(CE_NOTE, "Copying Sense data %x",
sizeof (struct scsi_extended_sense),
}
break;
case MFI_STAT_LD_OFFLINE:
"tbolt_complete_cmd: ld offline "
"CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
/* UNDO: */
break;
"tbolt_complete_cmd: device not found error"));
break;
arqstat->sts_rqpkt_resid = 0;
/*
* LOGICAL BLOCK ADDRESS OUT OF RANGE:
* ASC: 0x21h; ASCQ: 0x00h;
*/
break;
case MFI_STAT_INVALID_CMD:
case MFI_STAT_INVALID_DCMD:
default:
break;
}
if (acmd->cmd_dmahandle) {
DDI_SUCCESS) {
pkt->pkt_statistics = 0;
}
}
/* Call the callback routine */
break;
}
case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFA command. */
"LDMAP sync command SMID RECEIVED 0x%X",
"map sync failed, status = 0x%x.",
} else {
"map sync received, switched map_id to %"
}
} else {
instance->fast_path_io = 0;
}
"instance->fast_path_io %d",
instance->fast_path_io));
(void) mrsas_tbolt_sync_map_info(instance);
}
break;
}
"AEN command SMID RECEIVED 0x%X",
"aborted_aen returned"));
} else {
}
}
"Sync-mode Command Response SMID RECEIVED 0x%X",
} else {
"tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
}
break;
default:
/* free message */
(CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
break;
}
}
{
union desc_value d_val;
struct mrsas_header *hdr;
0, 0, DDI_DMA_SYNC_FORDEV);
0, 0, DDI_DMA_SYNC_FORCPU);
return (DDI_INTR_UNCLAIMED);
!= DDI_SUCCESS) {
(CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
"FMA check, returning DDI_INTR_UNCLAIMED"));
return (DDI_INTR_CLAIMED);
}
/* Read Reply descriptor */
0, 0, DDI_DMA_SYNC_FORCPU);
"Reply Desc at Break = %p Words = %" PRIx64,
break;
}
if (!cmd) {
"outstanding_cmd: Invalid command "
" or Poll commad Received in completion path"));
} else {
if (hdr) {
"tbolt_process_outstanding_cmd:"
" mlist_del_init(&cmd->list)."));
}
} else {
if (pkt) {
"tbolt_process_outstanding_cmd:"
"mlist_del_init(&cmd->list)."));
}
}
}
/* set it back to all 1s. */
instance->reply_read_index = 0;
}
/* Get the next reply descriptor */
if (!instance->reply_read_index)
else
desc++;
"Next Reply Desc = %p Words = %" PRIx64,
break;
} /* End of while loop. */
/* update replyIndex to FW */
0, 0, DDI_DMA_SYNC_FORDEV);
0, 0, DDI_DMA_SYNC_FORCPU);
return (DDI_INTR_CLAIMED);
}
/*
* complete_cmd_in_sync_mode - Completes an internal command
* @instance: Adapter soft state
* @cmd: Command to be completed
*
* The issue_cmd_in_sync_mode() function waits for a command to complete
* after it issues a command. This function wakes up that waiting routine by
* calling wake_up() on the wait queue.
*/
void
{
cmd->cmd_status = 0;
}
}
/*
* mrsas_tbolt_get_ld_map_info - Returns ld_map structure
* instance: Adapter soft state
*
* Issues an internal command (DCMD) to get the FW's controller PD
* list structure. This information is mainly used to find out SYSTEM
* supported by the FW.
*/
int
{
int ret = 0;
struct mrsas_dcmd_frame *dcmd;
"Failed to get a cmd from free-pool in get_ld_map_info()");
return (DDI_FAILURE);
}
size_map_info = sizeof (MR_FW_RAID_MAP) +
(sizeof (MR_LD_SPAN_MAP) *
(MAX_LOGICAL_DRIVES - 1));
"size_map_info : 0x%x", size_map_info));
if (!ci) {
return (-1);
}
ret = 0;
} else {
ret = -1;
}
return (ret);
}
void
{
uint32_t i;
union desc_value d_val;
"i=%d, %x:%x",
}
}
/*
* mrsas_tbolt_command_create - Create command for fast path.
* @io_info: MegaRAID IO request packet pointer.
*
* Create the command for fast path.
*/
void
{
/* Prepare 32-byte CDB if DIF is supported on this device */
else
/* Verify within linux driver, set to MEGASAS_RD_WR_PROTECT_CHECK_ALL */
/* LOGICAL BLOCK ADDRESS */
/* Logical block reference tag */
/* Specify 32-byte cdb */
/* Transfer length */
/* set SCSI IO EEDPFlags */
/* set SCSI IO EEDPFlags bits */
/*
* For READ commands, the EEDPFlags shall be set to specify to
* Increment the Primary Reference Tag, to Check the Reference
* Tag, and to Check and Remove the Protection Information
* fields.
*/
} else {
/*
* For WRITE commands, the EEDPFlags shall be set to specify to
* Increment the Primary Reference Tag, and to Insert
* Protection Information fields.
*/
}
}
/*
* mrsas_tbolt_set_pd_lba - Sets PD LBA
* @cdb: CDB
* @cdb_len: cdb length
* @start_blk: Start block of IO
*
* Used to set the PD LBA in CDB for FP IOs
*/
static void
{
/* Some drives don't support 16/12 byte CDB's, convert to 10 */
(start_blk <= 0xffffffff)) {
if (cdb_len == 16) {
} else {
}
/* Set transfer length */
cdb_len = 10;
/* Convert to 16 byte CDB for large LBA's */
(CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
switch (cdb_len) {
case 6:
break;
case 10:
break;
case 12:
break;
}
/* Transfer length */
/* Specify 16-byte cdb */
cdb_len = 16;
/* convert to 10 byte CDB */
/* Set transfer length */
/* Specify 10-byte cdb */
cdb_len = 10;
}
/* Fall through Normal case, just load LBA here */
switch (cdb_len) {
case 6:
{
break;
}
case 10:
break;
case 12:
break;
case 16:
break;
}
*cdb_len_ptr = cdb_len;
}
static int
{
if (!mrsas_tbolt_get_ld_map_info(instance)) {
(CE_CONT, "MR_ValidateMapInfo success"));
(CE_NOTE, "instance->fast_path_io %d",
instance->fast_path_io));
return (DDI_SUCCESS);
}
}
instance->fast_path_io = 0;
return (DDI_FAILURE);
}
/*
* Marks HBA as bad. This will be called either when an
* IO packet times out even after 3 FW resets
* or FW is found to be fault even after 3 continuous resets.
*/
void
{
return;
"Writing to doorbell with MFI_STOP_ADP "));
/* Flush */
(void) RD_RESERVED0_REGISTER(instance);
(void) mrsas_print_pending_cmds(instance);
(void) mrsas_complete_pending_cmds(instance);
}
void
{
int i;
instance->reply_read_index = 0;
/* initializing reply address to 0xFFFFFFFF */
for (i = 0; i < instance->reply_q_depth; i++) {
reply_desc++;
}
}
int
{
uint32_t i;
"mrsas_tbolt_reset_ppc entered"));
"no more resets as HBA has been marked dead ");
return (DDI_FAILURE);
}
"adpterresetinprogress flag set, time %llx", gethrtime()));
/* Add delay in order to complete the ioctl & io cmds in-flight */
for (i = 0; i < 3000; i++) {
}
instance->reply_read_index = 0;
":Resetting TBOLT "));
"mrsas_tbolt_reset_ppc: magic number written "
"to write sequence register"));
"mrsas_tbolt_reset_ppc: READ HOSTDIAG SUCCESS "
"to write sequence register"));
while (status & DIAG_TBOLT_RESET_ADAPTER) {
if (retry++ == 100) {
"mrsas_tbolt_reset_ppc:"
"resetadapter bit is set already "
"check retry count %d", retry);
return (DDI_FAILURE);
}
}
while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
if (retry++ == 100) {
/* Dont call kill adapter here */
/* RESET BIT ADAPTER is cleared by firmware */
/* mrsas_tbolt_kill_adapter(instance); */
"mr_sas %d: %s(): RESET FAILED; return failure!!!",
return (DDI_FAILURE);
}
}
(CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
"Calling mfi_state_transition_to_ready"));
retry = 0;
}
if (abs_state <= MFI_STATE_FW_INIT) {
"mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
"state = 0x%x, RETRY RESET.", abs_state);
goto retry_reset;
}
/* Mark HBA as bad, if FW is fault after 3 continuous resets */
"mrsas_tbolt_reset_ppc :before fake: FW is not ready "
"FW state = 0x%x", fw_state));
if (debug_tbolt_fw_faults_after_ocr_g == 1)
(CE_NOTE, "mrsas_tbolt_reset_ppc : FW is not ready "
"FW state = 0x%x", fw_state));
if (fw_state == MFI_STATE_FAULT) {
/* increment the count */
< MAX_FW_RESET_COUNT) {
"FW is in fault after OCR count %d "
"Retry Reset",
goto retry_reset;
} else {
"Max Reset Count exceeded >%d"
"Mark HBA as bad, KILL adapter",
return (DDI_FAILURE);
}
}
}
/* reset the counter as FW is up after OCR */
"Calling mrsas_issue_init_mpi2"));
"INIT failed Retrying Reset");
goto retry_reset;
}
"mrsas_issue_init_mpi2 Done"));
"Calling mrsas_print_pending_cmd"));
(void) mrsas_print_pending_cmds(instance);
"mrsas_print_pending_cmd done"));
instance->fw_outstanding = 0;
"Calling mrsas_issue_pending_cmds"));
(void) mrsas_issue_pending_cmds(instance);
"issue_pending_cmds done."));
"Calling aen registration"));
"adpterresetinprogress flag unset"));
return (DDI_SUCCESS);
}
/*
* mrsas_sync_map_info - Returns FW's ld_map structure
* @instance: Adapter soft state
*
* Issues an internal command (DCMD) to get the FW's controller PD
* list structure. This information is mainly used to find out SYSTEM
* supported by the FW.
*/
static int
{
int ret = 0, i;
struct mrsas_dcmd_frame *dcmd;
"mrsas_tbolt_sync_map_info(). ");
return (DDI_FAILURE);
}
/* Clear the frame buffer and assign back the context id */
(CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
i, ld_sync->ldTargetId));
}
size_map_info = sizeof (MR_FW_RAID_MAP) +
return (ret);
}
/*
* abort_syncmap_cmd
*/
int
struct mrsas_cmd *cmd_to_abort)
{
int ret = 0;
struct mrsas_abort_frame *abort_fr;
if (!cmd) {
"Failed to get a cmd from free-pool abort_syncmap_cmd().");
return (DDI_FAILURE);
}
/* Clear the frame buffer and assign back the context id */
/* prepare and issue the abort frame */
&abort_fr->abort_mfi_phys_addr_hi, 0);
"abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
ret = -1;
} else {
ret = 0;
}
return (ret);
}
#ifdef PDSUPPORT
/*
* Even though these functions were originally intended for 2208 only, it
* turns out they're useful for "Skinny" support as well. In a perfect world,
* these two functions would be either in mr_sas.c, or in their own new source
* file. Since this driver needs some cleanup anyway, keep this portion in
* mind as well.
*/
int
{
struct scsi_device *sd;
if (ldip) {
}
"mr_sas:DELETING STALE ENTRY rval = %d "
return (NDI_FAILURE);
}
return (NDI_SUCCESS);
}
pds = (struct mrsas_tbolt_pd_info *)
/* Check for Disk */
if ((dtype == DTYPE_DIRECT)) {
if ((dtype == DTYPE_DIRECT) &&
return (NDI_FAILURE);
}
"Phys. device found: tgt %d dtype %d: %s",
} else {
rval = NDI_FAILURE;
"scsi_hba_probe Failed: tgt %d dtype %d: %s",
}
/* sd_unprobe is blank now. Free buffer manually */
}
} else {
"Device not supported: tgt %d lun %d dtype %d",
rval = NDI_FAILURE;
}
rval));
return (rval);
}
static void
{
struct mrsas_dcmd_frame *dcmd;
else
if (!cmd) {
(CE_WARN, "Failed to get a cmd for get pd info"));
return;
}
/* Clear the frame buffer and assign back the context id */
sizeof (struct mrsas_tbolt_pd_info));
sizeof (struct mrsas_tbolt_pd_info));
else
}
#endif