Lines Matching defs:skdev

102 static int skd_format_internal_skspcl(struct skd_device *skdev);
104 static void skd_destroy_mutex(skd_device_t *skdev);
106 static void skd_request_fn_not_online(skd_device_t *skdev);
111 static void skd_release_intr(skd_device_t *skdev);
112 static void skd_isr_fwstate(struct skd_device *skdev);
113 static void skd_isr_msg_from_dev(struct skd_device *skdev);
114 static void skd_soft_reset(struct skd_device *skdev);
115 static void skd_refresh_device_data(struct skd_device *skdev);
119 static char *skd_pci_info(struct skd_device *skdev, char *str, size_t len);
291 * Inputs: skdev - device state structure.
303 skd_reg_write64(struct skd_device *skdev, uint64_t val, uint32_t offset)
309 addr = (uint64_t *)(skdev->dev_iobase + offset);
310 ddi_put64(skdev->dev_handle, addr, val);
317 * Inputs: skdev - device state structure.
324 skd_reg_read32(struct skd_device *skdev, uint32_t offset)
330 addr = (uint32_t *)(skdev->dev_iobase + offset);
331 return (ddi_get32(skdev->dev_handle, addr));
338 * Inputs: skdev - device state structure.
346 skd_reg_write32(struct skd_device *skdev, uint32_t val, uint32_t offset)
352 addr = (uint32_t *)(skdev->dev_iobase + offset);
353 ddi_put32(skdev->dev_handle, addr, val);
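
The three helpers above are thin wrappers that route every MMIO access through the access handle obtained from ddi_regs_map_setup(). Reassembled from the fragments as a minimal sketch (the skd_device fields dev_iobase and dev_handle are exactly those in the listing; the surrounding declarations are assumed):

    #include <sys/ddi.h>
    #include <sys/sunddi.h>

    /* Write a 64-bit value to a BAR-relative register offset. */
    static void
    skd_reg_write64(struct skd_device *skdev, uint64_t val, uint32_t offset)
    {
            uint64_t *addr;

            /* dev_iobase is the mapped kernel virtual base of the BAR */
            addr = (uint64_t *)(skdev->dev_iobase + offset);
            ddi_put64(skdev->dev_handle, addr, val);
    }

    /* Read a 32-bit register through the same access handle. */
    static uint32_t
    skd_reg_read32(struct skd_device *skdev, uint32_t offset)
    {
            uint32_t *addr;

            addr = (uint32_t *)(skdev->dev_iobase + offset);
            return (ddi_get32(skdev->dev_handle, addr));
    }
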
365 * Inputs: skdev - device state structure
371 skd_name(struct skd_device *skdev)
373 (void) snprintf(skdev->id_str, sizeof (skdev->id_str), "%s:", DRV_NAME);
375 return (skdev->id_str);
383 * Inputs: skdev - device state structure.
391 skd_pci_find_capability(struct skd_device *skdev, int cap)
397 status = pci_config_get16(skdev->pci_handle, PCI_CONF_STAT);
402 hdr = pci_config_get8(skdev->pci_handle, PCI_CONF_HEADER);
407 pos = pci_config_get8(skdev->pci_handle, PCI_CONF_CAP_PTR);
411 id = pci_config_get8(skdev->pci_handle, pos+PCI_CAP_ID);
416 pos = pci_config_get8(skdev->pci_handle, pos+PCI_CAP_NEXT_PTR);
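
skd_pci_find_capability() is a standard PCI capability-list walk: test the capability bit in the status register, then chase the next-pointer chain from the capability pointer, comparing IDs. A hedged reconstruction (the ttl guard and the >= 0x40 sanity check are conventional additions, and the header-type check visible above is omitted here):

    #include <sys/pci.h>
    #include <sys/sunddi.h>

    static int
    skd_pci_find_capability(struct skd_device *skdev, int cap)
    {
            uint16_t status;
            uint8_t pos, id;
            int ttl = 48;   /* guard against a looping capability chain */

            status = pci_config_get16(skdev->pci_handle, PCI_CONF_STAT);
            if ((status & PCI_STAT_CAP) == 0)
                    return (0);

            pos = pci_config_get8(skdev->pci_handle, PCI_CONF_CAP_PTR);
            while (ttl-- && pos >= 0x40) {
                    pos &= ~3;
                    id = pci_config_get8(skdev->pci_handle, pos + PCI_CAP_ID);
                    if (id == 0xff)
                            break;
                    if (id == cap)
                            return (pos);   /* config-space offset of cap */
                    pos = pci_config_get8(skdev->pci_handle,
                        pos + PCI_CAP_NEXT_PTR);
            }
            return (0);
    }
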
426 * Inputs: skdev - device state structure.
435 skd_io_done(skd_device_t *skdev, skd_buf_private_t *pbuf,
446 skdev->iodone_wioc++;
449 skdev->iodone_wnioc++;
452 skdev->iodone_wdebug++;
455 skdev->iodone_unknown++;
459 skdev->ios_errors++;
461 "!%s:skd_io_done:ERR=%d %lld-%ld %s", skdev->name,
487 skd_device_t *skdev;
489 skdev = ddi_get_soft_state(skd_state, ddi_get_instance(dip));
494 skd_disable_interrupts(skdev);
495 skd_soft_reset(skdev);
504 * Inputs: skdev - Device state.
511 skd_quiesce_dev(skd_device_t *skdev)
518 switch (skdev->state) {
521 Dcmn_err(CE_NOTE, "%s: stopping queue", skdev->name);
533 cmn_err(CE_NOTE, "state [%d] not implemented", skdev->state);
547 * Inputs: skdev - Device state.
554 skd_unquiesce_dev(struct skd_device *skdev)
558 skd_log_skdev(skdev, "unquiesce");
559 if (skdev->state == SKD_DRVR_STATE_ONLINE) {
564 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
573 skdev->state = SKD_DRVR_STATE_BUSY;
582 switch (skdev->state) {
592 skdev->state = SKD_DRVR_STATE_ONLINE;
593 Dcmn_err(CE_NOTE, "%s: sTec s1120 ONLINE", skdev->name);
594 Dcmn_err(CE_NOTE, "%s: Starting request queue", skdev->name);
597 skdev->name,
598 skdev->queue_depth_limit,
599 skdev->hard_queue_depth_limit,
600 skdev->soft_queue_depth_limit,
601 skdev->queue_depth_lowat);
603 skdev->gendisk_on = 1;
604 cv_signal(&skdev->cv_waitq);
609 skdev->state);
625 * Inputs: skdev - device state structure.
634 skd_blkdev_preop_sg_list(struct skd_device *skdev,
653 ASSERT(n_sg <= skdev->sgs_per_request);
692 * Inputs: skdev - device state structure.
700 skd_blkdev_postop_sg_list(struct skd_device *skdev,
716 * Inputs: skdev - device state structure.
725 skd_start(skd_device_t *skdev)
730 struct waitqueue *waitq = &skdev->waitqueue;
750 if (skdev->queue_depth_busy >= skdev->queue_depth_limit) {
752 skdev->queue_depth_busy,
753 skdev->queue_depth_limit);
757 WAITQ_LOCK(skdev);
759 WAITQ_UNLOCK(skdev);
764 skreq = skdev->skreq_free_list;
766 WAITQ_UNLOCK(skdev);
773 skdev->skreq_free_list = skreq->next;
781 skmsg = skdev->skmsg_free_list;
783 WAITQ_UNLOCK(skdev);
790 skdev->skmsg_free_list = skmsg->next;
808 pbuf = skd_get_queued_pbuf(skdev);
809 WAITQ_UNLOCK(skdev);
850 skd_blkdev_preop_sg_list(skdev, skreq, &sg_byte_count);
869 skreq->timeout_stamp = skdev->timeout_stamp;
872 atomic_inc_32(&skdev->timeout_slot[timo_slot]);
873 atomic_inc_32(&skdev->queue_depth_busy);
876 skreq->id, skdev->queue_depth_busy, timo_slot);
883 atomic_inc_64(&skdev->active_cmds);
886 skdev->fitmsg_sent1++;
887 skd_send_fitmsg(skdev, skmsg);
905 skdev->active_cmds++;
907 skdev->fitmsg_sent2++;
908 skd_send_fitmsg(skdev, skmsg);
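
skd_start() claims one request context and one FIT message context from singly linked per-device free lists, all under the waitqueue lock; if either list is empty it unlocks and backs off until completions replenish them. The list discipline, as a sketch (the helper names skd_alloc_skreq/skd_free_skreq_locked are hypothetical; the next pointers and free-list heads are the ones in the listing):

    /*
     * Pop a request context; returns NULL when the free list is empty.
     * Caller holds the waitqueue lock that serializes both free lists.
     */
    static struct skd_request_context *
    skd_alloc_skreq(struct skd_device *skdev)
    {
            struct skd_request_context *skreq = skdev->skreq_free_list;

            if (skreq != NULL)
                    skdev->skreq_free_list = skreq->next;
            return (skreq);
    }

    /* Push a completed request context back, same locking rule. */
    static void
    skd_free_skreq_locked(struct skd_device *skdev,
        struct skd_request_context *skreq)
    {
            skreq->next = skdev->skreq_free_list;
            skdev->skreq_free_list = skreq;
    }
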
916 * Inputs: skdev - device state structure.
924 skd_end_request(struct skd_device *skdev,
927 skdev->ios_completed++;
928 skd_io_done(skdev, skreq->pbuf, error, SKD_IODONE_WIOC);
937 * Inputs: skdev - device state structure.
946 skd_end_request_abnormal(skd_device_t *skdev, skd_buf_private_t *pbuf,
949 skd_io_done(skdev, pbuf, error, mode);
957 * Inputs: skdev - device state structure.
963 skd_request_fn_not_online(skd_device_t *skdev)
968 ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
970 skd_log_skdev(skdev, "req_not_online");
972 switch (skdev->state) {
1006 ASSERT(WAITQ_LOCK_HELD(skdev));
1007 if (SIMPLEQ_EMPTY(&skdev->waitqueue))
1010 while ((pbuf = skd_get_queued_pbuf(skdev)))
1011 skd_end_request_abnormal(skdev, pbuf, error, SKD_IODONE_WNIOC);
1013 cv_signal(&skdev->cv_waitq);
1020 static void skd_timer_tick_not_online(struct skd_device *skdev);
1026 * Inputs: skdev - device state structure.
1032 skd_timer_tick(skd_device_t *skdev)
1036 skdev->timer_active = 1;
1038 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
1039 skd_timer_tick_not_online(skdev);
1043 skdev->timeout_stamp++;
1044 timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
1051 if (skdev->timeout_slot[timo_slot] == 0) {
1057 skdev->timeout_slot[timo_slot],
1058 skdev->queue_depth_busy);
1059 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
1060 skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
1061 skdev->timo_slot = timo_slot;
1064 skdev->timer_active = 0;
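
skd_timer_tick() implements a coarse timeout wheel: timeout_stamp advances once per tick and its low bits (SKD_TIMEOUT_SLOT_MASK) pick a slot; each issued request increments the slot current at issue time, so a nonzero count in the slot about to be reused means requests have been outstanding for a full revolution of the wheel and the driver enters SKD_DRVR_STATE_DRAINING_TIMEOUT. The bookkeeping, sketched (helper names are hypothetical; fields and macros are from the listing):

    #include <sys/atomic.h>

    /* At issue time: stamp the request and charge the current slot. */
    static void
    skd_timeout_charge(skd_device_t *skdev, struct skd_request_context *skreq)
    {
            uint32_t timo_slot;

            skreq->timeout_stamp = skdev->timeout_stamp;
            timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
            atomic_inc_32(&skdev->timeout_slot[timo_slot]);
            atomic_inc_32(&skdev->queue_depth_busy);
    }

    /* Per tick: B_TRUE means the reused slot still holds requests. */
    static boolean_t
    skd_timeout_tick(skd_device_t *skdev)
    {
            uint32_t timo_slot;

            skdev->timeout_stamp++;
            timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
            return (skdev->timeout_slot[timo_slot] != 0);
    }
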
1072 * Inputs: skdev - device state structure.
1078 skd_timer_tick_not_online(struct skd_device *skdev)
1081 skdev->state, skdev->timer_countdown);
1083 ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
1085 switch (skdev->state) {
1091 skdev->drive_state, skdev->state);
1098 skdev->state, skdev->timer_countdown);
1099 if (skdev->timer_countdown > 0) {
1100 skdev->timer_countdown--;
1104 skdev->state, skdev->timer_countdown);
1105 skd_restart_device(skdev);
1110 if (skdev->timer_countdown > 0) {
1111 skdev->timer_countdown--;
1118 skdev->state = SKD_DRVR_STATE_FAULT;
1121 skd_name(skdev), skdev->drive_state);
1124 skd_start(skdev);
1127 skdev->gendisk_on = -1;
1129 cv_signal(&skdev->cv_waitq);
1140 skdev->name,
1141 skdev->timo_slot,
1142 skdev->timer_countdown,
1143 skdev->queue_depth_busy,
1144 skdev->timeout_slot[skdev->timo_slot]);
1146 if (skdev->timeout_slot[skdev->timo_slot] == 0) {
1148 skdev->state = SKD_DRVR_STATE_ONLINE;
1149 skd_start(skdev);
1152 if (skdev->timer_countdown > 0) {
1153 skdev->timer_countdown--;
1156 skd_restart_device(skdev);
1160 if (skdev->timer_countdown > 0) {
1161 skdev->timer_countdown--;
1169 skdev->state = SKD_DRVR_STATE_FAULT;
1171 skd_name(skdev), skdev->drive_state);
1183 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
1184 (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
1185 (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK)) {
1194 ASSERT(!WAITQ_LOCK_HELD(skdev));
1195 INTR_LOCK(skdev);
1196 skd_recover_requests(skdev);
1197 INTR_UNLOCK(skdev);
1200 skd_start(skdev);
1202 skdev->gendisk_on = -1;
1203 cv_signal(&skdev->cv_waitq);
1220 * Inputs: skdev - device state structure.
1228 skd_device_t *skdev = (skd_device_t *)arg;
1231 ADAPTER_STATE_LOCK(skdev);
1232 if (skdev->skd_timer_timeout_id != 0) {
1233 ADAPTER_STATE_UNLOCK(skdev);
1235 skd_timer_tick(skdev);
1236 ADAPTER_STATE_LOCK(skdev);
1238 if (skdev->skd_timer_timeout_id != 0) {
1239 skdev->skd_timer_timeout_id =
1243 ADAPTER_STATE_UNLOCK(skdev);
1250 * Inputs: skdev - device state structure.
1256 skd_start_timer(struct skd_device *skdev)
1259 ADAPTER_STATE_LOCK(skdev);
1260 ASSERT(skdev->skd_timer_timeout_id == 0);
1266 skdev->skd_timer_timeout_id = timeout(skd_timer, skdev, 1);
1267 ADAPTER_STATE_UNLOCK(skdev);
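
skd_timer() is a self-rearming timeout(9F) callback. The adapter state lock brackets the check of skd_timer_timeout_id: detach cancels the timer by zeroing the id, so the callback both skips the tick and declines to re-arm once it sees zero. A sketch of the pattern as it appears in the fragments (the one-tick re-arm interval is taken from the listing):

    static void
    skd_timer(void *arg)
    {
            skd_device_t *skdev = arg;

            ADAPTER_STATE_LOCK(skdev);
            if (skdev->skd_timer_timeout_id != 0) {
                    /* drop the lock around the actual tick work */
                    ADAPTER_STATE_UNLOCK(skdev);
                    skd_timer_tick(skdev);
                    ADAPTER_STATE_LOCK(skdev);

                    /* re-arm only if detach has not cancelled us meanwhile */
                    if (skdev->skd_timer_timeout_id != 0) {
                            skdev->skd_timer_timeout_id =
                                timeout(skd_timer, skdev, 1);
                    }
            }
            ADAPTER_STATE_UNLOCK(skdev);
    }
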
1279 * Inputs: skdev - device state structure.
1285 skd_format_internal_skspcl(struct skd_device *skdev)
1287 struct skd_special_context *skspcl = &skdev->internal_skspcl;
1316 * Inputs: skdev - device state structure.
1324 skd_send_internal_skspcl(struct skd_device *skdev,
1436 skd_send_special_fitmsg(skdev, skspcl);
1443 * Inputs: skdev - device state structure.
1449 skd_refresh_device_data(struct skd_device *skdev)
1451 struct skd_special_context *skspcl = &skdev->internal_skspcl;
1453 Dcmn_err(CE_NOTE, "refresh_device_data: state=%d", skdev->state);
1455 skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
1463 * Inputs: skdev - device state structure.
1473 skd_complete_internal(struct skd_device *skdev,
1484 ASSERT(skspcl == &skdev->internal_skspcl);
1504 skd_send_internal_skspcl(skdev, skspcl,
1507 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1510 "state 0x%x", skdev->name, skdev->state);
1516 skdev->name);
1517 skd_send_internal_skspcl(skdev, skspcl, 0x00);
1524 skdev->read_cap_is_valid = 0;
1528 skdev->read_cap_last_lba = cap;
1529 skdev->read_cap_blocksize =
1533 cap *= skdev->read_cap_blocksize;
1536 skdev->read_cap_last_lba,
1537 skdev->read_cap_last_lba,
1538 skdev->read_cap_blocksize,
1541 Nblocks = skdev->read_cap_last_lba + 1;
1543 skdev->Nblocks = Nblocks;
1544 skdev->read_cap_is_valid = 1;
1546 skd_send_internal_skspcl(skdev, skspcl, INQUIRY2);
1550 skd_send_internal_skspcl(skdev, skspcl,
1556 skdev->inquiry_is_valid = 0;
1558 skdev->inquiry_is_valid = 1;
1561 bcopy(&buf[4], skdev->inq_serial_num, 12);
1562 skdev->inq_serial_num[12] = '\0';
1564 char *tmp = skdev->inq_vendor_id;
1569 tmp = skdev->inq_product_id;
1573 tmp = skdev->inq_product_rev;
1579 if (skdev->state != SKD_DRVR_STATE_ONLINE)
1580 if (skd_unquiesce_dev(skdev) < 0)
1584 skdev->sync_done = (SAM_STAT_GOOD == status) ? 1 : -1;
1586 cv_signal(&skdev->cv_waitq);
1602 * Inputs: skdev - device state structure.
1610 skd_send_fitmsg(struct skd_device *skdev,
1618 skdev->queue_depth_busy);
1629 if (skdev->dbg_level > 1) {
1655 skdev->ios_started++;
1657 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
1665 * Inputs: skdev - device state structure.
1672 skd_send_special_fitmsg(struct skd_device *skdev,
1679 if (skdev->dbg_level > 1) {
1716 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
1723 static void skd_complete_other(struct skd_device *skdev,
1769 * Inputs: skdev - device state structure.
1781 skd_check_status(struct skd_device *skdev, uint8_t cmp_status,
1793 skd_name(skdev), skerr->key, skerr->code, skerr->qual);
1821 skd_name(skdev), skerr->key,
1838 skdev->name,
1839 skdev->queue_depth_busy,
1840 (void *)skdev->skmsg_free_list, skd_list_skmsg(skdev, 0),
1841 (void *)skdev->skreq_free_list, skd_list_skreq(skdev, 0));
1844 skdev->name, skerr->type, cmp_status, skerr->key,
1859 * Inputs: skdev - device state structure.
1865 skd_isr_completion_posted(struct skd_device *skdev)
1881 (void) ddi_dma_sync(skdev->cq_dma_address.dma_handle, 0, 0,
1885 ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
1887 WAITQ_LOCK(skdev);
1889 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
1895 skerr = &skdev->skerr_table[skdev->skcomp_ix];
1900 skdev->skcomp_cycle, skdev->skcomp_ix,
1902 skdev->queue_depth_busy, cmp_bytes, skdev->proto_ver);
1904 if (cmp_cycle != skdev->skcomp_cycle) {
1905 Dcmn_err(CE_NOTE, "%s:end of completions", skdev->name);
1907 WAITQ_UNLOCK(skdev);
1912 skdev->n_req++;
1918 skdev->skcomp_ix++;
1919 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
1920 skdev->skcomp_ix = 0;
1921 skdev->skcomp_cycle++; /* 8-bit wrap-around */
1938 if (req_slot >= skdev->num_req_context) {
1942 skd_complete_other(skdev, skcmp, skerr);
1943 WAITQ_UNLOCK(skdev);
1947 skreq = &skdev->skreq_table[req_slot];
1962 ASSERT(msg_slot < skdev->num_fitmsg_context);
1963 skmsg = &skdev->skmsg_table[msg_slot];
1972 skmsg->next = skdev->skmsg_free_list;
1973 skdev->skmsg_free_list = skmsg;
1981 skreq->next = skdev->skreq_free_list;
1982 skdev->skreq_free_list = skreq;
1983 WAITQ_UNLOCK(skdev);
1994 if (cmp_status && skdev->disks_initialized) {
1997 skdev->name, (void *)pbuf, pbuf->x_xfer->x_blkno,
2001 ASSERT(skdev->active_cmds);
2002 atomic_dec_64(&skdev->active_cmds);
2007 skd_blkdev_postop_sg_list(skdev, skreq);
2008 WAITQ_UNLOCK(skdev);
2009 skd_end_request(skdev, skreq, 0);
2010 WAITQ_LOCK(skdev);
2012 switch (skd_check_status(skdev, cmp_status, skerr)) {
2015 WAITQ_UNLOCK(skdev);
2016 skd_end_request(skdev, skreq, 0);
2017 WAITQ_LOCK(skdev);
2021 skd_log_skreq(skdev, skreq, "retry(busy)");
2022 skd_queue(skdev, pbuf);
2023 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2024 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2026 (void) skd_quiesce_dev(skdev);
2040 WAITQ_UNLOCK(skdev);
2041 skd_end_request(skdev, skreq, -EIO);
2042 WAITQ_LOCK(skdev);
2066 ASSERT(msg_slot < skdev->num_fitmsg_context);
2067 skmsg = &skdev->skmsg_table[msg_slot];
2072 skmsg->next = skdev->skmsg_free_list;
2073 skdev->skmsg_free_list = skmsg;
2082 ASSERT(skdev->timeout_slot[timo_slot] > 0);
2083 ASSERT(skdev->queue_depth_busy > 0);
2085 atomic_dec_32(&skdev->timeout_slot[timo_slot]);
2086 atomic_dec_32(&skdev->queue_depth_busy);
2093 skreq->next = skdev->skreq_free_list;
2094 skdev->skreq_free_list = skreq;
2096 WAITQ_UNLOCK(skdev);
2101 if ((skdev->state == SKD_DRVR_STATE_PAUSING) &&
2102 (0 == skdev->queue_depth_busy)) {
2103 skdev->state = SKD_DRVR_STATE_PAUSED;
2104 cv_signal(&skdev->cv_waitq);
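
skd_isr_completion_posted() consumes a DMA completion ring that has no hardware-maintained consumer index: each entry carries a cycle value, and an entry whose cycle does not match the expected skcomp_cycle has not been written yet. When the consumer index wraps past SKD_N_COMPLETION_ENTRY, the expected cycle increments (with 8-bit wrap). A minimal sketch of the consumption loop (the entry's cycle field name is an assumption; dispatch and the parallel error table are elided):

    static void
    skd_consume_completions(struct skd_device *skdev)
    {
            /* make the device's writes visible to the CPU first */
            (void) ddi_dma_sync(skdev->cq_dma_address.dma_handle, 0, 0,
                DDI_DMA_SYNC_FORKERNEL);

            while (skdev->skcomp_table[skdev->skcomp_ix].cycle ==
                skdev->skcomp_cycle) {
                    /* ... dispatch this entry to its request context ... */

                    if (++skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
                            skdev->skcomp_ix = 0;
                            skdev->skcomp_cycle++;  /* 8-bit wrap-around */
                    }
            }
    }
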
2114 * Inputs: skdev - device state structure.
2122 skd_complete_other(struct skd_device *skdev,
2146 skspcl = &skdev->internal_skspcl;
2151 skd_complete_internal(skdev, skcomp, skerr, skspcl);
2159 * Inputs: skdev - device state structure.
2165 skd_reset_skcomp(struct skd_device *skdev)
2173 if (skdev->skcomp_table)
2174 bzero(skdev->skcomp_table, nbytes);
2176 skdev->skcomp_ix = 0;
2177 skdev->skcomp_cycle = 1;
2190 * Inputs: arg - skdev device state structure.
2204 struct skd_device *skdev;
2206 skdev = (skd_device_t *)(uintptr_t)arg;
2208 ASSERT(skdev != NULL);
2210 skdev->intr_cntr++;
2212 Dcmn_err(CE_NOTE, "skd_isr_aif: intr=%" PRId64 "\n", skdev->intr_cntr);
2216 ASSERT(!WAITQ_LOCK_HELD(skdev));
2217 INTR_LOCK(skdev);
2219 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2237 skdev->state == SKD_DRVR_STATE_ONLINE) {
2240 skd_isr_completion_posted(skdev);
2242 INTR_UNLOCK(skdev);
2248 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
2250 if ((skdev->state != SKD_DRVR_STATE_LOAD) &&
2251 (skdev->state != SKD_DRVR_STATE_STOPPING)) {
2255 skd_isr_completion_posted(skdev);
2261 skd_isr_fwstate(skdev);
2262 if (skdev->state == SKD_DRVR_STATE_FAULT ||
2263 skdev->state ==
2265 INTR_UNLOCK(skdev);
2273 skd_isr_msg_from_dev(skdev);
2277 INTR_UNLOCK(skdev);
2280 if (!SIMPLEQ_EMPTY(&skdev->waitqueue))
2281 skd_start(skdev);
2290 * Inputs: skdev - device state structure.
2296 skd_drive_fault(struct skd_device *skdev)
2298 skdev->state = SKD_DRVR_STATE_FAULT;
2300 skd_name(skdev));
2307 * Inputs: skdev - device state structure.
2313 skd_drive_disappeared(struct skd_device *skdev)
2315 skdev->state = SKD_DRVR_STATE_DISAPPEARED;
2317 skd_name(skdev));
2324 * Inputs: skdev - device state structure.
2330 skd_isr_fwstate(struct skd_device *skdev)
2337 prev_driver_state = skdev->state;
2339 sense = SKD_READL(skdev, FIT_STATUS);
2343 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
2346 skdev->drive_state = state;
2348 switch (skdev->drive_state) {
2350 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
2351 skd_disable_interrupts(skdev);
2354 if (skdev->state == SKD_DRVR_STATE_RESTARTING) {
2355 skd_recover_requests(skdev);
2357 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
2358 skdev->timer_countdown =
2360 skdev->state = SKD_DRVR_STATE_STARTING;
2361 skd_soft_reset(skdev);
2365 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2366 skdev->last_mtd = mtd;
2370 skdev->queue_depth_limit = skdev->soft_queue_depth_limit;
2371 if (skdev->queue_depth_limit > skdev->hard_queue_depth_limit) {
2372 skdev->queue_depth_limit =
2373 skdev->hard_queue_depth_limit;
2376 skdev->queue_depth_lowat = skdev->queue_depth_limit * 2 / 3 + 1;
2377 if (skdev->queue_depth_lowat < 1)
2378 skdev->queue_depth_lowat = 1;
2382 skdev->queue_depth_limit,
2383 skdev->hard_queue_depth_limit,
2384 skdev->soft_queue_depth_limit,
2385 skdev->queue_depth_lowat);
2387 skd_refresh_device_data(skdev);
2390 skdev->state = SKD_DRVR_STATE_BUSY;
2391 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2392 (void) skd_quiesce_dev(skdev);
2395 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
2396 skd_start(skdev);
2399 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
2400 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2403 skdev->state = SKD_DRVR_STATE_IDLE;
2406 skdev->state = SKD_DRVR_STATE_RESTARTING;
2408 switch (skdev->state) {
2413 skdev->state = SKD_DRVR_STATE_RESTARTING;
2419 "ISR FIT_SR_DRIVE_FW_BOOTING %s", skdev->name);
2420 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
2421 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_WAIT_BOOT_TO);
2430 skd_drive_fault(skdev);
2431 skd_recover_requests(skdev);
2432 skd_start(skdev);
2436 skd_drive_disappeared(skdev);
2437 skd_recover_requests(skdev);
2438 skd_start(skdev);
2449 skd_skdev_state_to_str(skdev->state), skdev->state);
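
On the transition to ONLINE the driver settles its queue limits: the configured soft limit is clamped to the hard limit the firmware reported via GET_CMDQ_DEPTH, and the low-water mark at which the wait queue is restarted is roughly two thirds of the limit, floored at 1. The arithmetic from the fragments above, wrapped as a hypothetical helper:

    static void
    skd_set_queue_limits(struct skd_device *skdev)
    {
            skdev->queue_depth_limit = skdev->soft_queue_depth_limit;
            if (skdev->queue_depth_limit > skdev->hard_queue_depth_limit)
                    skdev->queue_depth_limit =
                        skdev->hard_queue_depth_limit;

            /* resume issuing once busy count drops below ~2/3 of limit */
            skdev->queue_depth_lowat = skdev->queue_depth_limit * 2 / 3 + 1;
            if (skdev->queue_depth_lowat < 1)
                    skdev->queue_depth_lowat = 1;
    }
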
2456 * Inputs: skdev - device state structure.
2462 skd_recover_requests(struct skd_device *skdev)
2466 ASSERT(INTR_LOCK_HELD(skdev));
2468 for (i = 0; i < skdev->num_req_context; i++) {
2469 struct skd_request_context *skreq = &skdev->skreq_table[i];
2472 skd_log_skreq(skdev, skreq, "requeue");
2477 skd_blkdev_postop_sg_list(skdev, skreq);
2479 skd_end_request(skdev, skreq, EAGAIN);
2490 WAITQ_LOCK(skdev);
2491 skdev->skreq_free_list = skdev->skreq_table;
2492 WAITQ_UNLOCK(skdev);
2494 for (i = 0; i < skdev->num_fitmsg_context; i++) {
2495 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
2498 skd_log_skmsg(skdev, skmsg, "salvaged");
2508 WAITQ_LOCK(skdev);
2509 skdev->skmsg_free_list = skdev->skmsg_table;
2510 WAITQ_UNLOCK(skdev);
2513 skdev->timeout_slot[i] = 0;
2515 skdev->queue_depth_busy = 0;
2522 * Inputs: skdev - device state structure.
2528 skd_isr_msg_from_dev(struct skd_device *skdev)
2535 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
2537 Dcmn_err(CE_NOTE, "mfd=0x%x last_mtd=0x%x\n", mfd, skdev->last_mtd);
2542 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) {
2548 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
2550 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
2552 skdev->name);
2554 skdev->name, skdev->proto_ver,
2557 skdev->name);
2558 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
2559 skd_soft_reset(skdev);
2563 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2564 skdev->last_mtd = mtd;
2568 skdev->hard_queue_depth_limit = FIT_MXD_DATA(mfd);
2571 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2572 skdev->last_mtd = mtd;
2576 SKD_WRITEQ(skdev, skdev->cq_dma_address.cookies->dmac_laddress,
2579 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2580 skdev->last_mtd = mtd;
2584 skd_reset_skcomp(skdev);
2586 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2587 skdev->last_mtd = mtd;
2591 skdev->last_mtd = 0;
2608 * Inputs: skdev - device state structure.
2614 skd_disable_interrupts(struct skd_device *skdev)
2620 sense = SKD_READL(skdev, FIT_CONTROL);
2622 SKD_WRITEL(skdev, sense, FIT_CONTROL);
2630 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
2638 * Inputs: skdev - device state structure.
2644 skd_enable_interrupts(struct skd_device *skdev)
2659 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
2663 val = SKD_READL(skdev, FIT_CONTROL);
2668 SKD_WRITEL(skdev, val, FIT_CONTROL);
2675 * Inputs: skdev - device state structure.
2681 skd_soft_reset(struct skd_device *skdev)
2687 val = SKD_READL(skdev, FIT_CONTROL);
2692 SKD_WRITEL(skdev, val, FIT_CONTROL);
2699 * Inputs: skdev - device state structure.
2705 skd_start_device(struct skd_device *skdev)
2713 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2715 state = SKD_READL(skdev, FIT_STATUS);
2720 skdev->drive_state = state;
2721 skdev->last_mtd = 0;
2723 skdev->state = SKD_DRVR_STATE_STARTING;
2724 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_STARTING_TO);
2726 skd_enable_interrupts(skdev);
2728 switch (skdev->drive_state) {
2731 skd_name(skdev));
2735 Dcmn_err(CE_NOTE, "FIT_SR_DRIVE_FW_BOOTING %s\n", skdev->name);
2736 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
2737 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_WAIT_BOOT_TO);
2742 skd_name(skdev));
2743 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
2744 skdev->timer_countdown = SKD_TIMER_SECONDS(60);
2749 skd_name(skdev));
2750 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
2751 skdev->timer_countdown = SKD_TIMER_SECONDS(60);
2756 skd_soft_reset(skdev);
2762 skd_name(skdev));
2763 skdev->state = SKD_DRVR_STATE_BUSY;
2764 skdev->timer_countdown = SKD_TIMER_SECONDS(60);
2769 skd_name(skdev));
2778 skd_drive_fault(skdev);
2784 skd_drive_disappeared(skdev);
2791 skd_name(skdev), skdev->drive_state);
2795 state = SKD_READL(skdev, FIT_CONTROL);
2798 state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2801 state = SKD_READL(skdev, FIT_INT_MASK_HOST);
2804 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
2807 state = SKD_READL(skdev, FIT_HW_VERSION);
2812 Dcmn_err(CE_NOTE, "Starting %s queue\n", skdev->name);
2813 skd_start(skdev);
2814 skdev->gendisk_on = -1;
2815 cv_signal(&skdev->cv_waitq);
2823 * Inputs: skdev - device state structure.
2829 skd_restart_device(struct skd_device *skdev)
2836 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2838 state = SKD_READL(skdev, FIT_STATUS);
2843 skdev->drive_state = state;
2844 skdev->last_mtd = 0;
2846 skdev->state = SKD_DRVR_STATE_RESTARTING;
2847 skdev->timer_countdown = SKD_TIMER_MINUTES(4);
2849 skd_soft_reset(skdev);
2856 * Inputs: skdev - device state structure.
2862 skd_stop_device(struct skd_device *skdev)
2866 struct skd_special_context *skspcl = &skdev->internal_skspcl;
2868 if (SKD_DRVR_STATE_ONLINE != skdev->state) {
2870 skdev->name);
2876 skdev->name);
2880 skdev->state = SKD_DRVR_STATE_SYNCING;
2881 skdev->sync_done = 0;
2883 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
2886 mutex_enter(&skdev->skd_internalio_mutex);
2887 while (skdev->sync_done == 0) {
2890 if (cv_timedwait(&skdev->cv_waitq,
2891 &skdev->skd_internalio_mutex, tmo) == -1) {
2898 mutex_exit(&skdev->skd_internalio_mutex);
2900 switch (skdev->sync_done) {
2903 skdev->name);
2907 skdev->name);
2911 skdev->name);
2916 skdev->state = SKD_DRVR_STATE_STOPPING;
2918 skd_disable_interrupts(skdev);
2921 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2923 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
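
skd_stop_device() flushes the drive cache before reset: it issues an internal SYNCHRONIZE CACHE and then sleeps on cv_waitq under skd_internalio_mutex until the completion path sets sync_done (1 for SAM_STAT_GOOD, -1 otherwise) or cv_timedwait() returns -1 on timeout. The wait loop, sketched (the five-second deadline is an assumption; the listing does not show how tmo is computed):

    #include <sys/ksynch.h>
    #include <sys/sunddi.h>

    static int
    skd_wait_sync(skd_device_t *skdev)
    {
            clock_t tmo;

            mutex_enter(&skdev->skd_internalio_mutex);
            while (skdev->sync_done == 0) {
                    /* absolute deadline: now + 5 seconds (assumed bound) */
                    tmo = ddi_get_lbolt() + drv_usectohz(5 * 1000000);
                    if (cv_timedwait(&skdev->cv_waitq,
                        &skdev->skd_internalio_mutex, tmo) == -1)
                            break;  /* timed out with no completion */
            }
            mutex_exit(&skdev->skd_internalio_mutex);

            return (skdev->sync_done);
    }
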
2942 * Inputs: skdev - device state structure.
2951 skd_construct(skd_device_t *skdev, int instance)
2955 skdev->state = SKD_DRVR_STATE_LOAD;
2956 skdev->irq_type = skd_isr_type;
2957 skdev->soft_queue_depth_limit = skd_max_queue_depth;
2958 skdev->hard_queue_depth_limit = 10; /* until GET_CMDQ_DEPTH */
2960 skdev->num_req_context = skd_max_queue_depth;
2961 skdev->num_fitmsg_context = skd_max_queue_depth;
2963 skdev->queue_depth_limit = skdev->hard_queue_depth_limit;
2964 skdev->queue_depth_lowat = 1;
2965 skdev->proto_ver = 99; /* initialize to invalid value */
2966 skdev->sgs_per_request = skd_sgs_per_request;
2967 skdev->dbg_level = skd_dbg_level;
2969 rc = skd_cons_skcomp(skdev);
2974 rc = skd_cons_skmsg(skdev);
2979 rc = skd_cons_skreq(skdev);
2984 rc = skd_cons_sksb(skdev);
2995 skd_destruct(skdev);
3004 * Inputs: skdev - device state structure.
3011 skd_free_phys(skd_device_t *skdev, dma_mem_t *mem)
3013 _NOTE(ARGUNUSED(skdev));
3034 * Inputs: skdev - device state structure.
3046 skd_alloc_dma_mem(skd_device_t *skdev, dma_mem_t *mem, uint8_t atype)
3065 if (ddi_dma_alloc_handle(skdev->dip, &dma_attr, DDI_DMA_SLEEP, NULL,
3101 skd_free_phys(skdev, mem);
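
skd_alloc_dma_mem() is the standard three-step DDI DMA recipe: ddi_dma_alloc_handle() against the device's DMA attributes, ddi_dma_mem_alloc() for consistent memory, then ddi_dma_addr_bind_handle() to obtain the device-visible cookie. A compressed sketch under assumed dma_mem_t field names (dma_attr, acc_attr, size, bp, cookie; the listing only shows cookies->dmac_laddress), with abbreviated error unwinding:

    #include <sys/ddi.h>
    #include <sys/sunddi.h>

    static caddr_t
    skd_alloc_dma_sketch(skd_device_t *skdev, dma_mem_t *mem)
    {
            size_t real_len;
            uint_t ccount;

            if (ddi_dma_alloc_handle(skdev->dip, &mem->dma_attr,
                DDI_DMA_SLEEP, NULL, &mem->dma_handle) != DDI_SUCCESS)
                    return (NULL);

            if (ddi_dma_mem_alloc(mem->dma_handle, mem->size,
                &mem->acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
                &mem->bp, &real_len, &mem->acc_handle) != DDI_SUCCESS) {
                    ddi_dma_free_handle(&mem->dma_handle);
                    return (NULL);
            }

            if (ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
                real_len, DDI_DMA_CONSISTENT | DDI_DMA_RDWR, DDI_DMA_SLEEP,
                NULL, &mem->cookie, &ccount) != DDI_DMA_MAPPED) {
                    ddi_dma_mem_free(&mem->acc_handle);
                    ddi_dma_free_handle(&mem->dma_handle);
                    return (NULL);
            }

            /* kernel virtual address; caller zeroes it before use */
            return (mem->bp);
    }
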
3115 * Inputs: skdev - device state structure.
3121 skd_cons_skcomp(struct skd_device *skdev)
3135 mem = &skdev->cq_dma_address;
3138 dma_alloc = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT);
3150 skdev->skcomp_table = skcomp;
3151 skdev->skerr_table = (struct fit_comp_error_info *)(dma_alloc +
3162 * Inputs: skdev - device state structure.
3168 skd_cons_skmsg(struct skd_device *skdev)
3176 skdev->num_fitmsg_context,
3178 skdev->num_fitmsg_context));
3180 skdev->skmsg_table = (struct skd_fitmsg_context *)kmem_zalloc(
3181 sizeof (struct skd_fitmsg_context) * skdev->num_fitmsg_context,
3184 for (i = 0; i < skdev->num_fitmsg_context; i++) {
3187 skmsg = &skdev->skmsg_table[i];
3196 skmsg->msg_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT);
3212 skdev->skmsg_table[i - 1].next = NULL;
3213 skdev->skmsg_free_list = skdev->skmsg_table;
3222 * Inputs: skdev - device state structure.
3228 skd_cons_skreq(struct skd_device *skdev)
3236 skdev->num_req_context,
3238 skdev->num_req_context));
3240 skdev->skreq_table = (struct skd_request_context *)kmem_zalloc(
3241 sizeof (struct skd_request_context) * skdev->num_req_context,
3244 for (i = 0; i < skdev->num_req_context; i++) {
3247 skreq = &skdev->skreq_table[i];
3252 skreq->sksg_list = skd_cons_sg_list(skdev,
3253 skdev->sgs_per_request,
3265 skdev->skreq_table[i - 1].next = NULL;
3266 skdev->skreq_free_list = skdev->skreq_table;
3277 * Inputs: skdev - device state structure.
3283 skd_cons_sksb(struct skd_device *skdev)
3290 skspcl = &skdev->internal_skspcl;
3301 skspcl->data_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT);
3315 skspcl->msg_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT);
3324 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
3333 if (skd_format_internal_skspcl(skdev) == 0) {
3346 * Inputs: skdev - device state structure.
3354 *skd_cons_sg_list(struct skd_device *skdev,
3367 sg_list = skd_alloc_dma_mem(skdev, mem, ATYPE_32BIT);
3391 static void skd_free_skcomp(struct skd_device *skdev);
3392 static void skd_free_skmsg(struct skd_device *skdev);
3393 static void skd_free_skreq(struct skd_device *skdev);
3394 static void skd_free_sksb(struct skd_device *skdev);
3396 static void skd_free_sg_list(struct skd_device *skdev,
3405 * Inputs: skdev - device state structure.
3411 skd_destruct(struct skd_device *skdev)
3413 if (skdev == NULL) {
3418 skd_free_sksb(skdev);
3421 skd_free_skreq(skdev);
3424 skd_free_skmsg(skdev);
3427 skd_free_skcomp(skdev);
3436 * Inputs: skdev - device state structure.
3442 skd_free_skcomp(struct skd_device *skdev)
3444 if (skdev->skcomp_table != NULL) {
3445 skd_free_phys(skdev, &skdev->cq_dma_address);
3448 skdev->skcomp_table = NULL;
3455 * Inputs: skdev - device state structure.
3461 skd_free_skmsg(struct skd_device *skdev)
3465 if (NULL == skdev->skmsg_table)
3468 for (i = 0; i < skdev->num_fitmsg_context; i++) {
3471 skmsg = &skdev->skmsg_table[i];
3474 skd_free_phys(skdev, &skmsg->mb_dma_address);
3481 kmem_free(skdev->skmsg_table, sizeof (struct skd_fitmsg_context) *
3482 skdev->num_fitmsg_context);
3484 skdev->skmsg_table = NULL;
3492 * Inputs: skdev - device state structure.
3498 skd_free_skreq(struct skd_device *skdev)
3502 if (NULL == skdev->skreq_table)
3505 for (i = 0; i < skdev->num_req_context; i++) {
3508 skreq = &skdev->skreq_table[i];
3510 skd_free_sg_list(skdev, skreq->sksg_list,
3511 skdev->sgs_per_request, skreq->sksg_dma_address);
3516 kmem_free(skdev->skreq_table, sizeof (struct skd_request_context) *
3517 skdev->num_req_context);
3519 skdev->skreq_table = NULL;
3528 * Inputs: skdev - device state structure.
3534 skd_free_sksb(struct skd_device *skdev)
3538 skspcl = &skdev->internal_skspcl;
3541 skd_free_phys(skdev, &skspcl->db_dma_address);
3547 skd_free_phys(skdev, &skspcl->mb_dma_address);
3552 skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
3562 * Inputs: skdev - device state structure.
3572 skd_free_sg_list(struct skd_device *skdev,
3577 skd_free_phys(skdev, &dma_addr);
3585 * Inputs: skdev - device state structure.
3592 skd_queue(skd_device_t *skdev, skd_buf_private_t *pbuf)
3596 ASSERT(skdev != NULL);
3599 ASSERT(WAITQ_LOCK_HELD(skdev));
3601 waitq = &skdev->waitqueue;
3613 * Inputs: skdev - device state structure.
3621 skd_list_skreq(skd_device_t *skdev, int list)
3629 skreq = &skdev->skreq_table[0];
3645 skreq = skdev->skreq_free_list;
3666 * Inputs: skdev - device state structure.
3673 skd_list_skmsg(skd_device_t *skdev, int list)
3678 skmsgp = &skdev->skmsg_table[0];
3698 skmsgp = skdev->skmsg_free_list;
3718 * Inputs: skdev - device state structure.
3725 *skd_get_queued_pbuf(skd_device_t *skdev)
3729 ASSERT(WAITQ_LOCK_HELD(skdev));
3730 pbuf = SIMPLEQ_FIRST(&skdev->waitqueue);
3732 SIMPLEQ_REMOVE_HEAD(&skdev->waitqueue, sq);
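
The wait queue is a NetBSD-style SIMPLEQ of pbufs: skd_queue() appends at the tail and skd_get_queued_pbuf() pops the head, both under the waitqueue lock. The paired operations, using the same macros and the sq linkage field shown in the listing:

    static void
    skd_queue(skd_device_t *skdev, skd_buf_private_t *pbuf)
    {
            ASSERT(WAITQ_LOCK_HELD(skdev));
            SIMPLEQ_INSERT_TAIL(&skdev->waitqueue, pbuf, sq);
    }

    static skd_buf_private_t *
    skd_get_queued_pbuf(skd_device_t *skdev)
    {
            skd_buf_private_t *pbuf;

            ASSERT(WAITQ_LOCK_HELD(skdev));
            pbuf = SIMPLEQ_FIRST(&skdev->waitqueue);
            if (pbuf != NULL)
                    SIMPLEQ_REMOVE_HEAD(&skdev->waitqueue, sq);
            return (pbuf);
    }
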
3744 * Inputs: skdev - device state structure.
3750 skd_pci_info(struct skd_device *skdev, char *str, size_t len)
3756 pcie_reg = skd_pci_find_capability(skdev, PCI_CAP_ID_EXP);
3762 lstat = pci_config_get16(skdev->pci_handle, pcie_reg);
3783 * Inputs: skdev - device state structure.
3790 skd_init(skd_device_t *skdev)
3954 * Inputs: skdev - device state structure.
3961 skd_log_skdev(struct skd_device *skdev, const char *event)
3963 Dcmn_err(CE_NOTE, "log_skdev(%s) skdev=%p event='%s'",
3964 skdev->name, (void *)skdev, event);
3966 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
3967 skd_skdev_state_to_str(skdev->state), skdev->state);
3969 skdev->queue_depth_busy, skdev->queue_depth_limit,
3970 skdev->soft_queue_depth_limit, skdev->hard_queue_depth_limit,
3971 skdev->queue_depth_lowat);
3973 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
3980 * Inputs: skdev - device state structure.
3988 skd_log_skmsg(struct skd_device *skdev,
3992 skdev->name, (void *)skmsg, event);
4002 * Inputs: skdev - device state structure.
4010 skd_log_skreq(struct skd_device *skdev,
4016 skdev->name, (void *)skreq, (void *)skreq->pbuf, event);
4033 skdev->intr_cntr, skdev->queue_depth_busy);
4043 * Inputs: skdev - device state structure.
4049 skd_init_mutex(skd_device_t *skdev)
4054 skdev->instance, skdev->flags);
4056 intr = (void *)(uintptr_t)skdev->intr_pri;
4058 if (skdev->flags & SKD_MUTEX_INITED)
4062 mutex_init(&skdev->skd_lock_mutex, NULL, MUTEX_DRIVER,
4064 mutex_init(&skdev->skd_intr_mutex, NULL, MUTEX_DRIVER,
4066 mutex_init(&skdev->waitqueue_mutex, NULL, MUTEX_DRIVER,
4068 mutex_init(&skdev->skd_internalio_mutex, NULL, MUTEX_DRIVER,
4071 cv_init(&skdev->cv_waitq, NULL, CV_DRIVER, NULL);
4073 skdev->flags |= SKD_MUTEX_INITED;
4074 if (skdev->flags & SKD_MUTEX_DESTROYED)
4075 skdev->flags &= ~SKD_MUTEX_DESTROYED;
4078 skdev->instance, skdev->flags);
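
skd_init_mutex() must run after interrupt setup because every mutex shared with the ISR has to be initialized at the interrupt's priority: the pri from ddi_intr_get_pri() is passed, cast through a pointer, as the fourth mutex_init() argument. The essential pattern, reassembled from the fragments (flag maintenance trimmed):

    #include <sys/ksynch.h>
    #include <sys/sunddi.h>

    static int
    skd_init_mutex(skd_device_t *skdev)
    {
            /* priority obtained earlier via ddi_intr_get_pri() */
            void *intr = (void *)(uintptr_t)skdev->intr_pri;

            mutex_init(&skdev->skd_lock_mutex, NULL, MUTEX_DRIVER, intr);
            mutex_init(&skdev->skd_intr_mutex, NULL, MUTEX_DRIVER, intr);
            mutex_init(&skdev->waitqueue_mutex, NULL, MUTEX_DRIVER, intr);
            mutex_init(&skdev->skd_internalio_mutex, NULL, MUTEX_DRIVER,
                intr);

            cv_init(&skdev->cv_waitq, NULL, CV_DRIVER, NULL);

            skdev->flags |= SKD_MUTEX_INITED;
            return (DDI_SUCCESS);
    }
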
4087 * Inputs: skdev - device state structure.
4093 skd_destroy_mutex(skd_device_t *skdev)
4095 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) {
4096 if (skdev->flags & SKD_MUTEX_INITED) {
4097 mutex_destroy(&skdev->waitqueue_mutex);
4098 mutex_destroy(&skdev->skd_intr_mutex);
4099 mutex_destroy(&skdev->skd_lock_mutex);
4100 mutex_destroy(&skdev->skd_internalio_mutex);
4102 cv_destroy(&skdev->cv_waitq);
4104 skdev->flags |= SKD_MUTEX_DESTROYED;
4106 if (skdev->flags & SKD_MUTEX_INITED)
4107 skdev->flags &= ~SKD_MUTEX_INITED;
4116 * Inputs: skdev - device state structure.
4123 skd_setup_intr(skd_device_t *skdev, int intr_type)
4131 Dcmn_err(CE_CONT, "(%s%d): setup_intr", DRV_NAME, skdev->instance);
4134 if (((ret = ddi_intr_get_nintrs(skdev->dip, intr_type, &count)) !=
4143 if (((ret = ddi_intr_get_navail(skdev->dip, intr_type, &avail)) !=
4160 skdev->hsize = sizeof (ddi_intr_handle_t) * avail;
4161 skdev->htable = kmem_zalloc(skdev->hsize, KM_SLEEP);
4164 if ((ret = ddi_intr_alloc(skdev->dip, skdev->htable, intr_type,
4169 skd_release_intr(skdev);
4174 skdev->intr_cnt = actual;
4177 (void) ddi_intr_set_pri(skdev->htable[0], 10);
4180 if ((ret = ddi_intr_get_pri(skdev->htable[0], &skdev->intr_pri)) !=
4183 skd_release_intr(skdev);
4190 if ((ret = ddi_intr_add_handler(skdev->htable[i],
4191 skd_isr_aif, (void *)skdev, (void *)((ulong_t)i))) !=
4195 skd_release_intr(skdev);
4202 if ((ret = skd_init_mutex(skdev)) != DDI_SUCCESS) {
4204 skd_release_intr(skdev);
4210 (void) ddi_intr_get_cap(skdev->htable[0], &skdev->intr_cap);
4213 if (skdev->intr_cap & DDI_INTR_FLAG_BLOCK) {
4214 if ((ret = ddi_intr_block_enable(skdev->htable,
4215 skdev->intr_cnt)) != DDI_SUCCESS) {
4218 skd_destroy_mutex(skdev);
4219 skd_release_intr(skdev);
4224 for (i = 0; i < skdev->intr_cnt; i++) {
4225 if ((ret = ddi_intr_enable(skdev->htable[i])) !=
4229 skd_destroy_mutex(skdev);
4230 skd_release_intr(skdev);
4238 (void) ddi_intr_clr_mask(skdev->htable[0]);
4240 skdev->irq_type = intr_type;
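
skd_setup_intr() follows the canonical ddi_intr sequence: query vector counts, allocate handles, fetch the priority (which must precede mutex initialization), add handlers, then enable either as a block (DDI_INTR_FLAG_BLOCK) or one vector at a time. A condensed single-vector sketch with abbreviated error unwinding (the helper name is hypothetical; the count queries are skipped):

    #include <sys/ddi_intr.h>
    #include <sys/kmem.h>

    static int
    skd_setup_one_intr(skd_device_t *skdev, int intr_type)
    {
            int actual = 0;

            skdev->hsize = sizeof (ddi_intr_handle_t);
            skdev->htable = kmem_zalloc(skdev->hsize, KM_SLEEP);

            if (ddi_intr_alloc(skdev->dip, skdev->htable, intr_type, 0, 1,
                &actual, DDI_INTR_ALLOC_NORMAL) != DDI_SUCCESS || actual < 1)
                    return (DDI_FAILURE);
            skdev->intr_cnt = actual;

            /* this pri feeds mutex_init() for the ISR-shared locks */
            if (ddi_intr_get_pri(skdev->htable[0], &skdev->intr_pri) !=
                DDI_SUCCESS)
                    return (DDI_FAILURE);

            if (ddi_intr_add_handler(skdev->htable[0], skd_isr_aif,
                (void *)skdev, NULL) != DDI_SUCCESS)
                    return (DDI_FAILURE);

            (void) ddi_intr_get_cap(skdev->htable[0], &skdev->intr_cap);
            if (skdev->intr_cap & DDI_INTR_FLAG_BLOCK)
                    return (ddi_intr_block_enable(skdev->htable, 1));
            return (ddi_intr_enable(skdev->htable[0]));
    }
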
4249 * Inputs: skdev - device state structure.
4255 skd_disable_intr(skd_device_t *skdev)
4259 if (skdev->intr_cap & DDI_INTR_FLAG_BLOCK) {
4261 if ((rval = ddi_intr_block_disable(skdev->htable,
4262 skdev->intr_cnt)) != DDI_SUCCESS) {
4268 for (i = 0; i < skdev->intr_cnt; i++) {
4269 if ((rval = ddi_intr_disable(skdev->htable[i])) !=
4282 * Inputs: skdev - device state structure.
4288 skd_release_intr(skd_device_t *skdev)
4294 Dcmn_err(CE_CONT, "REL_INTR intr_cnt=%d", skdev->intr_cnt);
4296 if (skdev->irq_type == 0) {
4298 DRV_NAME, skdev->instance);
4302 if (skdev->htable != NULL && skdev->hsize > 0) {
4303 i = (int32_t)skdev->hsize / (int32_t)sizeof (ddi_intr_handle_t);
4306 if (skdev->htable[i] == 0) {
4311 if ((rval = ddi_intr_disable(skdev->htable[i])) !=
4316 if (i < skdev->intr_cnt) {
4318 skdev->htable[i])) != DDI_SUCCESS)
4327 if ((rval = ddi_intr_free(skdev->htable[i])) !=
4335 kmem_free(skdev->htable, skdev->hsize);
4336 skdev->htable = NULL;
4339 skdev->hsize = 0;
4340 skdev->intr_cnt = 0;
4341 skdev->intr_pri = 0;
4342 skdev->intr_cap = 0;
4343 skdev->irq_type = 0;
4352 * skdev - device state structure.
4361 skd_dealloc_resources(dev_info_t *dip, skd_device_t *skdev,
4365 if (skdev == NULL)
4369 skd_destruct(skdev);
4372 skd_disable_intr(skdev);
4373 skd_release_intr(skdev);
4377 ddi_regs_map_free(&skdev->dev_handle);
4380 ddi_regs_map_free(&skdev->iomap_handle);
4383 ddi_regs_map_free(&skdev->iobase_handle);
4386 pci_config_teardown(&skdev->pci_handle);
4389 if (skdev->pathname &&
4390 (skdev->flags & SKD_PATHNAME_ALLOCED)) {
4391 kmem_free(skdev->pathname,
4392 strlen(skdev->pathname)+1);
4396 if (skdev->s1120_devid)
4397 ddi_devid_free(skdev->s1120_devid);
4405 * Inputs: skdev - device state structure.
4411 skd_setup_interrupts(skd_device_t *skdev)
4420 if ((i = ddi_intr_get_supported_types(skdev->dip, &itypes)) !=
4427 skdev->name, itypes);
4429 itypes &= skdev->irq_type;
4432 (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_MSIX)) == DDI_SUCCESS) {
4434 skdev->name);
4436 (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_MSI)) == DDI_SUCCESS) {
4438 skdev->name);
4440 (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_FIXED))
4443 skdev->name);
4446 skdev->name);
4450 Dcmn_err(CE_CONT, "%s: setup interrupts done", skdev->name);
4459 * Inputs: skdev - device state structure.
4467 skd_get_properties(dev_info_t *dip, skd_device_t *skdev)
4500 * Inputs: skdev - device state structure.
4506 skd_wait_for_s1120(skd_device_t *skdev)
4512 mutex_enter(&skdev->skd_internalio_mutex);
4514 while (skdev->gendisk_on == 0) {
4517 if (cv_timedwait(&skdev->cv_waitq,
4518 &skdev->skd_internalio_mutex, tmo) == -1) {
4525 mutex_exit(&skdev->skd_internalio_mutex);
4527 if (skdev->gendisk_on == 1)
4537 * Inputs: skdev - device state structure.
4544 skd_update_props(skd_device_t *skdev, dev_info_t *dip)
4549 skdev->Nblocks) != DDI_SUCCESS) ||
4553 skdev->name);
4561 * Inputs: skdev - device state structure.
4568 skd_setup_devid(skd_device_t *skdev, ddi_devid_t *devid)
4572 sz_model = scsi_ascii_inquiry_len(skdev->inq_product_id,
4573 strlen(skdev->inq_product_id));
4574 sz_sn = scsi_ascii_inquiry_len(skdev->inq_serial_num,
4575 strlen(skdev->inq_serial_num));
4578 (void) snprintf(skdev->devid_str, sizeof (skdev->devid_str),
4579 "%.*s=%.*s", sz_model, skdev->inq_product_id, sz_sn,
4580 skdev->inq_serial_num);
4581 rc = ddi_devid_init(skdev->dip, DEVID_SCSI_SERIAL, sz,
4582 skdev->devid_str, devid);
4585 cmn_err(CE_WARN, "!%s: devid_init FAILED", skdev->name);
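
skd_setup_devid() synthesizes a DEVID_SCSI_SERIAL identifier from INQUIRY data: the product id and serial number are each trimmed to their valid ASCII length and joined as "model=serial" before being handed to ddi_devid_init(). Sketched below; the sz computation is an assumption, since the listing does not show it:

    #include <sys/sunddi.h>
    #include <sys/systm.h>

    static int
    skd_setup_devid_sketch(skd_device_t *skdev, ddi_devid_t *devid)
    {
            int sz_model, sz_sn, sz;

            sz_model = scsi_ascii_inquiry_len(skdev->inq_product_id,
                strlen(skdev->inq_product_id));
            sz_sn = scsi_ascii_inquiry_len(skdev->inq_serial_num,
                strlen(skdev->inq_serial_num));
            sz = sz_model + 1 + sz_sn + 1;  /* "model=serial" + NUL, assumed */

            (void) snprintf(skdev->devid_str, sizeof (skdev->devid_str),
                "%.*s=%.*s", sz_model, skdev->inq_product_id,
                sz_sn, skdev->inq_serial_num);

            return (ddi_devid_init(skdev->dip, DEVID_SCSI_SERIAL, sz,
                skdev->devid_str, devid));
    }
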
4595 * Inputs: skdev - device state structure.
4602 skd_bd_attach(dev_info_t *dip, skd_device_t *skdev)
4606 skdev->s_bdh = bd_alloc_handle(skdev, &skd_bd_ops,
4609 if (skdev->s_bdh == NULL) {
4615 rv = bd_attach_handle(dip, skdev->s_bdh);
4621 skdev->bd_attached++;
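
The disk surfaces through the generic blkdev(4D) framework rather than driver-private cb_ops: bd_alloc_handle() ties the soft state to the skd_bd_ops vector and a DMA attribute structure, and bd_attach_handle() instantiates the child disk node. The shape of it (the DMA attribute name here is hypothetical; the real one lives elsewhere in the driver):

    #include <sys/blkdev.h>

    static int
    skd_bd_attach(dev_info_t *dip, skd_device_t *skdev)
    {
            int rv;

            skdev->s_bdh = bd_alloc_handle(skdev, &skd_bd_ops,
                &skd_bd_dma_attr /* assumed name */, KM_SLEEP);
            if (skdev->s_bdh == NULL)
                    return (DDI_FAILURE);

            rv = bd_attach_handle(dip, skdev->s_bdh);
            if (rv != DDI_SUCCESS)
                    bd_free_handle(skdev->s_bdh);
            else
                    skdev->bd_attached++;

            return (rv);
    }
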
4631 * Inputs: skdev - device state structure.
4637 skd_bd_detach(skd_device_t *skdev)
4639 if (skdev->bd_attached)
4640 (void) bd_detach_handle(skdev->s_bdh);
4642 bd_free_handle(skdev->s_bdh);
4660 skd_device_t *skdev = NULL;
4679 skd_start_timer(skdev);
4720 skdev = ddi_get_soft_state(skd_state, instance);
4721 if (skdev == NULL) {
4727 (void) snprintf(skdev->name, sizeof (skdev->name),
4730 skdev->dip = dip;
4731 skdev->instance = instance;
4733 ddi_set_driver_private(dip, skdev);
4746 skdev->pathname = kmem_zalloc(strlen(name) + 1, KM_SLEEP);
4747 (void) strlcpy(skdev->pathname, name, strlen(name) + 1);
4750 skdev->flags |= SKD_PATHNAME_ALLOCED;
4752 if (pci_config_setup(dip, &skdev->pci_handle) != DDI_SUCCESS) {
4770 ddi_regs_map_setup(dip, 1, &skdev->iobase, 0, regsize,
4771 &dev_acc_attr, &skdev->iobase_handle) != DDI_SUCCESS) {
4778 skdev->iomap_iobase = skdev->iobase;
4779 skdev->iomap_handle = skdev->iobase_handle;
4782 "regsize=%ld", skdev->name, (void *)skdev->iobase,
4783 (void *)skdev->iomap_iobase, 1, regsize);
4786 ddi_regs_map_setup(dip, 2, &skdev->dev_iobase, 0, regsize,
4787 &dev_acc_attr, &skdev->dev_handle) != DDI_SUCCESS) {
4794 skdev->dev_memsize = (int)regsize;
4797 skdev->name, (void *)skdev->dev_iobase,
4798 skdev->dev_memsize);
4802 cmd_reg = pci_config_get16(skdev->pci_handle, PCI_CONF_COMM);
4805 pci_config_put16(skdev->pci_handle, PCI_CONF_COMM, cmd_reg);
4808 skdev->vendor_id = pci_config_get16(skdev->pci_handle, PCI_CONF_VENID);
4809 skdev->device_id = pci_config_get16(skdev->pci_handle, PCI_CONF_DEVID);
4812 skdev->name, skdev->vendor_id, skdev->device_id);
4814 skd_get_properties(dip, skdev);
4816 (void) skd_init(skdev);
4818 if (skd_construct(skdev, instance)) {
4819 cmn_err(CE_WARN, "!%s: construct FAILED", skdev->name);
4826 SIMPLEQ_INIT(&skdev->waitqueue);
4831 if (skd_setup_interrupts(skdev) != DDI_SUCCESS) {
4833 skdev->name);
4839 ADAPTER_STATE_LOCK(skdev);
4840 skdev->flags |= SKD_ATTACHED;
4841 ADAPTER_STATE_UNLOCK(skdev);
4843 skdev->d_blkshift = 9;
4847 skd_start_device(skdev);
4849 ADAPTER_STATE_LOCK(skdev);
4850 skdev->progress = progress;
4851 ADAPTER_STATE_UNLOCK(skdev);
4857 if (skdev->gendisk_on != 1)
4858 (void) skd_wait_for_s1120(skdev);
4860 if (skdev->gendisk_on != 1) {
4862 skdev->name);
4868 skd_send_internal_skspcl(skdev, &skdev->internal_skspcl, INQUIRY);
4870 skdev->disks_initialized++;
4873 (void) skd_pci_info(skdev, pci_str, sizeof (pci_str));
4878 skdev->vendor_id, skdev->device_id, pci_str);
4880 Dcmn_err(CE_NOTE, " sTec S1120 %s\n", skdev->pathname);
4882 if (*skdev->inq_serial_num)
4884 skdev->inq_serial_num);
4886 if (*skdev->inq_product_id &&
4887 *skdev->inq_product_rev)
4889 skdev->inq_product_id, skdev->inq_product_rev);
4892 skdev->name, skdev->irq_type);
4894 skdev->name, skd_max_queue_depth);
4896 skdev->name, skd_sgs_per_request);
4898 skdev->name, skd_max_req_per_msg);
4900 if (skd_bd_attach(dip, skdev) == DDI_FAILURE)
4903 skd_update_props(skdev, dip);
4906 skd_start_timer(skdev);
4908 ADAPTER_STATE_LOCK(skdev);
4909 skdev->progress = progress;
4910 ADAPTER_STATE_UNLOCK(skdev);
4912 skdev->attached = 1;
4916 skd_dealloc_resources(dip, skdev, progress, instance);
4918 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) {
4919 skd_destroy_mutex(skdev);
4932 * Inputs: skdev - device state structure.
4938 skd_halt(skd_device_t *skdev)
4940 Dcmn_err(CE_NOTE, "%s: halt/suspend ......", skdev->name);
4956 skd_device_t *skdev;
4964 skdev = ddi_get_soft_state(skd_state, instance);
4965 if (skdev == NULL) {
4976 ADAPTER_STATE_LOCK(skdev);
4979 skdev->flags |= (SKD_SUSPENDED | SKD_CMD_ABORT_TMO);
4982 if (skdev->skd_timer_timeout_id != 0) {
4983 timer_id = skdev->skd_timer_timeout_id;
4984 skdev->skd_timer_timeout_id = 0;
4986 ADAPTER_STATE_UNLOCK(skdev);
4993 if (skdev->power_level != LOW_POWER_LEVEL) {
4994 skd_halt(skdev);
4995 skdev->power_level = LOW_POWER_LEVEL;
4998 skspcl = &skdev->internal_skspcl;
4999 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
5001 skd_stop_device(skdev);
5006 while (!SIMPLEQ_EMPTY(&skdev->waitqueue)) {
5007 pbuf = skd_get_queued_pbuf(skdev);
5008 skd_end_request_abnormal(skdev, pbuf, ECANCELED,
5017 skd_bd_detach(skdev);
5019 skd_dealloc_resources(dip, skdev, skdev->progress, instance);
5021 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) {
5022 skd_destroy_mutex(skdev);
5034 ADAPTER_STATE_LOCK(skdev);
5035 skdev->flags |= SKD_SUSPENDED;
5038 if (skdev->skd_timer_timeout_id != 0) {
5039 timer_id = skdev->skd_timer_timeout_id;
5040 skdev->skd_timer_timeout_id = 0;
5042 ADAPTER_STATE_UNLOCK(skdev);
5050 skd_halt(skdev);
5086 skd_device_t *skdev = arg;
5088 (void) skd_setup_devid(skdev, devid);
5106 skd_device_t *skdev = arg;
5108 drive->d_qsize = (skdev->queue_depth_limit * 4) / 5;
5115 if (skdev->inquiry_is_valid != 0) {
5116 drive->d_vendor = skdev->inq_vendor_id;
5119 drive->d_product = skdev->inq_product_id;
5122 drive->d_serial = skdev->inq_serial_num;
5125 drive->d_revision = skdev->inq_product_rev;
5143 skd_device_t *skdev = arg;
5145 media->m_nblks = skdev->Nblocks;
5158 * Inputs: skdev - device state structure.
5168 skd_rw(skd_device_t *skdev, bd_xfer_t *xfer, int dir)
5179 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
5182 skd_request_fn_not_online(skdev);
5191 WAITQ_LOCK(skdev);
5195 skd_queue(skdev, pbuf);
5196 skdev->ios_queued++;
5197 WAITQ_UNLOCK(skdev);
5199 skd_start(skdev);