Lines Matching refs:nvme (each entry below is a source line number followed by the matching line of the nvme driver)

181 #error nvme driver needs porting for big-endian platforms
438 nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val)
440 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
443 ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val);
447 nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val)
449 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
452 ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val);
456 nvme_get64(nvme_t *nvme, uintptr_t reg)
460 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
463 val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg));
469 nvme_get32(nvme_t *nvme, uintptr_t reg)
473 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
476 val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg));
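The four accessors matched at source lines 438-476 wrap the DDI register routines in alignment ASSERTs: 64-bit registers must sit on an 8-byte boundary, 32-bit registers on a 4-byte one, relative to the mapped register base. A minimal user-space sketch of that check; the base address and register offsets here are illustrative, not taken from a live mapping:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Model of the alignment ASSERTs: a 64-bit register offset must be
     * 8-byte aligned, a 32-bit one 4-byte aligned, relative to the
     * mapped register base. */
    static int
    aligned_ok(uintptr_t base, uintptr_t reg, unsigned width)
    {
            return (((base + reg) & (uintptr_t)(width - 1)) == 0);
    }

    int
    main(void)
    {
            uintptr_t base = 0x1000;        /* hypothetical n_regs mapping */

            assert(aligned_ok(base, 0x28, 8));      /* ASQ, a 64-bit register */
            assert(aligned_ok(base, 0x1c, 4));      /* CSTS, a 32-bit register */
            (void) printf("alignment checks pass\n");
            return (0);
    }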
482 nvme_check_regs_hdl(nvme_t *nvme)
486 ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION);
538 nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma,
541 if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
548 dev_err(nvme->n_dip, CE_PANIC,
557 (void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
564 dev_err(nvme->n_dip, CE_WARN,
566 atomic_inc_32(&nvme->n_dma_bind_err);
575 nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
580 if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) !=
598 nvme_t *nvme = (nvme_t *)private;
603 if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize,
604 DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) {
616 nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
620 ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;
622 len = roundup(len, nvme->n_pagesize);
626 if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
628 dev_err(nvme->n_dip, CE_WARN,
634 dev_err(nvme->n_dip, CE_WARN,
674 nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp,
680 DDI_INTR_PRI(nvme->n_intr_pri));
682 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t),
686 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t),
694 qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx);
695 qp->nq_cqhdbl = NVME_REG_CQHDBL(nvme, idx);
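The NVME_REG_SQTDBL/NVME_REG_CQHDBL macros used at source lines 694-695 presumably expand to the doorbell layout defined by the NVMe register map: queue y's submission tail and completion head doorbells interleave from offset 0x1000 in strides of 4 << CAP.DSTRD bytes (the stride the driver caches at source line 1904). A standalone sketch of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* Doorbells for queue y interleave from offset 0x1000 in strides
     * of 4 << CAP.DSTRD bytes, per the NVMe register map. */
    static uintptr_t
    sq_tail_doorbell(unsigned dstrd, unsigned qid)
    {
            return (0x1000 + (2 * qid) * (4u << dstrd));
    }

    static uintptr_t
    cq_head_doorbell(unsigned dstrd, unsigned qid)
    {
            return (0x1000 + (2 * qid + 1) * (4u << dstrd));
    }

    int
    main(void)
    {
            /* DSTRD == 0: admin SQ doorbell at 0x1000, admin CQ at 0x1004 */
            (void) printf("SQ0TDBL=0x%lx CQ0HDBL=0x%lx\n",
                (unsigned long)sq_tail_doorbell(0, 0),
                (unsigned long)cq_head_doorbell(0, 0));
            return (0);
    }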
711 nvme_alloc_cmd(nvme_t *nvme, int kmflag)
720 cmd->nc_nvme = nvme;
723 DDI_INTR_PRI(nvme->n_intr_pri));
788 nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp)
804 ASSERT(nvme->n_ioq[cqe->cqe_sqid] == qp);
814 ASSERT(cmd->nc_nvme == nvme);
1140 nvme_t *nvme = abort_cmd->nc_nvme;
1141 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1144 sema_p(&nvme->n_abort_sema);
1165 sema_v(&nvme->n_abort_sema);
1166 dev_err(nvme->n_dip, CE_WARN,
1168 atomic_inc_32(&nvme->n_abort_failed);
1171 sema_v(&nvme->n_abort_sema);
1174 dev_err(nvme->n_dip, CE_WARN,
1177 atomic_inc_32(&nvme->n_abort_failed);
1179 atomic_inc_32(&nvme->n_cmd_aborted);
1201 nvme_t *nvme = cmd->nc_nvme;
1224 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
1225 dev_err(nvme->n_dip, CE_WARN, "!command timeout, "
1227 atomic_inc_32(&nvme->n_cmd_timeout);
1230 nvme_check_regs_hdl(nvme) ||
1233 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1234 nvme->n_dead = B_TRUE;
1277 nvme_t *nvme = cmd->nc_nvme;
1315 ret = nvme_submit_cmd(nvme->n_adminq, cmd);
1318 dev_err(nvme->n_dip, CE_WARN,
1320 atomic_inc_32(&nvme->n_async_resubmit_failed);
1328 nvme_get_logpage(nvme, event.b.ae_logpage);
1330 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
1332 atomic_inc_32(&nvme->n_wrong_logpage);
1337 dev_err(nvme->n_dip, CE_PANIC, "programming error: "
1342 dev_err(nvme->n_dip, CE_PANIC, "programming error: "
1347 dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure");
1348 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1349 nvme->n_dead = B_TRUE;
1350 atomic_inc_32(&nvme->n_diagfail_event);
1354 dev_err(nvme->n_dip, CE_WARN, "!persistent internal "
1356 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1357 nvme->n_dead = B_TRUE;
1358 atomic_inc_32(&nvme->n_persistent_event);
1362 dev_err(nvme->n_dip, CE_WARN, "!transient internal "
1365 atomic_inc_32(&nvme->n_transient_event);
1369 dev_err(nvme->n_dip, CE_WARN,
1371 atomic_inc_32(&nvme->n_fw_load_event);
1379 nvme_get_logpage(nvme, event.b.ae_logpage, -1);
1381 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
1383 atomic_inc_32(&nvme->n_wrong_logpage);
1388 dev_err(nvme->n_dip, CE_WARN,
1391 atomic_inc_32(&nvme->n_reliability_event);
1395 dev_err(nvme->n_dip, CE_WARN,
1398 atomic_inc_32(&nvme->n_temperature_event);
1402 dev_err(nvme->n_dip, CE_WARN,
1405 atomic_inc_32(&nvme->n_spare_event);
1411 dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event "
1414 atomic_inc_32(&nvme->n_vendor_event);
1418 dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, "
1421 atomic_inc_32(&nvme->n_unknown_event);
1427 nvme->n_error_log_len);
1463 nvme_async_event(nvme_t *nvme)
1465 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1472 ret = nvme_submit_cmd(nvme->n_adminq, cmd);
1475 dev_err(nvme->n_dip, CE_WARN,
1485 nvme_get_logpage(nvme_t *nvme, uint8_t logpage, ...)
1487 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1504 bufsize = nvme->n_error_log_len *
1519 dev_err(nvme->n_dip, CE_WARN, "!unknown log page requested: %d",
1521 atomic_inc_32(&nvme->n_unknown_logpage);
1531 if (nvme_zalloc_dma(nvme, getlogpage.b.lp_numd * sizeof (uint32_t),
1532 DDI_DMA_READ, &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
1533 dev_err(nvme->n_dip, CE_WARN,
1539 dev_err(nvme->n_dip, CE_WARN,
1541 atomic_inc_32(&nvme->n_too_many_cookies);
1554 dev_err(nvme->n_dip, CE_WARN,
1560 dev_err(nvme->n_dip, CE_WARN,
1576 nvme_identify(nvme_t *nvme, uint32_t nsid)
1578 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1587 if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ,
1588 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
1589 dev_err(nvme->n_dip, CE_WARN,
1595 dev_err(nvme->n_dip, CE_WARN,
1597 atomic_inc_32(&nvme->n_too_many_cookies);
1610 dev_err(nvme->n_dip, CE_WARN,
1616 dev_err(nvme->n_dip, CE_WARN,
1632 nvme_set_features(nvme_t *nvme, uint32_t nsid, uint8_t feature, uint32_t val,
1636 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1649 if (!nvme->n_write_cache_present)
1661 dev_err(nvme->n_dip, CE_WARN,
1667 dev_err(nvme->n_dip, CE_WARN,
1683 nvme_write_cache_set(nvme_t *nvme, boolean_t enable)
1690 if (!nvme_set_features(nvme, 0, NVME_FEAT_WRITE_CACHE, nwc.r, &nwc.r))
1697 nvme_set_nqueues(nvme_t *nvme, uint16_t nqueues)
1703 if (!nvme_set_features(nvme, 0, NVME_FEAT_NQUEUES, nq.r, &nq.r)) {
1715 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx)
1717 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1727 c_dw11.b.cq_iv = idx % nvme->n_intr_cnt;
1737 dev_err(nvme->n_dip, CE_WARN,
1743 dev_err(nvme->n_dip, CE_WARN,
1755 cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1764 dev_err(nvme->n_dip, CE_WARN,
1770 dev_err(nvme->n_dip, CE_WARN,
1783 nvme_reset(nvme_t *nvme, boolean_t quiesce)
1788 nvme_put32(nvme, NVME_REG_CC, 0);
1790 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
1792 nvme_put32(nvme, NVME_REG_CC, 0);
1793 for (i = 0; i != nvme->n_timeout * 10; i++) {
1794 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
1805 nvme_put32(nvme, NVME_REG_AQA, 0);
1806 nvme_put32(nvme, NVME_REG_ASQ, 0);
1807 nvme_put32(nvme, NVME_REG_ACQ, 0);
1809 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
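nvme_reset() (source lines 1783-1809) clears CC and polls CSTS until RDY drops, bounding the wait by CAP.TO. Since CAP.TO is in 500 ms units, the n_timeout * 10 loop bound presumably pairs with a ~50 ms delay per iteration. A self-contained sketch of the loop's shape, with a fake register read standing in for nvme_get32(nvme, NVME_REG_CSTS):

    #include <stdio.h>

    static int polls;

    /* Fake CSTS.RDY read standing in for the real register access;
     * here RDY clears on the third poll. */
    static unsigned
    fake_csts_rdy(void)
    {
            return (++polls >= 3 ? 0 : 1);
    }

    int
    main(void)
    {
            unsigned timeout = 2;   /* hypothetical CAP.TO, in 500 ms units */
            unsigned i;

            for (i = 0; i != timeout * 10; i++) {
                    if (fake_csts_rdy() == 0) {
                            (void) printf("RDY cleared after %u polls\n",
                                i + 1);
                            return (0);
                    }
                    /* the driver delays ~50 ms here; omitted */
            }
            (void) printf("reset timed out\n");
            return (1);
    }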
1814 nvme_shutdown(nvme_t *nvme, int mode, boolean_t quiesce)
1822 cc.r = nvme_get32(nvme, NVME_REG_CC);
1824 nvme_put32(nvme, NVME_REG_CC, cc.r);
1827 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
1840 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid)
1850 char model[sizeof (nvme->n_idctl->id_model) + 1];
1851 char serial[sizeof (nvme->n_idctl->id_serial) + 1];
1853 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
1854 bcopy(nvme->n_idctl->id_serial, serial,
1855 sizeof (nvme->n_idctl->id_serial));
1857 model[sizeof (nvme->n_idctl->id_model)] = '\0';
1858 serial[sizeof (nvme->n_idctl->id_serial)] = '\0';
1860 nvme->n_ns[nsid - 1].ns_devid = kmem_asprintf("%4X-%s-%s-%X",
1861 nvme->n_idctl->id_vid, model, serial, nsid);
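nvme_prepare_devid() (source lines 1840-1861) NUL-terminates copies of the fixed-width model and serial fields, then formats a device id from the PCI vendor id, those strings, and the namespace id. A user-space sketch of the same format string, using snprintf() in place of kmem_asprintf() and hypothetical identify data:

    #include <stdio.h>

    int
    main(void)
    {
            /* hypothetical identify-controller values */
            unsigned vid = 0x8086;
            const char *model = "FAKE-MODEL";
            const char *serial = "SN123";
            unsigned nsid = 1;
            char devid[128];

            /* same format as the kmem_asprintf() call at lines 1860-1861:
             * PCI vendor id, model, serial, namespace id */
            (void) snprintf(devid, sizeof (devid), "%4X-%s-%s-%X",
                vid, model, serial, nsid);
            (void) printf("%s\n", devid);
            return (0);
    }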
1865 nvme_init(nvme_t *nvme)
1876 char model[sizeof (nvme->n_idctl->id_model) + 1];
1880 vs.r = nvme_get32(nvme, NVME_REG_VS);
1881 nvme->n_version.v_major = vs.b.vs_mjr;
1882 nvme->n_version.v_minor = vs.b.vs_mnr;
1883 dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d",
1884 nvme->n_version.v_major, nvme->n_version.v_minor);
1886 if (NVME_VERSION_HIGHER(&nvme->n_version,
1888 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.%d",
1890 if (nvme->n_strict_version)
1895 cap.r = nvme_get64(nvme, NVME_REG_CAP);
1898 dev_err(nvme->n_dip, CE_WARN,
1903 nvme->n_nssr_supported = cap.b.cap_nssrs;
1904 nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd;
1905 nvme->n_timeout = cap.b.cap_to;
1906 nvme->n_arbitration_mechanisms = cap.b.cap_ams;
1907 nvme->n_cont_queues_reqd = cap.b.cap_cqr;
1908 nvme->n_max_queue_entries = cap.b.cap_mqes + 1;
1915 nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT),
1917 nvme->n_pagesize = 1UL << (nvme->n_pageshift);
1922 nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize;
1923 nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
1929 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize;
1930 nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
1931 nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize;
1932 nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1;
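Source lines 1915-1917 derive the driver's page shift from CAP.MPSMIN (the continuation of line 1915 is cut off in this listing, but it presumably clamps against CAP.MPSMAX + 12 as well), and line 1974 later programs CC.MPS as pageshift - 12. The arithmetic, runnable stand-alone with hypothetical CAP fields:

    #include <stdio.h>

    #define MIN(a, b)       ((a) < (b) ? (a) : (b))
    #define MAX(a, b)       ((a) > (b) ? (a) : (b))
    #define PAGESHIFT       12      /* assumed host page shift (4 KiB) */

    int
    main(void)
    {
            unsigned mpsmin = 0, mpsmax = 0;  /* hypothetical CAP fields */

            /* CAP.MPSMIN/MPSMAX encode a page size of 2^(12 + n); the
             * driver clamps its own shift into that range, then programs
             * CC.MPS = pageshift - 12 (line 1974). */
            unsigned pageshift = MIN(MAX(mpsmin + 12, PAGESHIFT),
                mpsmax + 12);
            unsigned long pagesize = 1UL << pageshift;

            (void) printf("pageshift=%u pagesize=%lu CC.MPS=%u\n",
                pageshift, pagesize, pageshift - 12);
            return (0);
    }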
1937 if (nvme_reset(nvme, B_FALSE) == B_FALSE) {
1938 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller");
1939 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1940 nvme->n_dead = B_TRUE;
1947 if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0)
1949 dev_err(nvme->n_dip, CE_WARN,
1953 nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP);
1954 nvme->n_ioq[0] = nvme->n_adminq;
1956 nvme->n_progress |= NVME_ADMIN_QUEUE;
1958 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
1959 "admin-queue-len", nvme->n_admin_queue_len);
1961 aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1;
1962 asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress;
1963 acq = nvme->n_adminq->nq_cqdma->nd_cookie.dmac_laddress;
1965 ASSERT((asq & (nvme->n_pagesize - 1)) == 0);
1966 ASSERT((acq & (nvme->n_pagesize - 1)) == 0);
1968 nvme_put32(nvme, NVME_REG_AQA, aqa.r);
1969 nvme_put64(nvme, NVME_REG_ASQ, asq);
1970 nvme_put64(nvme, NVME_REG_ACQ, acq);
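Source lines 1961-1970 program the admin queues: AQA carries both queue sizes zero-based (ASQS in bits 11:0, ACQS in bits 27:16, per the NVMe register map), then ASQ and ACQ take the page-aligned queue base addresses. A sketch of the AQA packing that the driver's bitfield union performs:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            unsigned qlen = 256;    /* hypothetical n_admin_queue_len */

            /* AQA holds both admin queue sizes zero-based: ASQS in bits
             * 11:0, ACQS in bits 27:16 (cf. line 1961). */
            uint32_t aqa = ((qlen - 1) & 0xfffu) |
                (((qlen - 1) & 0xfffu) << 16);

            (void) printf("AQA=0x%08x\n", (unsigned)aqa);
            return (0);
    }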
1974 cc.b.cc_mps = nvme->n_pageshift - 12;
1980 nvme_put32(nvme, NVME_REG_CC, cc.r);
1985 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
1987 for (i = 0; i != nvme->n_timeout * 10; i++) {
1989 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
1992 dev_err(nvme->n_dip, CE_WARN,
1994 ddi_fm_service_impact(nvme->n_dip,
1996 nvme->n_dead = B_TRUE;
2006 dev_err(nvme->n_dip, CE_WARN, "!controller not ready");
2007 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
2008 nvme->n_dead = B_TRUE;
2016 sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL);
2021 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1)
2023 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1)
2025 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1)
2027 dev_err(nvme->n_dip, CE_WARN,
2035 if (nvme_async_event(nvme) != DDI_SUCCESS) {
2036 dev_err(nvme->n_dip, CE_WARN,
2044 nvme->n_idctl = nvme_identify(nvme, 0);
2045 if (nvme->n_idctl == NULL) {
2046 dev_err(nvme->n_dip, CE_WARN,
2054 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
2055 model[sizeof (nvme->n_idctl->id_model)] = '\0';
2059 nvme->n_vendor = strdup("NVMe");
2061 nvme->n_vendor = strdup(vendor);
2063 nvme->n_product = strdup(product);
2068 nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT,
2069 MIN(nvme->n_admin_queue_len / 10,
2070 MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit)));
2072 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
2073 "async-event-limit", nvme->n_async_event_limit);
2075 nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1;
2083 sema_destroy(&nvme->n_abort_sema);
2084 sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL,
2087 nvme->n_progress |= NVME_CTRL_LIMITS;
2089 if (nvme->n_idctl->id_mdts == 0)
2090 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536;
2092 nvme->n_max_data_transfer_size =
2093 1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts);
2095 nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1;
2104 nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size,
2105 (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize));
2107 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size;
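Source lines 2089-2105 size the maximum data transfer: ID_CTRL MDTS is a power of two in units of the minimum page size, with 0 meaning unlimited (treated as 65536 pages), and the result is further capped by what one page of PRP entries can map. Runnable with a hypothetical MDTS value:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b)       ((a) < (b) ? (a) : (b))

    int
    main(void)
    {
            unsigned pageshift = 12;        /* 4 KiB controller page */
            uint64_t pagesize = 1ULL << pageshift;
            unsigned mdts = 5;              /* hypothetical ID_CTRL MDTS */

            /* MDTS == 0 means unlimited, treated as 65536 pages; else
             * the limit is 2^MDTS pages (lines 2089-2093). */
            uint64_t max_xfer = (mdts == 0) ?
                pagesize * 65536 : 1ULL << (pageshift + mdts);

            /* one PRP list page maps pagesize/8 pages (lines 2104-2105) */
            max_xfer = MIN(max_xfer,
                pagesize / sizeof (uint64_t) * pagesize);

            (void) printf("max transfer: %llu bytes\n",
                (unsigned long long)max_xfer);
            return (0);
    }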
2114 if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) ||
2115 ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) ||
2116 ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) ||
2117 ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t)))
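The checks at source lines 2114-2117 reject a controller whose advertised queue-entry-size range excludes the standard 64-byte SQE and 16-byte CQE; the SQES/CQES fields are log2 values. A compact restatement:

    #include <stdio.h>

    /* SQES/CQES are log2 sizes; the driver requires the 64-byte SQE and
     * 16-byte CQE to fall inside [2^qes_min, 2^qes_max]. */
    static int
    qes_supported(unsigned qes_min, unsigned qes_max, unsigned long size)
    {
            return (!((1UL << qes_min) > size || (1UL << qes_max) < size));
    }

    int
    main(void)
    {
            (void) printf("sqe ok: %d\n", qes_supported(6, 6, 64));
            (void) printf("cqe ok: %d\n", qes_supported(4, 4, 16));
            return (0);
    }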
2125 nvme->n_write_cache_present =
2126 nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE;
2128 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
2130 nvme->n_write_cache_present ? 1 : 0);
2132 if (!nvme->n_write_cache_present) {
2133 nvme->n_write_cache_enabled = B_FALSE;
2134 } else if (!nvme_write_cache_set(nvme, nvme->n_write_cache_enabled)) {
2135 dev_err(nvme->n_dip, CE_WARN,
2137 nvme->n_write_cache_enabled ? "en" : "dis");
2141 nvme->n_write_cache_enabled = B_TRUE;
2144 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
2146 nvme->n_write_cache_enabled ? 1 : 0);
2153 nvme->n_error_log = (nvme_error_log_entry_t *)
2154 nvme_get_logpage(nvme, NVME_LOGPAGE_ERROR);
2155 nvme->n_health_log = (nvme_health_log_t *)
2156 nvme_get_logpage(nvme, NVME_LOGPAGE_HEALTH, -1);
2157 nvme->n_fwslot_log = (nvme_fwslot_log_t *)
2158 nvme_get_logpage(nvme, NVME_LOGPAGE_FWSLOT);
2163 nvme->n_namespace_count = nvme->n_idctl->id_nn;
2164 nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) *
2165 nvme->n_namespace_count, KM_SLEEP);
2167 for (i = 0; i != nvme->n_namespace_count; i++) {
2171 nvme->n_ns[i].ns_nvme = nvme;
2172 nvme->n_ns[i].ns_idns = idns = nvme_identify(nvme, i + 1);
2175 dev_err(nvme->n_dip, CE_WARN,
2180 nvme->n_ns[i].ns_id = i + 1;
2181 nvme->n_ns[i].ns_block_count = idns->id_nsize;
2182 nvme->n_ns[i].ns_block_size =
2184 nvme->n_ns[i].ns_best_block_size = nvme->n_ns[i].ns_block_size;
2190 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1))
2191 bcopy(idns->id_eui64, nvme->n_ns[i].ns_eui64,
2192 sizeof (nvme->n_ns[i].ns_eui64));
2195 if (*(uint64_t *)nvme->n_ns[i].ns_eui64 == 0) {
2196 nvme_prepare_devid(nvme, nvme->n_ns[i].ns_id);
2201 * be overridden by setting strict-version=0 in nvme.conf
2203 if (nvme->n_strict_version)
2204 nvme->n_ns[i].ns_ignore = B_TRUE;
2220 nvme->n_ns[i].ns_best_block_size =
2224 if (nvme->n_ns[i].ns_best_block_size < nvme->n_min_block_size)
2225 nvme->n_ns[i].ns_best_block_size =
2226 nvme->n_min_block_size;
2235 dev_err(nvme->n_dip, CE_WARN,
2239 nvme->n_ns[i].ns_ignore = B_TRUE;
2246 if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX))
2248 nvme_release_interrupts(nvme);
2252 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX,
2254 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI,
2256 dev_err(nvme->n_dip, CE_WARN,
2262 nqueues = nvme->n_intr_cnt;
2267 nvme->n_ioq_count = nvme_set_nqueues(nvme, nqueues);
2268 if (nvme->n_ioq_count == 0) {
2269 dev_err(nvme->n_dip, CE_WARN,
2277 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *));
2278 nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) *
2279 (nvme->n_ioq_count + 1), KM_SLEEP);
2280 nvme->n_ioq[0] = nvme->n_adminq;
2286 if (nvme->n_ioq_count < nqueues) {
2287 nvme_release_interrupts(nvme);
2289 if (nvme_setup_interrupts(nvme, nvme->n_intr_type,
2290 nvme->n_ioq_count) != DDI_SUCCESS) {
2291 dev_err(nvme->n_dip, CE_WARN,
2300 nvme->n_io_queue_len =
2301 MIN(nvme->n_io_queue_len, nvme->n_max_queue_entries);
2302 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-queue-len",
2303 nvme->n_io_queue_len);
2305 for (i = 1; i != nvme->n_ioq_count + 1; i++) {
2306 if (nvme_alloc_qpair(nvme, nvme->n_io_queue_len,
2307 &nvme->n_ioq[i], i) != DDI_SUCCESS) {
2308 dev_err(nvme->n_dip, CE_WARN,
2313 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i)
2315 dev_err(nvme->n_dip, CE_WARN,
2325 for (i = 1; i != nvme->n_async_event_limit; i++) {
2326 if (nvme_async_event(nvme) != DDI_SUCCESS) {
2327 dev_err(nvme->n_dip, CE_WARN,
2336 (void) nvme_reset(nvme, B_FALSE);
2344 nvme_t *nvme = (nvme_t *)arg1;
2350 if (inum >= nvme->n_intr_cnt)
2359 qnum < nvme->n_ioq_count + 1 && nvme->n_ioq[qnum] != NULL;
2360 qnum += nvme->n_intr_cnt) {
2361 while ((cmd = nvme_retrieve_cmd(nvme, nvme->n_ioq[qnum]))) {
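nvme_intr() walks every queue owned by its vector: completion vectors are assigned as idx % n_intr_cnt at source line 1727, and the loop at 2359-2360 (its initializer, presumably qnum = inum, does not contain "nvme" and so is absent from this listing) strides through queues by n_intr_cnt. A small demo of the resulting mapping:

    #include <stdio.h>

    int
    main(void)
    {
            int n_intr_cnt = 4;     /* hypothetical vector count */
            int n_ioq_count = 8;    /* I/O queues 1..8; queue 0 is admin */
            int inum, qnum;

            for (inum = 0; inum < n_intr_cnt; inum++) {
                    (void) printf("vector %d -> queues:", inum);
                    for (qnum = inum; qnum < n_ioq_count + 1;
                        qnum += n_intr_cnt)
                            (void) printf(" %d", qnum);
                    (void) printf("\n");
            }
            return (0);
    }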
2372 nvme_release_interrupts(nvme_t *nvme)
2376 for (i = 0; i < nvme->n_intr_cnt; i++) {
2377 if (nvme->n_inth[i] == NULL)
2380 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
2381 (void) ddi_intr_block_disable(&nvme->n_inth[i], 1);
2383 (void) ddi_intr_disable(nvme->n_inth[i]);
2385 (void) ddi_intr_remove_handler(nvme->n_inth[i]);
2386 (void) ddi_intr_free(nvme->n_inth[i]);
2389 kmem_free(nvme->n_inth, nvme->n_inth_sz);
2390 nvme->n_inth = NULL;
2391 nvme->n_inth_sz = 0;
2393 nvme->n_progress &= ~NVME_INTERRUPTS;
2397 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs)
2403 if (nvme->n_intr_types == 0) {
2404 ret = ddi_intr_get_supported_types(nvme->n_dip,
2405 &nvme->n_intr_types);
2407 dev_err(nvme->n_dip, CE_WARN,
2414 if ((nvme->n_intr_types & intr_type) == 0)
2417 ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs);
2419 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed",
2424 ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail);
2426 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed",
2435 nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail;
2436 nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP);
2438 ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail,
2441 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed",
2446 nvme->n_intr_cnt = count;
2448 ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri);
2450 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed",
2456 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr,
2457 (void *)nvme, (void *)(uintptr_t)i);
2459 dev_err(nvme->n_dip, CE_WARN,
2465 (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap);
2468 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
2469 ret = ddi_intr_block_enable(&nvme->n_inth[i], 1);
2471 ret = ddi_intr_enable(nvme->n_inth[i]);
2474 dev_err(nvme->n_dip, CE_WARN,
2480 nvme->n_intr_type = intr_type;
2482 nvme->n_progress |= NVME_INTERRUPTS;
2487 nvme_release_interrupts(nvme);
2504 nvme_t *nvme;
2519 nvme = ddi_get_soft_state(nvme_state, instance);
2520 ddi_set_driver_private(dip, nvme);
2521 nvme->n_dip = dip;
2523 nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2525 nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY,
2528 nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2530 nvme->n_io_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2532 nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2535 nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2538 nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
2542 if (!ISP2(nvme->n_min_block_size) ||
2543 (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) {
2545 "using default %d", ISP2(nvme->n_min_block_size) ?
2548 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
2551 if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN)
2552 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN;
2553 else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN)
2554 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN;
2556 if (nvme->n_io_queue_len < NVME_MIN_IO_QUEUE_LEN)
2557 nvme->n_io_queue_len = NVME_MIN_IO_QUEUE_LEN;
2559 if (nvme->n_async_event_limit < 1)
2560 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT;
2562 nvme->n_reg_acc_attr = nvme_reg_acc_attr;
2563 nvme->n_queue_dma_attr = nvme_queue_dma_attr;
2564 nvme->n_prp_dma_attr = nvme_prp_dma_attr;
2565 nvme->n_sgl_dma_attr = nvme_sgl_dma_attr;
2570 nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip,
2575 ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc);
2577 if (nvme->n_fm_cap) {
2578 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE)
2579 nvme->n_reg_acc_attr.devacc_attr_access =
2582 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) {
2583 nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
2584 nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
2587 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
2588 DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
2591 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
2593 (void *)nvme);
2596 nvme->n_progress |= NVME_FMA_INIT;
2607 if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize,
2608 &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) {
2613 nvme->n_progress |= NVME_REGS_MAPPED;
2620 nvme->n_cmd_taskq = ddi_taskq_create(dip, name, MIN(UINT16_MAX, ncpus),
2622 if (nvme->n_cmd_taskq == NULL) {
2632 nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t),
2634 NULL, (void *)nvme, NULL, 0);
2636 if (nvme_init(nvme) != DDI_SUCCESS)
2642 for (i = 0; i != nvme->n_namespace_count; i++) {
2643 if (nvme->n_ns[i].ns_ignore)
2646 nvme->n_ns[i].ns_bd_hdl = bd_alloc_handle(&nvme->n_ns[i],
2647 &nvme_bd_ops, &nvme->n_prp_dma_attr, KM_SLEEP);
2649 if (nvme->n_ns[i].ns_bd_hdl == NULL) {
2655 if (bd_attach_handle(dip, nvme->n_ns[i].ns_bd_hdl)
2668 if (nvme->n_dead)
2680 nvme_t *nvme;
2687 nvme = ddi_get_soft_state(nvme_state, instance);
2689 if (nvme == NULL)
2692 if (nvme->n_ns) {
2693 for (i = 0; i != nvme->n_namespace_count; i++) {
2694 if (nvme->n_ns[i].ns_bd_hdl) {
2696 nvme->n_ns[i].ns_bd_hdl);
2697 bd_free_handle(nvme->n_ns[i].ns_bd_hdl);
2700 if (nvme->n_ns[i].ns_idns)
2701 kmem_free(nvme->n_ns[i].ns_idns,
2703 if (nvme->n_ns[i].ns_devid)
2704 strfree(nvme->n_ns[i].ns_devid);
2707 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) *
2708 nvme->n_namespace_count);
2711 if (nvme->n_progress & NVME_INTERRUPTS)
2712 nvme_release_interrupts(nvme);
2714 if (nvme->n_cmd_taskq)
2715 ddi_taskq_wait(nvme->n_cmd_taskq);
2717 if (nvme->n_ioq_count > 0) {
2718 for (i = 1; i != nvme->n_ioq_count + 1; i++) {
2719 if (nvme->n_ioq[i] != NULL) {
2721 nvme_free_qpair(nvme->n_ioq[i]);
2725 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) *
2726 (nvme->n_ioq_count + 1));
2729 if (nvme->n_prp_cache != NULL) {
2730 kmem_cache_destroy(nvme->n_prp_cache);
2733 if (nvme->n_progress & NVME_REGS_MAPPED) {
2734 nvme_shutdown(nvme, NVME_CC_SHN_NORMAL, B_FALSE);
2735 (void) nvme_reset(nvme, B_FALSE);
2738 if (nvme->n_cmd_taskq)
2739 ddi_taskq_destroy(nvme->n_cmd_taskq);
2741 if (nvme->n_progress & NVME_CTRL_LIMITS)
2742 sema_destroy(&nvme->n_abort_sema);
2744 if (nvme->n_progress & NVME_ADMIN_QUEUE)
2745 nvme_free_qpair(nvme->n_adminq);
2747 if (nvme->n_idctl)
2748 kmem_free(nvme->n_idctl, sizeof (nvme_identify_ctrl_t));
2750 if (nvme->n_progress & NVME_REGS_MAPPED)
2751 ddi_regs_map_free(&nvme->n_regh);
2753 if (nvme->n_progress & NVME_FMA_INIT) {
2754 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
2755 ddi_fm_handler_unregister(nvme->n_dip);
2757 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
2758 DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
2759 pci_ereport_teardown(nvme->n_dip);
2761 ddi_fm_fini(nvme->n_dip);
2764 if (nvme->n_vendor != NULL)
2765 strfree(nvme->n_vendor);
2767 if (nvme->n_product != NULL)
2768 strfree(nvme->n_product);
2779 nvme_t *nvme;
2783 nvme = ddi_get_soft_state(nvme_state, instance);
2785 if (nvme == NULL)
2788 nvme_shutdown(nvme, NVME_CC_SHN_ABRUPT, B_TRUE);
2790 (void) nvme_reset(nvme, B_TRUE);
2798 nvme_t *nvme = cmd->nc_nvme;
2818 nprp_page = nvme->n_pagesize / sizeof (uint64_t) - 1;
2829 cmd->nc_dma = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP);
2850 nvme_t *nvme = ns->ns_nvme;
2856 cmd = nvme_alloc_cmd(nvme, (xfer->x_flags & BD_XFER_POLL) ?
2913 nvme_t *nvme = ns->ns_nvme;
2921 drive->d_qsize = nvme->n_ioq_count * nvme->n_io_queue_len
2922 / nvme->n_namespace_count;
2936 drive->d_model = nvme->n_idctl->id_model;
2937 drive->d_model_len = sizeof (nvme->n_idctl->id_model);
2938 drive->d_vendor = nvme->n_vendor;
2939 drive->d_vendor_len = strlen(nvme->n_vendor);
2940 drive->d_product = nvme->n_product;
2941 drive->d_product_len = strlen(nvme->n_product);
2942 drive->d_serial = nvme->n_idctl->id_serial;
2943 drive->d_serial_len = sizeof (nvme->n_idctl->id_serial);
2944 drive->d_revision = nvme->n_idctl->id_fwrev;
2945 drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev);
2966 nvme_t *nvme = ns->ns_nvme;
2969 if (nvme->n_dead)
2980 cmd->nc_sqid = (CPU->cpu_id % nvme->n_ioq_count) + 1;
2981 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);
2983 if (nvme_submit_cmd(nvme->n_ioq[cmd->nc_sqid], cmd)
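Finally, source line 2980 spreads block I/O across the I/O queues by submitting CPU; queue ids are 1-based because queue 0 is the admin queue. The mapping, runnable with hypothetical counts:

    #include <stdio.h>

    int
    main(void)
    {
            int n_ioq_count = 4;    /* hypothetical I/O queue count */
            int cpu;

            /* queue ids are 1-based; queue 0 is the admin queue */
            for (cpu = 0; cpu < 8; cpu++)
                    (void) printf("cpu %d -> sqid %d\n", cpu,
                        (cpu % n_ioq_count) + 1);
            return (0);
    }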