#include <sys/byteorder.h>
#ifdef _BIG_ENDIAN
#error nvme driver needs porting for big-endian platforms
#endif
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/ddi.h>
#include <sys/ddi_ufm.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/varargs.h>
#include <sys/cpuvar.h>
#include <sys/disp.h>
#include <sys/blkdev.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/sata/sata_hba.h>
#include <sys/stat.h>
#include <sys/policy.h>
#include <sys/list.h>
#include <sys/dkio.h>
#include <sys/pci.h>
#include <sys/mkdev.h>
#include <sys/nvme.h>
#ifdef __x86
#include <sys/x86_archext.h>
#endif
#include "nvme_reg.h"
#include "nvme_var.h"
CTASSERT(sizeof (nvme_identify_ctrl_t) == NVME_IDENTIFY_BUFSIZE);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_oacs) == 256);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_sqes) == 512);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_oncs) == 520);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_subnqn) == 768);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_nvmof) == 1792);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_psd) == 2048);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_vs) == 3072);
CTASSERT(sizeof (nvme_identify_nsid_t) == NVME_IDENTIFY_BUFSIZE);
CTASSERT(offsetof(nvme_identify_nsid_t, id_fpi) == 32);
CTASSERT(offsetof(nvme_identify_nsid_t, id_anagrpid) == 92);
CTASSERT(offsetof(nvme_identify_nsid_t, id_nguid) == 104);
CTASSERT(offsetof(nvme_identify_nsid_t, id_lbaf) == 128);
CTASSERT(offsetof(nvme_identify_nsid_t, id_vs) == 384);
CTASSERT(sizeof (nvme_identify_nsid_list_t) == NVME_IDENTIFY_BUFSIZE);
CTASSERT(sizeof (nvme_identify_ctrl_list_t) == NVME_IDENTIFY_BUFSIZE);
CTASSERT(sizeof (nvme_identify_primary_caps_t) == NVME_IDENTIFY_BUFSIZE);
CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vqfrt) == 32);
CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vifrt) == 64);
CTASSERT(sizeof (nvme_nschange_list_t) == 4096);
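/*
 * The largest NVMe specification major version the driver knows how to
 * handle, followed by per-command timeouts (in seconds) and the size and
 * timeout limits applied to vendor-specific admin commands. These are
 * ordinary globals so that they remain tunable via /etc/system or mdb.
 */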
static const int nvme_version_major = 2;
uint32_t nvme_format_cmd_timeout = 600;
uint32_t nvme_commit_save_cmd_timeout = 15;
uint32_t nvme_admin_cmd_timeout = 15;
uint32_t nvme_abort_cmd_timeout = 60;
uint32_t nvme_vendor_specific_admin_cmd_size = 1 << 24;
uint_t nvme_vendor_specific_admin_cmd_max_timeout = 60;
static id_space_t *nvme_open_minors;
static avl_tree_t nvme_open_minors_avl;
kmutex_t nvme_open_minors_mutex;
taskq_t *nvme_dead_taskq;
typedef enum {
NVME_MGMT_LOCK_NVME,
NVME_MGMT_LOCK_BDRO
} nvme_mgmt_lock_level_t;
static int nvme_attach(dev_info_t *, ddi_attach_cmd_t);
static int nvme_detach(dev_info_t *, ddi_detach_cmd_t);
static int nvme_quiesce(dev_info_t *);
static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *);
static int nvme_setup_interrupts(nvme_t *, int, int);
static void nvme_release_interrupts(nvme_t *);
static uint_t nvme_intr(caddr_t, caddr_t);
static void nvme_shutdown(nvme_t *, boolean_t);
static boolean_t nvme_reset(nvme_t *, boolean_t);
static int nvme_init(nvme_t *);
static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int);
static void nvme_free_cmd(nvme_cmd_t *);
static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t,
bd_xfer_t *);
static void nvme_admin_cmd(nvme_cmd_t *, uint32_t);
static void nvme_submit_admin_cmd(nvme_qpair_t *, nvme_cmd_t *, uint32_t *);
static int nvme_submit_io_cmd(nvme_qpair_t *, nvme_cmd_t *);
static void nvme_submit_cmd_common(nvme_qpair_t *, nvme_cmd_t *, uint32_t *);
static nvme_cmd_t *nvme_unqueue_cmd(nvme_t *, nvme_qpair_t *, int);
static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *);
static void nvme_wait_cmd(nvme_cmd_t *, uint_t);
static void nvme_wakeup_cmd(void *);
static void nvme_async_event_task(void *);
static int nvme_check_unknown_cmd_status(nvme_cmd_t *);
static int nvme_check_vendor_cmd_status(nvme_cmd_t *);
static int nvme_check_integrity_cmd_status(nvme_cmd_t *);
static int nvme_check_specific_cmd_status(nvme_cmd_t *);
static int nvme_check_generic_cmd_status(nvme_cmd_t *);
static inline int nvme_check_cmd_status(nvme_cmd_t *);
static boolean_t nvme_check_cmd_status_ioctl(nvme_cmd_t *,
nvme_ioctl_common_t *);
static int nvme_abort_cmd(nvme_cmd_t *, const uint32_t);
static void nvme_async_event(nvme_t *);
static boolean_t nvme_format_nvm(nvme_t *, nvme_ioctl_format_t *);
static boolean_t nvme_get_logpage_int(nvme_t *, boolean_t, void **, size_t *,
uint8_t);
static boolean_t nvme_identify(nvme_t *, boolean_t, nvme_ioctl_identify_t *,
void **);
static boolean_t nvme_identify_int(nvme_t *, uint32_t, uint8_t, void **);
static int nvme_set_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t,
uint32_t *);
static int nvme_write_cache_set(nvme_t *, boolean_t);
static int nvme_set_nqueues(nvme_t *);
static void nvme_free_dma(nvme_dma_t *);
static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *,
nvme_dma_t **);
static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t,
nvme_dma_t **);
static void nvme_free_qpair(nvme_qpair_t *);
static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, uint_t);
static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t);
static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
static inline uint32_t nvme_get32(nvme_t *, uintptr_t);
static boolean_t nvme_check_regs_hdl(nvme_t *);
static boolean_t nvme_check_dma_hdl(nvme_dma_t *);
static int nvme_fill_prp(nvme_cmd_t *, ddi_dma_handle_t);
static void nvme_bd_xfer_done(void *);
static void nvme_bd_driveinfo(void *, bd_drive_t *);
static int nvme_bd_mediainfo(void *, bd_media_t *);
static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
static int nvme_bd_read(void *, bd_xfer_t *);
static int nvme_bd_write(void *, bd_xfer_t *);
static int nvme_bd_sync(void *, bd_xfer_t *);
static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);
static int nvme_bd_free_space(void *, bd_xfer_t *);
static int nvme_prp_dma_constructor(void *, void *, int);
static void nvme_prp_dma_destructor(void *, void *);
static void nvme_prepare_devid(nvme_t *, uint32_t);
static int nvme_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t,
ddi_ufm_image_t *);
static int nvme_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t,
ddi_ufm_slot_t *);
static int nvme_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *);
static int nvme_open(dev_t *, int, int, cred_t *);
static int nvme_close(dev_t, int, int, cred_t *);
static int nvme_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int nvme_init_ns(nvme_t *, uint32_t);
static boolean_t nvme_bd_attach_ns(nvme_t *, nvme_ioctl_common_t *);
static boolean_t nvme_bd_detach_ns(nvme_t *, nvme_ioctl_common_t *);
static int nvme_minor_comparator(const void *, const void *);
typedef struct {
nvme_sqe_t *ica_sqe;
void *ica_data;
uint32_t ica_data_len;
uint_t ica_dma_flags;
int ica_copy_flags;
uint32_t ica_timeout;
uint32_t ica_cdw0;
} nvme_ioc_cmd_args_t;
static boolean_t nvme_ioc_cmd(nvme_t *, nvme_ioctl_common_t *,
nvme_ioc_cmd_args_t *);
static ddi_ufm_ops_t nvme_ufm_ops = {
NULL,
nvme_ufm_fill_image,
nvme_ufm_fill_slot,
nvme_ufm_getcaps
};
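/*
 * Minor number layout for the control nodes: the instance number lives in
 * the bits above NVME_MINOR_INST_SHIFT and the namespace ID in the bits
 * below it, so each controller gets one minor for itself (nsid 0) plus one
 * per namespace. Minors handed out for open(9E) handles are allocated from
 * a separate ID space that starts above MAXMIN32 and therefore can never
 * collide with the control nodes.
 */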
#define NVME_MINOR_INST_SHIFT 9
#define NVME_MINOR(inst, nsid) (((inst) << NVME_MINOR_INST_SHIFT) | (nsid))
#define NVME_MINOR_INST(minor) ((minor) >> NVME_MINOR_INST_SHIFT)
#define NVME_MINOR_NSID(minor) ((minor) & ((1 << NVME_MINOR_INST_SHIFT) - 1))
#define NVME_MINOR_MAX (NVME_MINOR(1, 0) - 2)
#define NVME_OPEN_NMINORS (1024 * 1024)
#define NVME_OPEN_MINOR_MIN (MAXMIN32 + 1)
#define NVME_OPEN_MINOR_MAX_EXCL (NVME_OPEN_MINOR_MIN + \
NVME_OPEN_NMINORS)
#define NVME_BUMP_STAT(nvme, stat) \
atomic_inc_64(&nvme->n_device_stat.nds_ ## stat.value.ui64)
static void *nvme_state;
static kmem_cache_t *nvme_cmd_cache;
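/*
 * DMA attribute templates. Queue memory must be page aligned and physically
 * contiguous (a single cookie), PRP entries each map exactly one page, and
 * the SGL attributes exist for controllers that support scatter/gather
 * lists. Per-controller copies of these templates are adjusted during
 * attach based on what the hardware reports.
 */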
static const ddi_dma_attr_t nvme_queue_dma_attr = {
.dma_attr_version = DMA_ATTR_V0,
.dma_attr_addr_lo = 0,
.dma_attr_addr_hi = 0xffffffffffffffffULL,
.dma_attr_count_max = (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1,
.dma_attr_align = 0x1000,
.dma_attr_burstsizes = 0x7ff,
.dma_attr_minxfer = 0x1000,
.dma_attr_maxxfer = (UINT16_MAX + 1) * sizeof (nvme_sqe_t),
.dma_attr_seg = 0xffffffffffffffffULL,
.dma_attr_sgllen = 1,
.dma_attr_granular = 1,
.dma_attr_flags = 0,
};
static const ddi_dma_attr_t nvme_prp_dma_attr = {
.dma_attr_version = DMA_ATTR_V0,
.dma_attr_addr_lo = 0,
.dma_attr_addr_hi = 0xffffffffffffffffULL,
.dma_attr_count_max = 0xfff,
.dma_attr_align = 0x1000,
.dma_attr_burstsizes = 0x7ff,
.dma_attr_minxfer = 0x1000,
.dma_attr_maxxfer = 0x1000,
.dma_attr_seg = 0xfff,
.dma_attr_sgllen = -1,
.dma_attr_granular = 1,
.dma_attr_flags = 0,
};
static const ddi_dma_attr_t nvme_sgl_dma_attr = {
.dma_attr_version = DMA_ATTR_V0,
.dma_attr_addr_lo = 0,
.dma_attr_addr_hi = 0xffffffffffffffffULL,
.dma_attr_count_max = 0xffffffffUL,
.dma_attr_align = 1,
.dma_attr_burstsizes = 0x7ff,
.dma_attr_minxfer = 0x10,
.dma_attr_maxxfer = 0xfffffffffULL,
.dma_attr_seg = 0xffffffffffffffffULL,
.dma_attr_sgllen = -1,
.dma_attr_granular = 0x10,
.dma_attr_flags = 0
};
static ddi_device_acc_attr_t nvme_reg_acc_attr = {
.devacc_attr_version = DDI_DEVICE_ATTR_V0,
.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
.devacc_attr_dataorder = DDI_STRICTORDER_ACC
};
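/*
 * Each nvme_ioctl_check_t below encodes the validation policy for one group
 * of ioctls: whether a namespace may be targeted at all (nck_ns_ok), whether
 * the ioctl may be issued on a namespace minor (nck_ns_minor_ok), whether
 * the broadcast NSID is acceptable (nck_bcast_ok), how a controller-level
 * request is treated (nck_skip_ctrl/nck_ctrl_rewrite), and what form of
 * exclusive access the caller must hold (nck_excl).
 */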
static const nvme_ioctl_check_t nvme_check_ctrl_info = {
.nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE,
.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
.nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_NONE
};
static const nvme_ioctl_check_t nvme_check_ns_info = {
.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
.nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_NONE
};
static const nvme_ioctl_check_t nvme_check_identify = {
.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
.nck_skip_ctrl = B_TRUE, .nck_ctrl_rewrite = B_FALSE,
.nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
};
static const nvme_ioctl_check_t nvme_check_get_logpage = {
.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
.nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
};
static const nvme_ioctl_check_t nvme_check_get_feature = {
.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
.nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
};
static const nvme_ioctl_check_t nvme_check_format = {
.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
.nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_WRITE
};
static const nvme_ioctl_check_t nvme_check_attach_detach = {
.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
.nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE
};
static const nvme_ioctl_check_t nvme_check_ns_create = {
.nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE,
.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
.nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_CTRL
};
static const nvme_ioctl_check_t nvme_check_ns_delete = {
.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_FALSE,
.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_TRUE,
.nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE
};
static const nvme_ioctl_check_t nvme_check_firmware = {
.nck_ns_ok = B_FALSE, .nck_ns_minor_ok = B_FALSE,
.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
.nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_WRITE
};
static const nvme_ioctl_check_t nvme_check_passthru = {
.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_FALSE,
.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
.nck_bcast_ok = B_TRUE, .nck_excl = NVME_IOCTL_EXCL_NONE
};
static const nvme_ioctl_check_t nvme_check_locking = {
.nck_ns_ok = B_TRUE, .nck_ns_minor_ok = B_TRUE,
.nck_skip_ctrl = B_FALSE, .nck_ctrl_rewrite = B_FALSE,
.nck_bcast_ok = B_FALSE, .nck_excl = NVME_IOCTL_EXCL_SKIP
};
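/*
 * The following tables are indexed by the current namespace state and yield
 * either NVME_IOCTL_E_OK, if the corresponding operation is permitted in
 * that state, or the specific error to return to the caller. They are
 * consulted through nvme_ns_state_check() while the management lock is held.
 */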
static const nvme_ioctl_errno_t nvme_ns_delete_states[] = {
[NVME_NS_STATE_UNALLOCATED] = NVME_IOCTL_E_NS_NO_NS,
[NVME_NS_STATE_ALLOCATED] = NVME_IOCTL_E_OK,
[NVME_NS_STATE_ACTIVE] = NVME_IOCTL_E_NS_CTRL_ATTACHED,
[NVME_NS_STATE_NOT_IGNORED] = NVME_IOCTL_E_NS_CTRL_ATTACHED,
[NVME_NS_STATE_ATTACHED] = NVME_IOCTL_E_NS_BLKDEV_ATTACH
};
static const nvme_ioctl_errno_t nvme_ctrl_attach_states[] = {
[NVME_NS_STATE_UNALLOCATED] = NVME_IOCTL_E_NS_NO_NS,
[NVME_NS_STATE_ALLOCATED] = NVME_IOCTL_E_OK,
[NVME_NS_STATE_ACTIVE] = NVME_IOCTL_E_NS_CTRL_ATTACHED,
[NVME_NS_STATE_NOT_IGNORED] = NVME_IOCTL_E_NS_CTRL_ATTACHED,
[NVME_NS_STATE_ATTACHED] = NVME_IOCTL_E_NS_BLKDEV_ATTACH
};
static const nvme_ioctl_errno_t nvme_ctrl_detach_states[] = {
[NVME_NS_STATE_UNALLOCATED] = NVME_IOCTL_E_NS_NO_NS,
[NVME_NS_STATE_ALLOCATED] = NVME_IOCTL_E_NS_CTRL_NOT_ATTACHED,
[NVME_NS_STATE_ACTIVE] = NVME_IOCTL_E_OK,
[NVME_NS_STATE_NOT_IGNORED] = NVME_IOCTL_E_OK,
[NVME_NS_STATE_ATTACHED] = NVME_IOCTL_E_NS_BLKDEV_ATTACH
};
static const nvme_ioctl_errno_t nvme_bd_attach_states[] = {
[NVME_NS_STATE_UNALLOCATED] = NVME_IOCTL_E_NS_NO_NS,
[NVME_NS_STATE_ALLOCATED] = NVME_IOCTL_E_NS_CTRL_NOT_ATTACHED,
[NVME_NS_STATE_ACTIVE] = NVME_IOCTL_E_UNSUP_ATTACH_NS,
[NVME_NS_STATE_NOT_IGNORED] = NVME_IOCTL_E_OK,
[NVME_NS_STATE_ATTACHED] = NVME_IOCTL_E_NS_BLKDEV_ATTACH,
};
static const nvme_ioctl_errno_t nvme_bd_detach_states[] = {
[NVME_NS_STATE_UNALLOCATED] = NVME_IOCTL_E_NS_NO_NS,
[NVME_NS_STATE_ALLOCATED] = NVME_IOCTL_E_NS_CTRL_NOT_ATTACHED,
[NVME_NS_STATE_ACTIVE] = NVME_IOCTL_E_NS_CTRL_ATTACHED,
[NVME_NS_STATE_NOT_IGNORED] = NVME_IOCTL_E_NS_CTRL_ATTACHED,
[NVME_NS_STATE_ATTACHED] = NVME_IOCTL_E_OK,
};
static const nvme_ioctl_errno_t nvme_format_nvm_states[] = {
[NVME_NS_STATE_UNALLOCATED] = NVME_IOCTL_E_NS_NO_NS,
[NVME_NS_STATE_ALLOCATED] = NVME_IOCTL_E_OK,
[NVME_NS_STATE_ACTIVE] = NVME_IOCTL_E_OK,
[NVME_NS_STATE_NOT_IGNORED] = NVME_IOCTL_E_OK,
[NVME_NS_STATE_ATTACHED] = NVME_IOCTL_E_NS_BLKDEV_ATTACH
};
static struct cb_ops nvme_cb_ops = {
.cb_open = nvme_open,
.cb_close = nvme_close,
.cb_strategy = nodev,
.cb_print = nodev,
.cb_dump = nodev,
.cb_read = nodev,
.cb_write = nodev,
.cb_ioctl = nvme_ioctl,
.cb_devmap = nodev,
.cb_mmap = nodev,
.cb_segmap = nodev,
.cb_chpoll = nochpoll,
.cb_prop_op = ddi_prop_op,
.cb_str = 0,
.cb_flag = D_NEW | D_MP,
.cb_rev = CB_REV,
.cb_aread = nodev,
.cb_awrite = nodev
};
static struct dev_ops nvme_dev_ops = {
.devo_rev = DEVO_REV,
.devo_refcnt = 0,
.devo_getinfo = ddi_no_info,
.devo_identify = nulldev,
.devo_probe = nulldev,
.devo_attach = nvme_attach,
.devo_detach = nvme_detach,
.devo_reset = nodev,
.devo_cb_ops = &nvme_cb_ops,
.devo_bus_ops = NULL,
.devo_power = NULL,
.devo_quiesce = nvme_quiesce,
};
static struct modldrv nvme_modldrv = {
.drv_modops = &mod_driverops,
.drv_linkinfo = "NVMe driver",
.drv_dev_ops = &nvme_dev_ops
};
static struct modlinkage nvme_modlinkage = {
.ml_rev = MODREV_1,
.ml_linkage = { &nvme_modldrv, NULL }
};
static bd_ops_t nvme_bd_ops = {
.o_version = BD_OPS_CURRENT_VERSION,
.o_drive_info = nvme_bd_driveinfo,
.o_media_info = nvme_bd_mediainfo,
.o_devid_init = nvme_bd_devid,
.o_sync_cache = nvme_bd_sync,
.o_read = nvme_bd_read,
.o_write = nvme_bd_write,
.o_free_space = nvme_bd_free_space,
};
static struct list nvme_lost_cmds;
static kmutex_t nvme_lc_mutex;
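/*
 * Loadable module entry points. _init() sets up all global state -- soft
 * state, the open-minor ID space and AVL tree, the command kmem cache, the
 * lost command list, and the taskq used to process dead controllers --
 * before registering with blkdev and installing the module. Everything is
 * torn down again if mod_install() fails, or in _fini() on module removal.
 */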
int
_init(void)
{
int error;
error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1);
if (error != DDI_SUCCESS)
return (error);
if ((nvme_open_minors = id_space_create("nvme_open_minors",
NVME_OPEN_MINOR_MIN, NVME_OPEN_MINOR_MAX_EXCL)) == NULL) {
ddi_soft_state_fini(&nvme_state);
return (ENOMEM);
}
nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache",
sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
mutex_init(&nvme_lc_mutex, NULL, MUTEX_DRIVER, NULL);
list_create(&nvme_lost_cmds, sizeof (nvme_cmd_t),
offsetof(nvme_cmd_t, nc_list));
mutex_init(&nvme_open_minors_mutex, NULL, MUTEX_DRIVER, NULL);
avl_create(&nvme_open_minors_avl, nvme_minor_comparator,
sizeof (nvme_minor_t), offsetof(nvme_minor_t, nm_avl));
nvme_dead_taskq = taskq_create("nvme_dead_taskq", 1, minclsyspri, 1, 1,
TASKQ_PREPOPULATE);
bd_mod_init(&nvme_dev_ops);
error = mod_install(&nvme_modlinkage);
if (error != DDI_SUCCESS) {
ddi_soft_state_fini(&nvme_state);
id_space_destroy(nvme_open_minors);
kmem_cache_destroy(nvme_cmd_cache);
mutex_destroy(&nvme_lc_mutex);
list_destroy(&nvme_lost_cmds);
bd_mod_fini(&nvme_dev_ops);
mutex_destroy(&nvme_open_minors_mutex);
avl_destroy(&nvme_open_minors_avl);
taskq_destroy(nvme_dead_taskq);
}
return (error);
}
int
_fini(void)
{
int error;
if (!list_is_empty(&nvme_lost_cmds))
return (DDI_FAILURE);
error = mod_remove(&nvme_modlinkage);
if (error == DDI_SUCCESS) {
ddi_soft_state_fini(&nvme_state);
id_space_destroy(nvme_open_minors);
kmem_cache_destroy(nvme_cmd_cache);
mutex_destroy(&nvme_lc_mutex);
list_destroy(&nvme_lost_cmds);
bd_mod_fini(&nvme_dev_ops);
mutex_destroy(&nvme_open_minors_mutex);
avl_destroy(&nvme_open_minors_avl);
taskq_destroy(nvme_dead_taskq);
}
return (error);
}
int
_info(struct modinfo *modinfop)
{
return (mod_info(&nvme_modlinkage, modinfop));
}
static inline void
nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val)
{
ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val);
}
static inline void
nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val)
{
ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val);
}
static inline uint64_t
nvme_get64(nvme_t *nvme, uintptr_t reg)
{
uint64_t val;
ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg));
return (val);
}
static inline uint32_t
nvme_get32(nvme_t *nvme, uintptr_t reg)
{
uint32_t val;
ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg));
return (val);
}
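/*
 * The management lock serializes state-changing management operations. It
 * is a mutex/condvar pair rather than a plain mutex so that ownership can
 * be lent to blkdev: nvme_mgmt_bd_start() records the owning thread and
 * drops the mutex before we call into blkdev (which may call back into the
 * driver), and nvme_mgmt_bd_end() reclaims it. Callers acquiring at the
 * NVME_MGMT_LOCK_BDRO level do not wait for blkdev ownership to clear.
 */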
static void
nvme_mgmt_lock_fini(nvme_mgmt_lock_t *lock)
{
ASSERT3U(lock->nml_bd_own, ==, 0);
mutex_destroy(&lock->nml_lock);
cv_destroy(&lock->nml_cv);
}
static void
nvme_mgmt_lock_init(nvme_mgmt_lock_t *lock)
{
mutex_init(&lock->nml_lock, NULL, MUTEX_DRIVER, NULL);
cv_init(&lock->nml_cv, NULL, CV_DRIVER, NULL);
lock->nml_bd_own = 0;
}
static void
nvme_mgmt_unlock(nvme_t *nvme)
{
nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
cv_broadcast(&lock->nml_cv);
mutex_exit(&lock->nml_lock);
}
static boolean_t
nvme_mgmt_lock_held(const nvme_t *nvme)
{
return (MUTEX_HELD(&nvme->n_mgmt.nml_lock) != 0);
}
static void
nvme_mgmt_lock(nvme_t *nvme, nvme_mgmt_lock_level_t level)
{
nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
mutex_enter(&lock->nml_lock);
while (lock->nml_bd_own != 0) {
if (level == NVME_MGMT_LOCK_BDRO)
break;
cv_wait(&lock->nml_cv, &lock->nml_lock);
}
}
static void
nvme_mgmt_bd_start(nvme_t *nvme)
{
nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
VERIFY(MUTEX_HELD(&lock->nml_lock));
VERIFY3U(lock->nml_bd_own, ==, 0);
lock->nml_bd_own = (uintptr_t)curthread;
mutex_exit(&lock->nml_lock);
}
static void
nvme_mgmt_bd_end(nvme_t *nvme)
{
nvme_mgmt_lock_t *lock = &nvme->n_mgmt;
mutex_enter(&lock->nml_lock);
VERIFY3U(lock->nml_bd_own, ==, (uintptr_t)curthread);
lock->nml_bd_own = 0;
}
static boolean_t
nvme_ns_state_check(const nvme_namespace_t *ns, nvme_ioctl_common_t *ioc,
const nvme_ioctl_errno_t states[NVME_NS_NSTATES])
{
VERIFY(nvme_mgmt_lock_held(ns->ns_nvme));
VERIFY3U(ns->ns_state, <, NVME_NS_NSTATES);
if (states[ns->ns_state] == NVME_IOCTL_E_OK) {
return (B_TRUE);
}
return (nvme_ioctl_error(ioc, states[ns->ns_state], 0, 0));
}
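/*
 * Mark the controller dead (or gone, if it has been physically removed).
 * The atomic compare-and-swap guarantees that only the first caller reports
 * the fault to FMA and dispatches the taskq entry that unwinds lock state
 * for threads blocked on the now-dead controller; later callers return
 * immediately.
 */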
static void
nvme_ctrl_mark_dead(nvme_t *nvme, boolean_t removed)
{
boolean_t was_dead;
was_dead = atomic_cas_32((volatile uint32_t *)&nvme->n_dead, B_FALSE,
B_TRUE);
if (removed) {
nvme->n_dead_status = NVME_IOCTL_E_CTRL_GONE;
}
if (was_dead) {
return;
}
if (!removed) {
ASSERT3U(nvme->n_dead_status, ==, NVME_IOCTL_E_CTRL_DEAD);
ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
}
taskq_dispatch_ent(nvme_dead_taskq, nvme_rwlock_ctrl_dead, nvme,
TQ_NOSLEEP, &nvme->n_dead_tqent);
}
static boolean_t
nvme_ctrl_is_gone(const nvme_t *nvme)
{
if (nvme->n_dead && nvme->n_dead_status == NVME_IOCTL_E_CTRL_GONE)
return (B_TRUE);
return (B_FALSE);
}
static boolean_t
nvme_check_regs_hdl(nvme_t *nvme)
{
ddi_fm_error_t error;
ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION);
if (error.fme_status != DDI_FM_OK)
return (B_TRUE);
return (B_FALSE);
}
static boolean_t
nvme_check_dma_hdl(nvme_dma_t *dma)
{
ddi_fm_error_t error;
if (dma == NULL)
return (B_FALSE);
ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION);
if (error.fme_status != DDI_FM_OK)
return (B_TRUE);
return (B_FALSE);
}
static void
nvme_free_dma_common(nvme_dma_t *dma)
{
if (dma->nd_dmah != NULL)
(void) ddi_dma_unbind_handle(dma->nd_dmah);
if (dma->nd_acch != NULL)
ddi_dma_mem_free(&dma->nd_acch);
if (dma->nd_dmah != NULL)
ddi_dma_free_handle(&dma->nd_dmah);
}
static void
nvme_free_dma(nvme_dma_t *dma)
{
nvme_free_dma_common(dma);
kmem_free(dma, sizeof (*dma));
}
static void
nvme_prp_dma_destructor(void *buf, void *private __unused)
{
nvme_dma_t *dma = (nvme_dma_t *)buf;
nvme_free_dma_common(dma);
}
static int
nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma,
size_t len, uint_t flags, ddi_dma_attr_t *dma_attr)
{
if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
&dma->nd_dmah) != DDI_SUCCESS) {
dev_err(nvme->n_dip, CE_PANIC,
"!failed to get DMA handle, check DMA attributes");
return (DDI_FAILURE);
}
(void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
&dma->nd_len, &dma->nd_acch);
if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
&dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
dev_err(nvme->n_dip, CE_WARN,
"!failed to bind DMA memory");
NVME_BUMP_STAT(nvme, dma_bind_err);
nvme_free_dma_common(dma);
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
static int
nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
ddi_dma_attr_t *dma_attr, nvme_dma_t **ret)
{
nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP);
if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) !=
DDI_SUCCESS) {
*ret = NULL;
kmem_free(dma, sizeof (nvme_dma_t));
return (DDI_FAILURE);
}
bzero(dma->nd_memp, dma->nd_len);
*ret = dma;
return (DDI_SUCCESS);
}
static int
nvme_prp_dma_constructor(void *buf, void *private, int flags __unused)
{
nvme_dma_t *dma = (nvme_dma_t *)buf;
nvme_t *nvme = (nvme_t *)private;
dma->nd_dmah = NULL;
dma->nd_acch = NULL;
if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize,
DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) {
return (-1);
}
ASSERT(dma->nd_ncookie == 1);
dma->nd_cached = B_TRUE;
return (0);
}
static int
nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
uint_t flags, nvme_dma_t **dma)
{
uint32_t len = nentry * qe_len;
ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;
len = roundup(len, nvme->n_pagesize);
if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
!= DDI_SUCCESS) {
dev_err(nvme->n_dip, CE_WARN,
"!failed to get DMA memory for queue");
goto fail;
}
if ((*dma)->nd_ncookie != 1) {
dev_err(nvme->n_dip, CE_WARN,
"!got too many cookies for queue DMA");
goto fail;
}
return (DDI_SUCCESS);
fail:
if (*dma) {
nvme_free_dma(*dma);
*dma = NULL;
}
return (DDI_FAILURE);
}
static void
nvme_free_cq(nvme_cq_t *cq)
{
mutex_destroy(&cq->ncq_mutex);
if (cq->ncq_cmd_taskq != NULL)
taskq_destroy(cq->ncq_cmd_taskq);
if (cq->ncq_dma != NULL)
nvme_free_dma(cq->ncq_dma);
kmem_free(cq, sizeof (*cq));
}
static void
nvme_free_qpair(nvme_qpair_t *qp)
{
int i;
mutex_destroy(&qp->nq_mutex);
sema_destroy(&qp->nq_sema);
if (qp->nq_sqdma != NULL)
nvme_free_dma(qp->nq_sqdma);
if (qp->nq_active_cmds > 0)
for (i = 0; i != qp->nq_nentry; i++)
if (qp->nq_cmd[i] != NULL)
nvme_free_cmd(qp->nq_cmd[i]);
if (qp->nq_cmd != NULL)
kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry);
kmem_free(qp, sizeof (nvme_qpair_t));
}
static void
nvme_destroy_cq_array(nvme_t *nvme, uint_t start)
{
uint_t i;
for (i = start; i < nvme->n_cq_count; i++)
if (nvme->n_cq[i] != NULL)
nvme_free_cq(nvme->n_cq[i]);
kmem_free(nvme->n_cq, sizeof (*nvme->n_cq) * nvme->n_cq_count);
}
static int
nvme_alloc_cq(nvme_t *nvme, uint32_t nentry, nvme_cq_t **cqp, uint16_t idx,
uint_t nthr)
{
nvme_cq_t *cq = kmem_zalloc(sizeof (*cq), KM_SLEEP);
char name[64];
mutex_init(&cq->ncq_mutex, NULL, MUTEX_DRIVER,
DDI_INTR_PRI(nvme->n_intr_pri));
if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t),
DDI_DMA_READ, &cq->ncq_dma) != DDI_SUCCESS)
goto fail;
cq->ncq_cq = (nvme_cqe_t *)cq->ncq_dma->nd_memp;
cq->ncq_nentry = nentry;
cq->ncq_id = idx;
cq->ncq_hdbl = NVME_REG_CQHDBL(nvme, idx);
(void) snprintf(name, sizeof (name), "%s%d_cmd_taskq%u",
ddi_driver_name(nvme->n_dip), ddi_get_instance(nvme->n_dip), idx);
cq->ncq_cmd_taskq = taskq_create(name, nthr, minclsyspri, 64, INT_MAX,
TASKQ_PREPOPULATE);
if (cq->ncq_cmd_taskq == NULL) {
dev_err(nvme->n_dip, CE_WARN, "!failed to create cmd "
"taskq for cq %u", idx);
goto fail;
}
*cqp = cq;
return (DDI_SUCCESS);
fail:
nvme_free_cq(cq);
*cqp = NULL;
return (DDI_FAILURE);
}
static int
nvme_create_cq_array(nvme_t *nvme, uint_t ncq, uint32_t nentry, uint_t nthr)
{
nvme_cq_t **cq;
uint_t i, cq_count;
ASSERT3U(ncq, >, nvme->n_cq_count);
cq = nvme->n_cq;
cq_count = nvme->n_cq_count;
nvme->n_cq = kmem_zalloc(sizeof (*nvme->n_cq) * ncq, KM_SLEEP);
nvme->n_cq_count = ncq;
for (i = 0; i < cq_count; i++)
nvme->n_cq[i] = cq[i];
for (; i < nvme->n_cq_count; i++)
if (nvme_alloc_cq(nvme, nentry, &nvme->n_cq[i], i, nthr) !=
DDI_SUCCESS)
goto fail;
if (cq != NULL)
kmem_free(cq, sizeof (*cq) * cq_count);
return (DDI_SUCCESS);
fail:
nvme_destroy_cq_array(nvme, cq_count);
nvme->n_cq_count = cq_count;
nvme->n_cq = cq;
return (DDI_FAILURE);
}
static int
nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp,
uint_t idx)
{
nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP);
uint_t cq_idx;
mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER,
DDI_INTR_PRI(nvme->n_intr_pri));
sema_init(&qp->nq_sema, nentry - 1, NULL, SEMA_DRIVER, NULL);
if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t),
DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS)
goto fail;
cq_idx = idx == 0 ? 0 : 1 + (idx - 1) % (nvme->n_cq_count - 1);
qp->nq_cq = nvme->n_cq[cq_idx];
qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp;
qp->nq_nentry = nentry;
qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx);
qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP);
qp->nq_next_cmd = 0;
*nqp = qp;
return (DDI_SUCCESS);
fail:
nvme_free_qpair(qp);
*nqp = NULL;
return (DDI_FAILURE);
}
static nvme_cmd_t *
nvme_alloc_cmd(nvme_t *nvme, int kmflag)
{
nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag);
if (cmd != NULL) {
bzero(cmd, sizeof (nvme_cmd_t));
cmd->nc_nvme = nvme;
}
return (cmd);
}
static nvme_cmd_t *
nvme_alloc_admin_cmd(nvme_t *nvme, int kmflag)
{
nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, kmflag);
if (cmd != NULL) {
cmd->nc_flags |= NVME_CMD_F_USELOCK;
mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER,
DDI_INTR_PRI(nvme->n_intr_pri));
cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL);
}
return (cmd);
}
static void
nvme_free_cmd(nvme_cmd_t *cmd)
{
if (list_link_active(&cmd->nc_list))
return;
if (cmd->nc_dma) {
nvme_free_dma(cmd->nc_dma);
cmd->nc_dma = NULL;
}
if (cmd->nc_prp) {
kmem_cache_free(cmd->nc_nvme->n_prp_cache, cmd->nc_prp);
cmd->nc_prp = NULL;
}
if ((cmd->nc_flags & NVME_CMD_F_USELOCK) != 0) {
cv_destroy(&cmd->nc_cv);
mutex_destroy(&cmd->nc_mutex);
}
kmem_cache_free(nvme_cmd_cache, cmd);
}
static void
nvme_submit_admin_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd, uint32_t *qtimeoutp)
{
sema_p(&qp->nq_sema);
nvme_submit_cmd_common(qp, cmd, qtimeoutp);
}
static int
nvme_submit_io_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
{
if (cmd->nc_nvme->n_dead) {
return (EIO);
}
sema_p(&qp->nq_sema);
nvme_submit_cmd_common(qp, cmd, NULL);
return (0);
}
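/*
 * Common submission path: find a free slot in the per-queue command array
 * (the slot index doubles as the NVMe command identifier), copy the SQE
 * into the submission queue, and ring the doorbell. If the controller is
 * dead the command is instead dispatched straight to its callback, which
 * will observe the failure via nvme_check_cmd_status().
 */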
static void
nvme_submit_cmd_common(nvme_qpair_t *qp, nvme_cmd_t *cmd, uint32_t *qtimeoutp)
{
nvme_reg_sqtdbl_t tail = { 0 };
cmd->nc_submit_ts = gethrtime();
cmd->nc_state = NVME_CMD_SUBMITTED;
mutex_enter(&qp->nq_mutex);
if (cmd->nc_nvme->n_dead) {
cmd->nc_queue_ts = gethrtime();
cmd->nc_state = NVME_CMD_QUEUED;
taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq, cmd->nc_callback,
cmd, TQ_NOSLEEP, &cmd->nc_tqent);
sema_v(&qp->nq_sema);
mutex_exit(&qp->nq_mutex);
return;
}
while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
qp->nq_cmd[qp->nq_next_cmd] = cmd;
qp->nq_active_cmds++;
if (qtimeoutp != NULL)
*qtimeoutp = qp->nq_active_timeout;
qp->nq_active_timeout += cmd->nc_timeout;
cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t));
(void) ddi_dma_sync(qp->nq_sqdma->nd_dmah,
sizeof (nvme_sqe_t) * qp->nq_sqtail,
sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV);
qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry;
nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r);
mutex_exit(&qp->nq_mutex);
}
static nvme_cmd_t *
nvme_unqueue_cmd(nvme_t *nvme, nvme_qpair_t *qp, int cid)
{
nvme_cmd_t *cmd;
ASSERT(mutex_owned(&qp->nq_mutex));
ASSERT3S(cid, <, qp->nq_nentry);
cmd = qp->nq_cmd[cid];
if (cmd == NULL)
return (NULL);
qp->nq_cmd[cid] = NULL;
ASSERT3U(qp->nq_active_cmds, >, 0);
qp->nq_active_cmds--;
ASSERT3U(qp->nq_active_timeout, >=, cmd->nc_timeout);
qp->nq_active_timeout -= cmd->nc_timeout;
sema_v(&qp->nq_sema);
ASSERT3P(cmd, !=, NULL);
ASSERT3P(cmd->nc_nvme, ==, nvme);
ASSERT3S(cmd->nc_sqe.sqe_cid, ==, cid);
return (cmd);
}
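/*
 * A command that has timed out and could not be aborted is moved onto the
 * global lost command list: the controller may still own its DMA buffers,
 * so they can never be freed, and _fini() refuses to unload the module
 * while any lost commands remain.
 */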
static void
nvme_lost_cmd(nvme_t *nvme, nvme_cmd_t *cmd)
{
ASSERT(mutex_owned(&cmd->nc_mutex));
switch (cmd->nc_state) {
case NVME_CMD_SUBMITTED: {
nvme_qpair_t *qp = nvme->n_ioq[cmd->nc_sqid];
mutex_enter(&qp->nq_mutex);
(void) nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
mutex_exit(&qp->nq_mutex);
}
case NVME_CMD_ALLOCATED:
case NVME_CMD_COMPLETED:
break;
case NVME_CMD_QUEUED:
while (cmd->nc_state != NVME_CMD_COMPLETED)
cv_wait(&cmd->nc_cv, &cmd->nc_mutex);
break;
case NVME_CMD_LOST:
dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
"%s: command %p already lost", __func__, (void *)cmd);
break;
}
cmd->nc_state = NVME_CMD_LOST;
mutex_enter(&nvme_lc_mutex);
list_insert_head(&nvme_lost_cmds, cmd);
mutex_exit(&nvme_lc_mutex);
}
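/*
 * Get the command tied to the next completed CQE, if any. New completion
 * queue entries are recognized by their phase tag: the controller inverts
 * the tag each time it wraps the queue, so an entry whose phase still
 * matches ncq_phase has not been written since we last caught up.
 * Completions with an unknown command ID are skipped rather than treated
 * as fatal, as they can be produced by controller firmware bugs (e.g. in
 * abort handling).
 */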
static nvme_cmd_t *
nvme_get_completed(nvme_t *nvme, nvme_cq_t *cq)
{
nvme_qpair_t *qp;
nvme_cqe_t *cqe;
nvme_cmd_t *cmd;
ASSERT(mutex_owned(&cq->ncq_mutex));
retry:
cqe = &cq->ncq_cq[cq->ncq_head];
if (cqe->cqe_sf.sf_p == cq->ncq_phase)
return (NULL);
qp = nvme->n_ioq[cqe->cqe_sqid];
mutex_enter(&qp->nq_mutex);
cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid);
mutex_exit(&qp->nq_mutex);
qp->nq_sqhead = cqe->cqe_sqhd;
cq->ncq_head = (cq->ncq_head + 1) % cq->ncq_nentry;
if (cq->ncq_head == 0)
cq->ncq_phase = cq->ncq_phase != 0 ? 0 : 1;
if (cmd == NULL) {
dev_err(nvme->n_dip, CE_WARN,
"!received completion for unknown cid 0x%x", cqe->cqe_cid);
NVME_BUMP_STAT(nvme, unknown_cid);
goto retry;
}
ASSERT3U(cmd->nc_sqid, ==, cqe->cqe_sqid);
bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t));
return (cmd);
}
static uint_t
nvme_process_iocq(nvme_t *nvme, nvme_cq_t *cq)
{
nvme_reg_cqhdbl_t head = { 0 };
nvme_cmd_t *cmd;
uint_t completed = 0;
if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
DDI_SUCCESS)
dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
__func__);
mutex_enter(&cq->ncq_mutex);
while ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
if ((cmd->nc_flags & NVME_CMD_F_USELOCK) != 0) {
mutex_enter(&cmd->nc_mutex);
if (cmd->nc_state == NVME_CMD_LOST) {
mutex_exit(&cmd->nc_mutex);
completed++;
continue;
}
}
cmd->nc_queue_ts = gethrtime();
cmd->nc_state = NVME_CMD_QUEUED;
if ((cmd->nc_flags & NVME_CMD_F_USELOCK) != 0)
mutex_exit(&cmd->nc_mutex);
taskq_dispatch_ent(cq->ncq_cmd_taskq, cmd->nc_callback, cmd,
TQ_NOSLEEP, &cmd->nc_tqent);
completed++;
}
if (completed > 0) {
head.b.cqhdbl_cqh = cq->ncq_head;
nvme_put32(nvme, cq->ncq_hdbl, head.r);
}
mutex_exit(&cq->ncq_mutex);
return (completed);
}
static nvme_cmd_t *
nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp)
{
nvme_cq_t *cq = qp->nq_cq;
nvme_reg_cqhdbl_t head = { 0 };
nvme_cmd_t *cmd;
if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
DDI_SUCCESS)
dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
__func__);
mutex_enter(&cq->ncq_mutex);
if ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
head.b.cqhdbl_cqh = cq->ncq_head;
nvme_put32(nvme, cq->ncq_hdbl, head.r);
}
mutex_exit(&cq->ncq_mutex);
return (cmd);
}
static int
nvme_check_unknown_cmd_status(nvme_cmd_t *cmd)
{
nvme_cqe_t *cqe = &cmd->nc_cqe;
dev_err(cmd->nc_nvme->n_dip, CE_WARN,
"!unknown command status received: opc = %x, sqid = %d, cid = %d, "
"sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
if (cmd->nc_xfer != NULL)
bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
if (((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) &&
cmd->nc_nvme->n_strict_version) {
nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
}
return (EIO);
}
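/*
 * Vendor-specific status codes cannot be interpreted generically, so they
 * are reported like unknown status codes; unless told otherwise via
 * n_ignore_unknown_vendor_status we consider the controller dead when we
 * see one.
 */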
static int
nvme_check_vendor_cmd_status(nvme_cmd_t *cmd)
{
nvme_cqe_t *cqe = &cmd->nc_cqe;
dev_err(cmd->nc_nvme->n_dip, CE_WARN,
"!unknown command status received: opc = %x, sqid = %d, cid = %d, "
"sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) {
nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
}
return (EIO);
}
static int
nvme_check_integrity_cmd_status(nvme_cmd_t *cmd)
{
nvme_cqe_t *cqe = &cmd->nc_cqe;
switch (cqe->cqe_sf.sf_sc) {
case NVME_CQE_SC_INT_NVM_WRITE:
if (cmd->nc_xfer != NULL)
bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
return (EIO);
case NVME_CQE_SC_INT_NVM_READ:
if (cmd->nc_xfer != NULL)
bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
return (EIO);
default:
return (nvme_check_unknown_cmd_status(cmd));
}
}
static int
nvme_check_generic_cmd_status(nvme_cmd_t *cmd)
{
nvme_cqe_t *cqe = &cmd->nc_cqe;
switch (cqe->cqe_sf.sf_sc) {
case NVME_CQE_SC_GEN_SUCCESS:
return (0);
case NVME_CQE_SC_GEN_INV_OPC:
NVME_BUMP_STAT(cmd->nc_nvme, inv_cmd_err);
if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
"programming error: invalid opcode in cmd %p",
(void *)cmd);
}
return (EINVAL);
case NVME_CQE_SC_GEN_INV_FLD:
NVME_BUMP_STAT(cmd->nc_nvme, inv_field_err);
if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
"programming error: invalid field in cmd %p",
(void *)cmd);
}
return (EIO);
case NVME_CQE_SC_GEN_ID_CNFL:
dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
"cmd ID conflict in cmd %p", (void *)cmd);
return (0);
case NVME_CQE_SC_GEN_INV_NS:
NVME_BUMP_STAT(cmd->nc_nvme, inv_nsfmt_err);
if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
"programming error: invalid NS/format in cmd %p",
(void *)cmd);
}
return (EINVAL);
case NVME_CQE_SC_GEN_CMD_SEQ_ERR:
NVME_BUMP_STAT(cmd->nc_nvme, inv_cmdseq_err);
if ((cmd->nc_flags & NVME_CMD_F_DONTPANIC) == 0) {
dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
"programming error: command sequencing error %p",
(void *)cmd);
}
return (EINVAL);
case NVME_CQE_SC_GEN_NVM_LBA_RANGE:
dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
"LBA out of range in cmd %p", (void *)cmd);
return (0);
case NVME_CQE_SC_GEN_DATA_XFR_ERR:
NVME_BUMP_STAT(cmd->nc_nvme, data_xfr_err);
if (cmd->nc_xfer != NULL)
bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
return (EIO);
case NVME_CQE_SC_GEN_INTERNAL_ERR:
NVME_BUMP_STAT(cmd->nc_nvme, internal_err);
if (cmd->nc_xfer != NULL)
bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
return (EIO);
case NVME_CQE_SC_GEN_ABORT_REQUEST:
NVME_BUMP_STAT(cmd->nc_nvme, abort_rq_err);
return (ECANCELED);
case NVME_CQE_SC_GEN_ABORT_PWRLOSS:
NVME_BUMP_STAT(cmd->nc_nvme, abort_pwrloss_err);
nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
return (EIO);
case NVME_CQE_SC_GEN_ABORT_SQ_DEL:
NVME_BUMP_STAT(cmd->nc_nvme, abort_sq_del);
return (EIO);
case NVME_CQE_SC_GEN_NVM_CAP_EXC:
NVME_BUMP_STAT(cmd->nc_nvme, nvm_cap_exc);
if (cmd->nc_xfer != NULL)
bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
return (EIO);
case NVME_CQE_SC_GEN_NVM_NS_NOTRDY:
NVME_BUMP_STAT(cmd->nc_nvme, nvm_ns_notrdy);
if (cmd->nc_xfer != NULL)
bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
return (EIO);
case NVME_CQE_SC_GEN_NVM_FORMATTING:
if (!NVME_VERSION_ATLEAST(&cmd->nc_nvme->n_version, 1, 2))
return (nvme_check_unknown_cmd_status(cmd));
NVME_BUMP_STAT(cmd->nc_nvme, nvm_ns_formatting);
if (cmd->nc_xfer != NULL)
bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
return (EIO);
default:
return (nvme_check_unknown_cmd_status(cmd));
}
}
static int
nvme_check_specific_cmd_status(nvme_cmd_t *cmd)
{
nvme_cqe_t *cqe = &cmd->nc_cqe;
switch (cqe->cqe_sf.sf_sc) {
case NVME_CQE_SC_SPC_INV_CQ:
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE);
NVME_BUMP_STAT(cmd->nc_nvme, inv_cq_err);
return (EINVAL);
case NVME_CQE_SC_SPC_INV_QID:
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE ||
cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE ||
cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
NVME_BUMP_STAT(cmd->nc_nvme, inv_qid_err);
return (EINVAL);
case NVME_CQE_SC_SPC_MAX_QSZ_EXC:
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
NVME_BUMP_STAT(cmd->nc_nvme, max_qsz_exc);
return (EINVAL);
case NVME_CQE_SC_SPC_ABRT_CMD_EXC:
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT);
dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
"abort command limit exceeded in cmd %p", (void *)cmd);
return (0);
case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC:
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT);
dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
"async event request limit exceeded in cmd %p",
(void *)cmd);
return (0);
case NVME_CQE_SC_SPC_INV_INT_VECT:
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
NVME_BUMP_STAT(cmd->nc_nvme, inv_int_vect);
return (EINVAL);
case NVME_CQE_SC_SPC_INV_LOG_PAGE:
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE);
NVME_BUMP_STAT(cmd->nc_nvme, inv_log_page);
return (EINVAL);
case NVME_CQE_SC_SPC_INV_FORMAT:
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT ||
cmd->nc_sqe.sqe_opc == NVME_OPC_NS_MGMT);
NVME_BUMP_STAT(cmd->nc_nvme, inv_format);
if (cmd->nc_xfer != NULL)
bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
return (EINVAL);
case NVME_CQE_SC_SPC_INV_Q_DEL:
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
NVME_BUMP_STAT(cmd->nc_nvme, inv_q_del);
return (EINVAL);
case NVME_CQE_SC_SPC_NVM_CNFL_ATTR:
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT ||
cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
NVME_BUMP_STAT(cmd->nc_nvme, cnfl_attr);
if (cmd->nc_xfer != NULL)
bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
return (EINVAL);
case NVME_CQE_SC_SPC_NVM_INV_PROT:
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE ||
cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
NVME_BUMP_STAT(cmd->nc_nvme, inv_prot);
if (cmd->nc_xfer != NULL)
bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
return (EINVAL);
case NVME_CQE_SC_SPC_NVM_READONLY:
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
NVME_BUMP_STAT(cmd->nc_nvme, readonly);
if (cmd->nc_xfer != NULL)
bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
return (EROFS);
case NVME_CQE_SC_SPC_INV_FW_SLOT:
NVME_BUMP_STAT(cmd->nc_nvme, inv_fwslot);
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
return (EINVAL);
case NVME_CQE_SC_SPC_INV_FW_IMG:
NVME_BUMP_STAT(cmd->nc_nvme, inv_fwimg);
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
return (EINVAL);
case NVME_CQE_SC_SPC_FW_RESET:
NVME_BUMP_STAT(cmd->nc_nvme, fwact_creset);
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
return (0);
case NVME_CQE_SC_SPC_FW_NSSR:
NVME_BUMP_STAT(cmd->nc_nvme, fwact_nssr);
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
return (0);
case NVME_CQE_SC_SPC_FW_NEXT_RESET:
NVME_BUMP_STAT(cmd->nc_nvme, fwact_reset);
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
return (0);
case NVME_CQE_SC_SPC_FW_MTFA:
NVME_BUMP_STAT(cmd->nc_nvme, fwact_mtfa);
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
return (EAGAIN);
case NVME_CQE_SC_SPC_FW_PROHIBITED:
NVME_BUMP_STAT(cmd->nc_nvme, fwact_prohibited);
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
return (EINVAL);
case NVME_CQE_SC_SPC_FW_OVERLAP:
NVME_BUMP_STAT(cmd->nc_nvme, fw_overlap);
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_IMAGE_LOAD ||
cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
return (EINVAL);
case NVME_CQE_SC_SPC_NS_ATTACHED:
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NS_ATTACH);
NVME_BUMP_STAT(cmd->nc_nvme, ns_attached);
return (EEXIST);
case NVME_CQE_SC_SPC_NS_PRIV:
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NS_ATTACH);
NVME_BUMP_STAT(cmd->nc_nvme, ns_priv);
return (EACCES);
case NVME_CQE_SC_SPC_NS_NOT_ATTACH:
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NS_ATTACH);
NVME_BUMP_STAT(cmd->nc_nvme, ns_not_attached);
return (ENOENT);
case NVME_CQE_SC_SPC_INV_CTRL_LIST:
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NS_ATTACH);
NVME_BUMP_STAT(cmd->nc_nvme, inv_ctrl_list);
return (EINVAL);
case NVME_CQE_SC_SPC_ANA_ATTACH:
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NS_ATTACH);
NVME_BUMP_STAT(cmd->nc_nvme, ana_attach);
return (EIO);
case NVME_CQE_SC_SPC_NS_ATTACH_LIM:
ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NS_ATTACH);
NVME_BUMP_STAT(cmd->nc_nvme, ns_attach_lim);
return (EOVERFLOW);
default:
return (nvme_check_unknown_cmd_status(cmd));
}
}
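/*
 * Translate an NVMe completion status into an errno, dispatching on the
 * status code type. A dead controller always yields EIO without further
 * inspection, since its completion data cannot be trusted.
 */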
static inline int
nvme_check_cmd_status(nvme_cmd_t *cmd)
{
nvme_cqe_t *cqe = &cmd->nc_cqe;
if (cmd->nc_nvme->n_dead)
return (EIO);
if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
return (0);
if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC)
return (nvme_check_generic_cmd_status(cmd));
else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC)
return (nvme_check_specific_cmd_status(cmd));
else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY)
return (nvme_check_integrity_cmd_status(cmd));
else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR)
return (nvme_check_vendor_cmd_status(cmd));
return (nvme_check_unknown_cmd_status(cmd));
}
static boolean_t
nvme_check_cmd_status_ioctl(nvme_cmd_t *cmd, nvme_ioctl_common_t *ioc)
{
nvme_cqe_t *cqe = &cmd->nc_cqe;
nvme_t *nvme = cmd->nc_nvme;
if (nvme->n_dead) {
return (nvme_ioctl_error(ioc, nvme->n_dead_status, 0, 0));
}
if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
return (B_TRUE);
if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC) {
(void) nvme_check_generic_cmd_status(cmd);
} else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) {
(void) nvme_check_specific_cmd_status(cmd);
} else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY) {
(void) nvme_check_integrity_cmd_status(cmd);
} else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR) {
(void) nvme_check_vendor_cmd_status(cmd);
} else {
(void) nvme_check_unknown_cmd_status(cmd);
}
return (nvme_ioctl_error(ioc, NVME_IOCTL_E_CTRL_ERROR,
cqe->cqe_sf.sf_sct, cqe->cqe_sf.sf_sc));
}
static int
nvme_abort_cmd(nvme_cmd_t *cmd, const uint32_t sec)
{
nvme_t *nvme = cmd->nc_nvme;
nvme_cmd_t *abort_cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
nvme_abort_cmd_t ac = { 0 };
int ret = 0;
sema_p(&nvme->n_abort_sema);
ac.b.ac_cid = cmd->nc_sqe.sqe_cid;
ac.b.ac_sqid = cmd->nc_sqid;
abort_cmd->nc_sqid = 0;
abort_cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT;
abort_cmd->nc_callback = nvme_wakeup_cmd;
abort_cmd->nc_sqe.sqe_cdw10 = ac.r;
mutex_exit(&cmd->nc_mutex);
nvme_admin_cmd(abort_cmd, MAX(nvme_abort_cmd_timeout, sec));
mutex_enter(&cmd->nc_mutex);
sema_v(&nvme->n_abort_sema);
if (abort_cmd->nc_state == NVME_CMD_LOST) {
dev_err(nvme->n_dip, CE_WARN,
"!ABORT of command %d/%d timed out",
cmd->nc_sqe.sqe_cid, cmd->nc_sqid);
NVME_BUMP_STAT(nvme, abort_timeout);
ret = EIO;
} else if ((ret = nvme_check_cmd_status(abort_cmd)) != 0) {
dev_err(nvme->n_dip, CE_WARN,
"!ABORT of command %d/%d "
"failed with sct = %x, sc = %x",
cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
abort_cmd->nc_cqe.cqe_sf.sf_sct,
abort_cmd->nc_cqe.cqe_sf.sf_sc);
NVME_BUMP_STAT(nvme, abort_failed);
} else {
boolean_t success = ((abort_cmd->nc_cqe.cqe_dw0 & 1) == 0);
dev_err(nvme->n_dip, CE_WARN,
"!ABORT of command %d/%d %ssuccessful",
cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
success ? "" : "un");
if (success) {
NVME_BUMP_STAT(nvme, abort_successful);
} else {
NVME_BUMP_STAT(nvme, abort_unsuccessful);
}
}
nvme_free_cmd(abort_cmd);
return (ret);
}
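/*
 * Wait for a command to complete for up to sec seconds (the caller has
 * already folded in the time reserved by commands queued ahead of this
 * one). On timeout, check for obvious controller failure first; otherwise
 * poll the admin completion queue once in case an interrupt was missed,
 * and finally try to abort the command. A command that can neither
 * complete nor be aborted is handed to nvme_lost_cmd().
 */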
static void
nvme_wait_cmd(nvme_cmd_t *cmd, uint_t sec)
{
nvme_t *nvme = cmd->nc_nvme;
nvme_reg_csts_t csts;
uint_t ccnt;
ASSERT(mutex_owned(&cmd->nc_mutex));
while (cmd->nc_state != NVME_CMD_COMPLETED) {
clock_t timeout = ddi_get_lbolt() +
drv_usectohz((long)sec * MICROSEC);
if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1) {
if (cmd->nc_state != NVME_CMD_QUEUED)
break;
}
}
if (cmd->nc_state == NVME_CMD_COMPLETED) {
DTRACE_PROBE1(nvme_admin_cmd_completed, nvme_cmd_t *, cmd);
nvme_admin_stat_cmd(nvme, cmd);
return;
}
DTRACE_PROBE1(nvme_admin_cmd_timeout, nvme_cmd_t *, cmd);
csts.r = nvme_get32(nvme, NVME_REG_CSTS);
dev_err(nvme->n_dip, CE_WARN, "!command %d/%d timeout, "
"OPC = %x, CFS = %d", cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
cmd->nc_sqe.sqe_opc, csts.b.csts_cfs);
NVME_BUMP_STAT(nvme, cmd_timeout);
if (csts.b.csts_cfs ||
nvme_check_regs_hdl(nvme) ||
nvme_check_dma_hdl(cmd->nc_dma) ||
cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) {
nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
nvme_lost_cmd(nvme, cmd);
return;
}
mutex_exit(&cmd->nc_mutex);
ccnt = nvme_process_iocq(nvme, nvme->n_adminq->nq_cq);
mutex_enter(&cmd->nc_mutex);
if (ccnt > 0) {
dev_err(nvme->n_dip, CE_WARN, "!possible missed interrupt "
"(%u completions found on admin CQ at timeout)", ccnt);
}
if (cmd->nc_state == NVME_CMD_COMPLETED) {
DTRACE_PROBE1(nvme_admin_cmd_completed, nvme_cmd_t *,
cmd);
nvme_admin_stat_cmd(nvme, cmd);
return;
}
if (nvme_abort_cmd(cmd, sec) == 0) {
while (cmd->nc_state != NVME_CMD_COMPLETED)
cv_wait(&cmd->nc_cv, &cmd->nc_mutex);
return;
}
VERIFY(cmd->nc_nvme->n_dead);
nvme_lost_cmd(nvme, cmd);
}
static void
nvme_wakeup_cmd(void *arg)
{
nvme_cmd_t *cmd = arg;
ASSERT(cmd->nc_flags & NVME_CMD_F_USELOCK);
mutex_enter(&cmd->nc_mutex);
cmd->nc_state = NVME_CMD_COMPLETED;
cv_signal(&cmd->nc_cv);
mutex_exit(&cmd->nc_mutex);
}
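/*
 * Handle the completion of an asynchronous event request. The controller
 * posts a completion only when it has an event to report, so the event
 * dword is copied out and the same command is immediately re-submitted to
 * re-arm the mechanism; the cmd pointer must not be touched afterwards.
 * The relevant log page is then fetched and the event logged or acted upon.
 */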
static void
nvme_async_event_task(void *arg)
{
nvme_cmd_t *cmd = arg;
nvme_t *nvme = cmd->nc_nvme;
nvme_error_log_entry_t *error_log = NULL;
nvme_health_log_t *health_log = NULL;
nvme_nschange_list_t *nslist = NULL;
size_t logsize = 0;
nvme_async_event_t event;
if (nvme_check_cmd_status(cmd) != 0) {
dev_err(cmd->nc_nvme->n_dip, CE_WARN,
"!async event request returned failure, sct = 0x%x, "
"sc = 0x%x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct,
cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr,
cmd->nc_cqe.cqe_sf.sf_m);
if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) {
nvme_ctrl_mark_dead(cmd->nc_nvme, B_FALSE);
}
if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_OPC &&
cmd->nc_cqe.cqe_sf.sf_dnr == 1) {
nvme->n_async_event_supported = B_FALSE;
}
nvme_free_cmd(cmd);
return;
}
event.r = cmd->nc_cqe.cqe_dw0;
bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t));
nvme_submit_admin_cmd(nvme->n_adminq, cmd, NULL);
cmd = NULL;
switch (event.b.ae_type) {
case NVME_ASYNC_TYPE_ERROR:
if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) {
if (!nvme_get_logpage_int(nvme, B_FALSE,
(void **)&error_log, &logsize,
NVME_LOGPAGE_ERROR)) {
return;
}
} else {
dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
"async event reply: type=0x%x logpage=0x%x",
event.b.ae_type, event.b.ae_logpage);
NVME_BUMP_STAT(nvme, wrong_logpage);
return;
}
switch (event.b.ae_info) {
case NVME_ASYNC_ERROR_INV_SQ:
dev_err(nvme->n_dip, CE_PANIC, "programming error: "
"invalid submission queue");
return;
case NVME_ASYNC_ERROR_INV_DBL:
dev_err(nvme->n_dip, CE_PANIC, "programming error: "
"invalid doorbell write value");
return;
case NVME_ASYNC_ERROR_DIAGFAIL:
dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure");
nvme_ctrl_mark_dead(nvme, B_FALSE);
NVME_BUMP_STAT(nvme, diagfail_event);
break;
case NVME_ASYNC_ERROR_PERSISTENT:
dev_err(nvme->n_dip, CE_WARN, "!persistent internal "
"device error");
nvme_ctrl_mark_dead(nvme, B_FALSE);
NVME_BUMP_STAT(nvme, persistent_event);
break;
case NVME_ASYNC_ERROR_TRANSIENT:
dev_err(nvme->n_dip, CE_WARN, "!transient internal "
"device error");
NVME_BUMP_STAT(nvme, transient_event);
break;
case NVME_ASYNC_ERROR_FW_LOAD:
dev_err(nvme->n_dip, CE_WARN,
"!firmware image load error");
NVME_BUMP_STAT(nvme, fw_load_event);
break;
}
break;
case NVME_ASYNC_TYPE_HEALTH:
if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) {
if (!nvme_get_logpage_int(nvme, B_FALSE,
(void **)&health_log, &logsize,
NVME_LOGPAGE_HEALTH)) {
return;
}
} else {
dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
"type=0x%x logpage=0x%x", event.b.ae_type,
event.b.ae_logpage);
NVME_BUMP_STAT(nvme, wrong_logpage);
return;
}
switch (event.b.ae_info) {
case NVME_ASYNC_HEALTH_RELIABILITY:
dev_err(nvme->n_dip, CE_WARN,
"!device reliability compromised");
NVME_BUMP_STAT(nvme, reliability_event);
break;
case NVME_ASYNC_HEALTH_TEMPERATURE:
dev_err(nvme->n_dip, CE_WARN,
"!temperature above threshold");
NVME_BUMP_STAT(nvme, temperature_event);
break;
case NVME_ASYNC_HEALTH_SPARE:
dev_err(nvme->n_dip, CE_WARN,
"!spare space below threshold");
NVME_BUMP_STAT(nvme, spare_event);
break;
}
break;
case NVME_ASYNC_TYPE_NOTICE:
switch (event.b.ae_info) {
case NVME_ASYNC_NOTICE_NS_CHANGE:
if (event.b.ae_logpage != NVME_LOGPAGE_NSCHANGE) {
dev_err(nvme->n_dip, CE_WARN,
"!wrong logpage in async event reply: "
"type=0x%x logpage=0x%x",
event.b.ae_type, event.b.ae_logpage);
NVME_BUMP_STAT(nvme, wrong_logpage);
break;
}
dev_err(nvme->n_dip, CE_NOTE,
"namespace attribute change event, "
"logpage = 0x%x", event.b.ae_logpage);
NVME_BUMP_STAT(nvme, notice_event);
if (!nvme_get_logpage_int(nvme, B_FALSE,
(void **)&nslist, &logsize,
NVME_LOGPAGE_NSCHANGE)) {
break;
}
if (nslist->nscl_ns[0] == UINT32_MAX) {
dev_err(nvme->n_dip, CE_CONT,
"more than %u namespaces have changed.\n",
NVME_NSCHANGE_LIST_SIZE);
break;
}
nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
for (uint_t i = 0; i < NVME_NSCHANGE_LIST_SIZE; i++) {
uint32_t nsid = nslist->nscl_ns[i];
nvme_namespace_t *ns;
if (nsid == 0)
break;
dev_err(nvme->n_dip, CE_NOTE,
"!namespace nvme%d/%u has changed.",
ddi_get_instance(nvme->n_dip), nsid);
if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS)
continue;
ns = nvme_nsid2ns(nvme, nsid);
if (ns->ns_state <= NVME_NS_STATE_NOT_IGNORED)
continue;
nvme_mgmt_bd_start(nvme);
bd_state_change(ns->ns_bd_hdl);
nvme_mgmt_bd_end(nvme);
}
nvme_mgmt_unlock(nvme);
break;
case NVME_ASYNC_NOTICE_FW_ACTIVATE:
dev_err(nvme->n_dip, CE_NOTE,
"firmware activation starting, "
"logpage = 0x%x", event.b.ae_logpage);
NVME_BUMP_STAT(nvme, notice_event);
break;
case NVME_ASYNC_NOTICE_TELEMETRY:
dev_err(nvme->n_dip, CE_NOTE,
"telemetry log changed, "
"logpage = 0x%x", event.b.ae_logpage);
NVME_BUMP_STAT(nvme, notice_event);
break;
case NVME_ASYNC_NOTICE_NS_ASYMM:
dev_err(nvme->n_dip, CE_NOTE,
"asymmetric namespace access change, "
"logpage = 0x%x", event.b.ae_logpage);
NVME_BUMP_STAT(nvme, notice_event);
break;
case NVME_ASYNC_NOTICE_LATENCYLOG:
dev_err(nvme->n_dip, CE_NOTE,
"predictable latency event aggregate log change, "
"logpage = 0x%x", event.b.ae_logpage);
NVME_BUMP_STAT(nvme, notice_event);
break;
case NVME_ASYNC_NOTICE_LBASTATUS:
dev_err(nvme->n_dip, CE_NOTE,
"LBA status information alert, "
"logpage = 0x%x", event.b.ae_logpage);
NVME_BUMP_STAT(nvme, notice_event);
break;
case NVME_ASYNC_NOTICE_ENDURANCELOG:
dev_err(nvme->n_dip, CE_NOTE,
"endurance group event aggregate log page change, "
"logpage = 0x%x", event.b.ae_logpage);
NVME_BUMP_STAT(nvme, notice_event);
break;
default:
dev_err(nvme->n_dip, CE_WARN,
"!unknown notice async event received, "
"info = 0x%x, logpage = 0x%x", event.b.ae_info,
event.b.ae_logpage);
NVME_BUMP_STAT(nvme, unknown_event);
break;
}
break;
case NVME_ASYNC_TYPE_VENDOR:
dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event "
"received, info = 0x%x, logpage = 0x%x", event.b.ae_info,
event.b.ae_logpage);
NVME_BUMP_STAT(nvme, vendor_event);
break;
default:
dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, "
"type = 0x%x, info = 0x%x, logpage = 0x%x", event.b.ae_type,
event.b.ae_info, event.b.ae_logpage);
NVME_BUMP_STAT(nvme, unknown_event);
break;
}
if (error_log != NULL)
kmem_free(error_log, logsize);
if (health_log != NULL)
kmem_free(health_log, logsize);
if (nslist != NULL)
kmem_free(nslist, logsize);
}
static void
nvme_admin_cmd(nvme_cmd_t *cmd, uint32_t sec)
{
uint32_t qtimeout;
ASSERT(cmd->nc_flags & NVME_CMD_F_USELOCK);
mutex_enter(&cmd->nc_mutex);
cmd->nc_timeout = sec;
nvme_submit_admin_cmd(cmd->nc_nvme->n_adminq, cmd, &qtimeout);
nvme_wait_cmd(cmd, sec + qtimeout);
mutex_exit(&cmd->nc_mutex);
}
static void
nvme_async_event(nvme_t *nvme)
{
nvme_cmd_t *cmd;
cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
cmd->nc_sqid = 0;
cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT;
cmd->nc_callback = nvme_async_event_task;
cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
nvme_submit_admin_cmd(nvme->n_adminq, cmd, NULL);
}
static boolean_t
nvme_no_blkdev_attached(nvme_t *nvme, uint32_t nsid)
{
ASSERT(nvme_mgmt_lock_held(nvme));
ASSERT3U(nsid, !=, 0);
if (nsid != NVME_NSID_BCAST) {
nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid);
return (ns->ns_state < NVME_NS_STATE_ATTACHED);
}
for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
if (ns->ns_state >= NVME_NS_STATE_ATTACHED) {
return (B_FALSE);
}
}
return (B_TRUE);
}
static boolean_t
nvme_format_nvm(nvme_t *nvme, nvme_ioctl_format_t *ioc)
{
nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
nvme_format_nvm_t format_nvm = { 0 };
boolean_t ret;
format_nvm.b.fm_lbaf = bitx32(ioc->nif_lbaf, 3, 0);
format_nvm.b.fm_ses = bitx32(ioc->nif_ses, 2, 0);
cmd->nc_sqid = 0;
cmd->nc_callback = nvme_wakeup_cmd;
cmd->nc_sqe.sqe_nsid = ioc->nif_common.nioc_nsid;
cmd->nc_sqe.sqe_opc = NVME_OPC_NVM_FORMAT;
cmd->nc_sqe.sqe_cdw10 = format_nvm.r;
cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
nvme_admin_cmd(cmd, nvme_format_cmd_timeout);
if (!nvme_check_cmd_status_ioctl(cmd, &ioc->nif_common) != 0) {
dev_err(nvme->n_dip, CE_WARN,
"!FORMAT failed with sct = %x, sc = %x",
cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
ret = B_FALSE;
goto fail;
}
ret = B_TRUE;
fail:
nvme_free_cmd(cmd);
return (ret);
}
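/*
 * Issue a GET LOG PAGE admin command. The transfer length is expressed to
 * the controller as a zero-based count of dwords (NUMD) whose low 16 bits
 * go into CDW10 and whose high bits go into CDW11, while the byte offset
 * into the log is split across CDW12 (low) and CDW13 (high).
 */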
static boolean_t
nvme_get_logpage(nvme_t *nvme, boolean_t user, nvme_ioctl_get_logpage_t *log,
void **buf)
{
nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
nvme_getlogpage_dw10_t dw10;
uint32_t offlo, offhi;
nvme_getlogpage_dw11_t dw11;
nvme_getlogpage_dw14_t dw14;
uint32_t ndw;
boolean_t ret = B_FALSE;
bzero(&dw10, sizeof (dw10));
bzero(&dw11, sizeof (dw11));
bzero(&dw14, sizeof (dw14));
cmd->nc_sqid = 0;
cmd->nc_callback = nvme_wakeup_cmd;
cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE;
cmd->nc_sqe.sqe_nsid = log->nigl_common.nioc_nsid;
if (user)
cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
ndw = (uint32_t)(log->nigl_len / 4);
ASSERT3U(ndw, >, 0);
ndw--;
dw10.b.lp_lid = bitx32(log->nigl_lid, 7, 0);
dw10.b.lp_lsp = bitx32(log->nigl_lsp, 6, 0);
dw10.b.lp_rae = bitx32(log->nigl_rae, 0, 0);
dw10.b.lp_lnumdl = bitx32(ndw, 15, 0);
dw11.b.lp_numdu = bitx32(ndw, 31, 16);
dw11.b.lp_lsi = bitx32(log->nigl_lsi, 15, 0);
offlo = bitx64(log->nigl_offset, 31, 0);
offhi = bitx64(log->nigl_offset, 63, 32);
dw14.b.lp_csi = bitx32(log->nigl_csi, 7, 0);
cmd->nc_sqe.sqe_cdw10 = dw10.r;
cmd->nc_sqe.sqe_cdw11 = dw11.r;
cmd->nc_sqe.sqe_cdw12 = offlo;
cmd->nc_sqe.sqe_cdw13 = offhi;
cmd->nc_sqe.sqe_cdw14 = dw14.r;
if (nvme_zalloc_dma(nvme, log->nigl_len, DDI_DMA_READ,
&nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
dev_err(nvme->n_dip, CE_WARN,
"!nvme_zalloc_dma failed for GET LOG PAGE");
ret = nvme_ioctl_error(&log->nigl_common,
NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
goto fail;
}
if (nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah) != 0) {
ret = nvme_ioctl_error(&log->nigl_common,
NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
goto fail;
}
nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
if (!nvme_check_cmd_status_ioctl(cmd, &log->nigl_common)) {
if (!user) {
dev_err(nvme->n_dip, CE_WARN,
"!GET LOG PAGE failed with sct = %x, sc = %x",
cmd->nc_cqe.cqe_sf.sf_sct,
cmd->nc_cqe.cqe_sf.sf_sc);
}
ret = B_FALSE;
goto fail;
}
*buf = kmem_alloc(log->nigl_len, KM_SLEEP);
bcopy(cmd->nc_dma->nd_memp, *buf, log->nigl_len);
ret = B_TRUE;
fail:
nvme_free_cmd(cmd);
return (ret);
}
static boolean_t
nvme_get_logpage_int(nvme_t *nvme, boolean_t user, void **buf, size_t *bufsize,
uint8_t lid)
{
const nvme_log_page_info_t *info = NULL;
nvme_ioctl_get_logpage_t log;
nvme_valid_ctrl_data_t data;
boolean_t bret;
bool var;
for (size_t i = 0; i < nvme_std_log_npages; i++) {
if (nvme_std_log_pages[i].nlpi_lid == lid &&
nvme_std_log_pages[i].nlpi_csi == NVME_CSI_NVM) {
info = &nvme_std_log_pages[i];
break;
}
}
if (info == NULL) {
return (B_FALSE);
}
data.vcd_vers = &nvme->n_version;
data.vcd_id = nvme->n_idctl;
bzero(&log, sizeof (log));
log.nigl_common.nioc_nsid = NVME_NSID_BCAST;
log.nigl_csi = info->nlpi_csi;
log.nigl_lid = info->nlpi_lid;
log.nigl_len = nvme_log_page_info_size(info, &data, &var);
if (log.nigl_len == 0 || var) {
return (B_FALSE);
}
bret = nvme_get_logpage(nvme, user, &log, buf);
if (!bret) {
return (B_FALSE);
}
*bufsize = log.nigl_len;
return (B_TRUE);
}
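/*
 * Issue an IDENTIFY admin command for the requested CNS (and controller ID,
 * where applicable). On success the data is returned in a freshly allocated
 * NVME_IDENTIFY_BUFSIZE buffer that the caller must free.
 */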
static boolean_t
nvme_identify(nvme_t *nvme, boolean_t user, nvme_ioctl_identify_t *ioc,
void **buf)
{
nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
boolean_t ret = B_FALSE;
nvme_identify_dw10_t dw10;
ASSERT3P(buf, !=, NULL);
bzero(&dw10, sizeof (dw10));
cmd->nc_sqid = 0;
cmd->nc_callback = nvme_wakeup_cmd;
cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY;
cmd->nc_sqe.sqe_nsid = ioc->nid_common.nioc_nsid;
dw10.b.id_cns = bitx32(ioc->nid_cns, 7, 0);
dw10.b.id_cntid = bitx32(ioc->nid_ctrlid, 15, 0);
cmd->nc_sqe.sqe_cdw10 = dw10.r;
if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ,
&nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
dev_err(nvme->n_dip, CE_WARN,
"!nvme_zalloc_dma failed for IDENTIFY");
ret = nvme_ioctl_error(&ioc->nid_common,
NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
goto fail;
}
if (cmd->nc_dma->nd_ncookie > 2) {
dev_err(nvme->n_dip, CE_WARN,
"!too many DMA cookies for IDENTIFY");
NVME_BUMP_STAT(nvme, too_many_cookies);
ret = nvme_ioctl_error(&ioc->nid_common,
NVME_IOCTL_E_BAD_PRP, 0, 0);
goto fail;
}
cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
if (cmd->nc_dma->nd_ncookie > 1) {
ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
&cmd->nc_dma->nd_cookie);
cmd->nc_sqe.sqe_dptr.d_prp[1] =
cmd->nc_dma->nd_cookie.dmac_laddress;
}
if (user)
cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
if (!nvme_check_cmd_status_ioctl(cmd, &ioc->nid_common)) {
dev_err(nvme->n_dip, CE_WARN,
"!IDENTIFY failed with sct = %x, sc = %x",
cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
ret = B_FALSE;
goto fail;
}
*buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP);
bcopy(cmd->nc_dma->nd_memp, *buf, NVME_IDENTIFY_BUFSIZE);
ret = B_TRUE;
fail:
nvme_free_cmd(cmd);
return (ret);
}
static boolean_t
nvme_identify_int(nvme_t *nvme, uint32_t nsid, uint8_t cns, void **buf)
{
nvme_ioctl_identify_t id;
bzero(&id, sizeof (nvme_ioctl_identify_t));
id.nid_common.nioc_nsid = nsid;
id.nid_cns = cns;
return (nvme_identify(nvme, B_FALSE, &id, buf));
}
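/*
 * Read back the currently configured number of I/O queues with a GET FEATURES
 * command; the controller reports the counts in completion queue dword 0.
 */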
static boolean_t
nvme_get_current_nqueues(nvme_t *nvme, nvme_nqueues_t *nq)
{
nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
nvme_get_features_dw10_t gf_dw10 = { 0 };
boolean_t ret = B_FALSE;
gf_dw10.b.gt_fid = NVME_FEAT_NQUEUES;
cmd->nc_sqid = 0;
cmd->nc_callback = nvme_wakeup_cmd;
cmd->nc_sqe.sqe_opc = NVME_OPC_GET_FEATURES;
cmd->nc_sqe.sqe_cdw10 = gf_dw10.r;
cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
if (nvme_check_cmd_status(cmd) != 0) {
dev_err(nvme->n_dip, CE_WARN,
"!GET FEATURES NQUEUES failed with sct = %x, sc = %x",
cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
goto fail;
}
nq->r = cmd->nc_cqe.cqe_dw0;
ret = B_TRUE;
fail:
nvme_free_cmd(cmd);
return (ret);
}
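/*
 * Issue a SET FEATURES admin command. Only the features the driver knows how
 * to handle (volatile write cache and number of queues) are accepted; the
 * result from completion queue dword 0 is returned through res.
 */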
static int
nvme_set_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature,
uint32_t val, uint32_t *res)
{
_NOTE(ARGUNUSED(nsid));
nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
int ret = EINVAL;
ASSERT(res != NULL);
cmd->nc_sqid = 0;
cmd->nc_callback = nvme_wakeup_cmd;
cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES;
cmd->nc_sqe.sqe_cdw10 = feature;
cmd->nc_sqe.sqe_cdw11 = val;
if (user)
cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
switch (feature) {
case NVME_FEAT_WRITE_CACHE:
if (!nvme->n_write_cache_present)
goto fail;
break;
case NVME_FEAT_NQUEUES:
break;
default:
goto fail;
}
nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
if ((ret = nvme_check_cmd_status(cmd)) != 0) {
dev_err(nvme->n_dip, CE_WARN,
"!SET FEATURES %d failed with sct = %x, sc = %x",
feature, cmd->nc_cqe.cqe_sf.sf_sct,
cmd->nc_cqe.cqe_sf.sf_sc);
goto fail;
}
*res = cmd->nc_cqe.cqe_dw0;
fail:
nvme_free_cmd(cmd);
return (ret);
}
static int
nvme_write_cache_set(nvme_t *nvme, boolean_t enable)
{
nvme_write_cache_t nwc = { 0 };
if (enable)
nwc.b.wc_wce = 1;
return (nvme_set_features(nvme, B_TRUE, 0, NVME_FEAT_WRITE_CACHE,
nwc.r, &nwc.r));
}
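/*
 * Negotiate the number of I/O submission and completion queues with the
 * controller. The counts in the feature value are zero-based, hence the
 * adjustments by one in both directions. The controller may grant fewer
 * queues than requested, so the cached counts are clamped to what was
 * actually allocated.
 */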
static int
nvme_set_nqueues(nvme_t *nvme)
{
nvme_nqueues_t nq = { 0 };
int ret;
if (nvme->n_completion_queues == -1)
nvme->n_completion_queues = nvme->n_intr_cnt;
nvme->n_completion_queues = MIN(nvme->n_completion_queues,
nvme->n_intr_cnt);
if (nvme->n_submission_queues == -1)
nvme->n_submission_queues = nvme->n_completion_queues;
nvme->n_completion_queues = MIN(nvme->n_completion_queues,
nvme->n_submission_queues);
ASSERT(nvme->n_submission_queues > 0);
ASSERT(nvme->n_completion_queues > 0);
nq.b.nq_nsq = nvme->n_submission_queues - 1;
nq.b.nq_ncq = nvme->n_completion_queues - 1;
ret = nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_NQUEUES, nq.r,
&nq.r);
if (ret == 0) {
nvme->n_submission_queues = MIN(nvme->n_submission_queues,
nq.b.nq_nsq + 1);
nvme->n_completion_queues = MIN(nvme->n_completion_queues,
nq.b.nq_ncq + 1);
}
return (ret);
}
static int
nvme_create_completion_queue(nvme_t *nvme, nvme_cq_t *cq)
{
nvme_cmd_t *cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
nvme_create_queue_dw10_t dw10 = { 0 };
nvme_create_cq_dw11_t c_dw11 = { 0 };
int ret;
dw10.b.q_qid = cq->ncq_id;
dw10.b.q_qsize = cq->ncq_nentry - 1;
c_dw11.b.cq_pc = 1;
c_dw11.b.cq_ien = 1;
c_dw11.b.cq_iv = cq->ncq_id % nvme->n_intr_cnt;
cmd->nc_sqid = 0;
cmd->nc_callback = nvme_wakeup_cmd;
cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE;
cmd->nc_sqe.sqe_cdw10 = dw10.r;
cmd->nc_sqe.sqe_cdw11 = c_dw11.r;
cmd->nc_sqe.sqe_dptr.d_prp[0] = cq->ncq_dma->nd_cookie.dmac_laddress;
nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
if ((ret = nvme_check_cmd_status(cmd)) != 0) {
dev_err(nvme->n_dip, CE_WARN,
"!CREATE CQUEUE failed with sct = %x, sc = %x",
cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
}
nvme_free_cmd(cmd);
return (ret);
}
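/*
 * Create an I/O submission queue, creating the associated completion queue
 * first if it doesn't exist yet. When there are more submission queues than
 * completion queues, a completion queue is shared among several submission
 * queues and will already have been created by an earlier call.
 */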
static int
nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx)
{
nvme_cq_t *cq = qp->nq_cq;
nvme_cmd_t *cmd;
nvme_create_queue_dw10_t dw10 = { 0 };
nvme_create_sq_dw11_t s_dw11 = { 0 };
int ret;
if (idx <= cq->ncq_id &&
nvme_create_completion_queue(nvme, cq) != DDI_SUCCESS)
return (DDI_FAILURE);
dw10.b.q_qid = idx;
dw10.b.q_qsize = qp->nq_nentry - 1;
s_dw11.b.sq_pc = 1;
s_dw11.b.sq_cqid = cq->ncq_id;
cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
cmd->nc_sqid = 0;
cmd->nc_callback = nvme_wakeup_cmd;
cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE;
cmd->nc_sqe.sqe_cdw10 = dw10.r;
cmd->nc_sqe.sqe_cdw11 = s_dw11.r;
cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress;
nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
if ((ret = nvme_check_cmd_status(cmd)) != 0) {
dev_err(nvme->n_dip, CE_WARN,
"!CREATE SQUEUE failed with sct = %x, sc = %x",
cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
}
nvme_free_cmd(cmd);
return (ret);
}
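/*
 * Disable the controller by clearing CC.EN and wait for CSTS.RDY to drop.
 * CAP.TO is in units of 500 ms and we poll in 50 ms steps, hence the loop
 * bound of n_timeout * 10. In the quiesce path we may not block, so we busy
 * wait with drv_usecwait() instead of delay().
 */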
static boolean_t
nvme_reset(nvme_t *nvme, boolean_t quiesce)
{
nvme_reg_csts_t csts;
int i;
if (nvme_ctrl_is_gone(nvme)) {
return (B_FALSE);
}
nvme_put32(nvme, NVME_REG_CC, 0);
csts.r = nvme_get32(nvme, NVME_REG_CSTS);
if (csts.b.csts_rdy == 1) {
nvme_put32(nvme, NVME_REG_CC, 0);
for (i = 0; i < nvme->n_timeout * 10; i++) {
csts.r = nvme_get32(nvme, NVME_REG_CSTS);
if (csts.b.csts_rdy == 0)
break;
if (quiesce) {
drv_usecwait(50000);
} else {
delay(drv_usectohz(50000));
}
}
}
nvme_put32(nvme, NVME_REG_AQA, 0);
nvme_put32(nvme, NVME_REG_ASQ, 0);
nvme_put32(nvme, NVME_REG_ACQ, 0);
csts.r = nvme_get32(nvme, NVME_REG_CSTS);
return (csts.b.csts_rdy == 0 ? B_TRUE : B_FALSE);
}
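/*
 * Ask the controller to perform a normal shutdown by setting CC.SHN and give
 * it up to a second to report shutdown complete in CSTS.SHST. As with reset,
 * the quiesce path must busy wait instead of sleeping.
 */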
static void
nvme_shutdown(nvme_t *nvme, boolean_t quiesce)
{
nvme_reg_cc_t cc;
nvme_reg_csts_t csts;
int i;
if (nvme_ctrl_is_gone(nvme)) {
return;
}
cc.r = nvme_get32(nvme, NVME_REG_CC);
cc.b.cc_shn = NVME_CC_SHN_NORMAL;
nvme_put32(nvme, NVME_REG_CC, cc.r);
for (i = 0; i < 10; i++) {
csts.r = nvme_get32(nvme, NVME_REG_CSTS);
if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE)
break;
if (quiesce) {
drv_usecwait(100000);
} else {
delay(drv_usectohz(100000));
}
}
}
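/*
 * Return the length of a fixed-width, space-padded string, not counting the
 * trailing spaces. Note that this assumes the string contains at least one
 * non-space character.
 */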
static size_t
nvme_strlen(const char *str, size_t len)
{
if (len == 0)
return (0);
while (str[--len] == ' ')
;
return (++len);
}
static void
nvme_config_min_block_size(nvme_t *nvme, char *model, char *val)
{
ulong_t bsize = 0;
char *msg = "";
if (ddi_strtoul(val, NULL, 0, &bsize) != 0)
goto err;
if (!ISP2(bsize)) {
msg = ": not a power of 2";
goto err;
}
if (bsize < NVME_DEFAULT_MIN_BLOCK_SIZE) {
msg = ": too low";
goto err;
}
nvme->n_min_block_size = bsize;
return;
err:
dev_err(nvme->n_dip, CE_WARN,
"!nvme-config-list: ignoring invalid min-phys-block-size '%s' "
"for model '%s'%s", val, model, msg);
nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
}
static void
nvme_config_boolean(nvme_t *nvme, char *model, char *name, char *val,
boolean_t *b)
{
if (strcmp(val, "on") == 0 ||
strcmp(val, "true") == 0)
*b = B_TRUE;
else if (strcmp(val, "off") == 0 ||
strcmp(val, "false") == 0)
*b = B_FALSE;
else
dev_err(nvme->n_dip, CE_WARN,
"!nvme-config-list: invalid value for %s '%s'"
" for model '%s', ignoring", name, val, model);
}
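/*
 * Apply per-device overrides from the "nvme-config-list" driver property.
 * The property is a list of <model>/<fwrev>/<name-value-list> string
 * triplets; the firmware revision may be empty or a comma-separated list,
 * and the name-value list is a comma-separated list of name:value pairs.
 */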
static void
nvme_config_list(nvme_t *nvme)
{
char **config_list;
uint_t nelem;
int rv;
rv = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nvme->n_dip,
DDI_PROP_DONTPASS, "nvme-config-list", &config_list, &nelem);
if (rv != DDI_PROP_SUCCESS) {
if (rv == DDI_PROP_CANNOT_DECODE) {
dev_err(nvme->n_dip, CE_WARN,
"!nvme-config-list: cannot be decoded");
}
return;
}
if ((nelem % 3) != 0) {
dev_err(nvme->n_dip, CE_WARN, "!nvme-config-list: must be "
"triplets of <model>/<fwrev>/<name-value-list> strings ");
goto out;
}
for (uint_t i = 0; i < nelem; i += 3) {
char *model = config_list[i];
char *fwrev = config_list[i + 1];
char *nvp, *save_nv;
size_t id_model_len, id_fwrev_len;
id_model_len = nvme_strlen(nvme->n_idctl->id_model,
sizeof (nvme->n_idctl->id_model));
if (strlen(model) != id_model_len)
continue;
if (strncmp(model, nvme->n_idctl->id_model, id_model_len) != 0)
continue;
id_fwrev_len = nvme_strlen(nvme->n_idctl->id_fwrev,
sizeof (nvme->n_idctl->id_fwrev));
if (strlen(fwrev) != 0) {
boolean_t match = B_FALSE;
char *fwr, *last_fw;
for (fwr = strtok_r(fwrev, ",", &last_fw);
fwr != NULL;
fwr = strtok_r(NULL, ",", &last_fw)) {
if (strlen(fwr) != id_fwrev_len)
continue;
if (strncmp(fwr, nvme->n_idctl->id_fwrev,
id_fwrev_len) == 0)
match = B_TRUE;
}
if (!match)
continue;
}
for (nvp = strtok_r(config_list[i + 2], ",", &save_nv);
nvp != NULL; nvp = strtok_r(NULL, ",", &save_nv)) {
char *name = nvp;
char *val = strchr(nvp, ':');
if (val == NULL || name == val) {
dev_err(nvme->n_dip, CE_WARN,
"!nvme-config-list: <name-value-list> "
"for model '%s' is malformed", model);
goto out;
}
*val++ = '\0';
if (strcmp(name, "ignore-unknown-vendor-status") == 0) {
nvme_config_boolean(nvme, model, name, val,
&nvme->n_ignore_unknown_vendor_status);
} else if (strcmp(name, "min-phys-block-size") == 0) {
nvme_config_min_block_size(nvme, model, val);
} else if (strcmp(name, "volatile-write-cache") == 0) {
nvme_config_boolean(nvme, model, name, val,
&nvme->n_write_cache_enabled);
} else {
dev_err(nvme->n_dip, CE_WARN,
"!nvme-config-list: unknown config '%s' "
"for model '%s', ignoring", name, model);
}
}
}
out:
ddi_prop_free(config_list);
}
static void
nvme_prepare_devid(nvme_t *nvme, uint32_t nsid)
{
char model[sizeof (nvme->n_idctl->id_model) + 1];
char serial[sizeof (nvme->n_idctl->id_serial) + 1];
bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
bcopy(nvme->n_idctl->id_serial, serial,
sizeof (nvme->n_idctl->id_serial));
model[sizeof (nvme->n_idctl->id_model)] = '\0';
serial[sizeof (nvme->n_idctl->id_serial)] = '\0';
nvme_nsid2ns(nvme, nsid)->ns_devid = kmem_asprintf("%4X-%s-%s-%X",
nvme->n_idctl->id_vid, model, serial, nsid);
}
static nvme_identify_nsid_list_t *
nvme_update_nsid_list(nvme_t *nvme, int cns)
{
nvme_identify_nsid_list_t *nslist;
if (nvme_identify_int(nvme, 0, cns, (void **)&nslist))
return (nslist);
return (NULL);
}
nvme_namespace_t *
nvme_nsid2ns(nvme_t *nvme, uint32_t nsid)
{
ASSERT3U(nsid, !=, 0);
ASSERT3U(nsid, <=, nvme->n_namespace_count);
return (&nvme->n_ns[nsid - 1]);
}
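/*
 * Determine whether a namespace is allocated on the controller. If namespace
 * management is supported we consult the allocated namespace ID list;
 * otherwise every namespace is considered allocated. The list is sorted and
 * zero-filled, so a zero entry marks its end.
 */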
static boolean_t
nvme_allocated_ns(nvme_namespace_t *ns)
{
nvme_t *nvme = ns->ns_nvme;
uint32_t i;
ASSERT(nvme_mgmt_lock_held(nvme));
if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2) &&
nvme->n_idctl->id_oacs.oa_nsmgmt != 0) {
nvme_identify_nsid_list_t *nslist = nvme_update_nsid_list(nvme,
NVME_IDENTIFY_NSID_ALLOC_LIST);
boolean_t found = B_FALSE;
if (nslist == NULL)
return (B_TRUE);
for (i = 0; i < ARRAY_SIZE(nslist->nl_nsid); i++) {
if (nslist->nl_nsid[i] == 0)
break;
if (ns->ns_id == nslist->nl_nsid[i])
found = B_TRUE;
}
kmem_free(nslist, NVME_IDENTIFY_BUFSIZE);
return (found);
} else {
return (B_TRUE);
}
}
static boolean_t
nvme_active_ns(nvme_namespace_t *ns)
{
nvme_t *nvme = ns->ns_nvme;
uint64_t *ptr;
uint32_t i;
ASSERT(nvme_mgmt_lock_held(nvme));
if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) {
nvme_identify_nsid_list_t *nslist = nvme_update_nsid_list(nvme,
NVME_IDENTIFY_NSID_LIST);
boolean_t found = B_FALSE;
if (nslist == NULL)
return (B_TRUE);
for (i = 0; i < ARRAY_SIZE(nslist->nl_nsid); i++) {
if (nslist->nl_nsid[i] == 0)
break;
if (ns->ns_id == nslist->nl_nsid[i])
found = B_TRUE;
}
kmem_free(nslist, NVME_IDENTIFY_BUFSIZE);
return (found);
}
for (ptr = (uint64_t *)ns->ns_idns;
ptr != (uint64_t *)(ns->ns_idns + 1);
ptr++) {
if (*ptr != 0) {
return (B_TRUE);
}
}
return (B_FALSE);
}
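/*
 * (Re-)initialize the driver's state for a namespace from an IDENTIFY
 * NAMESPACE command: determine its allocation/active state, record block
 * size and count, pick the best-performing metadata-free LBA format, and
 * keep the count of attachable namespaces up to date.
 */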
static int
nvme_init_ns(nvme_t *nvme, uint32_t nsid)
{
nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid);
nvme_identify_nsid_t *idns;
nvme_ns_state_t orig_state;
ns->ns_nvme = nvme;
ASSERT(nvme_mgmt_lock_held(nvme));
if (!nvme_identify_int(nvme, nsid, NVME_IDENTIFY_NSID,
(void **)&idns)) {
dev_err(nvme->n_dip, CE_WARN,
"!failed to identify namespace %d", nsid);
return (DDI_FAILURE);
}
if (ns->ns_idns != NULL)
kmem_free(ns->ns_idns, sizeof (nvme_identify_nsid_t));
ns->ns_idns = idns;
ns->ns_id = nsid;
orig_state = ns->ns_state;
if (nvme_active_ns(ns)) {
if (orig_state == NVME_NS_STATE_ATTACHED) {
ns->ns_state = NVME_NS_STATE_ATTACHED;
} else {
ns->ns_state = NVME_NS_STATE_NOT_IGNORED;
}
} else if (nvme_allocated_ns(ns)) {
ns->ns_state = NVME_NS_STATE_ALLOCATED;
} else {
ns->ns_state = NVME_NS_STATE_UNALLOCATED;
}
ns->ns_block_count = idns->id_nsize;
ns->ns_block_size =
1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads;
ns->ns_best_block_size = ns->ns_block_size;
if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1))
bcopy(idns->id_eui64, ns->ns_eui64, sizeof (ns->ns_eui64));
if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2))
bcopy(idns->id_nguid, ns->ns_nguid, sizeof (ns->ns_nguid));
if (*(uint64_t *)ns->ns_eui64 == 0)
nvme_prepare_devid(nvme, ns->ns_id);
(void) snprintf(ns->ns_name, sizeof (ns->ns_name), "%u", ns->ns_id);
for (uint32_t j = 0, last_rp = 3; j <= idns->id_nlbaf; j++) {
if (idns->id_lbaf[j].lbaf_lbads == 0)
break;
if (idns->id_lbaf[j].lbaf_ms != 0)
continue;
if (idns->id_lbaf[j].lbaf_rp >= last_rp)
continue;
last_rp = idns->id_lbaf[j].lbaf_rp;
ns->ns_best_block_size =
1 << idns->id_lbaf[j].lbaf_lbads;
}
if (ns->ns_best_block_size < nvme->n_min_block_size)
ns->ns_best_block_size = nvme->n_min_block_size;
if (ns->ns_state >= NVME_NS_STATE_NOT_IGNORED) {
if (idns->id_dps.dp_pinfo) {
dev_err(nvme->n_dip, CE_WARN,
"!ignoring namespace %d, unsupported feature: "
"pinfo = %d", nsid, idns->id_dps.dp_pinfo);
ns->ns_state = NVME_NS_STATE_ACTIVE;
}
if (ns->ns_block_size < 512) {
dev_err(nvme->n_dip, CE_WARN,
"!ignoring namespace %d, unsupported block size "
"%"PRIu64, nsid, (uint64_t)ns->ns_block_size);
ns->ns_state = NVME_NS_STATE_ACTIVE;
}
}
if (orig_state == NVME_NS_STATE_ATTACHED &&
ns->ns_state != NVME_NS_STATE_ATTACHED) {
dev_err(nvme->n_dip, CE_PANIC, "namespace %u state "
"unexpectedly changed and removed blkdev support!", nsid);
}
if (orig_state > NVME_NS_STATE_ACTIVE) {
if (ns->ns_state < NVME_NS_STATE_NOT_IGNORED)
nvme->n_namespaces_attachable--;
} else if (ns->ns_state >= NVME_NS_STATE_NOT_IGNORED) {
nvme->n_namespaces_attachable++;
}
return (DDI_SUCCESS);
}
static boolean_t
nvme_bd_attach_ns(nvme_t *nvme, nvme_ioctl_common_t *com)
{
nvme_namespace_t *ns = nvme_nsid2ns(nvme, com->nioc_nsid);
int ret;
ASSERT(nvme_mgmt_lock_held(nvme));
if (!nvme_ns_state_check(ns, com, nvme_bd_attach_states)) {
return (B_FALSE);
}
if (ns->ns_bd_hdl == NULL) {
bd_ops_t ops = nvme_bd_ops;
if (!nvme->n_idctl->id_oncs.on_dset_mgmt)
ops.o_free_space = NULL;
ns->ns_bd_hdl = bd_alloc_handle(ns, &ops, &nvme->n_prp_dma_attr,
KM_SLEEP);
if (ns->ns_bd_hdl == NULL) {
dev_err(nvme->n_dip, CE_WARN, "!Failed to get blkdev "
"handle for namespace id %u", com->nioc_nsid);
return (nvme_ioctl_error(com,
NVME_IOCTL_E_BLKDEV_ATTACH, 0, 0));
}
}
nvme_mgmt_bd_start(nvme);
ret = bd_attach_handle(nvme->n_dip, ns->ns_bd_hdl);
nvme_mgmt_bd_end(nvme);
if (ret != DDI_SUCCESS) {
return (nvme_ioctl_error(com, NVME_IOCTL_E_BLKDEV_ATTACH,
0, 0));
}
ns->ns_state = NVME_NS_STATE_ATTACHED;
return (B_TRUE);
}
static boolean_t
nvme_bd_detach_ns(nvme_t *nvme, nvme_ioctl_common_t *com)
{
nvme_namespace_t *ns = nvme_nsid2ns(nvme, com->nioc_nsid);
int ret;
ASSERT(nvme_mgmt_lock_held(nvme));
if (!nvme_ns_state_check(ns, com, nvme_bd_detach_states)) {
return (B_FALSE);
}
nvme_mgmt_bd_start(nvme);
ASSERT3P(ns->ns_bd_hdl, !=, NULL);
ret = bd_detach_handle(ns->ns_bd_hdl);
nvme_mgmt_bd_end(nvme);
if (ret != DDI_SUCCESS) {
return (nvme_ioctl_error(com, NVME_IOCTL_E_BLKDEV_DETACH, 0,
0));
}
ns->ns_state = NVME_NS_STATE_NOT_IGNORED;
return (B_TRUE);
}
static void
nvme_rescan_ns(nvme_t *nvme, uint32_t nsid)
{
ASSERT(nvme_mgmt_lock_held(nvme));
ASSERT3U(nsid, !=, 0);
if (nsid != NVME_NSID_BCAST) {
nvme_namespace_t *ns = nvme_nsid2ns(nvme, nsid);
ASSERT3U(ns->ns_state, <, NVME_NS_STATE_ATTACHED);
(void) nvme_init_ns(nvme, nsid);
return;
}
for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
ASSERT3U(ns->ns_state, <, NVME_NS_STATE_ATTACHED);
(void) nvme_init_ns(nvme, i);
}
}
typedef struct nvme_quirk_table {
uint16_t nq_vendor_id;
uint16_t nq_device_id;
nvme_quirk_t nq_quirks;
} nvme_quirk_table_t;
static const nvme_quirk_table_t nvme_quirks[] = {
{ 0x1987, 0x5018, NVME_QUIRK_START_CID },
};
static void
nvme_detect_quirks(nvme_t *nvme)
{
for (uint_t i = 0; i < ARRAY_SIZE(nvme_quirks); i++) {
const nvme_quirk_table_t *nqt = &nvme_quirks[i];
if (nqt->nq_vendor_id == nvme->n_vendor_id &&
nqt->nq_device_id == nvme->n_device_id) {
nvme->n_quirks = nqt->nq_quirks;
return;
}
}
}
static void
nvme_enable_host_behavior(nvme_t *nvme)
{
nvme_host_behavior_t *hb;
nvme_ioc_cmd_args_t args = { NULL };
nvme_sqe_t sqe = {
.sqe_opc = NVME_OPC_SET_FEATURES,
.sqe_cdw10 = NVME_FEAT_HOST_BEHAVE,
.sqe_nsid = 0
};
nvme_ioctl_common_t err;
if (nvme->n_idctl->id_lpa.lp_da4s == 0)
return;
hb = kmem_zalloc(sizeof (nvme_host_behavior_t), KM_SLEEP);
hb->nhb_etdas = 1;
args.ica_sqe = &sqe;
args.ica_data = hb;
args.ica_data_len = sizeof (nvme_host_behavior_t);
args.ica_dma_flags = DDI_DMA_WRITE;
args.ica_copy_flags = FKIOCTL;
args.ica_timeout = nvme_admin_cmd_timeout;
if (!nvme_ioc_cmd(nvme, &err, &args)) {
dev_err(nvme->n_dip, CE_WARN, "failed to enable host behavior "
"feature: 0x%x/0x%x/0x%x", err.nioc_drv_err,
err.nioc_ctrl_sct, err.nioc_ctrl_sc);
}
kmem_free(hb, sizeof (nvme_host_behavior_t));
}
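/*
 * Bring up the controller: read the version and capability registers, reset
 * the device, set up the admin queue pair, enable the controller, identify
 * it, negotiate interrupt and queue counts, and finally create the I/O queue
 * pairs.
 */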
static int
nvme_init(nvme_t *nvme)
{
nvme_reg_cc_t cc = { 0 };
nvme_reg_aqa_t aqa = { 0 };
nvme_reg_asq_t asq = { 0 };
nvme_reg_acq_t acq = { 0 };
nvme_reg_cap_t cap;
nvme_reg_vs_t vs;
nvme_reg_csts_t csts;
nvme_nqueues_t nq;
int i = 0;
uint_t tq_threads;
char model[sizeof (nvme->n_idctl->id_model) + 1];
char *vendor, *product;
uint32_t nsid;
vs.r = nvme_get32(nvme, NVME_REG_VS);
nvme->n_version.v_major = vs.b.vs_mjr;
nvme->n_version.v_minor = vs.b.vs_mnr;
dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d\n",
nvme->n_version.v_major, nvme->n_version.v_minor);
if (nvme->n_version.v_major > nvme_version_major) {
dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.x",
nvme_version_major);
if (nvme->n_strict_version)
goto fail;
}
cap.r = nvme_get64(nvme, NVME_REG_CAP);
if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) {
dev_err(nvme->n_dip, CE_WARN,
"!NVM command set not supported by hardware");
goto fail;
}
nvme->n_nssr_supported = cap.b.cap_nssrs;
nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd;
nvme->n_timeout = cap.b.cap_to;
nvme->n_arbitration_mechanisms = cap.b.cap_ams;
nvme->n_cont_queues_reqd = cap.b.cap_cqr;
nvme->n_max_queue_entries = cap.b.cap_mqes + 1;
nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT),
cap.b.cap_mpsmax + 12);
nvme->n_pagesize = 1UL << (nvme->n_pageshift);
nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize;
nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize;
nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize;
nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1;
if (nvme_reset(nvme, B_FALSE) == B_FALSE) {
dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller");
ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
nvme->n_dead = B_TRUE;
goto fail;
}
if (nvme_create_cq_array(nvme, 1, nvme->n_admin_queue_len, 4) !=
DDI_SUCCESS) {
dev_err(nvme->n_dip, CE_WARN,
"!failed to pre-allocate admin completion queue");
goto fail;
}
if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0)
!= DDI_SUCCESS) {
dev_err(nvme->n_dip, CE_WARN,
"!unable to allocate admin qpair");
goto fail;
}
nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP);
nvme->n_ioq[0] = nvme->n_adminq;
if (nvme->n_quirks & NVME_QUIRK_START_CID)
nvme->n_adminq->nq_next_cmd++;
nvme->n_progress |= NVME_ADMIN_QUEUE;
(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
"admin-queue-len", nvme->n_admin_queue_len);
aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1;
asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress;
acq = nvme->n_adminq->nq_cq->ncq_dma->nd_cookie.dmac_laddress;
ASSERT((asq & (nvme->n_pagesize - 1)) == 0);
ASSERT((acq & (nvme->n_pagesize - 1)) == 0);
nvme_put32(nvme, NVME_REG_AQA, aqa.r);
nvme_put64(nvme, NVME_REG_ASQ, asq);
nvme_put64(nvme, NVME_REG_ACQ, acq);
cc.b.cc_ams = 0;
cc.b.cc_css = 0;
cc.b.cc_mps = nvme->n_pageshift - 12;
cc.b.cc_shn = 0;
cc.b.cc_en = 1;
cc.b.cc_iosqes = 6;
cc.b.cc_iocqes = 4;
nvme_put32(nvme, NVME_REG_CC, cc.r);
csts.r = nvme_get32(nvme, NVME_REG_CSTS);
if (csts.b.csts_rdy == 0) {
for (i = 0; i != nvme->n_timeout * 10; i++) {
delay(drv_usectohz(50000));
csts.r = nvme_get32(nvme, NVME_REG_CSTS);
if (csts.b.csts_cfs == 1) {
dev_err(nvme->n_dip, CE_WARN,
"!controller fatal status at init");
ddi_fm_service_impact(nvme->n_dip,
DDI_SERVICE_LOST);
nvme->n_dead = B_TRUE;
goto fail;
}
if (csts.b.csts_rdy == 1)
break;
}
}
if (csts.b.csts_rdy == 0) {
dev_err(nvme->n_dip, CE_WARN, "!controller not ready");
ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
nvme->n_dead = B_TRUE;
goto fail;
}
sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL);
if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1)
!= DDI_SUCCESS) &&
(nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1)
!= DDI_SUCCESS) &&
(nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1)
!= DDI_SUCCESS)) {
dev_err(nvme->n_dip, CE_WARN,
"!failed to set up initial interrupt");
goto fail;
}
nvme->n_dead_status = NVME_IOCTL_E_CTRL_DEAD;
if (!nvme_identify_int(nvme, 0, NVME_IDENTIFY_CTRL,
(void **)&nvme->n_idctl)) {
dev_err(nvme->n_dip, CE_WARN, "!failed to identify controller");
goto fail;
}
nvme_config_list(nvme);
bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
model[sizeof (nvme->n_idctl->id_model)] = '\0';
sata_split_model(model, &vendor, &product);
if (vendor == NULL)
nvme->n_vendor = strdup("NVMe");
else
nvme->n_vendor = strdup(vendor);
nvme->n_product = strdup(product);
nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT,
MIN(nvme->n_admin_queue_len / 10,
MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit)));
(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
"async-event-limit", nvme->n_async_event_limit);
nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1;
sema_destroy(&nvme->n_abort_sema);
sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL,
SEMA_DRIVER, NULL);
nvme->n_progress |= NVME_CTRL_LIMITS;
if (nvme->n_idctl->id_mdts == 0)
nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536;
else
nvme->n_max_data_transfer_size =
1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts);
nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1;
nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size,
(nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize));
nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size;
if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) ||
((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) ||
((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) ||
((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t)))
goto fail;
nvme->n_write_cache_present =
nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE;
(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
"volatile-write-cache-present",
nvme->n_write_cache_present ? 1 : 0);
if (!nvme->n_write_cache_present) {
nvme->n_write_cache_enabled = B_FALSE;
} else if (nvme_write_cache_set(nvme, nvme->n_write_cache_enabled)
!= 0) {
dev_err(nvme->n_dip, CE_WARN,
"!failed to %sable volatile write cache",
nvme->n_write_cache_enabled ? "en" : "dis");
nvme->n_write_cache_enabled = B_TRUE;
}
(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
"volatile-write-cache-enable",
nvme->n_write_cache_enabled ? 1 : 0);
nvme->n_namespace_count = nvme->n_idctl->id_nn;
if (nvme->n_namespace_count == 0) {
dev_err(nvme->n_dip, CE_WARN,
"!controllers without namespaces are not supported");
goto fail;
}
nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) *
nvme->n_namespace_count, KM_SLEEP);
if (nvme_ctrl_atleast(nvme, &nvme_vers_1v2) &&
nvme->n_idctl->id_oacs.oa_nsmgmt != 0) {
nsid = NVME_NSID_BCAST;
} else {
nsid = 1;
}
if (!nvme_identify_int(nvme, nsid, NVME_IDENTIFY_NSID,
(void **)&nvme->n_idcomns)) {
dev_err(nvme->n_dip, CE_WARN, "!failed to identify common "
"namespace information");
goto fail;
}
if (nvme_get_current_nqueues(nvme, &nq)) {
nvme->n_submission_queues_supported = nq.b.nq_nsq + 1;
nvme->n_completion_queues_supported = nq.b.nq_ncq + 1;
} else {
dev_err(nvme->n_dip, CE_WARN,
"!failed to retrieve number of supported queues");
goto fail;
}
if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX))
!= 0) {
const uint16_t nqueues = MIN(
nvme->n_submission_queues_supported,
nvme->n_completion_queues_supported);
nvme_release_interrupts(nvme);
if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX,
nqueues) != DDI_SUCCESS) &&
(nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI,
nqueues) != DDI_SUCCESS)) {
dev_err(nvme->n_dip, CE_WARN,
"!failed to set up MSI/MSI-X interrupts");
goto fail;
}
}
if (nvme_set_nqueues(nvme) != 0) {
dev_err(nvme->n_dip, CE_WARN,
"!failed to set number of I/O queues to %d",
nvme->n_intr_cnt);
goto fail;
}
kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *));
nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) *
(nvme->n_submission_queues + 1), KM_SLEEP);
nvme->n_ioq[0] = nvme->n_adminq;
ASSERT(nvme->n_submission_queues >= nvme->n_completion_queues);
nvme->n_ioq_count = nvme->n_submission_queues;
nvme->n_io_squeue_len =
MIN(nvme->n_io_squeue_len, nvme->n_max_queue_entries);
(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-squeue-len",
nvme->n_io_squeue_len);
if (nvme->n_submission_queues == nvme->n_completion_queues)
nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
nvme->n_io_squeue_len);
nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
nvme->n_max_queue_entries);
(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-cqueue-len",
nvme->n_io_cqueue_len);
uint_t ncpus_eff = ncpus;
if (ncpus_eff < 2)
ncpus_eff = (boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus;
tq_threads = ncpus_eff / nvme->n_completion_queues;
tq_threads = MAX(1, tq_threads);
if (nvme_create_cq_array(nvme, nvme->n_completion_queues + 1,
nvme->n_io_cqueue_len, tq_threads) != DDI_SUCCESS) {
dev_err(nvme->n_dip, CE_WARN,
"!failed to pre-allocate completion queues");
goto fail;
}
if (nvme->n_completion_queues + 1 < nvme->n_intr_cnt) {
nvme_release_interrupts(nvme);
if (nvme_setup_interrupts(nvme, nvme->n_intr_type,
nvme->n_completion_queues + 1) != DDI_SUCCESS) {
dev_err(nvme->n_dip, CE_WARN,
"!failed to reduce number of interrupts");
goto fail;
}
}
for (i = 1; i != nvme->n_ioq_count + 1; i++) {
if (nvme_alloc_qpair(nvme, nvme->n_io_squeue_len,
&nvme->n_ioq[i], i) != DDI_SUCCESS) {
dev_err(nvme->n_dip, CE_WARN,
"!unable to allocate I/O qpair %d", i);
goto fail;
}
if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) != 0) {
dev_err(nvme->n_dip, CE_WARN,
"!unable to create I/O qpair %d", i);
goto fail;
}
}
nvme_enable_host_behavior(nvme);
return (DDI_SUCCESS);
fail:
(void) nvme_reset(nvme, B_FALSE);
return (DDI_FAILURE);
}
static uint_t
nvme_intr(caddr_t arg1, caddr_t arg2)
{
nvme_t *nvme = (nvme_t *)arg1;
int inum = (int)(uintptr_t)arg2;
int ccnt = 0;
int qnum;
if (inum >= nvme->n_intr_cnt)
return (DDI_INTR_UNCLAIMED);
if (nvme->n_dead) {
return (nvme->n_intr_type == DDI_INTR_TYPE_FIXED ?
DDI_INTR_UNCLAIMED : DDI_INTR_CLAIMED);
}
for (qnum = inum;
qnum < nvme->n_cq_count && nvme->n_cq[qnum] != NULL;
qnum += nvme->n_intr_cnt) {
ccnt += nvme_process_iocq(nvme, nvme->n_cq[qnum]);
}
return (ccnt > 0 ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
}
static void
nvme_release_interrupts(nvme_t *nvme)
{
int i;
for (i = 0; i < nvme->n_intr_cnt; i++) {
if (nvme->n_inth[i] == NULL)
break;
if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
(void) ddi_intr_block_disable(&nvme->n_inth[i], 1);
else
(void) ddi_intr_disable(nvme->n_inth[i]);
(void) ddi_intr_remove_handler(nvme->n_inth[i]);
(void) ddi_intr_free(nvme->n_inth[i]);
}
kmem_free(nvme->n_inth, nvme->n_inth_sz);
nvme->n_inth = NULL;
nvme->n_inth_sz = 0;
nvme->n_progress &= ~NVME_INTERRUPTS;
}
static int
nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs)
{
int nintrs, navail, count;
int ret;
int i;
if (nvme->n_intr_types == 0) {
ret = ddi_intr_get_supported_types(nvme->n_dip,
&nvme->n_intr_types);
if (ret != DDI_SUCCESS) {
dev_err(nvme->n_dip, CE_WARN,
"!%s: ddi_intr_get_supported types failed",
__func__);
return (ret);
}
#ifdef __x86
if (get_hwenv() == HW_VMWARE)
nvme->n_intr_types &= ~DDI_INTR_TYPE_MSIX;
#endif
}
if ((nvme->n_intr_types & intr_type) == 0)
return (DDI_FAILURE);
ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs);
if (ret != DDI_SUCCESS) {
dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed",
__func__);
return (ret);
}
ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail);
if (ret != DDI_SUCCESS) {
dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed",
__func__);
return (ret);
}
if (navail > nqpairs)
navail = nqpairs;
nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail;
nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP);
ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail,
&count, 0);
if (ret != DDI_SUCCESS) {
dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed",
__func__);
goto fail;
}
nvme->n_intr_cnt = count;
ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri);
if (ret != DDI_SUCCESS) {
dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed",
__func__);
goto fail;
}
for (i = 0; i < count; i++) {
ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr,
(void *)nvme, (void *)(uintptr_t)i);
if (ret != DDI_SUCCESS) {
dev_err(nvme->n_dip, CE_WARN,
"!%s: ddi_intr_add_handler failed", __func__);
goto fail;
}
}
(void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap);
for (i = 0; i < count; i++) {
if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
ret = ddi_intr_block_enable(&nvme->n_inth[i], 1);
else
ret = ddi_intr_enable(nvme->n_inth[i]);
if (ret != DDI_SUCCESS) {
dev_err(nvme->n_dip, CE_WARN,
"!%s: enabling interrupt %d failed", __func__, i);
goto fail;
}
}
nvme->n_intr_type = intr_type;
nvme->n_progress |= NVME_INTERRUPTS;
return (DDI_SUCCESS);
fail:
nvme_release_interrupts(nvme);
return (ret);
}
static int
nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg)
{
_NOTE(ARGUNUSED(arg));
pci_ereport_post(dip, fm_error, NULL);
return (fm_error->fme_status);
}
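/*
 * DDI_DEVI_REMOVE_EVENT callback: the device is gone, so mark the controller
 * dead and fail every command still queued on the I/O and admin queues by
 * dispatching their completion callbacks.
 */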
static void
nvme_remove_callback(dev_info_t *dip, ddi_eventcookie_t cookie, void *a,
void *b)
{
nvme_t *nvme = a;
nvme_ctrl_mark_dead(nvme, B_TRUE);
for (uint_t i = 0; i < nvme->n_ioq_count + 1; i++) {
nvme_qpair_t *qp = nvme->n_ioq[i];
mutex_enter(&qp->nq_mutex);
for (size_t j = 0; j < qp->nq_nentry; j++) {
nvme_cmd_t *cmd = qp->nq_cmd[j];
nvme_cmd_t *u_cmd;
if (cmd == NULL) {
continue;
}
u_cmd = nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq,
cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent);
ASSERT3P(u_cmd, ==, cmd);
}
mutex_exit(&qp->nq_mutex);
}
}
static int
nvme_minor_comparator(const void *l, const void *r)
{
const nvme_minor_t *lm = l;
const nvme_minor_t *rm = r;
if (lm->nm_minor > rm->nm_minor) {
return (1);
} else if (lm->nm_minor < rm->nm_minor) {
return (-1);
} else {
return (0);
}
}
static void
nvme_minor_free(nvme_minor_t *minor)
{
if (minor->nm_minor > 0) {
ASSERT3S(minor->nm_minor, >=, NVME_OPEN_MINOR_MIN);
id_free(nvme_open_minors, minor->nm_minor);
minor->nm_minor = 0;
}
VERIFY0(list_link_active(&minor->nm_ctrl_lock.nli_node));
VERIFY0(list_link_active(&minor->nm_ns_lock.nli_node));
cv_destroy(&minor->nm_cv);
kmem_free(minor, sizeof (nvme_minor_t));
}
static nvme_minor_t *
nvme_minor_find_by_dev(dev_t dev)
{
id_t id = (id_t)getminor(dev);
nvme_minor_t search = { .nm_minor = id };
nvme_minor_t *ret;
mutex_enter(&nvme_open_minors_mutex);
ret = avl_find(&nvme_open_minors_avl, &search, NULL);
mutex_exit(&nvme_open_minors_mutex);
return (ret);
}
static int
nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
nvme_t *nvme;
int instance;
int nregs;
off_t regsize;
char name[32];
if (cmd != DDI_ATTACH)
return (DDI_FAILURE);
instance = ddi_get_instance(dip);
if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS)
return (DDI_FAILURE);
nvme = ddi_get_soft_state(nvme_state, instance);
ddi_set_driver_private(dip, nvme);
nvme->n_dip = dip;
if (pci_config_setup(dip, &nvme->n_pcicfg_handle) != DDI_SUCCESS) {
dev_err(dip, CE_WARN, "!failed to map PCI config space");
goto fail;
}
nvme->n_progress |= NVME_PCI_CONFIG;
nvme->n_vendor_id =
pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_VENID);
nvme->n_device_id =
pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_DEVID);
nvme->n_revision_id =
pci_config_get8(nvme->n_pcicfg_handle, PCI_CONF_REVID);
nvme->n_subsystem_device_id =
pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_SUBSYSID);
nvme->n_subsystem_vendor_id =
pci_config_get16(nvme->n_pcicfg_handle, PCI_CONF_SUBVENID);
nvme_detect_quirks(nvme);
if (ddi_get_eventcookie(nvme->n_dip, DDI_DEVI_REMOVE_EVENT,
&nvme->n_rm_cookie) == DDI_SUCCESS) {
if (ddi_add_event_handler(nvme->n_dip, nvme->n_rm_cookie,
nvme_remove_callback, nvme, &nvme->n_ev_rm_cb_id) !=
DDI_SUCCESS) {
goto fail;
}
} else {
nvme->n_ev_rm_cb_id = NULL;
}
mutex_init(&nvme->n_minor_mutex, NULL, MUTEX_DRIVER, NULL);
nvme->n_progress |= NVME_MUTEX_INIT;
nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE;
nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY,
dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ?
B_TRUE : B_FALSE;
nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN);
nvme->n_io_squeue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
DDI_PROP_DONTPASS, "io-squeue-len", NVME_DEFAULT_IO_QUEUE_LEN);
nvme->n_io_cqueue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
DDI_PROP_DONTPASS, "io-cqueue-len", 2 * NVME_DEFAULT_IO_QUEUE_LEN);
nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
DDI_PROP_DONTPASS, "async-event-limit",
NVME_DEFAULT_ASYNC_EVENT_LIMIT);
nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ?
B_TRUE : B_FALSE;
nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
DDI_PROP_DONTPASS, "min-phys-block-size",
NVME_DEFAULT_MIN_BLOCK_SIZE);
nvme->n_submission_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
DDI_PROP_DONTPASS, "max-submission-queues", -1);
nvme->n_completion_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
DDI_PROP_DONTPASS, "max-completion-queues", -1);
if (!ISP2(nvme->n_min_block_size) ||
(nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) {
dev_err(dip, CE_WARN, "!min-phys-block-size %s, "
"using default %d", ISP2(nvme->n_min_block_size) ?
"too low" : "not a power of 2",
NVME_DEFAULT_MIN_BLOCK_SIZE);
nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
}
if (nvme->n_submission_queues != -1 &&
(nvme->n_submission_queues < 1 ||
nvme->n_submission_queues > UINT16_MAX)) {
dev_err(dip, CE_WARN, "!\"submission-queues\"=%d is not "
"valid. Must be [1..%d]", nvme->n_submission_queues,
UINT16_MAX);
nvme->n_submission_queues = -1;
}
if (nvme->n_completion_queues != -1 &&
(nvme->n_completion_queues < 1 ||
nvme->n_completion_queues > UINT16_MAX)) {
dev_err(dip, CE_WARN, "!\"completion-queues\"=%d is not "
"valid. Must be [1..%d]", nvme->n_completion_queues,
UINT16_MAX);
nvme->n_completion_queues = -1;
}
if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN)
nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN;
else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN)
nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN;
if (nvme->n_io_squeue_len < NVME_MIN_IO_QUEUE_LEN)
nvme->n_io_squeue_len = NVME_MIN_IO_QUEUE_LEN;
if (nvme->n_io_cqueue_len < NVME_MIN_IO_QUEUE_LEN)
nvme->n_io_cqueue_len = NVME_MIN_IO_QUEUE_LEN;
if (nvme->n_async_event_limit < 1)
nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT;
nvme->n_reg_acc_attr = nvme_reg_acc_attr;
nvme->n_queue_dma_attr = nvme_queue_dma_attr;
nvme->n_prp_dma_attr = nvme_prp_dma_attr;
nvme->n_sgl_dma_attr = nvme_sgl_dma_attr;
nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip,
DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc);
if (nvme->n_fm_cap) {
if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE)
nvme->n_reg_acc_attr.devacc_attr_access =
DDI_FLAGERR_ACC;
if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) {
nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
}
if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
pci_ereport_setup(dip);
if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
ddi_fm_handler_register(dip, nvme_fm_errcb,
(void *)nvme);
}
nvme->n_progress |= NVME_FMA_INIT;
if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE ||
nregs < 2 ||
ddi_dev_regsize(dip, 1, &regsize) == DDI_FAILURE)
goto fail;
if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize,
&nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) {
dev_err(dip, CE_WARN, "!failed to map regset 1");
goto fail;
}
nvme->n_progress |= NVME_REGS_MAPPED;
if (!nvme_stat_init(nvme)) {
dev_err(dip, CE_WARN, "!failed to create device kstats");
goto fail;
}
nvme->n_progress |= NVME_STAT_INIT;
(void) snprintf(name, sizeof (name), "%s%d_prp_cache",
ddi_driver_name(dip), ddi_get_instance(dip));
nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t),
0, nvme_prp_dma_constructor, nvme_prp_dma_destructor,
NULL, (void *)nvme, NULL, 0);
if (nvme_init(nvme) != DDI_SUCCESS)
goto fail;
if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &nvme_ufm_ops,
&nvme->n_ufmh, nvme) != 0) {
dev_err(dip, CE_WARN, "!failed to initialize UFM subsystem");
goto fail;
}
mutex_init(&nvme->n_fwslot_mutex, NULL, MUTEX_DRIVER, NULL);
ddi_ufm_update(nvme->n_ufmh);
nvme->n_progress |= NVME_UFM_INIT;
nvme_mgmt_lock_init(&nvme->n_mgmt);
nvme_lock_init(&nvme->n_lock);
nvme->n_progress |= NVME_MGMT_INIT;
nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
boolean_t minor_logged = B_FALSE;
for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
nvme_lock_init(&ns->ns_lock);
ns->ns_progress |= NVME_NS_LOCK;
ns->ns_state = NVME_NS_STATE_ACTIVE;
if (nvme_init_ns(nvme, i) != 0) {
nvme_mgmt_unlock(nvme);
goto fail;
}
if (i > NVME_MINOR_MAX) {
if (!minor_logged) {
dev_err(dip, CE_WARN, "namespace minor "
"creation limited to the first %u "
"namespaces, device has %u",
NVME_MINOR_MAX, nvme->n_namespace_count);
minor_logged = B_TRUE;
}
continue;
}
if (ddi_create_minor_node(nvme->n_dip, ns->ns_name, S_IFCHR,
NVME_MINOR(ddi_get_instance(nvme->n_dip), i),
DDI_NT_NVME_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
nvme_mgmt_unlock(nvme);
dev_err(dip, CE_WARN,
"!failed to create minor node for namespace %d", i);
goto fail;
}
ns->ns_progress |= NVME_NS_MINOR;
}
nvme->n_progress |= NVME_NS_INIT;
if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
NVME_MINOR(ddi_get_instance(dip), 0), DDI_NT_NVME_NEXUS, 0) !=
DDI_SUCCESS) {
nvme_mgmt_unlock(nvme);
dev_err(dip, CE_WARN, "nvme_attach: "
"cannot create devctl minor node");
goto fail;
}
for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
nvme_ioctl_common_t com = { .nioc_nsid = i };
if (ns->ns_state < NVME_NS_STATE_NOT_IGNORED)
continue;
if (!nvme_bd_attach_ns(nvme, &com) && com.nioc_drv_err !=
NVME_IOCTL_E_UNSUP_ATTACH_NS) {
dev_err(nvme->n_dip, CE_WARN, "!failed to attach "
"namespace %d due to blkdev error (0x%x)", i,
com.nioc_drv_err);
}
}
nvme_mgmt_unlock(nvme);
nvme->n_async_event_supported = B_TRUE;
for (uint16_t i = 0; i < nvme->n_async_event_limit; i++) {
nvme_async_event(nvme);
}
return (DDI_SUCCESS);
fail:
if (nvme->n_dead)
return (DDI_SUCCESS);
(void) nvme_detach(dip, DDI_DETACH);
return (DDI_FAILURE);
}
static int
nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
int instance;
nvme_t *nvme;
if (cmd != DDI_DETACH)
return (DDI_FAILURE);
instance = ddi_get_instance(dip);
nvme = ddi_get_soft_state(nvme_state, instance);
if (nvme == NULL)
return (DDI_FAILURE);
ddi_remove_minor_node(dip, NULL);
if (nvme->n_ev_rm_cb_id != NULL) {
(void) ddi_remove_event_handler(nvme->n_ev_rm_cb_id);
}
nvme->n_ev_rm_cb_id = NULL;
taskq_wait(nvme_dead_taskq);
if (nvme->n_ns) {
for (uint32_t i = 1; i <= nvme->n_namespace_count; i++) {
nvme_namespace_t *ns = nvme_nsid2ns(nvme, i);
if (ns->ns_bd_hdl) {
(void) bd_detach_handle(ns->ns_bd_hdl);
bd_free_handle(ns->ns_bd_hdl);
}
if (ns->ns_idns)
kmem_free(ns->ns_idns,
sizeof (nvme_identify_nsid_t));
if (ns->ns_devid)
strfree(ns->ns_devid);
if ((ns->ns_progress & NVME_NS_LOCK) != 0)
nvme_lock_fini(&ns->ns_lock);
}
kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) *
nvme->n_namespace_count);
}
if (nvme->n_progress & NVME_MGMT_INIT) {
nvme_lock_fini(&nvme->n_lock);
nvme_mgmt_lock_fini(&nvme->n_mgmt);
}
if (nvme->n_progress & NVME_UFM_INIT) {
ddi_ufm_fini(nvme->n_ufmh);
mutex_destroy(&nvme->n_fwslot_mutex);
}
if (nvme->n_progress & NVME_INTERRUPTS)
nvme_release_interrupts(nvme);
for (uint_t i = 0; i < nvme->n_cq_count; i++) {
if (nvme->n_cq[i]->ncq_cmd_taskq != NULL)
taskq_wait(nvme->n_cq[i]->ncq_cmd_taskq);
}
if (nvme->n_progress & NVME_MUTEX_INIT) {
mutex_destroy(&nvme->n_minor_mutex);
}
if (nvme->n_ioq_count > 0) {
for (uint_t i = 1; i != nvme->n_ioq_count + 1; i++) {
if (nvme->n_ioq[i] != NULL) {
nvme_free_qpair(nvme->n_ioq[i]);
}
}
kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) *
(nvme->n_ioq_count + 1));
}
if (nvme->n_prp_cache != NULL) {
kmem_cache_destroy(nvme->n_prp_cache);
}
if (nvme->n_progress & NVME_REGS_MAPPED) {
nvme_shutdown(nvme, B_FALSE);
(void) nvme_reset(nvme, B_FALSE);
}
if (nvme->n_progress & NVME_CTRL_LIMITS)
sema_destroy(&nvme->n_abort_sema);
if (nvme->n_progress & NVME_ADMIN_QUEUE)
nvme_free_qpair(nvme->n_adminq);
if (nvme->n_cq_count > 0) {
nvme_destroy_cq_array(nvme, 0);
nvme->n_cq = NULL;
nvme->n_cq_count = 0;
}
if (nvme->n_idcomns)
kmem_free(nvme->n_idcomns, NVME_IDENTIFY_BUFSIZE);
if (nvme->n_idctl)
kmem_free(nvme->n_idctl, NVME_IDENTIFY_BUFSIZE);
if (nvme->n_progress & NVME_REGS_MAPPED)
ddi_regs_map_free(&nvme->n_regh);
if (nvme->n_progress & NVME_STAT_INIT)
nvme_stat_cleanup(nvme);
if (nvme->n_progress & NVME_FMA_INIT) {
if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
ddi_fm_handler_unregister(nvme->n_dip);
if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
pci_ereport_teardown(nvme->n_dip);
ddi_fm_fini(nvme->n_dip);
}
if (nvme->n_progress & NVME_PCI_CONFIG)
pci_config_teardown(&nvme->n_pcicfg_handle);
if (nvme->n_vendor != NULL)
strfree(nvme->n_vendor);
if (nvme->n_product != NULL)
strfree(nvme->n_product);
ddi_soft_state_free(nvme_state, instance);
return (DDI_SUCCESS);
}
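/*
 * Quiesce the device for a fast reboot or panic dump. This may not block or
 * rely on interrupts, which is why nvme_shutdown() and nvme_reset() busy wait
 * when their quiesce argument is B_TRUE.
 */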
static int
nvme_quiesce(dev_info_t *dip)
{
int instance;
nvme_t *nvme;
instance = ddi_get_instance(dip);
nvme = ddi_get_soft_state(nvme_state, instance);
if (nvme == NULL)
return (DDI_FAILURE);
nvme_shutdown(nvme, B_TRUE);
(void) nvme_reset(nvme, B_TRUE);
return (DDI_SUCCESS);
}
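/*
 * Fill in the PRP entries of a command from its DMA cookies. One or two
 * cookies fit directly into the two PRP slots of the SQE; anything larger
 * requires a PRP list page, and the DMA attributes limit transfers so that a
 * single list page always suffices (hence the VERIFY of nprp == 1).
 */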
static int
nvme_fill_prp(nvme_cmd_t *cmd, ddi_dma_handle_t dma)
{
nvme_t *nvme = cmd->nc_nvme;
uint_t nprp_per_page, nprp;
uint64_t *prp;
const ddi_dma_cookie_t *cookie;
uint_t idx;
uint_t ncookies = ddi_dma_ncookies(dma);
if (ncookies == 0)
return (DDI_FAILURE);
if ((cookie = ddi_dma_cookie_get(dma, 0)) == NULL)
return (DDI_FAILURE);
cmd->nc_sqe.sqe_dptr.d_prp[0] = cookie->dmac_laddress;
if (ncookies == 1) {
cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
return (DDI_SUCCESS);
} else if (ncookies == 2) {
if ((cookie = ddi_dma_cookie_get(dma, 1)) == NULL)
return (DDI_FAILURE);
cmd->nc_sqe.sqe_dptr.d_prp[1] = cookie->dmac_laddress;
return (DDI_SUCCESS);
}
nprp_per_page = nvme->n_pagesize / sizeof (uint64_t);
ASSERT(nprp_per_page > 0);
nprp = howmany(ncookies - 1, nprp_per_page);
VERIFY(nprp == 1);
cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP);
bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len);
cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_prp->nd_cookie.dmac_laddress;
prp = (uint64_t *)cmd->nc_prp->nd_memp;
for (idx = 1; idx < ncookies; idx++) {
if ((cookie = ddi_dma_cookie_get(dma, idx)) == NULL)
return (DDI_FAILURE);
*prp++ = cookie->dmac_laddress;
}
(void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len,
DDI_DMA_SYNC_FORDEV);
return (DDI_SUCCESS);
}
CTASSERT(sizeof (nvme_range_t) * NVME_DSET_MGMT_MAX_RANGES == 4096);
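/*
 * Build the range descriptor page for a DATASET MANAGEMENT (deallocate)
 * command from a blkdev free list. The number of ranges (zero-based) goes
 * into dword 10 and the descriptors are passed via a single PRP page.
 */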
static int
nvme_fill_ranges(nvme_cmd_t *cmd, bd_xfer_t *xfer, uint64_t blocksize,
int allocflag)
{
const dkioc_free_list_t *dfl = xfer->x_dfl;
const dkioc_free_list_ext_t *exts = dfl->dfl_exts;
nvme_t *nvme = cmd->nc_nvme;
nvme_range_t *ranges = NULL;
uint_t i;
VERIFY3U(dfl->dfl_num_exts, >, 0);
VERIFY3U(dfl->dfl_num_exts, <=, NVME_DSET_MGMT_MAX_RANGES);
cmd->nc_sqe.sqe_cdw10 = (dfl->dfl_num_exts - 1) & 0xff;
cmd->nc_sqe.sqe_cdw11 = NVME_DSET_MGMT_ATTR_DEALLOCATE;
cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, allocflag);
if (cmd->nc_prp == NULL)
return (DDI_FAILURE);
bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len);
ranges = (nvme_range_t *)cmd->nc_prp->nd_memp;
cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_prp->nd_cookie.dmac_laddress;
cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
for (i = 0; i < dfl->dfl_num_exts; i++) {
uint64_t lba, len;
lba = (dfl->dfl_offset + exts[i].dfle_start) / blocksize;
len = exts[i].dfle_length / blocksize;
VERIFY3U(len, <=, UINT32_MAX);
ranges[i].nr_ctxattr = 0;
ranges[i].nr_len = len;
ranges[i].nr_lba = lba;
}
(void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len,
DDI_DMA_SYNC_FORDEV);
return (DDI_SUCCESS);
}
static nvme_cmd_t *
nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer)
{
nvme_t *nvme = ns->ns_nvme;
nvme_cmd_t *cmd;
int allocflag;
allocflag = (xfer->x_flags & BD_XFER_POLL) ? KM_NOSLEEP : KM_SLEEP;
cmd = nvme_alloc_cmd(nvme, allocflag);
if (cmd == NULL)
return (NULL);
cmd->nc_sqe.sqe_opc = opc;
cmd->nc_callback = nvme_bd_xfer_done;
cmd->nc_xfer = xfer;
switch (opc) {
case NVME_OPC_NVM_WRITE:
case NVME_OPC_NVM_READ:
VERIFY(xfer->x_nblks <= 0x10000);
cmd->nc_sqe.sqe_nsid = ns->ns_id;
cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu;
cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32);
cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1);
if (nvme_fill_prp(cmd, xfer->x_dmah) != DDI_SUCCESS)
goto fail;
break;
case NVME_OPC_NVM_FLUSH:
cmd->nc_sqe.sqe_nsid = ns->ns_id;
break;
case NVME_OPC_NVM_DSET_MGMT:
cmd->nc_sqe.sqe_nsid = ns->ns_id;
if (nvme_fill_ranges(cmd, xfer,
(uint64_t)ns->ns_block_size, allocflag) != DDI_SUCCESS)
goto fail;
break;
default:
goto fail;
}
return (cmd);
fail:
nvme_free_cmd(cmd);
return (NULL);
}
static void
nvme_bd_xfer_done(void *arg)
{
nvme_cmd_t *cmd = arg;
bd_xfer_t *xfer = cmd->nc_xfer;
int error = 0;
error = nvme_check_cmd_status(cmd);
nvme_free_cmd(cmd);
bd_xfer_done(xfer, error);
}
static void
nvme_bd_driveinfo(void *arg, bd_drive_t *drive)
{
nvme_namespace_t *ns = arg;
nvme_t *nvme = ns->ns_nvme;
uint_t ns_count = MAX(1, nvme->n_namespaces_attachable);
nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_BDRO);
drive->d_qcount = nvme->n_ioq_count;
drive->d_qsize = nvme->n_io_squeue_len / ns_count;
drive->d_qsize = MAX(drive->d_qsize, NVME_MIN_IO_QUEUE_LEN);
drive->d_removable = B_FALSE;
drive->d_hotpluggable = B_FALSE;
bcopy(ns->ns_eui64, drive->d_eui64, sizeof (drive->d_eui64));
drive->d_target = ns->ns_id;
drive->d_lun = 0;
drive->d_model = nvme->n_idctl->id_model;
drive->d_model_len = sizeof (nvme->n_idctl->id_model);
drive->d_vendor = nvme->n_vendor;
drive->d_vendor_len = strlen(nvme->n_vendor);
drive->d_product = nvme->n_product;
drive->d_product_len = strlen(nvme->n_product);
drive->d_serial = nvme->n_idctl->id_serial;
drive->d_serial_len = sizeof (nvme->n_idctl->id_serial);
drive->d_revision = nvme->n_idctl->id_fwrev;
drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev);
if (nvme->n_idctl->id_oncs.on_dset_mgmt)
drive->d_max_free_seg = NVME_DSET_MGMT_MAX_RANGES;
nvme_mgmt_unlock(nvme);
}
static int
nvme_bd_mediainfo(void *arg, bd_media_t *media)
{
nvme_namespace_t *ns = arg;
nvme_t *nvme = ns->ns_nvme;
if (nvme->n_dead) {
return (EIO);
}
nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_BDRO);
media->m_nblks = ns->ns_block_count;
media->m_blksize = ns->ns_block_size;
media->m_readonly = B_FALSE;
media->m_solidstate = B_TRUE;
media->m_pblksize = ns->ns_best_block_size;
nvme_mgmt_unlock(nvme);
return (0);
}
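/*
 * Common blkdev I/O entry point: build and submit an NVM command on the queue
 * pair chosen by blkdev. For polled transfers, spin retrieving completions
 * until the queue is drained.
 */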
static int
nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc)
{
nvme_t *nvme = ns->ns_nvme;
nvme_cmd_t *cmd;
nvme_qpair_t *ioq;
boolean_t poll;
int ret;
if (nvme->n_dead) {
return (EIO);
}
cmd = nvme_create_nvm_cmd(ns, opc, xfer);
if (cmd == NULL)
return (ENOMEM);
cmd->nc_sqid = xfer->x_qnum + 1;
ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);
ioq = nvme->n_ioq[cmd->nc_sqid];
poll = (xfer->x_flags & BD_XFER_POLL) != 0;
ret = nvme_submit_io_cmd(ioq, cmd);
if (ret != 0)
return (ret);
if (!poll)
return (0);
do {
cmd = nvme_retrieve_cmd(nvme, ioq);
if (cmd != NULL) {
ASSERT0(cmd->nc_flags & NVME_CMD_F_USELOCK);
cmd->nc_callback(cmd);
} else {
drv_usecwait(10);
}
} while (ioq->nq_active_cmds != 0);
return (0);
}
static int
nvme_bd_read(void *arg, bd_xfer_t *xfer)
{
nvme_namespace_t *ns = arg;
return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ));
}
static int
nvme_bd_write(void *arg, bd_xfer_t *xfer)
{
nvme_namespace_t *ns = arg;
return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE));
}
static int
nvme_bd_sync(void *arg, bd_xfer_t *xfer)
{
nvme_namespace_t *ns = arg;
if (ns->ns_nvme->n_dead)
return (EIO);
if (!ns->ns_nvme->n_write_cache_present) {
bd_xfer_done(xfer, ENOTSUP);
return (0);
}
if (!ns->ns_nvme->n_write_cache_enabled) {
bd_xfer_done(xfer, 0);
return (0);
}
return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH));
}
static int
nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
{
nvme_namespace_t *ns = arg;
nvme_t *nvme = ns->ns_nvme;
if (nvme->n_dead) {
return (EIO);
}
if (*(uint64_t *)ns->ns_nguid != 0 ||
*(uint64_t *)(ns->ns_nguid + 8) != 0) {
return (ddi_devid_init(devinfo, DEVID_NVME_NGUID,
sizeof (ns->ns_nguid), ns->ns_nguid, devid));
} else if (*(uint64_t *)ns->ns_eui64 != 0) {
return (ddi_devid_init(devinfo, DEVID_NVME_EUI64,
sizeof (ns->ns_eui64), ns->ns_eui64, devid));
} else {
return (ddi_devid_init(devinfo, DEVID_NVME_NSID,
strlen(ns->ns_devid), ns->ns_devid, devid));
}
}
static int
nvme_bd_free_space(void *arg, bd_xfer_t *xfer)
{
nvme_namespace_t *ns = arg;
if (xfer->x_dfl == NULL)
return (EINVAL);
if (!ns->ns_nvme->n_idctl->id_oncs.on_dset_mgmt)
return (ENOTSUP);
return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_DSET_MGMT));
}
static int
nvme_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
{
#ifndef __lock_lint
_NOTE(ARGUNUSED(cred_p));
#endif
nvme_t *nvme;
nvme_minor_t *minor = NULL;
uint32_t nsid;
minor_t m = getminor(*devp);
int rv = 0;
if (otyp != OTYP_CHR)
return (EINVAL);
if (m >= NVME_OPEN_MINOR_MIN)
return (ENXIO);
nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(m));
nsid = NVME_MINOR_NSID(m);
if (nvme == NULL)
return (ENXIO);
if (nsid > MIN(nvme->n_namespace_count, NVME_MINOR_MAX))
return (ENXIO);
if (nvme->n_dead)
return (EIO);
minor = kmem_zalloc(sizeof (nvme_minor_t), KM_NOSLEEP_LAZY);
if (minor == NULL) {
return (ENOMEM);
}
cv_init(&minor->nm_cv, NULL, CV_DRIVER, NULL);
list_link_init(&minor->nm_ctrl_lock.nli_node);
minor->nm_ctrl_lock.nli_nvme = nvme;
minor->nm_ctrl_lock.nli_minor = minor;
list_link_init(&minor->nm_ns_lock.nli_node);
minor->nm_ns_lock.nli_nvme = nvme;
minor->nm_ns_lock.nli_minor = minor;
minor->nm_minor = id_alloc_nosleep(nvme_open_minors);
if (minor->nm_minor == -1) {
nvme_minor_free(minor);
return (ENOSPC);
}
minor->nm_ctrl = nvme;
if (nsid != 0) {
minor->nm_ns = nvme_nsid2ns(nvme, nsid);
}
mutex_enter(&nvme_open_minors_mutex);
avl_add(&nvme_open_minors_avl, minor);
mutex_exit(&nvme_open_minors_mutex);
mutex_enter(&nvme->n_minor_mutex);
if ((flag & FEXCL) != 0) {
nvme_ioctl_lock_t lock = {
.nil_level = NVME_LOCK_L_WRITE,
.nil_flags = NVME_LOCK_F_DONT_BLOCK
};
if (minor->nm_ns != NULL) {
lock.nil_ent = NVME_LOCK_E_NS;
lock.nil_common.nioc_nsid = nsid;
} else {
lock.nil_ent = NVME_LOCK_E_CTRL;
}
nvme_rwlock(minor, &lock);
if (lock.nil_common.nioc_drv_err != NVME_IOCTL_E_OK) {
mutex_exit(&nvme->n_minor_mutex);
mutex_enter(&nvme_open_minors_mutex);
avl_remove(&nvme_open_minors_avl, minor);
mutex_exit(&nvme_open_minors_mutex);
nvme_minor_free(minor);
return (EBUSY);
}
}
mutex_exit(&nvme->n_minor_mutex);
*devp = makedevice(getmajor(*devp), (minor_t)minor->nm_minor);
return (rv);
}
static int
nvme_close(dev_t dev, int flag __unused, int otyp, cred_t *cred_p __unused)
{
nvme_minor_t *minor;
nvme_t *nvme;
if (otyp != OTYP_CHR) {
return (ENXIO);
}
minor = nvme_minor_find_by_dev(dev);
if (minor == NULL) {
return (ENXIO);
}
mutex_enter(&nvme_open_minors_mutex);
avl_remove(&nvme_open_minors_avl, minor);
mutex_exit(&nvme_open_minors_mutex);
nvme = minor->nm_ctrl;
mutex_enter(&nvme->n_minor_mutex);
ASSERT3U(minor->nm_ctrl_lock.nli_state, !=, NVME_LOCK_STATE_BLOCKED);
ASSERT3U(minor->nm_ns_lock.nli_state, !=, NVME_LOCK_STATE_BLOCKED);
if (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) {
VERIFY3P(minor->nm_ctrl_lock.nli_lock, !=, NULL);
nvme_rwunlock(&minor->nm_ctrl_lock,
minor->nm_ctrl_lock.nli_lock);
}
if (minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) {
VERIFY3P(minor->nm_ns_lock.nli_lock, !=, NULL);
nvme_rwunlock(&minor->nm_ns_lock, minor->nm_ns_lock.nli_lock);
}
mutex_exit(&nvme->n_minor_mutex);
nvme_minor_free(minor);
return (0);
}
void
nvme_ioctl_success(nvme_ioctl_common_t *ioc)
{
ioc->nioc_drv_err = NVME_IOCTL_E_OK;
ioc->nioc_ctrl_sc = NVME_CQE_SC_GEN_SUCCESS;
ioc->nioc_ctrl_sct = NVME_CQE_SCT_GENERIC;
}
boolean_t
nvme_ioctl_error(nvme_ioctl_common_t *ioc, nvme_ioctl_errno_t err, uint32_t sct,
uint32_t sc)
{
ioc->nioc_drv_err = err;
ioc->nioc_ctrl_sct = sct;
ioc->nioc_ctrl_sc = sc;
return (B_FALSE);
}
static int
nvme_ioctl_copyout_error(nvme_ioctl_errno_t err, intptr_t uaddr, int mode)
{
nvme_ioctl_common_t ioc;
ASSERT3U(err, !=, NVME_IOCTL_E_CTRL_ERROR);
bzero(&ioc, sizeof (ioc));
if (ddi_copyout(&ioc, (void *)uaddr, sizeof (nvme_ioctl_common_t),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
return (0);
}
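/*
 * Check whether the exclusivity requirements of an ioctl are satisfied by the
 * locks the minor currently holds, distinguishing between ioctls that need
 * the controller write lock, those that need a write lock on their target
 * (controller or namespace), and those that merely must not race a writer
 * held by someone else.
 */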
static boolean_t
nvme_ioctl_excl_check(nvme_minor_t *minor, nvme_ioctl_common_t *ioc,
const nvme_ioctl_check_t *check)
{
nvme_t *const nvme = minor->nm_ctrl;
nvme_namespace_t *ns;
boolean_t have_ctrl, have_ns, ctrl_is_excl, ns_is_excl;
if (check->nck_excl == NVME_IOCTL_EXCL_SKIP) {
return (B_TRUE);
}
if (ioc->nioc_nsid == 0 || ioc->nioc_nsid == NVME_NSID_BCAST) {
ns = NULL;
} else {
ns = nvme_nsid2ns(nvme, ioc->nioc_nsid);
}
mutex_enter(&nvme->n_minor_mutex);
ctrl_is_excl = nvme->n_lock.nl_writer != NULL;
have_ctrl = nvme->n_lock.nl_writer == &minor->nm_ctrl_lock;
if (ns != NULL) {
ns_is_excl = ns->ns_lock.nl_writer != NULL;
have_ns = ns->ns_lock.nl_writer == &minor->nm_ns_lock;
ASSERT0(have_ctrl && have_ns);
#ifdef DEBUG
if (have_ns) {
ASSERT3P(minor->nm_ns_lock.nli_ns, ==, ns);
}
#endif
} else {
ns_is_excl = B_FALSE;
have_ns = B_FALSE;
}
ASSERT0(ctrl_is_excl && ns_is_excl);
mutex_exit(&nvme->n_minor_mutex);
if (check->nck_excl == NVME_IOCTL_EXCL_CTRL) {
if (have_ctrl) {
return (B_TRUE);
}
return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NEED_CTRL_WRLOCK,
0, 0));
}
if (check->nck_excl == NVME_IOCTL_EXCL_WRITE) {
if (ns == NULL) {
if (have_ctrl) {
return (B_TRUE);
}
return (nvme_ioctl_error(ioc,
NVME_IOCTL_E_NEED_CTRL_WRLOCK, 0, 0));
} else {
if (have_ctrl || have_ns) {
return (B_TRUE);
}
return (nvme_ioctl_error(ioc,
NVME_IOCTL_E_NEED_NS_WRLOCK, 0, 0));
}
}
if (ctrl_is_excl && !have_ctrl) {
return (nvme_ioctl_error(ioc, NVME_IOCTL_E_CTRL_LOCKED, 0, 0));
}
if (ns != NULL && ns_is_excl && !have_ns) {
return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_LOCKED, 0, 0));
}
return (B_TRUE);
}
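/*
 * Validate the namespace ID in an incoming ioctl against the kind of minor
 * it arrived on (controller vs. namespace) and against the checks declared
 * for the specific ioctl, rewriting a zero nsid where the ioctl allows it,
 * before applying the exclusivity rules above.
 */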
static boolean_t
nvme_ioctl_check(nvme_minor_t *minor, nvme_ioctl_common_t *ioc,
const nvme_ioctl_check_t *check)
{
if (minor->nm_ns != NULL) {
if (!check->nck_ns_ok || !check->nck_ns_minor_ok) {
return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NOT_CTRL, 0,
0));
}
if (ioc->nioc_nsid == 0) {
ioc->nioc_nsid = minor->nm_ns->ns_id;
} else if (ioc->nioc_nsid != minor->nm_ns->ns_id) {
return (nvme_ioctl_error(ioc,
NVME_IOCTL_E_MINOR_WRONG_NS, 0, 0));
}
return (nvme_ioctl_excl_check(minor, ioc, check));
}
if (check->nck_skip_ctrl) {
return (nvme_ioctl_excl_check(minor, ioc, check));
}
if (!check->nck_ns_ok) {
if (ioc->nioc_nsid != 0) {
return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_UNUSE, 0,
0));
}
return (nvme_ioctl_excl_check(minor, ioc, check));
}
if (ioc->nioc_nsid > minor->nm_ctrl->n_namespace_count &&
ioc->nioc_nsid != NVME_NSID_BCAST) {
return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NS_RANGE, 0, 0));
}
if (ioc->nioc_nsid == 0 && check->nck_ctrl_rewrite) {
ioc->nioc_nsid = NVME_NSID_BCAST;
}
if (!check->nck_bcast_ok && ioc->nioc_nsid == NVME_NSID_BCAST) {
return (nvme_ioctl_error(ioc, NVME_IOCTL_E_NO_BCAST_NS, 0, 0));
}
return (nvme_ioctl_excl_check(minor, ioc, check));
}
static int
nvme_ioctl_ctrl_info(nvme_minor_t *minor, intptr_t arg, int mode,
cred_t *cred_p)
{
nvme_t *const nvme = minor->nm_ctrl;
nvme_ioctl_ctrl_info_t *info;
nvme_reg_cap_t cap = { 0 };
nvme_ioctl_identify_t id = { .nid_cns = NVME_IDENTIFY_CTRL };
void *idbuf;
if ((mode & FREAD) == 0)
return (EBADF);
info = kmem_alloc(sizeof (nvme_ioctl_ctrl_info_t), KM_NOSLEEP_LAZY);
if (info == NULL) {
return (nvme_ioctl_copyout_error(NVME_IOCTL_E_NO_KERN_MEM, arg,
mode));
}
if (ddi_copyin((void *)arg, info, sizeof (nvme_ioctl_ctrl_info_t),
mode & FKIOCTL) != 0) {
kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t));
return (EFAULT);
}
if (!nvme_ioctl_check(minor, &info->nci_common,
&nvme_check_ctrl_info)) {
goto copyout;
}
if (!nvme_identify(nvme, B_TRUE, &id, &idbuf)) {
info->nci_common = id.nid_common;
goto copyout;
}
bcopy(idbuf, &info->nci_ctrl_id, sizeof (nvme_identify_ctrl_t));
kmem_free(idbuf, NVME_IDENTIFY_BUFSIZE);
bcopy(nvme->n_idcomns, &info->nci_common_ns,
sizeof (nvme_identify_nsid_t));
info->nci_vers = nvme->n_version;
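/*
 * The CAP register reports the minimum and maximum supported memory page
 * sizes as powers of two relative to 4 KiB, hence the 1 << (12 + x)
 * conversions below.
 */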
cap.r = nvme_get64(nvme, NVME_REG_CAP);
info->nci_caps.cap_mpsmax = 1 << (12 + cap.b.cap_mpsmax);
info->nci_caps.cap_mpsmin = 1 << (12 + cap.b.cap_mpsmin);
info->nci_nintrs = (uint32_t)nvme->n_intr_cnt;
copyout:
if (ddi_copyout(info, (void *)arg, sizeof (nvme_ioctl_ctrl_info_t),
mode & FKIOCTL) != 0) {
kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t));
return (EFAULT);
}
kmem_free(info, sizeof (nvme_ioctl_ctrl_info_t));
return (0);
}
static int
nvme_ioctl_ns_info(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
{
nvme_t *const nvme = minor->nm_ctrl;
nvme_ioctl_ns_info_t *ns_info;
nvme_namespace_t *ns;
nvme_ioctl_identify_t id = { .nid_cns = NVME_IDENTIFY_NSID };
void *idbuf;
if ((mode & FREAD) == 0)
return (EBADF);
ns_info = kmem_zalloc(sizeof (nvme_ioctl_ns_info_t), KM_NOSLEEP_LAZY);
if (ns_info == NULL) {
return (nvme_ioctl_copyout_error(NVME_IOCTL_E_NO_KERN_MEM, arg,
mode));
}
if (ddi_copyin((void *)arg, ns_info, sizeof (nvme_ioctl_ns_info_t),
mode & FKIOCTL) != 0) {
kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t));
return (EFAULT);
}
if (!nvme_ioctl_check(minor, &ns_info->nni_common,
&nvme_check_ns_info)) {
goto copyout;
}
ASSERT3U(ns_info->nni_common.nioc_nsid, >, 0);
ns = nvme_nsid2ns(nvme, ns_info->nni_common.nioc_nsid);
id.nid_common.nioc_nsid = ns_info->nni_common.nioc_nsid;
if (!nvme_identify(nvme, B_TRUE, &id, &idbuf)) {
ns_info->nni_common = id.nid_common;
goto copyout;
}
bcopy(idbuf, &ns_info->nni_id, sizeof (nvme_identify_nsid_t));
kmem_free(idbuf, NVME_IDENTIFY_BUFSIZE);
nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
ns_info->nni_state = ns->ns_state;
if (ns->ns_state >= NVME_NS_STATE_ATTACHED) {
const char *addr;
ns_info->nni_state = NVME_NS_STATE_ATTACHED;
addr = bd_address(ns->ns_bd_hdl);
if (strlcpy(ns_info->nni_addr, addr,
sizeof (ns_info->nni_addr)) >= sizeof (ns_info->nni_addr)) {
nvme_mgmt_unlock(nvme);
(void) nvme_ioctl_error(&ns_info->nni_common,
NVME_IOCTL_E_BD_ADDR_OVER, 0, 0);
goto copyout;
}
}
nvme_mgmt_unlock(nvme);
copyout:
if (ddi_copyout(ns_info, (void *)arg, sizeof (nvme_ioctl_ns_info_t),
mode & FKIOCTL) != 0) {
kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t));
return (EFAULT);
}
kmem_free(ns_info, sizeof (nvme_ioctl_ns_info_t));
return (0);
}
static int
nvme_ioctl_identify(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
{
_NOTE(ARGUNUSED(cred_p));
nvme_t *const nvme = minor->nm_ctrl;
void *idctl;
uint_t model;
nvme_ioctl_identify_t id;
#ifdef _MULTI_DATAMODEL
nvme_ioctl_identify32_t id32;
#endif
boolean_t ns_minor;
if ((mode & FREAD) == 0)
return (EBADF);
model = ddi_model_convert_from(mode);
switch (model) {
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
bzero(&id, sizeof (id));
if (ddi_copyin((void *)arg, &id32, sizeof (id32),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
id.nid_common.nioc_nsid = id32.nid_common.nioc_nsid;
id.nid_cns = id32.nid_cns;
id.nid_ctrlid = id32.nid_ctrlid;
id.nid_data = id32.nid_data;
break;
#endif
case DDI_MODEL_NONE:
if (ddi_copyin((void *)arg, &id, sizeof (id),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
break;
default:
return (ENOTSUP);
}
if (!nvme_ioctl_check(minor, &id.nid_common, &nvme_check_identify)) {
goto copyout;
}
ns_minor = minor->nm_ns != NULL;
if (!nvme_validate_identify(nvme, &id, ns_minor)) {
goto copyout;
}
if (nvme_identify(nvme, B_TRUE, &id, &idctl)) {
int ret = ddi_copyout(idctl, (void *)id.nid_data,
NVME_IDENTIFY_BUFSIZE, mode & FKIOCTL);
kmem_free(idctl, NVME_IDENTIFY_BUFSIZE);
if (ret != 0) {
(void) nvme_ioctl_error(&id.nid_common,
NVME_IOCTL_E_BAD_USER_DATA, 0, 0);
goto copyout;
}
nvme_ioctl_success(&id.nid_common);
}
copyout:
switch (model) {
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
id32.nid_common = id.nid_common;
if (ddi_copyout(&id32, (void *)arg, sizeof (id32),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
break;
#endif
case DDI_MODEL_NONE:
if (ddi_copyout(&id, (void *)arg, sizeof (id),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
break;
default:
return (ENOTSUP);
}
return (0);
}
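/*
 * Common backend for admin commands issued on behalf of an ioctl. This
 * allocates the command, optionally sets up a DMA buffer and copies user
 * data in or out around execution, and translates the completion status
 * into the common ioctl structure.
 */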
static boolean_t
nvme_ioc_cmd(nvme_t *nvme, nvme_ioctl_common_t *ioc, nvme_ioc_cmd_args_t *args)
{
nvme_cmd_t *cmd;
boolean_t ret = B_FALSE;
cmd = nvme_alloc_admin_cmd(nvme, KM_SLEEP);
cmd->nc_sqid = 0;
cmd->nc_flags |= NVME_CMD_F_DONTPANIC;
cmd->nc_callback = nvme_wakeup_cmd;
cmd->nc_sqe = *args->ica_sqe;
if ((args->ica_dma_flags & DDI_DMA_RDWR) != 0) {
if (args->ica_data == NULL) {
ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_NO_DMA_MEM,
0, 0);
goto free_cmd;
}
if (nvme_zalloc_dma(nvme, args->ica_data_len,
args->ica_dma_flags, &nvme->n_prp_dma_attr, &cmd->nc_dma) !=
DDI_SUCCESS) {
dev_err(nvme->n_dip, CE_WARN,
"!nvme_zalloc_dma failed for nvme_ioc_cmd()");
ret = nvme_ioctl_error(ioc,
NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
goto free_cmd;
}
if (nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah) != 0) {
ret = nvme_ioctl_error(ioc,
NVME_IOCTL_E_NO_DMA_MEM, 0, 0);
goto free_cmd;
}
if ((args->ica_dma_flags & DDI_DMA_WRITE) != 0 &&
ddi_copyin(args->ica_data, cmd->nc_dma->nd_memp,
args->ica_data_len, args->ica_copy_flags) != 0) {
ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_BAD_USER_DATA,
0, 0);
goto free_cmd;
}
}
nvme_admin_cmd(cmd, args->ica_timeout);
if (!nvme_check_cmd_status_ioctl(cmd, ioc)) {
ret = B_FALSE;
goto free_cmd;
}
args->ica_cdw0 = cmd->nc_cqe.cqe_dw0;
if ((args->ica_dma_flags & DDI_DMA_READ) != 0 &&
ddi_copyout(cmd->nc_dma->nd_memp, args->ica_data,
args->ica_data_len, args->ica_copy_flags) != 0) {
ret = nvme_ioctl_error(ioc, NVME_IOCTL_E_BAD_USER_DATA, 0, 0);
goto free_cmd;
}
ret = B_TRUE;
nvme_ioctl_success(ioc);
free_cmd:
nvme_free_cmd(cmd);
return (ret);
}
static int
nvme_ioctl_get_logpage(nvme_minor_t *minor, intptr_t arg, int mode,
cred_t *cred_p)
{
nvme_t *const nvme = minor->nm_ctrl;
void *buf;
nvme_ioctl_get_logpage_t log;
uint_t model;
#ifdef _MULTI_DATAMODEL
nvme_ioctl_get_logpage32_t log32;
#endif
if ((mode & FREAD) == 0) {
return (EBADF);
}
model = ddi_model_convert_from(mode);
switch (model) {
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
bzero(&log, sizeof (log));
if (ddi_copyin((void *)arg, &log32, sizeof (log32),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
log.nigl_common.nioc_nsid = log32.nigl_common.nioc_nsid;
log.nigl_csi = log32.nigl_csi;
log.nigl_lid = log32.nigl_lid;
log.nigl_lsp = log32.nigl_lsp;
log.nigl_len = log32.nigl_len;
log.nigl_offset = log32.nigl_offset;
log.nigl_data = log32.nigl_data;
break;
#endif
case DDI_MODEL_NONE:
if (ddi_copyin((void *)arg, &log, sizeof (log),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
break;
default:
return (ENOTSUP);
}
if (!nvme_ioctl_check(minor, &log.nigl_common,
&nvme_check_get_logpage)) {
goto copyout;
}
if (!nvme_validate_logpage(nvme, &log)) {
goto copyout;
}
if (nvme_get_logpage(nvme, B_TRUE, &log, &buf)) {
int copy;
copy = ddi_copyout(buf, (void *)log.nigl_data, log.nigl_len,
mode & FKIOCTL);
kmem_free(buf, log.nigl_len);
if (copy != 0) {
(void) nvme_ioctl_error(&log.nigl_common,
NVME_IOCTL_E_BAD_USER_DATA, 0, 0);
goto copyout;
}
nvme_ioctl_success(&log.nigl_common);
}
copyout:
switch (model) {
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
bzero(&log32, sizeof (log32));
log32.nigl_common = log.nigl_common;
log32.nigl_csi = log.nigl_csi;
log32.nigl_lid = log.nigl_lid;
log32.nigl_lsp = log.nigl_lsp;
log32.nigl_len = log.nigl_len;
log32.nigl_offset = log.nigl_offset;
log32.nigl_data = log.nigl_data;
if (ddi_copyout(&log32, (void *)arg, sizeof (log32),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
break;
#endif
case DDI_MODEL_NONE:
if (ddi_copyout(&log, (void *)arg, sizeof (log),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
break;
default:
return (ENOTSUP);
}
return (0);
}
static int
nvme_ioctl_get_feature(nvme_minor_t *minor, intptr_t arg, int mode,
cred_t *cred_p)
{
nvme_t *const nvme = minor->nm_ctrl;
nvme_ioctl_get_feature_t feat;
uint_t model;
#ifdef _MULTI_DATAMODEL
nvme_ioctl_get_feature32_t feat32;
#endif
nvme_get_features_dw10_t gf_dw10 = { 0 };
nvme_ioc_cmd_args_t args = { NULL };
nvme_sqe_t sqe = {
.sqe_opc = NVME_OPC_GET_FEATURES
};
if ((mode & FREAD) == 0) {
return (EBADF);
}
model = ddi_model_convert_from(mode);
switch (model) {
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
bzero(&feat, sizeof (feat));
if (ddi_copyin((void *)arg, &feat32, sizeof (feat32),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
feat.nigf_common.nioc_nsid = feat32.nigf_common.nioc_nsid;
feat.nigf_fid = feat32.nigf_fid;
feat.nigf_sel = feat32.nigf_sel;
feat.nigf_cdw11 = feat32.nigf_cdw11;
feat.nigf_data = feat32.nigf_data;
feat.nigf_len = feat32.nigf_len;
break;
#endif
case DDI_MODEL_NONE:
if (ddi_copyin((void *)arg, &feat, sizeof (feat),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
break;
default:
return (ENOTSUP);
}
if (!nvme_ioctl_check(minor, &feat.nigf_common,
&nvme_check_get_feature)) {
goto copyout;
}
if (!nvme_validate_get_feature(nvme, &feat)) {
goto copyout;
}
gf_dw10.b.gt_fid = bitx32(feat.nigf_fid, 7, 0);
gf_dw10.b.gt_sel = bitx32(feat.nigf_sel, 2, 0);
sqe.sqe_cdw10 = gf_dw10.r;
sqe.sqe_cdw11 = feat.nigf_cdw11;
sqe.sqe_nsid = feat.nigf_common.nioc_nsid;
args.ica_sqe = &sqe;
if (feat.nigf_len != 0) {
args.ica_data = (void *)feat.nigf_data;
args.ica_data_len = feat.nigf_len;
args.ica_dma_flags = DDI_DMA_READ;
}
args.ica_copy_flags = mode;
args.ica_timeout = nvme_admin_cmd_timeout;
if (!nvme_ioc_cmd(nvme, &feat.nigf_common, &args)) {
goto copyout;
}
feat.nigf_cdw0 = args.ica_cdw0;
copyout:
switch (model) {
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32:
bzero(&feat32, sizeof (feat32));
feat32.nigf_common = feat.nigf_common;
feat32.nigf_fid = feat.nigf_fid;
feat32.nigf_sel = feat.nigf_sel;
feat32.nigf_cdw11 = feat.nigf_cdw11;
feat32.nigf_data = feat.nigf_data;
feat32.nigf_len = feat.nigf_len;
feat32.nigf_cdw0 = feat.nigf_cdw0;
if (ddi_copyout(&feat32, (void *)arg, sizeof (feat32),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
break;
#endif
case DDI_MODEL_NONE:
if (ddi_copyout(&feat, (void *)arg, sizeof (feat),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
break;
default:
return (ENOTSUP);
}
return (0);
}
static int
nvme_ioctl_format(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
{
nvme_t *const nvme = minor->nm_ctrl;
nvme_ioctl_format_t ioc;
if ((mode & FWRITE) == 0)
return (EBADF);
if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
return (EPERM);
if (ddi_copyin((void *)(uintptr_t)arg, &ioc,
sizeof (nvme_ioctl_format_t), mode & FKIOCTL) != 0)
return (EFAULT);
if (!nvme_ioctl_check(minor, &ioc.nif_common, &nvme_check_format)) {
goto copyout;
}
if (!nvme_validate_format(nvme, &ioc)) {
goto copyout;
}
nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
if (ioc.nif_common.nioc_nsid == NVME_NSID_BCAST) {
if (!nvme_no_blkdev_attached(nvme, ioc.nif_common.nioc_nsid)) {
nvme_mgmt_unlock(nvme);
(void) nvme_ioctl_error(&ioc.nif_common,
NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0);
goto copyout;
}
} else {
nvme_namespace_t *ns = nvme_nsid2ns(nvme,
ioc.nif_common.nioc_nsid);
if (!nvme_ns_state_check(ns, &ioc.nif_common,
nvme_format_nvm_states)) {
nvme_mgmt_unlock(nvme);
goto copyout;
}
}
if (nvme_format_nvm(nvme, &ioc)) {
nvme_ioctl_success(&ioc.nif_common);
nvme_rescan_ns(nvme, ioc.nif_common.nioc_nsid);
}
nvme_mgmt_unlock(nvme);
copyout:
if (ddi_copyout(&ioc, (void *)(uintptr_t)arg, sizeof (ioc),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
return (0);
}
static int
nvme_ioctl_bd_detach(nvme_minor_t *minor, intptr_t arg, int mode,
cred_t *cred_p)
{
nvme_t *const nvme = minor->nm_ctrl;
nvme_ioctl_common_t com;
if ((mode & FWRITE) == 0)
return (EBADF);
if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
return (EPERM);
if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) {
goto copyout;
}
nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
if (nvme_bd_detach_ns(nvme, &com)) {
nvme_ioctl_success(&com);
}
nvme_mgmt_unlock(nvme);
copyout:
if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
return (0);
}
static int
nvme_ioctl_bd_attach(nvme_minor_t *minor, intptr_t arg, int mode,
cred_t *cred_p)
{
nvme_t *const nvme = minor->nm_ctrl;
nvme_ioctl_common_t com;
nvme_namespace_t *ns;
if ((mode & FWRITE) == 0)
return (EBADF);
if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
return (EPERM);
if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) {
goto copyout;
}
nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
ns = nvme_nsid2ns(nvme, com.nioc_nsid);
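/*
 * If the namespace is not currently attached to blkdev, rescan it first so
 * that our cached state reflects any changes the device may have made
 * since we last looked.
 */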
if (ns->ns_state < NVME_NS_STATE_ATTACHED) {
nvme_rescan_ns(nvme, com.nioc_nsid);
}
if (nvme_bd_attach_ns(nvme, &com)) {
nvme_ioctl_success(&com);
}
nvme_mgmt_unlock(nvme);
copyout:
if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
return (0);
}
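/*
 * Issue a Namespace Attachment admin command to attach the namespace to,
 * or detach it from, this controller.
 */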
static boolean_t
nvme_ctrl_attach_detach_ns(nvme_t *nvme, nvme_namespace_t *ns,
nvme_ioctl_common_t *ioc, boolean_t attach)
{
nvme_ioc_cmd_args_t args = { NULL };
nvme_sqe_t sqe;
nvme_ns_mgmt_dw10_t dw10;
uint16_t ctrlids[2];
ASSERT(nvme_mgmt_lock_held(nvme));
bzero(&sqe, sizeof (sqe));
sqe.sqe_nsid = ioc->nioc_nsid;
sqe.sqe_opc = NVME_OPC_NS_ATTACH;
dw10.r = 0;
dw10.b.nsm_sel = attach ? NVME_NS_ATTACH_CTRL_ATTACH :
NVME_NS_ATTACH_CTRL_DETACH;
sqe.sqe_cdw10 = dw10.r;
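/*
 * The data payload is a controller list: a count followed by that many
 * controller identifiers. Here it is always this controller alone.
 */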
ctrlids[0] = 1;
ctrlids[1] = nvme->n_idctl->id_cntlid;
args.ica_sqe = &sqe;
args.ica_data = ctrlids;
args.ica_data_len = sizeof (ctrlids);
args.ica_dma_flags = DDI_DMA_WRITE;
args.ica_copy_flags = FKIOCTL;
args.ica_timeout = nvme_admin_cmd_timeout;
return (nvme_ioc_cmd(nvme, ioc, &args));
}
static int
nvme_ioctl_ctrl_detach(nvme_minor_t *minor, intptr_t arg, int mode,
cred_t *cred_p)
{
nvme_t *const nvme = minor->nm_ctrl;
nvme_ioctl_common_t com;
nvme_namespace_t *ns;
if ((mode & FWRITE) == 0)
return (EBADF);
if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
return (EPERM);
if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) {
goto copyout;
}
if (!nvme_validate_ctrl_attach_detach_ns(nvme, &com)) {
goto copyout;
}
nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
ns = nvme_nsid2ns(nvme, com.nioc_nsid);
if (nvme_ns_state_check(ns, &com, nvme_ctrl_detach_states)) {
if (nvme_ctrl_attach_detach_ns(nvme, ns, &com, B_FALSE)) {
nvme_rescan_ns(nvme, com.nioc_nsid);
nvme_ioctl_success(&com);
}
}
nvme_mgmt_unlock(nvme);
copyout:
if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
return (0);
}
static int
nvme_ioctl_ns_create(nvme_minor_t *minor, intptr_t arg, int mode,
cred_t *cred_p)
{
nvme_t *const nvme = minor->nm_ctrl;
nvme_ioctl_ns_create_t create;
if ((mode & FWRITE) == 0)
return (EBADF);
if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
return (EPERM);
if (ddi_copyin((void *)(uintptr_t)arg, &create, sizeof (create),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
if (!nvme_ioctl_check(minor, &create.nnc_common,
&nvme_check_ns_create)) {
goto copyout;
}
if (!nvme_validate_ns_create(nvme, &create)) {
goto copyout;
}
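/*
 * The create parameters are passed to the device as a partially filled
 * Identify Namespace structure.
 */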
nvme_identify_nsid_t *idns = kmem_zalloc(sizeof (nvme_identify_nsid_t),
KM_NOSLEEP_LAZY);
if (idns == NULL) {
(void) nvme_ioctl_error(&create.nnc_common,
NVME_IOCTL_E_NO_KERN_MEM, 0, 0);
goto copyout;
}
idns->id_nsize = create.nnc_nsze;
idns->id_ncap = create.nnc_ncap;
idns->id_flbas.lba_format = create.nnc_flbas;
idns->id_nmic.nm_shared = bitx32(create.nnc_nmic, 0, 0);
nvme_ioc_cmd_args_t args = { NULL };
nvme_sqe_t sqe;
nvme_ns_mgmt_dw10_t dw10;
nvme_ns_mgmt_dw11_t dw11;
bzero(&sqe, sizeof (sqe));
sqe.sqe_nsid = create.nnc_common.nioc_nsid;
sqe.sqe_opc = NVME_OPC_NS_MGMT;
dw10.r = 0;
dw10.b.nsm_sel = NVME_NS_MGMT_NS_CREATE;
sqe.sqe_cdw10 = dw10.r;
dw11.r = 0;
dw11.b.nsm_csi = create.nnc_csi;
sqe.sqe_cdw11 = dw11.r;
args.ica_sqe = &sqe;
args.ica_data = idns;
args.ica_data_len = sizeof (nvme_identify_nsid_t);
args.ica_dma_flags = DDI_DMA_WRITE;
args.ica_copy_flags = FKIOCTL;
args.ica_timeout = nvme_format_cmd_timeout;
nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
if (nvme_ioc_cmd(nvme, &create.nnc_common, &args)) {
create.nnc_nsid = args.ica_cdw0;
nvme_rescan_ns(nvme, create.nnc_nsid);
nvme_ioctl_success(&create.nnc_common);
}
nvme_mgmt_unlock(nvme);
kmem_free(idns, sizeof (nvme_identify_nsid_t));
copyout:
if (ddi_copyout(&create, (void *)(uintptr_t)arg, sizeof (create),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
return (0);
}
static int
nvme_ioctl_ns_delete(nvme_minor_t *minor, intptr_t arg, int mode,
cred_t *cred_p)
{
nvme_t *const nvme = minor->nm_ctrl;
nvme_ioctl_common_t com;
if ((mode & FWRITE) == 0)
return (EBADF);
if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
return (EPERM);
if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
if (!nvme_ioctl_check(minor, &com, &nvme_check_ns_delete)) {
goto copyout;
}
if (!nvme_validate_ns_delete(nvme, &com)) {
goto copyout;
}
nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
if (com.nioc_nsid == NVME_NSID_BCAST) {
if (!nvme_no_blkdev_attached(nvme, com.nioc_nsid)) {
nvme_mgmt_unlock(nvme);
(void) nvme_ioctl_error(&com,
NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0);
goto copyout;
}
} else {
nvme_namespace_t *ns = nvme_nsid2ns(nvme, com.nioc_nsid);
if (!nvme_ns_state_check(ns, &com, nvme_ns_delete_states)) {
nvme_mgmt_unlock(nvme);
goto copyout;
}
}
nvme_ioc_cmd_args_t args = { NULL };
nvme_sqe_t sqe;
nvme_ns_mgmt_dw10_t dw10;
bzero(&sqe, sizeof (sqe));
sqe.sqe_nsid = com.nioc_nsid;
sqe.sqe_opc = NVME_OPC_NS_MGMT;
dw10.r = 0;
dw10.b.nsm_sel = NVME_NS_MGMT_NS_DELETE;
sqe.sqe_cdw10 = dw10.r;
args.ica_sqe = &sqe;
args.ica_data = NULL;
args.ica_data_len = 0;
args.ica_dma_flags = 0;
args.ica_copy_flags = 0;
args.ica_timeout = nvme_format_cmd_timeout;
if (nvme_ioc_cmd(nvme, &com, &args)) {
nvme_rescan_ns(nvme, com.nioc_nsid);
nvme_ioctl_success(&com);
}
nvme_mgmt_unlock(nvme);
copyout:
if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
return (0);
}
static int
nvme_ioctl_ctrl_attach(nvme_minor_t *minor, intptr_t arg, int mode,
cred_t *cred_p)
{
nvme_t *const nvme = minor->nm_ctrl;
nvme_ioctl_common_t com;
nvme_namespace_t *ns;
if ((mode & FWRITE) == 0)
return (EBADF);
if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
return (EPERM);
if (ddi_copyin((void *)(uintptr_t)arg, &com, sizeof (com),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
if (!nvme_ioctl_check(minor, &com, &nvme_check_attach_detach)) {
goto copyout;
}
if (!nvme_validate_ctrl_attach_detach_ns(nvme, &com)) {
goto copyout;
}
nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
ns = nvme_nsid2ns(nvme, com.nioc_nsid);
if (nvme_ns_state_check(ns, &com, nvme_ctrl_attach_states)) {
if (nvme_ctrl_attach_detach_ns(nvme, ns, &com, B_TRUE)) {
nvme_rescan_ns(nvme, com.nioc_nsid);
nvme_ioctl_success(&com);
}
}
nvme_mgmt_unlock(nvme);
copyout:
if (ddi_copyout(&com, (void *)(uintptr_t)arg, sizeof (com),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
return (0);
}
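/*
 * Notify the UFM subsystem that the firmware image or slot information may
 * have changed, and invalidate the cached firmware slot log.
 */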
static void
nvme_ufm_update(nvme_t *nvme)
{
mutex_enter(&nvme->n_fwslot_mutex);
ddi_ufm_update(nvme->n_ufmh);
if (nvme->n_fwslot != NULL) {
kmem_free(nvme->n_fwslot, sizeof (nvme_fwslot_log_t));
nvme->n_fwslot = NULL;
}
mutex_exit(&nvme->n_fwslot_mutex);
}
static int
nvme_ioctl_firmware_download(nvme_minor_t *minor, intptr_t arg, int mode,
cred_t *cred_p)
{
nvme_t *const nvme = minor->nm_ctrl;
nvme_ioctl_fw_load_t fw;
uint64_t len, maxcopy;
offset_t offset;
uint32_t gran;
nvme_valid_ctrl_data_t data;
uintptr_t buf;
nvme_sqe_t sqe = {
.sqe_opc = NVME_OPC_FW_IMAGE_LOAD
};
if ((mode & FWRITE) == 0)
return (EBADF);
if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
return (EPERM);
if (ddi_copyin((void *)(uintptr_t)arg, &fw, sizeof (fw),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
if (!nvme_ioctl_check(minor, &fw.fwl_common, &nvme_check_firmware)) {
goto copyout;
}
if (!nvme_validate_fw_load(nvme, &fw)) {
goto copyout;
}
len = fw.fwl_len;
offset = fw.fwl_off;
buf = fw.fwl_buf;
data.vcd_vers = &nvme->n_version;
data.vcd_id = nvme->n_idctl;
gran = nvme_fw_load_granularity(&data);
if ((nvme->n_max_data_transfer_size % gran) == 0) {
maxcopy = nvme->n_max_data_transfer_size;
} else if (gran <= nvme->n_max_data_transfer_size) {
maxcopy = gran;
} else {
(void) nvme_ioctl_error(&fw.fwl_common,
NVME_IOCTL_E_FW_LOAD_IMPOS_GRAN, 0, 0);
goto copyout;
}
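/*
 * Transfer the image in chunks of at most maxcopy bytes. cdw10 carries the
 * zero-based chunk length in dwords and cdw11 the dword offset of the
 * chunk within the image.
 */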
while (len > 0) {
nvme_ioc_cmd_args_t args = { NULL };
uint64_t copylen = MIN(maxcopy, len);
sqe.sqe_cdw10 = (uint32_t)(copylen >> NVME_DWORD_SHIFT) - 1;
sqe.sqe_cdw11 = (uint32_t)(offset >> NVME_DWORD_SHIFT);
args.ica_sqe = &sqe;
args.ica_data = (void *)buf;
args.ica_data_len = copylen;
args.ica_dma_flags = DDI_DMA_WRITE;
args.ica_copy_flags = mode;
args.ica_timeout = nvme_admin_cmd_timeout;
if (!nvme_ioc_cmd(nvme, &fw.fwl_common, &args)) {
break;
}
buf += copylen;
offset += copylen;
len -= copylen;
}
copyout:
if (ddi_copyout(&fw, (void *)(uintptr_t)arg, sizeof (fw),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
return (0);
}
static int
nvme_ioctl_firmware_commit(nvme_minor_t *minor, intptr_t arg, int mode,
cred_t *cred_p)
{
nvme_t *const nvme = minor->nm_ctrl;
nvme_ioctl_fw_commit_t fw;
nvme_firmware_commit_dw10_t fc_dw10 = { 0 };
nvme_ioc_cmd_args_t args = { NULL };
nvme_sqe_t sqe = {
.sqe_opc = NVME_OPC_FW_ACTIVATE
};
if ((mode & FWRITE) == 0)
return (EBADF);
if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
return (EPERM);
if (ddi_copyin((void *)(uintptr_t)arg, &fw, sizeof (fw),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
if (!nvme_ioctl_check(minor, &fw.fwc_common, &nvme_check_firmware)) {
goto copyout;
}
if (!nvme_validate_fw_commit(nvme, &fw)) {
goto copyout;
}
fc_dw10.b.fc_slot = fw.fwc_slot;
fc_dw10.b.fc_action = fw.fwc_action;
sqe.sqe_cdw10 = fc_dw10.r;
args.ica_sqe = &sqe;
args.ica_timeout = nvme_commit_save_cmd_timeout;
(void) nvme_ioc_cmd(nvme, &fw.fwc_common, &args);
nvme_ufm_update(nvme);
copyout:
if (ddi_copyout(&fw, (void *)(uintptr_t)arg, sizeof (fw),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
return (0);
}
static int
nvme_passthru_copyin_cmd(const void *buf, nvme_ioctl_passthru_t *cmd, int mode)
{
switch (ddi_model_convert_from(mode & FMODELS)) {
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32: {
nvme_ioctl_passthru32_t cmd32;
if (ddi_copyin(buf, (void *)&cmd32, sizeof (cmd32), mode) != 0)
return (EFAULT);
bzero(cmd, sizeof (nvme_ioctl_passthru_t));
cmd->npc_common.nioc_nsid = cmd32.npc_common.nioc_nsid;
cmd->npc_opcode = cmd32.npc_opcode;
cmd->npc_timeout = cmd32.npc_timeout;
cmd->npc_flags = cmd32.npc_flags;
cmd->npc_impact = cmd32.npc_impact;
cmd->npc_cdw12 = cmd32.npc_cdw12;
cmd->npc_cdw13 = cmd32.npc_cdw13;
cmd->npc_cdw14 = cmd32.npc_cdw14;
cmd->npc_cdw15 = cmd32.npc_cdw15;
cmd->npc_buflen = cmd32.npc_buflen;
cmd->npc_buf = cmd32.npc_buf;
break;
}
#endif
case DDI_MODEL_NONE:
if (ddi_copyin(buf, (void *)cmd, sizeof (nvme_ioctl_passthru_t),
mode) != 0) {
return (EFAULT);
}
break;
default:
return (ENOTSUP);
}
return (0);
}
static int
nvme_passthru_copyout_cmd(const nvme_ioctl_passthru_t *cmd, void *buf, int mode)
{
switch (ddi_model_convert_from(mode & FMODELS)) {
#ifdef _MULTI_DATAMODEL
case DDI_MODEL_ILP32: {
nvme_ioctl_passthru32_t cmd32;
bzero(&cmd32, sizeof (nvme_ioctl_passthru32_t));
cmd32.npc_common = cmd->npc_common;
cmd32.npc_opcode = cmd->npc_opcode;
cmd32.npc_timeout = cmd->npc_timeout;
cmd32.npc_flags = cmd->npc_flags;
cmd32.npc_impact = cmd->npc_impact;
cmd32.npc_cdw0 = cmd->npc_cdw0;
cmd32.npc_cdw12 = cmd->npc_cdw12;
cmd32.npc_cdw13 = cmd->npc_cdw13;
cmd32.npc_cdw14 = cmd->npc_cdw14;
cmd32.npc_cdw15 = cmd->npc_cdw15;
cmd32.npc_buflen = (size32_t)cmd->npc_buflen;
cmd32.npc_buf = (uintptr32_t)cmd->npc_buf;
if (ddi_copyout(&cmd32, buf, sizeof (cmd32), mode) != 0)
return (EFAULT);
break;
}
#endif
case DDI_MODEL_NONE:
if (ddi_copyout(cmd, buf, sizeof (nvme_ioctl_passthru_t),
mode) != 0) {
return (EFAULT);
}
break;
default:
return (ENOTSUP);
}
return (0);
}
static int
nvme_ioctl_passthru(nvme_minor_t *minor, intptr_t arg, int mode, cred_t *cred_p)
{
nvme_t *const nvme = minor->nm_ctrl;
int rv;
nvme_ioctl_passthru_t pass;
nvme_sqe_t sqe;
nvme_ioc_cmd_args_t args = { NULL };
if ((mode & FWRITE) == 0)
return (EBADF);
if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
return (EPERM);
if ((rv = nvme_passthru_copyin_cmd((void *)(uintptr_t)arg, &pass,
mode)) != 0) {
return (rv);
}
if (!nvme_ioctl_check(minor, &pass.npc_common, &nvme_check_passthru)) {
goto copyout;
}
if (!nvme_validate_vuc(nvme, &pass)) {
goto copyout;
}
nvme_mgmt_lock(nvme, NVME_MGMT_LOCK_NVME);
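/*
 * A vendor-unique command that the caller says may impact namespaces is
 * only allowed while no namespace is attached to blkdev; if it succeeds,
 * all namespaces are rescanned afterwards.
 */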
if ((pass.npc_impact & NVME_IMPACT_NS) != 0) {
if (!nvme_no_blkdev_attached(nvme, NVME_NSID_BCAST)) {
nvme_mgmt_unlock(nvme);
(void) nvme_ioctl_error(&pass.npc_common,
NVME_IOCTL_E_NS_BLKDEV_ATTACH, 0, 0);
goto copyout;
}
}
bzero(&sqe, sizeof (sqe));
sqe.sqe_opc = pass.npc_opcode;
sqe.sqe_nsid = pass.npc_common.nioc_nsid;
sqe.sqe_cdw10 = (uint32_t)(pass.npc_buflen >> NVME_DWORD_SHIFT);
sqe.sqe_cdw12 = pass.npc_cdw12;
sqe.sqe_cdw13 = pass.npc_cdw13;
sqe.sqe_cdw14 = pass.npc_cdw14;
sqe.sqe_cdw15 = pass.npc_cdw15;
args.ica_sqe = &sqe;
args.ica_data = (void *)pass.npc_buf;
args.ica_data_len = pass.npc_buflen;
args.ica_copy_flags = mode;
args.ica_timeout = pass.npc_timeout;
if ((pass.npc_flags & NVME_PASSTHRU_READ) != 0)
args.ica_dma_flags |= DDI_DMA_READ;
else if ((pass.npc_flags & NVME_PASSTHRU_WRITE) != 0)
args.ica_dma_flags |= DDI_DMA_WRITE;
if (nvme_ioc_cmd(nvme, &pass.npc_common, &args)) {
pass.npc_cdw0 = args.ica_cdw0;
if ((pass.npc_impact & NVME_IMPACT_NS) != 0) {
nvme_rescan_ns(nvme, NVME_NSID_BCAST);
}
}
nvme_mgmt_unlock(nvme);
copyout:
rv = nvme_passthru_copyout_cmd(&pass, (void *)(uintptr_t)arg,
mode);
return (rv);
}
static int
nvme_ioctl_lock(nvme_minor_t *minor, intptr_t arg, int mode,
cred_t *cred_p)
{
nvme_ioctl_lock_t lock;
const nvme_lock_flags_t all_flags = NVME_LOCK_F_DONT_BLOCK;
nvme_t *nvme = minor->nm_ctrl;
if ((mode & FWRITE) == 0)
return (EBADF);
if (secpolicy_sys_config(cred_p, B_FALSE) != 0)
return (EPERM);
if (ddi_copyin((void *)(uintptr_t)arg, &lock, sizeof (lock),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
if (lock.nil_ent != NVME_LOCK_E_CTRL &&
lock.nil_ent != NVME_LOCK_E_NS) {
(void) nvme_ioctl_error(&lock.nil_common,
NVME_IOCTL_E_BAD_LOCK_ENTITY, 0, 0);
goto copyout;
}
if (lock.nil_level != NVME_LOCK_L_READ &&
lock.nil_level != NVME_LOCK_L_WRITE) {
(void) nvme_ioctl_error(&lock.nil_common,
NVME_IOCTL_E_BAD_LOCK_LEVEL, 0, 0);
goto copyout;
}
if ((lock.nil_flags & ~all_flags) != 0) {
(void) nvme_ioctl_error(&lock.nil_common,
NVME_IOCTL_E_BAD_LOCK_FLAGS, 0, 0);
goto copyout;
}
if (!nvme_ioctl_check(minor, &lock.nil_common, &nvme_check_locking)) {
goto copyout;
}
if (lock.nil_common.nioc_nsid != 0 &&
lock.nil_ent == NVME_LOCK_E_CTRL) {
(void) nvme_ioctl_error(&lock.nil_common,
NVME_IOCTL_E_NS_CANNOT_LOCK_CTRL, 0, 0);
goto copyout;
}
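/*
 * With the arguments validated, check the minor's existing lock state
 * under n_minor_mutex: a minor may not take a lock while another of its
 * lock attempts is pending, retake a lock it already holds, take the
 * controller lock while holding a namespace lock, take a namespace lock
 * while holding the controller write lock, or hold two namespace locks.
 */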
mutex_enter(&nvme->n_minor_mutex);
if (minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_BLOCKED ||
minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_BLOCKED) {
(void) nvme_ioctl_error(&lock.nil_common,
NVME_IOCTL_E_LOCK_PENDING, 0, 0);
mutex_exit(&nvme->n_minor_mutex);
goto copyout;
}
if ((lock.nil_ent == NVME_LOCK_E_CTRL &&
minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED) ||
(lock.nil_ent == NVME_LOCK_E_NS &&
minor->nm_ns_lock.nli_state == NVME_LOCK_STATE_ACQUIRED &&
minor->nm_ns_lock.nli_ns->ns_id == lock.nil_common.nioc_nsid)) {
(void) nvme_ioctl_error(&lock.nil_common,
NVME_IOCTL_E_LOCK_ALREADY_HELD, 0, 0);
mutex_exit(&nvme->n_minor_mutex);
goto copyout;
}
if (lock.nil_ent == NVME_LOCK_E_CTRL &&
minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_UNLOCKED) {
(void) nvme_ioctl_error(&lock.nil_common,
NVME_IOCTL_E_LOCK_NO_CTRL_WITH_NS, 0, 0);
mutex_exit(&nvme->n_minor_mutex);
goto copyout;
}
if (lock.nil_ent == NVME_LOCK_E_NS &&
(minor->nm_ctrl_lock.nli_state == NVME_LOCK_STATE_ACQUIRED &&
minor->nm_ctrl_lock.nli_curlevel == NVME_LOCK_L_WRITE)) {
(void) nvme_ioctl_error(&lock.nil_common,
NVME_IOCTL_E_LOCK_NO_NS_WITH_CTRL_WRLOCK, 0, 0);
mutex_exit(&nvme->n_minor_mutex);
goto copyout;
}
if (lock.nil_ent == NVME_LOCK_E_NS &&
minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_UNLOCKED) {
(void) nvme_ioctl_error(&lock.nil_common,
NVME_IOCTL_E_LOCK_NO_2ND_NS, 0, 0);
mutex_exit(&nvme->n_minor_mutex);
goto copyout;
}
#ifdef DEBUG
if (lock.nil_ent == NVME_LOCK_E_NS) {
ASSERT3P(minor->nm_ns_lock.nli_lock, ==, NULL);
ASSERT3U(minor->nm_ns_lock.nli_state, ==,
NVME_LOCK_STATE_UNLOCKED);
ASSERT3U(minor->nm_ns_lock.nli_curlevel, ==, 0);
ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL);
if (minor->nm_ns != NULL) {
ASSERT3U(minor->nm_ns->ns_id, ==,
lock.nil_common.nioc_nsid);
}
ASSERT0(list_link_active(&minor->nm_ns_lock.nli_node));
} else {
ASSERT3P(minor->nm_ctrl_lock.nli_lock, ==, NULL);
ASSERT3U(minor->nm_ctrl_lock.nli_state, ==,
NVME_LOCK_STATE_UNLOCKED);
ASSERT3U(minor->nm_ctrl_lock.nli_curlevel, ==, 0);
ASSERT3P(minor->nm_ctrl_lock.nli_ns, ==, NULL);
ASSERT0(list_link_active(&minor->nm_ctrl_lock.nli_node));
ASSERT3P(minor->nm_ns_lock.nli_lock, ==, NULL);
ASSERT3U(minor->nm_ns_lock.nli_state, ==,
NVME_LOCK_STATE_UNLOCKED);
ASSERT3U(minor->nm_ns_lock.nli_curlevel, ==, 0);
ASSERT3P(minor->nm_ns_lock.nli_ns, ==, NULL);
ASSERT0(list_link_active(&minor->nm_ns_lock.nli_node));
}
#endif
nvme_rwlock(minor, &lock);
mutex_exit(&nvme->n_minor_mutex);
copyout:
if (ddi_copyout(&lock, (void *)(uintptr_t)arg, sizeof (lock),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
return (0);
}
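/*
 * Release a lock previously taken with NVME_IOC_LOCK, verifying that the
 * caller actually holds a lock on the entity (and, for a namespace, the
 * nsid) named in the request.
 */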
static int
nvme_ioctl_unlock(nvme_minor_t *minor, intptr_t arg, int mode,
cred_t *cred_p)
{
nvme_ioctl_unlock_t unlock;
nvme_t *const nvme = minor->nm_ctrl;
boolean_t is_ctrl;
nvme_lock_t *lock;
nvme_minor_lock_info_t *info;
if ((mode & FWRITE) == 0)
return (EBADF);
if (ddi_copyin((void *)(uintptr_t)arg, &unlock, sizeof (unlock),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
if (unlock.niu_ent != NVME_LOCK_E_CTRL &&
unlock.niu_ent != NVME_LOCK_E_NS) {
(void) nvme_ioctl_error(&unlock.niu_common,
NVME_IOCTL_E_BAD_LOCK_ENTITY, 0, 0);
goto copyout;
}
if (!nvme_ioctl_check(minor, &unlock.niu_common, &nvme_check_locking)) {
goto copyout;
}
if (unlock.niu_common.nioc_nsid != 0 &&
unlock.niu_ent == NVME_LOCK_E_CTRL) {
(void) nvme_ioctl_error(&unlock.niu_common,
NVME_IOCTL_E_NS_CANNOT_UNLOCK_CTRL, 0, 0);
goto copyout;
}
mutex_enter(&nvme->n_minor_mutex);
if (unlock.niu_ent == NVME_LOCK_E_CTRL) {
if (minor->nm_ctrl_lock.nli_state != NVME_LOCK_STATE_ACQUIRED) {
mutex_exit(&nvme->n_minor_mutex);
(void) nvme_ioctl_error(&unlock.niu_common,
NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
goto copyout;
}
} else {
if (minor->nm_ns_lock.nli_ns == NULL) {
mutex_exit(&nvme->n_minor_mutex);
(void) nvme_ioctl_error(&unlock.niu_common,
NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
goto copyout;
}
if (minor->nm_ns_lock.nli_ns->ns_id !=
unlock.niu_common.nioc_nsid) {
mutex_exit(&nvme->n_minor_mutex);
ASSERT3P(minor->nm_ns, ==, NULL);
(void) nvme_ioctl_error(&unlock.niu_common,
NVME_IOCTL_E_LOCK_WRONG_NS, 0, 0);
goto copyout;
}
if (minor->nm_ns_lock.nli_state != NVME_LOCK_STATE_ACQUIRED) {
mutex_exit(&nvme->n_minor_mutex);
(void) nvme_ioctl_error(&unlock.niu_common,
NVME_IOCTL_E_LOCK_NOT_HELD, 0, 0);
goto copyout;
}
}
is_ctrl = unlock.niu_ent == NVME_LOCK_E_CTRL;
if (is_ctrl) {
lock = &nvme->n_lock;
info = &minor->nm_ctrl_lock;
} else {
nvme_namespace_t *ns;
const uint32_t nsid = unlock.niu_common.nioc_nsid;
ns = nvme_nsid2ns(nvme, nsid);
lock = &ns->ns_lock;
info = &minor->nm_ns_lock;
VERIFY3P(ns, ==, info->nli_ns);
}
nvme_rwunlock(info, lock);
mutex_exit(&nvme->n_minor_mutex);
nvme_ioctl_success(&unlock.niu_common);
copyout:
if (ddi_copyout(&unlock, (void *)(uintptr_t)arg, sizeof (unlock),
mode & FKIOCTL) != 0) {
return (EFAULT);
}
return (0);
}
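/*
 * Main ioctl entry point. Apart from devctl ioctls, everything is
 * dispatched through the per-command handlers above. On a dead controller
 * only blkdev detach and unlock remain usable; other NVMe ioctls get the
 * saved death status copied back out.
 */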
static int
nvme_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
int *rval_p)
{
#ifndef __lock_lint
_NOTE(ARGUNUSED(rval_p));
#endif
int ret;
nvme_minor_t *minor;
nvme_t *nvme;
minor = nvme_minor_find_by_dev(dev);
if (minor == NULL) {
return (ENXIO);
}
nvme = minor->nm_ctrl;
if (nvme == NULL)
return (ENXIO);
if (IS_DEVCTL(cmd))
return (ndi_devctl_ioctl(nvme->n_dip, cmd, arg, mode, 0));
if (nvme->n_dead && (cmd != NVME_IOC_BD_DETACH && cmd !=
NVME_IOC_UNLOCK)) {
if (IS_NVME_IOC(cmd) == 0) {
return (EIO);
}
return (nvme_ioctl_copyout_error(nvme->n_dead_status, arg,
mode));
}
switch (cmd) {
case NVME_IOC_CTRL_INFO:
ret = nvme_ioctl_ctrl_info(minor, arg, mode, cred_p);
break;
case NVME_IOC_IDENTIFY:
ret = nvme_ioctl_identify(minor, arg, mode, cred_p);
break;
case NVME_IOC_GET_LOGPAGE:
ret = nvme_ioctl_get_logpage(minor, arg, mode, cred_p);
break;
case NVME_IOC_GET_FEATURE:
ret = nvme_ioctl_get_feature(minor, arg, mode, cred_p);
break;
case NVME_IOC_BD_DETACH:
ret = nvme_ioctl_bd_detach(minor, arg, mode, cred_p);
break;
case NVME_IOC_BD_ATTACH:
ret = nvme_ioctl_bd_attach(minor, arg, mode, cred_p);
break;
case NVME_IOC_FORMAT:
ret = nvme_ioctl_format(minor, arg, mode, cred_p);
break;
case NVME_IOC_FIRMWARE_DOWNLOAD:
ret = nvme_ioctl_firmware_download(minor, arg, mode, cred_p);
break;
case NVME_IOC_FIRMWARE_COMMIT:
ret = nvme_ioctl_firmware_commit(minor, arg, mode, cred_p);
break;
case NVME_IOC_NS_INFO:
ret = nvme_ioctl_ns_info(minor, arg, mode, cred_p);
break;
case NVME_IOC_PASSTHRU:
ret = nvme_ioctl_passthru(minor, arg, mode, cred_p);
break;
case NVME_IOC_LOCK:
ret = nvme_ioctl_lock(minor, arg, mode, cred_p);
break;
case NVME_IOC_UNLOCK:
ret = nvme_ioctl_unlock(minor, arg, mode, cred_p);
break;
case NVME_IOC_CTRL_DETACH:
ret = nvme_ioctl_ctrl_detach(minor, arg, mode, cred_p);
break;
case NVME_IOC_CTRL_ATTACH:
ret = nvme_ioctl_ctrl_attach(minor, arg, mode, cred_p);
break;
case NVME_IOC_NS_CREATE:
ret = nvme_ioctl_ns_create(minor, arg, mode, cred_p);
break;
case NVME_IOC_NS_DELETE:
ret = nvme_ioctl_ns_delete(minor, arg, mode, cred_p);
break;
default:
ret = ENOTTY;
break;
}
ASSERT(!nvme_mgmt_lock_held(nvme));
return (ret);
}
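/*
 * DDI UFM callbacks reporting the controller's firmware image and slots.
 * Slot information comes from the cached firmware slot log page, fetched
 * on demand.
 */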
static int
nvme_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
ddi_ufm_image_t *img)
{
nvme_t *nvme = arg;
if (imgno != 0)
return (EINVAL);
ddi_ufm_image_set_desc(img, "Firmware");
ddi_ufm_image_set_nslots(img, nvme->n_idctl->id_frmw.fw_nslot);
return (0);
}
static int
nvme_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
uint_t slotno, ddi_ufm_slot_t *slot)
{
nvme_t *nvme = arg;
void *log = NULL;
size_t bufsize;
ddi_ufm_attr_t attr = 0;
char fw_ver[NVME_FWVER_SZ + 1];
if (imgno > 0 || slotno > (nvme->n_idctl->id_frmw.fw_nslot - 1))
return (EINVAL);
mutex_enter(&nvme->n_fwslot_mutex);
if (nvme->n_fwslot == NULL) {
if (!nvme_get_logpage_int(nvme, B_TRUE, &log, &bufsize,
NVME_LOGPAGE_FWSLOT) ||
bufsize != sizeof (nvme_fwslot_log_t)) {
if (log != NULL)
kmem_free(log, bufsize);
mutex_exit(&nvme->n_fwslot_mutex);
return (EIO);
}
nvme->n_fwslot = (nvme_fwslot_log_t *)log;
}
if (slotno == (nvme->n_fwslot->fw_afi - 1))
attr |= DDI_UFM_ATTR_ACTIVE;
if (slotno != 0 || nvme->n_idctl->id_frmw.fw_readonly == 0)
attr |= DDI_UFM_ATTR_WRITEABLE;
if (nvme->n_fwslot->fw_frs[slotno][0] == '\0') {
attr |= DDI_UFM_ATTR_EMPTY;
} else {
(void) strncpy(fw_ver, nvme->n_fwslot->fw_frs[slotno],
NVME_FWVER_SZ);
fw_ver[NVME_FWVER_SZ] = '\0';
ddi_ufm_slot_set_version(slot, fw_ver);
}
mutex_exit(&nvme->n_fwslot_mutex);
ddi_ufm_slot_set_attrs(slot, attr);
return (0);
}
static int
nvme_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
{
*caps = DDI_UFM_CAP_REPORT;
return (0);
}
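/*
 * Convenience wrapper to check whether the controller implements at least
 * the given NVMe version.
 */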
boolean_t
nvme_ctrl_atleast(nvme_t *nvme, const nvme_version_t *min)
{
return (nvme_vers_atleast(&nvme->n_version, min) ? B_TRUE : B_FALSE);
}