#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/scsi/adapters/mfi/mfi.h>
#include <sys/scsi/adapters/mfi/mfi_evt.h>
#include <sys/scsi/adapters/mfi/mfi_ld.h>
#include "lmrc.h"
#include "lmrc_reg.h"
#include "lmrc_raid.h"
static int lmrc_get_raidmap(lmrc_t *, lmrc_fw_raid_map_t **);
static int lmrc_sync_raidmap(lmrc_t *);
static void lmrc_sync_raidmap_again(lmrc_t *, lmrc_mfi_cmd_t *);
static void lmrc_complete_sync_raidmap(lmrc_t *, lmrc_mfi_cmd_t *);
static int lmrc_validate_raidmap(lmrc_t *, lmrc_fw_raid_map_t *);
static void lmrc_raid_tgt_activate_cb(void *, char *, scsi_tgtmap_tgt_type_t,
void **);
static boolean_t lmrc_raid_tgt_deactivate_cb(void *, char *,
scsi_tgtmap_tgt_type_t, void *, scsi_tgtmap_deact_rsn_t);
static struct buf *lmrc_raid_send_inquiry(lmrc_t *, lmrc_tgt_t *, uint8_t,
uint8_t);
static uint64_t lmrc_raid_get_wwn(lmrc_t *, uint8_t);
static int lmrc_raid_update_tgtmap(lmrc_t *, mfi_ld_tgtid_list_t *);
/*
 * lmrc_get_raidmap
 *
 * Get the RAID map from the firmware with a blocking MFI DCMD. Allocate a
 * buffer of the firmware-reported map size, copy the map into it, and return
 * it through "raidmap". The caller owns the buffer and must free it with
 * kmem_free(.., rm_raidmap_sz).
 */
static int
lmrc_get_raidmap(lmrc_t *lmrc, lmrc_fw_raid_map_t **raidmap)
{
	lmrc_mfi_cmd_t *mfi;
	lmrc_fw_raid_map_t *rm;
	int ret;

	/* Read-direction DCMD sized for the largest map the HBA supports. */
	mfi = lmrc_get_dcmd(lmrc, MFI_FRAME_DIR_READ, MFI_DCMD_LD_MAP_GET_INFO,
	    lmrc->l_max_map_sz, 4);

	if (mfi == NULL)
		return (DDI_FAILURE);

	ret = lmrc_issue_blocked_mfi(lmrc, mfi);

	if (ret != DDI_SUCCESS)
		goto out;

	/* Make the device-written map visible to the CPU before reading. */
	(void) ddi_dma_sync(mfi->mfi_data_dma.ld_hdl, 0,
	    mfi->mfi_data_dma.ld_len, DDI_DMA_SYNC_FORKERNEL);

	rm = mfi->mfi_data_dma.ld_buf;

	/* Sanity-check the FW-reported size against our DMA buffer size. */
	if (rm->rm_raidmap_sz > lmrc->l_max_map_sz) {
		dev_err(lmrc->l_dip, CE_WARN,
		    "!FW reports a too large RAID map size: %d",
		    rm->rm_raidmap_sz);
		ret = DDI_FAILURE;
		goto out;
	}

	*raidmap = kmem_zalloc(rm->rm_raidmap_sz, KM_SLEEP);
	bcopy(rm, *raidmap, rm->rm_raidmap_sz);

out:
	lmrc_put_dcmd(lmrc, mfi);
	return (ret);
}
/*
 * lmrc_sync_raidmap
 *
 * Set up a write-direction DCMD used to keep the RAID map in sync with the
 * firmware. The mailbox carries the LD count and the MFI_DCMD_MBOX_PEND_FLAG,
 * so the firmware holds the command until the LD configuration changes. The
 * command is then armed asynchronously via lmrc_sync_raidmap_again(), and its
 * completion handler (lmrc_complete_sync_raidmap()) re-arms it each time.
 */
static int
lmrc_sync_raidmap(lmrc_t *lmrc)
{
	lmrc_fw_raid_map_t *rm;
	lmrc_mfi_cmd_t *mfi;
	mfi_dcmd_payload_t *dcmd;

	/* Hold the map lock as reader while sizing the DCMD from the map. */
	rw_enter(&lmrc->l_raidmap_lock, RW_READER);
	rm = lmrc->l_raidmap;
	mfi = lmrc_get_dcmd(lmrc, MFI_FRAME_DIR_WRITE, MFI_DCMD_LD_MAP_GET_INFO,
	    rm->rm_raidmap_sz, 4);

	if (mfi == NULL) {
		rw_exit(&lmrc->l_raidmap_lock);
		return (DDI_FAILURE);
	}

	dcmd = &mfi->mfi_frame->mf_dcmd;
	dcmd->md_mbox_8[0] = rm->rm_ld_count;
	/* Ask FW to pend the command until the LD configuration changes. */
	dcmd->md_mbox_8[1] = MFI_DCMD_MBOX_PEND_FLAG;
	rw_exit(&lmrc->l_raidmap_lock);

	/* lmrc_sync_raidmap_again() expects the command mutex to be held. */
	mutex_enter(&mfi->mfi_lock);
	lmrc_sync_raidmap_again(lmrc, mfi);
	mutex_exit(&mfi->mfi_lock);

	return (DDI_SUCCESS);
}
/*
 * lmrc_sync_raidmap_again
 *
 * (Re-)arm the RAID map sync command: fill the DMA buffer with one LD
 * reference (target id and sequence number) per LD in the current RAID map,
 * refresh the LD count in the mailbox, and issue the command asynchronously
 * with lmrc_complete_sync_raidmap() as completion callback. The caller must
 * hold the command's mutex.
 */
static void
lmrc_sync_raidmap_again(lmrc_t *lmrc, lmrc_mfi_cmd_t *mfi)
{
	lmrc_fw_raid_map_t *rm;
	lmrc_dma_t *dma = &mfi->mfi_data_dma;
	mfi_ld_ref_t *ld_sync = dma->ld_buf;
	mfi_dcmd_payload_t *dcmd = &mfi->mfi_frame->mf_dcmd;
	uint32_t ld;

	/* Clear out stale data from any previous arming of this command. */
	bzero(dma->ld_buf, dma->ld_len);

	rw_enter(&lmrc->l_raidmap_lock, RW_READER);
	rm = lmrc->l_raidmap;
	for (ld = 0; ld < rm->rm_ld_count; ld++) {
		lmrc_ld_raid_t *lr = lmrc_ld_raid_get(ld, rm);

		ASSERT(lr != NULL);
		ld_sync[ld].lr_tgtid = lr->lr_target_id;
		ld_sync[ld].lr_seqnum = lr->lr_seq_num;
	}
	dcmd->md_mbox_8[0] = rm->rm_ld_count;
	rw_exit(&lmrc->l_raidmap_lock);

	ASSERT(mutex_owned(&mfi->mfi_lock));
	lmrc_issue_mfi(lmrc, mfi, lmrc_complete_sync_raidmap);
}
/*
 * lmrc_complete_sync_raidmap
 *
 * Completion callback for the RAID map sync command, called with the
 * command's mutex held. The firmware completes this command when the LD
 * configuration changed, delivering the updated RAID map in the DMA buffer.
 *
 * If the command failed with MFI_STAT_NOT_FOUND it was aborted (presumably
 * during a controller reset -- TODO confirm); keep the command for later
 * re-use. On any other failure, log it and return the command to the free
 * list through a taskq, as blocking is not possible in this context. On
 * success, validate the new map, copy it over the current one, and re-arm
 * the command.
 */
static void
lmrc_complete_sync_raidmap(lmrc_t *lmrc, lmrc_mfi_cmd_t *mfi)
{
	mfi_header_t *hdr = &mfi->mfi_frame->mf_hdr;
	lmrc_dma_t *dma = &mfi->mfi_data_dma;
	lmrc_fw_raid_map_t *rm = dma->ld_buf;

	ASSERT(mutex_owned(&mfi->mfi_lock));

	if (hdr->mh_cmd_status != MFI_STAT_OK) {
		/* Was the command aborted? If so, keep it for re-use. */
		if (hdr->mh_cmd_status == MFI_STAT_NOT_FOUND)
			return;

		dev_err(lmrc->l_dip, CE_WARN,
		    "!LD target map sync failed, status = %d",
		    hdr->mh_cmd_status);
		taskq_dispatch_ent(lmrc->l_taskq, (task_func_t *)lmrc_put_mfi,
		    mfi, TQ_NOSLEEP, &mfi->mfi_tqent);
		return;
	}

	/*
	 * NOTE(review): if validation fails the command is neither freed nor
	 * re-armed, so map syncing silently stops -- confirm this is intended.
	 */
	if (lmrc_validate_raidmap(lmrc, rm) != DDI_SUCCESS)
		return;

	rw_enter(&lmrc->l_raidmap_lock, RW_WRITER);
	VERIFY3U(lmrc->l_raidmap->rm_raidmap_sz, ==, dma->ld_len);
	bcopy(rm, lmrc->l_raidmap, lmrc->l_raidmap->rm_raidmap_sz);
	rw_exit(&lmrc->l_raidmap_lock);

	lmrc_sync_raidmap_again(lmrc, mfi);
}
/*
 * lmrc_validate_raidmap
 *
 * Basic sanity checks on a RAID map delivered by the firmware: the
 * descriptor table must lie within the map, hold exactly the expected number
 * of descriptors, and every descriptor's buffer must fall inside the map as
 * well. As a side effect, the per-type descriptor pointers in the map
 * (rm_desc_ptrs) are set up from the validated descriptor table.
 *
 * NOTE(review): the bounds checks sum FW-supplied offset/size fields without
 * explicit overflow checks; if those fields are narrow enough to wrap, a
 * corrupted map could pass -- confirm the field widths in the map definition.
 */
static int
lmrc_validate_raidmap(lmrc_t *lmrc, lmrc_fw_raid_map_t *raidmap)
{
	lmrc_raid_map_desc_t *desc;
	int i;

	/* The descriptor table must be contained within the map. */
	if (raidmap->rm_desc_table_off > raidmap->rm_raidmap_sz)
		return (DDI_FAILURE);
	if (raidmap->rm_desc_table_off + raidmap->rm_desc_table_sz >
	    raidmap->rm_raidmap_sz)
		return (DDI_FAILURE);

	/* Expect exactly one descriptor per known descriptor type. */
	if (raidmap->rm_desc_table_nelem != LMRC_RAID_MAP_DESC_TYPES_COUNT)
		return (DDI_FAILURE);
	if (raidmap->rm_desc_table_sz !=
	    raidmap->rm_desc_table_nelem * sizeof (lmrc_raid_map_desc_t))
		return (DDI_FAILURE);

	desc = (lmrc_raid_map_desc_t *)
	    ((uint8_t *)raidmap + raidmap->rm_desc_table_off);

	for (i = 0; i < raidmap->rm_desc_table_nelem; i++) {
		/* The type indexes rm_desc_ptrs below; bounds-check it. */
		if (desc[i].rmd_type >= LMRC_RAID_MAP_DESC_TYPES_COUNT)
			return (DDI_FAILURE);

		/* Descriptor data must start and end within the map. */
		if (desc[i].rmd_off + raidmap->rm_desc_table_off +
		    raidmap->rm_desc_table_sz >
		    raidmap->rm_raidmap_sz)
			return (DDI_FAILURE);
		if (desc[i].rmd_off + desc[i].rmd_bufsz +
		    raidmap->rm_desc_table_off + raidmap->rm_desc_table_sz >
		    raidmap->rm_raidmap_sz)
			return (DDI_FAILURE);

		/* Stash a pointer to each descriptor's data, by type. */
		raidmap->rm_desc_ptrs[desc[i].rmd_type] = (void *)
		    ((uint8_t *)desc + raidmap->rm_desc_table_sz +
		    desc[i].rmd_off);
	}

	return (DDI_SUCCESS);
}
/*
 * lmrc_setup_raidmap
 *
 * Fetch and validate a fresh RAID map from the firmware, install it as the
 * current map (replacing and freeing any previous one), and arm the
 * asynchronous map sync command.
 */
int
lmrc_setup_raidmap(lmrc_t *lmrc)
{
	lmrc_fw_raid_map_t *newmap = NULL;
	int ret;

	ret = lmrc_get_raidmap(lmrc, &newmap);
	if (ret != DDI_SUCCESS)
		return (ret);

	ret = lmrc_validate_raidmap(lmrc, newmap);
	if (ret != DDI_SUCCESS) {
		kmem_free(newmap, newmap->rm_raidmap_sz);
		return (ret);
	}

	/* Swap in the new map under the writer lock. */
	rw_enter(&lmrc->l_raidmap_lock, RW_WRITER);
	lmrc_free_raidmap(lmrc);
	lmrc->l_raidmap = newmap;
	rw_exit(&lmrc->l_raidmap_lock);

	return (lmrc_sync_raidmap(lmrc));
}
/*
 * lmrc_free_raidmap
 *
 * Release the currently installed RAID map, if any, and clear the pointer.
 * NOTE(review): assumes the caller serializes access to l_raidmap; the
 * setup path holds the raidmap writer lock around this call.
 */
void
lmrc_free_raidmap(lmrc_t *lmrc)
{
	lmrc_fw_raid_map_t *rm = lmrc->l_raidmap;

	if (rm == NULL)
		return;

	lmrc->l_raidmap = NULL;
	kmem_free(rm, rm->rm_raidmap_sz);
}
/*
 * lmrc_ld_tm_capable
 *
 * Report whether the LD behind the given target id advertises task
 * management capability in the current RAID map. Returns B_FALSE when no
 * RAID map is installed.
 */
boolean_t
lmrc_ld_tm_capable(lmrc_t *lmrc, uint16_t tgtid)
{
	boolean_t tm = B_FALSE;

	rw_enter(&lmrc->l_raidmap_lock, RW_READER);
	if (lmrc->l_raidmap != NULL) {
		uint16_t ld_id = lmrc_ld_id_get(tgtid, lmrc->l_raidmap);
		lmrc_ld_raid_t *lr = lmrc_ld_raid_get(ld_id, lmrc->l_raidmap);

		tm = (lr->lr_cap.lc_tm_cap != 0);
	}
	rw_exit(&lmrc->l_raidmap_lock);

	return (tm);
}
/*
 * lmrc_raid_tgt_activate_cb
 *
 * SCSA target map activation callback for RAID (LD) targets. The target's
 * slot index within l_targets doubles as its LD target id; initialize the
 * target state accordingly.
 */
static void
lmrc_raid_tgt_activate_cb(void *tgtmap_priv, char *tgt_addr,
    scsi_tgtmap_tgt_type_t type, void **tgt_privp)
{
	lmrc_t *lmrc = tgtmap_priv;
	lmrc_tgt_t *tgt = *tgt_privp;
	uint16_t tgtid;

	VERIFY(lmrc == tgt->tgt_lmrc);

	tgtid = tgt - lmrc->l_targets;
	VERIFY3U(tgtid, <, LMRC_MAX_LD);

	lmrc_tgt_init(tgt, tgtid, tgt_addr, NULL);
}
/*
 * lmrc_raid_tgt_deactivate_cb
 *
 * SCSA target map deactivation callback for RAID (LD) targets: clear the
 * target state. Always returns B_FALSE (no rediscovery requested).
 */
static boolean_t
lmrc_raid_tgt_deactivate_cb(void *tgtmap_priv, char *tgtaddr,
    scsi_tgtmap_tgt_type_t type, void *tgt_priv, scsi_tgtmap_deact_rsn_t deact)
{
	lmrc_t *l = tgtmap_priv;
	lmrc_tgt_t *t = tgt_priv;

	VERIFY(l == t->tgt_lmrc);
	lmrc_tgt_clear(t);

	return (B_FALSE);
}
/*
 * lmrc_raid_send_inquiry
 *
 * Send an INQUIRY with the given EVPD setting and page code to a LD target,
 * addressing it through a temporary scsi_device built on the stack. On
 * success, return the buf holding the INQUIRY response; the caller must free
 * it with scsi_free_consistent_buf(). Return NULL on failure.
 */
static struct buf *
lmrc_raid_send_inquiry(lmrc_t *lmrc, lmrc_tgt_t *tgt, uint8_t evpd,
    uint8_t page_code)
{
	struct buf *inq_bp = NULL;
	struct scsi_pkt *inq_pkt = NULL;
	const size_t len = 0xf0;	/* INQUIRY allocation length */
	struct scsi_device sd;
	int ret;

	/*
	 * Build a minimal scsi_device, just good enough for the SCSA buf and
	 * packet allocation routines, carrying our tgt as HBA private data.
	 */
	bzero(&sd, sizeof (sd));
	sd.sd_address.a_hba_tran = ddi_get_driver_private(lmrc->l_raid_dip);
	sd.sd_address.a.a_sd = &sd;
	scsi_device_hba_private_set(&sd, tgt);

	inq_bp = scsi_alloc_consistent_buf(&sd.sd_address, NULL,
	    len, B_READ, SLEEP_FUNC, NULL);
	if (inq_bp == NULL)
		goto out;

	inq_pkt = scsi_init_pkt(&sd.sd_address, NULL, inq_bp, CDB_GROUP0,
	    sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT, SLEEP_FUNC,
	    NULL);
	if (inq_pkt == NULL)
		goto fail;

	/* 6-byte INQUIRY CDB, with EVPD and page code patched in. */
	(void) scsi_setup_cdb((union scsi_cdb *)inq_pkt->pkt_cdbp,
	    SCMD_INQUIRY, 0, len, 0);
	inq_pkt->pkt_cdbp[1] = evpd;
	inq_pkt->pkt_cdbp[2] = page_code;

	/* Issue the command synchronously. */
	ret = scsi_poll(inq_pkt);
	scsi_destroy_pkt(inq_pkt);

	/* "fail" is also reached directly on packet allocation failure. */
	if (ret != 0) {
fail:
		scsi_free_consistent_buf(inq_bp);
		inq_bp = NULL;
	}
out:
	return (inq_bp);
}
/*
 * lmrc_raid_get_wwn
 *
 * Derive a WWN for a LD target: send a standard INQUIRY and a VPD page 0x83
 * (device identification) INQUIRY, encode the responses into a devid, and
 * convert the devid GUID into a WWN. Returns 0 if no WWN could be derived.
 *
 * Fix: the devid returned by ddi_devid_scsi_encode() was never released,
 * leaking kernel memory on every call; it is now freed with ddi_devid_free()
 * once the GUID has been derived from it.
 */
static uint64_t
lmrc_raid_get_wwn(lmrc_t *lmrc, uint8_t tgtid)
{
	lmrc_tgt_t *tgt = &lmrc->l_targets[tgtid];
	char *guid = NULL;
	struct buf *inq_bp = NULL, *inq83_bp = NULL;
	uint64_t wwn = 0;
	ddi_devid_t devid;
	int ret;

	/* Record the target's device id, verifying consistency if set. */
	rw_enter(&tgt->tgt_lock, RW_WRITER);
	VERIFY3U(tgt->tgt_lmrc, ==, lmrc);
	if (tgt->tgt_dev_id == LMRC_DEVHDL_INVALID)
		tgt->tgt_dev_id = tgtid;
	else
		VERIFY3U(tgt->tgt_dev_id, ==, tgtid);
	rw_exit(&tgt->tgt_lock);

	inq_bp = lmrc_raid_send_inquiry(lmrc, tgt, 0, 0);
	if (inq_bp == NULL)
		goto fail;

	inq83_bp = lmrc_raid_send_inquiry(lmrc, tgt, 1, 0x83);
	if (inq83_bp == NULL)
		goto fail;

	ret = ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION1,
	    NULL, (uchar_t *)inq_bp->b_un.b_addr, sizeof (struct scsi_inquiry),
	    NULL, 0, (uchar_t *)inq83_bp->b_un.b_addr, inq83_bp->b_bcount,
	    &devid);
	if (ret != DDI_SUCCESS)
		goto fail;

	guid = ddi_devid_to_guid(devid);

	/* The devid was only needed to derive the GUID; free it either way. */
	ddi_devid_free(devid);

	if (guid == NULL)
		goto fail;

	(void) scsi_wwnstr_to_wwn(guid, &wwn);
	ddi_devid_free_guid(guid);

fail:
	if (inq_bp != NULL)
		scsi_free_consistent_buf(inq_bp);

	if (inq83_bp != NULL)
		scsi_free_consistent_buf(inq83_bp);

	return (wwn);
}
/*
 * lmrc_raid_update_tgtmap
 *
 * Feed the LD target id list reported by the firmware into the RAID target
 * map. Each LD is named by its WWN if one can be derived from INQUIRY data,
 * or by its numeric target id otherwise. On any error the pending target map
 * update is flushed and DDI_FAILURE is returned.
 *
 * Fix: the per-entry target id check used ">", admitting a target id equal
 * to l_fw_supported_vd_count even though valid LD target ids run from 0 to
 * count - 1; such an id indexes one slot past the valid LD range of
 * l_targets and can trip the VERIFY3U(tgtid, <, LMRC_MAX_LD) in the
 * activation callback. Use ">=".
 */
static int
lmrc_raid_update_tgtmap(lmrc_t *lmrc, mfi_ld_tgtid_list_t *ld_list)
{
	int ret;
	int i;

	if (ld_list->ltl_count > lmrc->l_fw_supported_vd_count)
		return (DDI_FAILURE);

	ret = scsi_hba_tgtmap_set_begin(lmrc->l_raid_tgtmap);
	if (ret != DDI_SUCCESS)
		return (ret);

	for (i = 0; i < ld_list->ltl_count; i++) {
		uint8_t tgtid = ld_list->ltl_tgtid[i];
		char name[SCSI_WWN_BUFLEN];
		uint64_t wwn;

		/* Valid LD target ids are 0 .. vd_count - 1, inclusive. */
		if (tgtid >= lmrc->l_fw_supported_vd_count) {
			dev_err(lmrc->l_dip, CE_WARN,
			    "!%s: invalid LD tgt id %d", __func__, tgtid);
			goto fail;
		}

		wwn = lmrc_raid_get_wwn(lmrc, tgtid);
		if (wwn != 0)
			(void) scsi_wwn_to_wwnstr(wwn, 0, name);
		else
			(void) snprintf(name, sizeof (name), "%d", tgtid);

		ret = scsi_hba_tgtmap_set_add(lmrc->l_raid_tgtmap,
		    SCSI_TGT_SCSI_DEVICE, name, &lmrc->l_targets[tgtid]);
		if (ret != DDI_SUCCESS)
			goto fail;
	}

	return (scsi_hba_tgtmap_set_end(lmrc->l_raid_tgtmap, 0));

fail:
	(void) scsi_hba_tgtmap_set_flush(lmrc->l_raid_tgtmap);
	return (DDI_FAILURE);
}
/*
 * lmrc_get_ld_list
 *
 * Query the firmware for the list of LD target ids exposed to the host and
 * feed it into the RAID target map. A no-op (returning success) if the RAID
 * iport isn't attached.
 */
int
lmrc_get_ld_list(lmrc_t *lmrc)
{
	mfi_dcmd_payload_t *dcmd;
	lmrc_mfi_cmd_t *mfi;
	int ret;

	if (!INITLEVEL_ACTIVE(lmrc, LMRC_INITLEVEL_RAID))
		return (DDI_SUCCESS);

	/* List header plus one byte-sized target id per supported VD. */
	mfi = lmrc_get_dcmd(lmrc, MFI_FRAME_DIR_READ, MFI_DCMD_LD_LIST_QUERY,
	    sizeof (mfi_ld_tgtid_list_t) + lmrc->l_fw_supported_vd_count, 1);

	if (mfi == NULL)
		return (DDI_FAILURE);

	dcmd = &mfi->mfi_frame->mf_dcmd;
	dcmd->md_mbox_8[0] = MFI_LD_QUERY_TYPE_EXPOSED_TO_HOST;

	/*
	 * NOTE(review): mailbox byte 2 presumably tells the firmware to use
	 * extended (256 VD) handling -- confirm against the MFI spec.
	 */
	if (lmrc->l_max_256_vd_support)
		dcmd->md_mbox_8[2] = 1;

	ret = lmrc_issue_blocked_mfi(lmrc, mfi);
	if (ret != DDI_SUCCESS)
		goto out;

	ret = lmrc_raid_update_tgtmap(lmrc, mfi->mfi_data_dma.ld_buf);

out:
	lmrc_put_dcmd(lmrc, mfi);
	return (ret);
}
/*
 * lmrc_raid_aen_handler
 *
 * Handle an async event notification concerning logical drives. Events that
 * change the set or state of visible LDs trigger a refresh of the LD list;
 * the other known LD events are acknowledged without action. Unknown event
 * codes return DDI_FAILURE.
 */
int
lmrc_raid_aen_handler(lmrc_t *lmrc, mfi_evt_detail_t *evt)
{
	switch (evt->evt_code) {
	case MFI_EVT_LD_FAST_INIT_STARTED:
	case MFI_EVT_LD_FULL_INIT_STARTED:
	case MFI_EVT_LD_INIT_ABORTED:
	case MFI_EVT_LD_INIT_COMPLETE:
	case MFI_EVT_LD_OFFLINE:
	case MFI_EVT_LD_DELETED:
	case MFI_EVT_LD_CREATED:
	case MFI_EVT_LD_STATE_CHANGE:
		/* The set of LDs changed; update the target map. */
		return (lmrc_get_ld_list(lmrc));

	case MFI_EVT_LD_CC_STARTED:
	case MFI_EVT_LD_CC_PROGRESS:
	case MFI_EVT_LD_CC_COMPLETE:
	case MFI_EVT_LD_BG_INIT_PROGRESS:
	case MFI_EVT_LD_INIT_PROGRESS:
	case MFI_EVT_LD_BBT_CLEARED:
	case MFI_EVT_LD_PROP_CHANGED:
	case MFI_EVT_LD_OPTIMAL:
	case MFI_EVT_LD_AVAILABLE:
		/* Known events that require no action. */
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
/*
 * lmrc_raid_attach
 *
 * Attach the RAID (LD) iport: create its target map, read the initial RAID
 * map from the firmware, and populate the target map with the current LDs.
 *
 * Fix: on failure after the target map was created, the map, l_raid_dip, and
 * (for the LD list failure) the RAID init level were left behind. Since the
 * framework does not call lmrc_raid_detach() after a failed attach, this
 * leaked the target map; tear down partial state before returning failure.
 */
int
lmrc_raid_attach(dev_info_t *dip)
{
	scsi_hba_tran_t *tran = ddi_get_driver_private(dip);
	dev_info_t *pdip = ddi_get_parent(dip);
	lmrc_t *lmrc = ddi_get_soft_state(lmrc_state, ddi_get_instance(pdip));
	int ret;

	VERIFY(tran != NULL);
	VERIFY(lmrc != NULL);

	if (lmrc->l_fw_fault)
		return (DDI_FAILURE);

	tran->tran_hba_private = lmrc;
	lmrc->l_raid_dip = dip;

	ret = scsi_hba_tgtmap_create(dip, SCSI_TM_FULLSET, MICROSEC,
	    2 * MICROSEC, lmrc, lmrc_raid_tgt_activate_cb,
	    lmrc_raid_tgt_deactivate_cb, &lmrc->l_raid_tgtmap);
	if (ret != DDI_SUCCESS) {
		lmrc->l_raid_dip = NULL;
		return (ret);
	}

	ret = lmrc_setup_raidmap(lmrc);
	if (ret != DDI_SUCCESS) {
		dev_err(lmrc->l_dip, CE_WARN, "!RAID map setup failed.");
		goto fail;
	}

	INITLEVEL_SET(lmrc, LMRC_INITLEVEL_RAID);

	ret = lmrc_get_ld_list(lmrc);
	if (ret != DDI_SUCCESS) {
		dev_err(lmrc->l_dip, CE_WARN, "!Failed to get LD list.");
		INITLEVEL_CLEAR(lmrc, LMRC_INITLEVEL_RAID);
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	/* Undo partial setup; detach won't be called for a failed attach. */
	scsi_hba_tgtmap_destroy(lmrc->l_raid_tgtmap);
	lmrc->l_raid_tgtmap = NULL;
	lmrc->l_raid_dip = NULL;
	return (DDI_FAILURE);
}
/*
 * lmrc_raid_detach
 *
 * Tear down the RAID iport state: drop the RAID init level and destroy the
 * target map, if one exists. Always returns DDI_SUCCESS.
 */
int
lmrc_raid_detach(dev_info_t *dip)
{
	dev_info_t *pdip = ddi_get_parent(dip);
	lmrc_t *lmrc = ddi_get_soft_state(lmrc_state, ddi_get_instance(pdip));
	scsi_hba_tgtmap_t *map;

	VERIFY(lmrc != NULL);
	INITLEVEL_CLEAR(lmrc, LMRC_INITLEVEL_RAID);

	map = lmrc->l_raid_tgtmap;
	lmrc->l_raid_tgtmap = NULL;
	if (map != NULL)
		scsi_hba_tgtmap_destroy(map);

	lmrc->l_raid_dip = NULL;

	return (DDI_SUCCESS);
}