#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/fdio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/note.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/efi_partition.h>
#include <sys/var.h>
#include <sys/aio_req.h>
#include <sys/dkioc_free_util.h>
#include <sys/taskq.h>
#include <sys/uuid.h>
#include <sys/byteorder.h>
#include <sys/sdt.h>
#include "sd_xbuf.h"
#include <sys/scsi/targets/sddef.h>
#include <sys/cmlb.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <sys/fm/protocol.h>
/*
 * Driver identity and the property-name strings used for lookups in
 * sd.conf / the devinfo tree.
 */
#define SD_MODULE_NAME "SCSI Disk Driver"
#define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_PARALLEL
static char *sd_label = NULL;
static char *sd_max_xfer_size = "sd_max_xfer_size";
static char *sd_config_list = "sd-config-list";
#ifdef SDDEBUG
/* Debug builds only: nonzero forces power management to appear supported. */
int sd_force_pm_supported = 0;
#endif
/* Anchor for the ddi_soft_state list of all attached sd_lun instances. */
void *sd_state = NULL;
/*
 * Global tunables.  These are plain ints so they can be patched via
 * /etc/system; each applies to every sd instance on the host.
 */
int sd_io_time = SD_IO_TIME;
int sd_failfast_enable = 1;
int sd_ua_retry_count = SD_UA_RETRY_COUNT;
int sd_report_pfa = 1;
int sd_max_throttle = SD_MAX_THROTTLE;
int sd_min_throttle = SD_MIN_THROTTLE;
/* NOTE(review): presumably a rotational-delay tunable in some unit -- verify */
int sd_rot_delay = 4;
int sd_qfull_throttle_enable = TRUE;
int sd_retry_on_reservation_conflict = 1;
int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
_NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))
static int sd_dtype_optical_bind = -1;
/* Name of the per-device override property for the reservation tunable. */
static char *sd_resv_conflict_name = "sd_retry_on_reservation_conflict";
/*
 * Diagnostic controls: component/level masks gate the sd_log_* routines;
 * sd_debug_un, when set, restricts debug output to that one unit.
 */
uint_t sd_component_mask = 0x0;
uint_t sd_level_mask = 0x0;
struct sd_lun *sd_debug_un = NULL;
uint_t sd_error_level = SCSI_ERR_RETRYABLE;
/* Per-instance limits on active and emergency-reserved xbufs. */
static uint32_t sd_xbuf_active_limit = 512;
static uint32_t sd_xbuf_reserve_limit = 16;
/* Shared state for the reservation-reclaim thread (see sd_resv_reclaim_thread). */
static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 };
static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT;
static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT;
/* Interval between media-presence checks; presumably microseconds -- verify. */
static int sd_check_media_time = 3000000;
static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE;
/* Scratch buffer and lock shared by the sd_log_* formatting routines. */
static char sd_log_buf[1024];
static kmutex_t sd_log_mutex;
/*
 * Per-HBA record tracking how many LUNs are attached on each target.
 * One node per parent HBA, linked off sd_scsi_target_lun_head.
 * NOTE(review): usage inferred from the attach/detach flags below --
 * confirm against sd_scsi_update_lun_on_target().
 */
struct sd_scsi_hba_tgt_lun {
struct sd_scsi_hba_tgt_lun *next;	/* next HBA record in the list */
dev_info_t *pdip;	/* parent (HBA) devinfo node */
int nlun[NTARGETS_WIDE];	/* attached LUN count per target */
};
/* Flag values passed to sd_scsi_update_lun_on_target(). */
#define SD_SCSI_LUN_ATTACH 0
#define SD_SCSI_LUN_DETACH 1
/* Protects the list head and every node's next/pdip fields. */
static kmutex_t sd_scsi_target_lun_mutex;
static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL;
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
sd_scsi_target_lun_head))
/*
 * Per-HBA cache of scsi_probe() results, one entry per target, used by
 * sd_scsi_probe_with_cache() to avoid re-probing targets that already
 * failed.  One node per parent HBA, linked off sd_scsi_probe_cache_head.
 */
struct sd_scsi_probe_cache {
struct sd_scsi_probe_cache *next;	/* next HBA record in the list */
dev_info_t *pdip;	/* parent (HBA) devinfo node */
int cache[NTARGETS_WIDE];	/* cached probe result per target */
};
/* Protects the list head and every node's next/pdip fields. */
static kmutex_t sd_scsi_probe_cache_mutex;
static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))
_NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
sd_scsi_probe_cache_head))
/*
 * Power-management attribute tables.  sd_pwr_ss describes devices that
 * support only START/STOP (two power states: off/on); sd_pwr_pc describes
 * devices supporting the POWER CONDITION field (four states: stopped,
 * standby, idle, active).  The first member is the pm-components property
 * string list; NOTE(review): the meaning of the three numeric rows
 * (ratios/thresholds/latencies?) is inferred from shape -- confirm
 * against the sd_power_attr_ss/_pc definitions in sddef.h.
 */
static sd_power_attr_ss sd_pwr_ss = {
{ "NAME=spindle-motor", "0=off", "1=on", NULL },
{0, 100},
{30, 0},
{20000, 0}
};
static sd_power_attr_pc sd_pwr_pc = {
{ "NAME=spindle-motor", "0=stopped", "1=standby", "2=idle",
"3=active", NULL },
{0, 0, 0, 100},
{90, 90, 20, 0},
{15000, 15000, 1000, 0}
};
/*
 * Maps a pm-component power level (index 0..3) to the corresponding
 * SD_TARGET_* power-condition code for pc-capable devices.
 */
static int sd_pl2pc[] = {
SD_TARGET_START_VALID,
SD_TARGET_STANDBY,
SD_TARGET_IDLE,
SD_TARGET_ACTIVE
};
/*
 * Per-vendor sd_tunables override tables referenced by sd_disk_table.
 * Initializers are positional; judging from the named values used across
 * the tables below (see tst_properties and pirus_properties), the order
 * is: throttle, ctype, not-ready retries, busy retries, reset retries,
 * reserve-release time, min throttle, disksort-disabled flag,
 * lun-reset-enabled flag [, one extra trailing field -- see
 * lsi_oem_properties].  Only the fields selected by the entry's
 * SD_CONF_BSET_* flag mask are actually applied.
 */
static sd_tunables seagate_properties = {
SEAGATE_THROTTLE_VALUE,
0,
0,
0,
0,
0,
0,
0,
0
};
static sd_tunables fujitsu_properties = {
FUJITSU_THROTTLE_VALUE,
0,
0,
0,
0,
0,
0,
0,
0
};
static sd_tunables ibm_properties = {
IBM_THROTTLE_VALUE,
0,
0,
0,
0,
0,
0,
0,
0
};
/* SUN SESS01 (SVE): throttle, busy/reset retries, resv-release, min
 * throttle and disksort-disabled overrides. */
static sd_tunables sve_properties = {
SVE_THROTTLE_VALUE,
0,
0,
SVE_BUSY_RETRIES,
SVE_RESET_RETRY_COUNT,
SVE_RESERVE_RELEASE_TIME,
SVE_MIN_THROTTLE_VALUE,
SVE_DISKSORT_DISABLED_FLAG,
0
};
/* SUN SVE01 (Maserati): only disksort-disabled and lun-reset flags. */
static sd_tunables maserati_properties = {
0,
0,
0,
0,
0,
0,
0,
MASERATI_DISKSORT_DISABLED_FLAG,
MASERATI_LUN_RESET_ENABLED_FLAG
};
/* SUN SE69x0 / PSX1000 / SE6330 (Pirus) family overrides. */
static sd_tunables pirus_properties = {
PIRUS_THROTTLE_VALUE,
0,
PIRUS_NRR_COUNT,
PIRUS_BUSY_RETRIES,
PIRUS_RESET_RETRY_COUNT,
0,
PIRUS_MIN_THROTTLE_VALUE,
PIRUS_DISKSORT_DISABLED_FLAG,
PIRUS_LUN_RESET_ENABLED_FLAG
};
static sd_tunables elite_properties = {
ELITE_THROTTLE_VALUE,
0,
0,
0,
0,
0,
0,
0,
0
};
static sd_tunables st31200n_properties = {
ST31200N_THROTTLE_VALUE,
0,
0,
0,
0,
0,
0,
0,
0
};
static sd_tunables lsi_properties_scsi = {
LSI_THROTTLE_VALUE,
0,
LSI_NOTREADY_RETRIES,
0,
0,
0,
0,
0,
0
};
static sd_tunables symbios_properties = {
SYMBIOS_THROTTLE_VALUE,
0,
SYMBIOS_NOTREADY_RETRIES,
0,
0,
0,
0,
0,
0
};
static sd_tunables lsi_properties = {
0,
0,
LSI_NOTREADY_RETRIES,
0,
0,
0,
0,
0,
0
};
/*
 * LSI OEM arrays.  Note this initializer has a tenth element (1) that the
 * other tables omit (leaving it zero); NOTE(review): presumably it backs
 * the SD_CONF_BSET_CACHE_IS_NV handling used by the CSM entries in
 * sd_disk_table -- confirm against the sd_tunables definition.
 */
static sd_tunables lsi_oem_properties = {
0,
0,
LSI_OEM_NOTREADY_RETRIES,
0,
0,
0,
0,
0,
0,
1
};
/*
 * Test-only tunables, compiled in only when SD_PROP_TST is defined.
 * This table exercises one value in each positional sd_tunables slot
 * (throttle, ctype, not-ready, busy, reset-retry, resv-release).
 */
#if (defined(SD_PROP_TST))
#define SD_TST_CTYPE_VAL CTYPE_CDROM
#define SD_TST_THROTTLE_VAL 16
#define SD_TST_NOTREADY_VAL 12
#define SD_TST_BUSY_VAL 60
#define SD_TST_RST_RETRY_VAL 36
#define SD_TST_RSV_REL_TIME 60
static sd_tunables tst_properties = {
SD_TST_THROTTLE_VAL,
SD_TST_CTYPE_VAL,
SD_TST_NOTREADY_VAL,
SD_TST_BUSY_VAL,
SD_TST_RST_RETRY_VAL,
SD_TST_RSV_REL_TIME,
0,
0,
0
};
#endif
#define SD_TOUPPER(C) (((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))
static sd_disk_config_t sd_disk_table[] = {
{ "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
{ "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
{ "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
{ "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
{ "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
{ "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
{ "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
{ "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
{ "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
{ "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
{ "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
{ "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
{ "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
{ "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
{ "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
{ "FUJITSU MAG3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
{ "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
{ "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
{ "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
{ "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
{ "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
{ "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
{ "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
{ "IBM DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
{ "IBM DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
{ "IBM IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
{ "IBM IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
{ "IBM 1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "IBM 1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "IBM 1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "IBM 1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "IBM 1726-42x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "IBM 1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "IBM 3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "IBM 3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "IBM 3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "IBM 1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "IBM 1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "IBM 1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "IBM FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "IBM 1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "IBM 1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "IBM 1818", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "DELL MD3000", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "DELL MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "LSI INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "SGI TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "SGI IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "*CSM100_*", SD_CONF_BSET_NRR_COUNT |
SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
{ "*CSM200_*", SD_CONF_BSET_NRR_COUNT |
SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
{ "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
{ "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
{ "SUN SESS01", SD_CONF_BSET_THROTTLE |
SD_CONF_BSET_BSY_RETRY_COUNT|
SD_CONF_BSET_RST_RETRIES|
SD_CONF_BSET_RSV_REL_TIME|
SD_CONF_BSET_MIN_THROTTLE|
SD_CONF_BSET_DISKSORT_DISABLED,
&sve_properties },
{ "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
SD_CONF_BSET_LUN_RESET_ENABLED,
&maserati_properties },
{ "SUN SE6920", SD_CONF_BSET_THROTTLE |
SD_CONF_BSET_NRR_COUNT|
SD_CONF_BSET_BSY_RETRY_COUNT|
SD_CONF_BSET_RST_RETRIES|
SD_CONF_BSET_MIN_THROTTLE|
SD_CONF_BSET_DISKSORT_DISABLED|
SD_CONF_BSET_LUN_RESET_ENABLED,
&pirus_properties },
{ "SUN SE6940", SD_CONF_BSET_THROTTLE |
SD_CONF_BSET_NRR_COUNT|
SD_CONF_BSET_BSY_RETRY_COUNT|
SD_CONF_BSET_RST_RETRIES|
SD_CONF_BSET_MIN_THROTTLE|
SD_CONF_BSET_DISKSORT_DISABLED|
SD_CONF_BSET_LUN_RESET_ENABLED,
&pirus_properties },
{ "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE |
SD_CONF_BSET_NRR_COUNT|
SD_CONF_BSET_BSY_RETRY_COUNT|
SD_CONF_BSET_RST_RETRIES|
SD_CONF_BSET_MIN_THROTTLE|
SD_CONF_BSET_DISKSORT_DISABLED|
SD_CONF_BSET_LUN_RESET_ENABLED,
&pirus_properties },
{ "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE |
SD_CONF_BSET_NRR_COUNT|
SD_CONF_BSET_BSY_RETRY_COUNT|
SD_CONF_BSET_RST_RETRIES|
SD_CONF_BSET_MIN_THROTTLE|
SD_CONF_BSET_DISKSORT_DISABLED|
SD_CONF_BSET_LUN_RESET_ENABLED,
&pirus_properties },
{ "SUN PSX1000", SD_CONF_BSET_THROTTLE |
SD_CONF_BSET_NRR_COUNT|
SD_CONF_BSET_BSY_RETRY_COUNT|
SD_CONF_BSET_RST_RETRIES|
SD_CONF_BSET_MIN_THROTTLE|
SD_CONF_BSET_DISKSORT_DISABLED|
SD_CONF_BSET_LUN_RESET_ENABLED,
&pirus_properties },
{ "SUN SE6330", SD_CONF_BSET_THROTTLE |
SD_CONF_BSET_NRR_COUNT|
SD_CONF_BSET_BSY_RETRY_COUNT|
SD_CONF_BSET_RST_RETRIES|
SD_CONF_BSET_MIN_THROTTLE|
SD_CONF_BSET_DISKSORT_DISABLED|
SD_CONF_BSET_LUN_RESET_ENABLED,
&pirus_properties },
{ "SUN STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "SUN SUN_6180", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "STK OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "STK OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "STK BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "STK FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
{ "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
{ "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
{ "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
{ "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
{ "CONNER CP30540", SD_CONF_BSET_NOCACHE, NULL },
{ "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
{ "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
{ "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
{ "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
{ "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
{ "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
{ "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
{ "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
{ "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
&symbios_properties },
{ "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
&lsi_properties_scsi },
{ " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
| SD_CONF_BSET_READSUB_BCD
| SD_CONF_BSET_READ_TOC_ADDR_BCD
| SD_CONF_BSET_NO_READ_HEADER
| SD_CONF_BSET_READ_CD_XD4), NULL },
{ " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
| SD_CONF_BSET_READSUB_BCD
| SD_CONF_BSET_READ_TOC_ADDR_BCD
| SD_CONF_BSET_NO_READ_HEADER
| SD_CONF_BSET_READ_CD_XD4), NULL },
#if (defined(SD_PROP_TST))
{ "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE
| SD_CONF_BSET_CTYPE
| SD_CONF_BSET_NRR_COUNT
| SD_CONF_BSET_FAB_DEVID
| SD_CONF_BSET_NOCACHE
| SD_CONF_BSET_BSY_RETRY_COUNT
| SD_CONF_BSET_PLAYMSF_BCD
| SD_CONF_BSET_READSUB_BCD
| SD_CONF_BSET_READ_TOC_TRK_BCD
| SD_CONF_BSET_READ_TOC_ADDR_BCD
| SD_CONF_BSET_NO_READ_HEADER
| SD_CONF_BSET_READ_CD_XD4
| SD_CONF_BSET_RST_RETRIES
| SD_CONF_BSET_RSV_REL_TIME
| SD_CONF_BSET_TUR_CHECK), &tst_properties},
#endif
};
static const int sd_disk_table_size =
sizeof (sd_disk_table)/ sizeof (sd_disk_config_t);
/*
 * INQUIRY id strings of known solid-state devices; each entry is a
 * fixed 25-byte slot so the table can be scanned with simple compares.
 */
static char sd_flash_dev_table[][25] = {
"ATA MARVELL SD88SA02",
"MARVELL SD88SA02",
"TOSHIBA THNSNV05",
};
static const int sd_flash_dev_table_size =
sizeof (sd_flash_dev_table) / sizeof (sd_flash_dev_table[0]);
/* Interconnect type codes stored in un->un_interconnect_type. */
#define SD_INTERCONNECT_PARALLEL 0
#define SD_INTERCONNECT_FABRIC 1
#define SD_INTERCONNECT_FIBRE 2
#define SD_INTERCONNECT_SSA 3
#define SD_INTERCONNECT_SATA 4
#define SD_INTERCONNECT_SAS 5
#define SD_IS_PARALLEL_SCSI(un) \
((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
#define SD_IS_SERIAL(un) \
(((un)->un_interconnect_type == SD_INTERCONNECT_SATA) ||\
((un)->un_interconnect_type == SD_INTERCONNECT_SAS))
/* Offsets/lengths used when parsing INQUIRY VPD page headers. */
#define VPD_HEAD_OFFSET 3
#define VPD_PAGE_LENGTH 3
#define VPD_MODE_PAGE 1
static kmutex_t sd_sense_mutex = {0};
/*
 * State-transition helpers for un->un_state / un->un_last_state.
 * New_state uses the comma operator so it remains a single expression.
 * WARNING: Restore_state expands to a bare brace block (not do/while(0)),
 * so "if (x) Restore_state(un); else ..." will not parse -- always use
 * it as a standalone statement.
 */
#define New_state(un, s) \
(un)->un_last_state = (un)->un_state, (un)->un_state = (s)
#define Restore_state(un) \
{ uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
/*
 * Per-CDB-group limits, ordered smallest to largest.  Positionally the
 * columns appear to be: CDB length, command group code, maximum
 * addressable LBA, and maximum transfer block count (e.g. group 0 has a
 * 21-bit LBA and 8-bit count) -- NOTE(review): confirm field names
 * against struct sd_cdbinfo in sddef.h.
 */
static struct sd_cdbinfo sd_cdbtab[] = {
{ CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, },
{ CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, },
{ CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
{ CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
};
/* PM idle time tunable; units not evident here -- see sd_pm_idletimeout_handler. */
static int sd_pm_idletime = 1;
/*
 * On-the-wire layouts for the SCSI UNMAP parameter list (SBC-3):
 * an 8-byte header followed by 16-byte block descriptors.  Fields are
 * big-endian on the wire; the limits below keep the total list within
 * the 16-bit parameter list length of the UNMAP CDB.
 */
typedef struct unmap_param_hdr_s {
uint16_t uph_data_len;	/* length of data following this field */
uint16_t uph_descr_data_len;	/* total length of the block descriptors */
uint32_t uph_reserved;
} unmap_param_hdr_t;
typedef struct unmap_blk_descr_s {
uint64_t ubd_lba;	/* starting LBA to unmap */
uint32_t ubd_lba_cnt;	/* number of blocks */
uint32_t ubd_reserved;
} unmap_blk_descr_t;
/* Max number of block descriptors that fit in a 16-bit-length list. */
#define SD_UNMAP_MAX_DESCR \
((UINT16_MAX - sizeof (unmap_param_hdr_t)) / sizeof (unmap_blk_descr_t))
#define SD_UNMAP_PARAM_LIST_MAXSZ (sizeof (unmap_param_hdr_t) + \
SD_UNMAP_MAX_DESCR * sizeof (unmap_blk_descr_t))
/*
 * Function prototypes: module linkage, logging, autoconfiguration,
 * probe/LUN-count caches, and the sd_ssc_t (SCSI sense command)
 * send/assess/report framework.
 */
int _init(void);
int _fini(void);
int _info(struct modinfo *modinfop);
/* Debug logging, gated by sd_component_mask/sd_level_mask. */
static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);
static int sdprobe(dev_info_t *devi);
static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
void **result);
static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
int mod_flags, char *name, caddr_t valuep, int *lengthp);
/* scsi_probe() result cache (see struct sd_scsi_probe_cache above). */
static void sd_scsi_probe_cache_init(void);
static void sd_scsi_probe_cache_fini(void);
static void sd_scsi_clear_probe_cache(void);
static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());
/* Per-target attached-LUN bookkeeping (see struct sd_scsi_hba_tgt_lun). */
static void sd_scsi_target_lun_init(void);
static void sd_scsi_target_lun_fini(void);
static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);
static int sd_spin_up_unit(sd_ssc_t *ssc);
/* sd_ssc_t lifecycle: init, send command, assess result, post ereport. */
static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
int flag, enum uio_seg dataspace, int path_flag);
static void sd_ssc_fini(sd_ssc_t *ssc);
static void sd_ssc_assessment(sd_ssc_t *ssc,
enum sd_type_assessment tp_assess);
static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
static void sd_ssc_ereport_post(sd_ssc_t *ssc,
enum sd_driver_assessment drv_assess);
static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
const char *fmt, ...);
static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);
static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
enum uio_seg dataspace, int path_flag);
/*
 * Prototypes: sense-format setup, sd.conf property/table processing,
 * device-id management, power management, and DDI attach/detach/dump.
 */
static void sd_enable_descr_sense(sd_ssc_t *ssc);
static void sd_reenable_dsense_task(void *arg);
static void sd_set_mmc_caps(sd_ssc_t *ssc);
/* sd.conf "sd-config-list" parsing and per-device tunable application. */
static void sd_read_unit_properties(struct sd_lun *un);
static int sd_process_sdconf_file(struct sd_lun *un);
static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
static void sd_set_properties(struct sd_lun *un, char *name, char *value);
static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
int *data_list, sd_tunables *values);
static void sd_process_sdconf_table(struct sd_lun *un);
static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
int list_len, char *dataname_ptr);
static void sd_set_vers1_properties(struct sd_lun *un, int flags,
sd_tunables *prop_list);
/* Device-id (devid) registration, fabrication, and VPD support probing. */
static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
int reservation_flag);
static int sd_get_devid(sd_ssc_t *ssc);
static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
static int sd_write_deviceid(sd_ssc_t *ssc);
static int sd_check_vpd_page_support(sd_ssc_t *ssc);
/* Power management setup and DDI power/suspend/resume entry points. */
static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);
static int sd_ddi_suspend(dev_info_t *devi);
static int sd_ddi_resume(dev_info_t *devi);
static int sd_pm_state_change(struct sd_lun *un, int level, int flag);
static int sdpower(dev_info_t *devi, int component, int level);
/* DDI attach/detach entry points and their per-unit workers. */
static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int sd_unit_attach(dev_info_t *devi);
static int sd_unit_detach(dev_info_t *devi);
static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
static void sd_create_errstats(struct sd_lun *un, int instance);
static void sd_set_errstats(struct sd_lun *un);
static void sd_set_pstats(struct sd_lun *un);
static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
static int sd_send_polled_RQS(struct sd_lun *un);
static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);
/* Argument values for sd_cache_control()'s rcd_flag/wce_flag. */
#define SD_CACHE_ENABLE 1
#define SD_CACHE_DISABLE 0
#define SD_CACHE_NOCHANGE -1
/*
 * Prototypes: cache control, open/close/read/write entry points, and
 * the layered iostart/iodone command chains plus packet construction.
 */
static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
static void sd_get_write_cache_changeable(sd_ssc_t *ssc, int *is_changeable);
static void sd_get_nv_sup(sd_ssc_t *ssc);
static dev_t sd_make_device(dev_info_t *devi);
static void sd_check_bdc_vpd(sd_ssc_t *ssc);
static void sd_check_emulation_mode(sd_ssc_t *ssc);
static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
uint64_t capacity);
/* cb_ops entry points. */
static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int sd_ready_and_valid(sd_ssc_t *ssc, int part);
static void sdmin(struct buf *bp);
static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int sdstrategy(struct buf *bp);
static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);
/* Layered iostart side of the command processing chains. */
static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
struct buf *bp);
static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
struct buf *bp);
static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
struct buf *bp);
static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);
/* Layered iodone side of the command processing chains. */
static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
struct buf *bp);
static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
struct buf *bp);
static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
struct buf *bp);
static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);
/* scsi_pkt construction/teardown for buf and uscsi commands. */
static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_buf(struct buf *);
static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
struct buf *bp, int flags,
int (*callback)(caddr_t), caddr_t callback_arg,
diskaddr_t lba, uint32_t blockcount);
static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
struct buf *bp, diskaddr_t lba, uint32_t blockcount);
static int sd_uscsi_strategy(struct buf *bp);
static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
static void sd_destroypkt_for_uscsi(struct buf *);
static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
uchar_t chain_type, void *pktinfop);
static int sd_pm_entry(struct sd_lun *un);
static void sd_pm_exit(struct sd_lun *un);
static void sd_pm_idletimeout_handler(void *arg);
static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
static void sdintr(struct scsi_pkt *pktp);
static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);
/*
 * A duplicate prototype of sd_send_scsi_cmd() was removed here; the
 * function is already declared above with the other uscsi support
 * routines (identical signature), so the re-declaration was redundant.
 */
/*
 * Prototypes: buf cloning/shadowing, failure message printing, the
 * retry/throttle machinery, and per-SCSI-status packet handlers.
 */
static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
daddr_t blkno, int (*func)(struct buf *));
static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
static void sd_bioclone_free(struct buf *bp);
static void sd_shadow_buf_free(struct buf *bp);
static void sd_print_transport_rejected_message(struct sd_lun *un,
struct sd_xbuf *xp, int code);
static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
void *arg, int code);
static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
void *arg, int code);
static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
void *arg, int code);
/* Central retry engine; user_funcp is an optional per-failure message hook. */
static void sd_retry_command(struct sd_lun *un, struct buf *bp,
int retry_check_flag,
void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int c),
void *user_arg, int failure_code, clock_t retry_delay,
void (*statp)(kstat_io_t *));
static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
clock_t retry_delay, void (*statp)(kstat_io_t *));
static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
struct scsi_pkt *pktp);
static void sd_start_retry_command(void *arg);
static void sd_start_direct_priority_command(void *arg);
static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
int errcode);
static void sd_return_failed_command_no_restart(struct sd_lun *un,
struct buf *bp, int errcode);
static void sd_return_command(struct sd_lun *un, struct buf *bp);
static void sd_sync_with_callback(struct sd_lun *un);
static int sdrunout(caddr_t arg);
static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);
static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
static void sd_restore_throttle(void *arg);
static void sd_init_cdb_limits(struct sd_lun *un);
/* Dispatch handlers keyed on the returned SCSI status byte. */
static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
/*
 * Prototypes: sense-data validation/decoding, per-sense-key handlers,
 * per-pkt_reason transport-error handlers, and async event tasks.
 */
static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, size_t actual_len);
static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
void *arg, int code);
/* One handler per SCSI sense key. */
static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_recoverable_error(struct sd_lun *un,
uint8_t *sense_datap,
struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_not_ready(struct sd_lun *un,
uint8_t *sense_datap,
struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
uint8_t *sense_datap,
struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_unit_attention(struct sd_lun *un,
uint8_t *sense_datap,
struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_sense_key_default(struct sd_lun *un,
uint8_t *sense_datap,
struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
void *arg, int flag);
/* One handler per transport-level pkt_reason code. */
static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp);
static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);
/* Taskq-dispatched async work and sysevent logging. */
static void sd_start_stop_unit_callback(void *arg);
static void sd_start_stop_unit_task(void *arg);
static void sd_taskq_create(void);
static void sd_taskq_delete(void);
static void sd_target_change_task(void *arg);
static void sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag);
static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
static void sd_log_eject_request_event(struct sd_lun *un, int km_flag);
static void sd_media_change_task(void *arg);
static int sd_handle_mchange(struct sd_lun *un);
/*
 * Prototypes: issuers for specific SCSI commands (named after the
 * command they build and send via the ssc framework).
 */
static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag);
static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp,
uint32_t *lbap, int path_flag);
static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
uint32_t *lbap, uint32_t *psp, int path_flag);
static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag,
int flag, int path_flag);
static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr,
size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag);
static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc,
uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc,
uchar_t usr_cmd, uchar_t *usr_bufp);
static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
struct dk_callback *dkc);
static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
static int sd_send_scsi_UNMAP(dev_t dev, sd_ssc_t *ssc, dkioc_free_list_t *dfl,
int flag);
static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc,
struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
uchar_t *bufaddr, uint_t buflen, int path_flag);
static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc,
struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize,
uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize,
uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
size_t buflen, daddr_t start_block, int path_flag);
/* Convenience wrappers around sd_send_scsi_RDWR for reads and writes. */
#define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \
sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \
path_flag)
#define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\
sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\
path_flag)
static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr,
uint16_t buflen, uchar_t page_code, uchar_t page_control,
uint16_t param_ptr, int path_flag);
static int sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc,
uchar_t *bufaddr, size_t buflen, uchar_t class_req);
static boolean_t sd_gesn_media_data_valid(uchar_t *data);
/*
 * Prototypes: request-sense resources, multihost disk (MHIOC*)
 * reservation support, CD-ROM (sr_*) ioctls, media-watch callbacks,
 * and the write-map range locking used for read-modify-write.
 */
static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
static void sd_free_rqs(struct sd_lun *un);
static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
uchar_t *data, int len, int fmt);
static void sd_panic_for_res_conflict(struct sd_lun *un);
static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
static int sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);
/* Multihost disk (MHD) reservation watch, recovery, and ioctls. */
static int sd_check_mhd(dev_t dev, int interval);
static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
static char *sd_sname(uchar_t status);
static void sd_mhd_resvd_recover(void *arg);
static void sd_resv_reclaim_thread();
static int sd_take_ownership(dev_t dev, struct mhioctkown *p);
static int sd_reserve_release(dev_t dev, int cmd);
static void sd_rmv_resv_reclaim_req(dev_t dev);
static void sd_mhd_reset_notify_cb(caddr_t arg);
static int sd_persistent_reservation_in_read_keys(struct sd_lun *un,
mhioc_inkeys_t *usrp, int flag);
static int sd_persistent_reservation_in_read_resv(struct sd_lun *un,
mhioc_inresvs_t *usrp, int flag);
static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
static int sd_mhdioc_release(dev_t dev);
static int sd_mhdioc_register_devid(dev_t dev);
static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag);
static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag);
/* CD-ROM (sr_*) ioctl implementations. */
static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag);
static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
static int sr_pause_resume(dev_t dev, int mode);
static int sr_play_msf(dev_t dev, caddr_t data, int flag);
static int sr_play_trkind(dev_t dev, caddr_t data, int flag);
static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag);
static int sr_read_subchannel(dev_t dev, caddr_t data, int flag);
static int sr_read_tocentry(dev_t dev, caddr_t data, int flag);
static int sr_read_tochdr(dev_t dev, caddr_t data, int flag);
static int sr_read_cdda(dev_t dev, caddr_t data, int flag);
static int sr_read_cdxa(dev_t dev, caddr_t data, int flag);
static int sr_read_mode1(dev_t dev, caddr_t data, int flag);
static int sr_read_mode2(dev_t dev, caddr_t data, int flag);
static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag);
static int sr_sector_mode(dev_t dev, uint32_t blksize);
static int sr_eject(dev_t dev);
static void sr_ejected(register struct sd_lun *un);
static int sr_check_wp(dev_t dev);
/* Removable-media state watch. */
static opaque_t sd_watch_request_submit(struct sd_lun *un);
static int sd_check_media(dev_t dev, enum dkio_state state);
static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
static void sd_delayed_cv_broadcast(void *arg);
static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag);
static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag);
static int sd_log_page_supported(sd_ssc_t *ssc, int log_page);
static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag);
/* Write-map (sd_w_map) cache and range locking for read-modify-write. */
static int sd_wm_cache_constructor(void *wm, void *un, int flags);
static void sd_wm_cache_destructor(void *wm, void *un);
static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb,
daddr_t endb, ushort_t typ);
static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb,
daddr_t endb);
static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp);
static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm);
static void sd_read_modify_write_task(void * arg);
static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
struct buf **bpp);
static void sd_failfast_flushq(struct sd_lun *un);
static int sd_failfast_flushq_callback(struct buf *bp);
static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
struct scsi_pkt *pkt, struct sd_xbuf *xp);
static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
diskaddr_t start_block, size_t reqlength, void *tg_cookie);
static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie);
static void sd_rmw_msg_print_handler(void *arg);
#define SD_FAILFAST_INACTIVE 0
#define SD_FAILFAST_ACTIVE 1
#define SD_FAILFAST_FLUSH_ALL_BUFS 0x01
#define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02
static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES;
#ifdef SD_FAULT_INJECTION
static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
static void sd_faultinjection(struct scsi_pkt *pktp);
static void sd_injection_log(char *buf, struct sd_lun *un);
#endif
/*
 * Character/block driver entry points for sd; see cb_ops(9S).
 */
static struct cb_ops sd_cb_ops = {
sdopen,	/* open */
sdclose,	/* close */
sdstrategy,	/* strategy */
nodev,	/* print */
sddump,	/* dump */
sdread,	/* read */
sdwrite,	/* write */
sdioctl,	/* ioctl */
nodev,	/* devmap */
nodev,	/* mmap */
nodev,	/* segmap */
nochpoll,	/* poll */
sd_prop_op,	/* cb_prop_op */
0,	/* streamtab */
D_64BIT | D_MP | D_NEW | D_HOTPLUG,	/* driver compatibility flags */
CB_REV,	/* cb_ops revision */
sdaread,	/* async read */
sdawrite	/* async write */
};
/*
 * Device operations for sd; see dev_ops(9S).
 */
struct dev_ops sd_ops = {
DEVO_REV,	/* devo_rev */
0,	/* refcnt */
sdinfo,	/* getinfo */
nulldev,	/* identify (obsolete) */
sdprobe,	/* probe */
sdattach,	/* attach */
sddetach,	/* detach */
nodev,	/* reset (obsolete) */
&sd_cb_ops,	/* character/block ops */
NULL,	/* bus ops (leaf driver) */
sdpower,	/* power */
ddi_quiesce_not_needed,	/* quiesce */
};
#include <sys/modctl.h>
/* Loadable-module linkage: this is a device driver module. */
static struct modldrv modldrv = {
&mod_driverops,	/* module type: driver */
SD_MODULE_NAME,	/* module name string */
&sd_ops	/* driver dev_ops */
};
static struct modlinkage modlinkage = {
MODREV_1, &modldrv, NULL
};
/* Target-geometry ops handed to cmlb for label management. */
static cmlb_tg_ops_t sd_tgops = {
TG_DK_OPS_VERSION_1,
sd_tg_rdwr,	/* raw read/write on behalf of cmlb */
sd_tg_getinfo	/* capacity/geometry queries */
};
/*
 * Driver-specific additional sense code (ASC/ASCQ) strings, passed to
 * the common sense-key decoding code.  Each entry is {asc, ascq, text};
 * inner braces are elided.  The 0xffff entry terminates the table.
 */
static struct scsi_asq_key_strings sd_additional_codes[] = {
0x81, 0, "Logical Unit is Reserved",
0x85, 0, "Audio Address Not Valid",
0xb6, 0, "Media Load Mechanism Failed",
0xB9, 0, "Audio Play Operation Aborted",
0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
0x53, 2, "Medium removal prevented",
0x6f, 0, "Authentication failed during key exchange",
0x6f, 1, "Key not present",
0x6f, 2, "Key not established",
0x6f, 3, "Read without proper authentication",
0x6f, 4, "Mismatched region to this logical unit",
0x6f, 5, "Region reset count error",
0xffff, 0x0, NULL
};
/*
 * Per-sense-key classification used when logging sense data:
 * severity level plus whether it is a predictive-failure indication.
 */
struct sd_sense_info {
int ssi_severity;	/* SCSI_ERR_* logging severity */
int ssi_pfa_flag;	/* nonzero if predictive failure analysis hit */
};
/*
 * Signature of an iostart/iodone layering function.  Each layer is
 * called with its own index into the chain array so it can forward
 * the buf to the next layer (SD_NEXT_IOSTART / SD_NEXT_IODONE).
 */
typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp);
/*
 * I/O-start layering chains.  A "chain" is a consecutive run of
 * entries beginning at one of the SD_CHAIN_*_IOSTART indices below;
 * processing moves toward HIGHER indices until sd_core_iostart.
 */
static sd_chain_t sd_iostart_chain[] = {
/* Chain for buf IO for disk drive targets (PM enabled) */
sd_mapblockaddr_iostart,	/* Index: 0 */
sd_pm_iostart,	/* Index: 1 */
sd_core_iostart,	/* Index: 2 */
/* Chain for buf IO for disk drive targets (PM disabled) */
sd_mapblockaddr_iostart,	/* Index: 3 */
sd_core_iostart,	/* Index: 4 */
/* Chain for buf IO for removable-media / multi-sector-size targets (PM) */
sd_mapblockaddr_iostart,	/* Index: 5 */
sd_mapblocksize_iostart,	/* Index: 6 */
sd_pm_iostart,	/* Index: 7 */
sd_core_iostart,	/* Index: 8 */
/* Chain for buf IO for removable-media / multi-sector-size (no PM) */
sd_mapblockaddr_iostart,	/* Index: 9 */
sd_mapblocksize_iostart,	/* Index: 10 */
sd_core_iostart,	/* Index: 11 */
/* Chain for buf IO with checksumming (PM enabled) */
sd_mapblockaddr_iostart,	/* Index: 12 */
sd_checksum_iostart,	/* Index: 13 */
sd_pm_iostart,	/* Index: 14 */
sd_core_iostart,	/* Index: 15 */
/* Chain for buf IO with checksumming (PM disabled) */
sd_mapblockaddr_iostart,	/* Index: 16 */
sd_checksum_iostart,	/* Index: 17 */
sd_core_iostart,	/* Index: 18 */
/* Chain for USCSI commands */
sd_pm_iostart,	/* Index: 19 */
sd_core_iostart,	/* Index: 20 */
/* Chain for checksumming USCSI commands */
sd_checksum_uscsi_iostart,	/* Index: 21 */
sd_pm_iostart,	/* Index: 22 */
sd_core_iostart,	/* Index: 23 */
/* Chain for "direct" commands (bypass mapping and PM) */
sd_core_iostart,	/* Index: 24 */
/* Chain for "direct priority" commands */
sd_core_iostart,	/* Index: 25 */
/* Chain for multi-sector-size disks with checksumming (PM enabled) */
sd_mapblockaddr_iostart,	/* Index: 26 */
sd_mapblocksize_iostart,	/* Index: 27 */
sd_checksum_iostart,	/* Index: 28 */
sd_pm_iostart,	/* Index: 29 */
sd_core_iostart,	/* Index: 30 */
/* Chain for multi-sector-size disks with checksumming (PM disabled) */
sd_mapblockaddr_iostart,	/* Index: 31 */
sd_mapblocksize_iostart,	/* Index: 32 */
sd_checksum_iostart,	/* Index: 33 */
sd_core_iostart,	/* Index: 34 */
};
/* Starting indices into sd_iostart_chain[] for each chain type. */
#define SD_CHAIN_DISK_IOSTART 0
#define SD_CHAIN_DISK_IOSTART_NO_PM 3
#define SD_CHAIN_MSS_DISK_IOSTART 5
#define SD_CHAIN_RMMEDIA_IOSTART 5
#define SD_CHAIN_MSS_DISK_IOSTART_NO_PM 9
#define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9
#define SD_CHAIN_CHKSUM_IOSTART 12
#define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16
#define SD_CHAIN_USCSI_CMD_IOSTART 19
#define SD_CHAIN_USCSI_CHKSUM_IOSTART 21
#define SD_CHAIN_DIRECT_CMD_IOSTART 24
#define SD_CHAIN_PRIORITY_CMD_IOSTART 25
#define SD_CHAIN_MSS_CHKSUM_IOSTART 26
#define SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM 31
/*
 * I/O-done layering chains: mirror image of sd_iostart_chain.
 * Completion starts at the SD_CHAIN_*_IODONE index for the chain and
 * moves toward LOWER indices (SD_NEXT_IODONE) until the final
 * sd_buf_iodone / sd_uscsi_iodone entry.
 */
static sd_chain_t sd_iodone_chain[] = {
/* Chain for buf IO for disk drive targets (PM enabled) */
sd_buf_iodone,	/* Index: 0 */
sd_mapblockaddr_iodone,	/* Index: 1 */
sd_pm_iodone,	/* Index: 2 */
/* Chain for buf IO for disk drive targets (PM disabled) */
sd_buf_iodone,	/* Index: 3 */
sd_mapblockaddr_iodone,	/* Index: 4 */
/* Chain for buf IO for removable-media / multi-sector-size targets (PM) */
sd_buf_iodone,	/* Index: 5 */
sd_mapblockaddr_iodone,	/* Index: 6 */
sd_mapblocksize_iodone,	/* Index: 7 */
sd_pm_iodone,	/* Index: 8 */
/* Chain for buf IO for removable-media / multi-sector-size (no PM) */
sd_buf_iodone,	/* Index: 9 */
sd_mapblockaddr_iodone,	/* Index: 10 */
sd_mapblocksize_iodone,	/* Index: 11 */
/* Chain for buf IO with checksumming (PM enabled) */
sd_buf_iodone,	/* Index: 12 */
sd_mapblockaddr_iodone,	/* Index: 13 */
sd_checksum_iodone,	/* Index: 14 */
sd_pm_iodone,	/* Index: 15 */
/* Chain for buf IO with checksumming (PM disabled) */
sd_buf_iodone,	/* Index: 16 */
sd_mapblockaddr_iodone,	/* Index: 17 */
sd_checksum_iodone,	/* Index: 18 */
/* Chain for USCSI commands */
sd_uscsi_iodone,	/* Index: 19 */
sd_pm_iodone,	/* Index: 20 */
/* Chain for checksumming USCSI commands */
sd_uscsi_iodone,	/* Index: 21 */
sd_checksum_uscsi_iodone,	/* Index: 22 */
sd_pm_iodone,	/* Index: 23 */
/* Chain for "direct" commands */
sd_uscsi_iodone,	/* Index: 24 */
/* Chain for "direct priority" commands */
sd_uscsi_iodone,	/* Index: 25 */
/* Chain for multi-sector-size disks with checksumming (PM enabled) */
sd_buf_iodone,	/* Index: 26 */
sd_mapblockaddr_iodone,	/* Index: 27 */
sd_mapblocksize_iodone,	/* Index: 28 */
sd_checksum_iodone,	/* Index: 29 */
sd_pm_iodone,	/* Index: 30 */
/* Chain for multi-sector-size disks with checksumming (PM disabled) */
sd_buf_iodone,	/* Index: 31 */
sd_mapblockaddr_iodone,	/* Index: 32 */
sd_mapblocksize_iodone,	/* Index: 33 */
sd_checksum_iodone,	/* Index: 34 */
};
/* Starting (highest) indices into sd_iodone_chain[] for each chain type. */
#define SD_CHAIN_DISK_IODONE 2
#define SD_CHAIN_DISK_IODONE_NO_PM 4
#define SD_CHAIN_RMMEDIA_IODONE 8
#define SD_CHAIN_MSS_DISK_IODONE 8
#define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
#define SD_CHAIN_MSS_DISK_IODONE_NO_PM 11
#define SD_CHAIN_CHKSUM_IODONE 15
#define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
#define SD_CHAIN_USCSI_CMD_IODONE 20
#define SD_CHAIN_USCSI_CHKSUM_IODONE 22
#define SD_CHAIN_DIRECT_CMD_IODONE 24
#define SD_CHAIN_PRIORITY_CMD_IODONE 25
#define SD_CHAIN_MSS_CHKSUM_IODONE 30
#define SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM 34
/*
 * Packet-initialization dispatch, indexed by iostart chain index.
 * Indices 0-18 (buf chains) use sd_initpkt_for_buf; 19-25 (USCSI,
 * direct and priority chains) use sd_initpkt_for_uscsi; 26-34
 * (multi-sector-size checksum chains) use sd_initpkt_for_buf again.
 */
typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
static sd_initpkt_map[] entries below must stay in lockstep with
sd_iostart_chain[];  /* (comment only - see array) */
static sd_initpkt_t sd_initpkt_map[] = {
/* Indices 0-18: buf IO chains */
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
/* Indices 19-25: USCSI, direct, and priority chains */
sd_initpkt_for_uscsi,
sd_initpkt_for_uscsi,
sd_initpkt_for_uscsi,
sd_initpkt_for_uscsi,
sd_initpkt_for_uscsi,
sd_initpkt_for_uscsi,
sd_initpkt_for_uscsi,
/* Indices 26-34: multi-sector-size checksum chains */
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
sd_initpkt_for_buf,
};
/*
 * Packet-teardown dispatch, indexed by iostart chain index; must stay
 * in lockstep with sd_initpkt_map[] (buf vs. uscsi per index).
 */
typedef void (*sd_destroypkt_t)(struct buf *);
static sd_destroypkt_t sd_destroypkt_map[] = {
/* Indices 0-18: buf IO chains */
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
/* Indices 19-25: USCSI, direct, and priority chains */
sd_destroypkt_for_uscsi,
sd_destroypkt_for_uscsi,
sd_destroypkt_for_uscsi,
sd_destroypkt_for_uscsi,
sd_destroypkt_for_uscsi,
sd_destroypkt_for_uscsi,
sd_destroypkt_for_uscsi,
/* Indices 26-34: multi-sector-size checksum chains */
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
sd_destroypkt_for_buf,
};
/* Chain-type classification codes. */
#define SD_CHAIN_NULL 0
#define SD_CHAIN_BUFIO 1
#define SD_CHAIN_USCSI 2
#define SD_CHAIN_DIRECT 3
#define SD_CHAIN_DIRECT_PRIORITY 4
/*
 * Chain type for each iostart chain index; consulted by the
 * SD_IS_BUFIO / SD_IS_DIRECT_PRIORITY macros below.
 */
static int sd_chain_type_map[] = {
/* Indices 0-18: buf IO chains */
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
/* Indices 19-23: USCSI chains */
SD_CHAIN_USCSI,
SD_CHAIN_USCSI,
SD_CHAIN_USCSI,
SD_CHAIN_USCSI,
SD_CHAIN_USCSI,
/* Index 24: direct; index 25: direct priority */
SD_CHAIN_DIRECT,
SD_CHAIN_DIRECT_PRIORITY,
/* Indices 26-34: multi-sector-size checksum chains */
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
SD_CHAIN_BUFIO,
};
/* True if the xbuf's iostart chain is a normal buf IO chain. */
#define SD_IS_BUFIO(xp) \
(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
/* True if the xbuf's iostart chain is the direct-priority chain. */
#define SD_IS_DIRECT_PRIORITY(xp) \
(sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
/* Pairs an iostart chain start index with its matching iodone index. */
struct sd_chain_index {
int sci_iostart_index;	/* index into sd_iostart_chain[] */
int sci_iodone_index;	/* index into sd_iodone_chain[] */
};
/*
 * Table pairing each chain type's iostart and iodone entry indices,
 * itself indexed by the SD_CHAIN_INFO_* codes below.
 */
static struct sd_chain_index sd_chain_index_map[] = {
{ SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE },
{ SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM },
{ SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE },
{ SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM },
{ SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE },
{ SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM },
{ SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE },
{ SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE },
{ SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE },
{ SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE },
{ SD_CHAIN_MSS_CHKSUM_IOSTART, SD_CHAIN_MSS_CHKSUM_IODONE },
{ SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM, SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM },
};
/*
 * Indices into sd_chain_index_map[].  Note that USCSI_CMD_NO_PM (8)
 * deliberately aliases DIRECT_CMD: the direct chain bypasses PM.
 */
#define SD_CHAIN_INFO_DISK 0
#define SD_CHAIN_INFO_DISK_NO_PM 1
#define SD_CHAIN_INFO_RMMEDIA 2
#define SD_CHAIN_INFO_MSS_DISK 2
#define SD_CHAIN_INFO_RMMEDIA_NO_PM 3
#define SD_CHAIN_INFO_MSS_DSK_NO_PM 3
#define SD_CHAIN_INFO_CHKSUM 4
#define SD_CHAIN_INFO_CHKSUM_NO_PM 5
#define SD_CHAIN_INFO_MSS_DISK_CHKSUM 10
#define SD_CHAIN_INFO_MSS_DISK_CHKSUM_NO_PM 11
#define SD_CHAIN_INFO_USCSI_CMD 6
#define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8
#define SD_CHAIN_INFO_USCSI_CHKSUM 7
#define SD_CHAIN_INFO_DIRECT_CMD 8
#define SD_CHAIN_INFO_PRIORITY_CMD 9
/* Size limit for INQUIRY data. */
#define MAX_INQUIRY_SIZE 0xF0
/* Kick off a chain at its first (given) layer. */
#define SD_BEGIN_IOSTART(index, un, bp) \
((*(sd_iostart_chain[index]))(index, un, bp))
#define SD_BEGIN_IODONE(index, un, bp) \
((*(sd_iodone_chain[index]))(index, un, bp))
/* Advance to the next layer: iostart ascends, iodone descends. */
#define SD_NEXT_IOSTART(index, un, bp) \
((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))
#define SD_NEXT_IODONE(index, un, bp) \
((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))
/*
 * _init: loadable-module entry point.  Initializes global driver
 * state (soft-state anchor, log/reservation locks, probe and
 * target/LUN caches, taskqs) and registers the module.  On
 * mod_install() failure all of that state is torn down again.
 * Returns 0 on success or a DDI/modctl error code.
 */
int
_init(void)
{
	int err;

	/* The module name doubles as the label used in scsi_log() output. */
	sd_label = (char *)mod_modname(&modlinkage);

	err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
	    SD_MAXUNIT);
	if (err != 0)
		return (err);

	mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
	cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);
	sd_scsi_probe_cache_init();
	sd_scsi_target_lun_init();
	sd_taskq_create();

	err = mod_install(&modlinkage);
	if (err == 0)
		return (0);

	/* mod_install failed: undo everything set up above. */
	sd_taskq_delete();
	mutex_destroy(&sd_log_mutex);
	mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
	cv_destroy(&sd_tr.srq_resv_reclaim_cv);
	cv_destroy(&sd_tr.srq_inprocess_cv);
	sd_scsi_probe_cache_fini();
	sd_scsi_target_lun_fini();
	ddi_soft_state_fini(&sd_state);
	return (err);
}
/*
 * _fini: loadable-module exit point.  Refuses to unload while the
 * driver is busy (mod_remove() fails); otherwise releases all the
 * global state created in _init().  Returns 0 or a modctl error.
 */
int
_fini(void)
{
	int err = mod_remove(&modlinkage);

	if (err != 0)
		return (err);

	/* The module is detached; tear down global driver state. */
	sd_taskq_delete();
	mutex_destroy(&sd_log_mutex);
	mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
	sd_scsi_probe_cache_fini();
	sd_scsi_target_lun_fini();
	cv_destroy(&sd_tr.srq_resv_reclaim_cv);
	cv_destroy(&sd_tr.srq_inprocess_cv);
	ddi_soft_state_fini(&sd_state);
	return (0);
}
/*
 * _info: loadable-module information entry point; delegates to the
 * module framework using this driver's linkage.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
/*
 * Log an ERROR-level debug message for the given component.  The
 * message is emitted only when the component is enabled in
 * sd_component_mask, SD_LOGMASK_ERROR is set in sd_level_mask, and
 * sd_debug_un (if set) matches this unit.  Formatting goes through
 * the shared sd_log_buf, serialized by sd_log_mutex.
 * NOTE(review): vsprintf() into sd_log_buf is unbounded; callers are
 * assumed to keep messages within the buffer size — confirm.
 */
static void
sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
{
va_list ap;
dev_info_t *dev;
ASSERT(un != NULL);
dev = SD_DEVINFO(un);
ASSERT(dev != NULL);
if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
((sd_debug_un == NULL) || (sd_debug_un == un))) {
mutex_enter(&sd_log_mutex);
va_start(ap, fmt);
(void) vsprintf(sd_log_buf, fmt, ap);
va_end(ap);
scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
mutex_exit(&sd_log_mutex);
}
#ifdef SD_FAULT_INJECTION
_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
/* Also capture the message in the per-unit fault-injection log. */
if (un->sd_injection_mask & comp) {
mutex_enter(&sd_log_mutex);
va_start(ap, fmt);
(void) vsprintf(sd_log_buf, fmt, ap);
va_end(ap);
sd_injection_log(sd_log_buf, un);
mutex_exit(&sd_log_mutex);
}
#endif
}
/*
 * Log an INFO-level debug message for the given component; same
 * gating and sd_log_buf/sd_log_mutex protocol as sd_log_err(), but
 * keyed on SD_LOGMASK_INFO.
 */
static void
sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
va_list ap;
dev_info_t *dev;
ASSERT(un != NULL);
dev = SD_DEVINFO(un);
ASSERT(dev != NULL);
if ((sd_component_mask & component) &&
(sd_level_mask & SD_LOGMASK_INFO) &&
((sd_debug_un == NULL) || (sd_debug_un == un))) {
mutex_enter(&sd_log_mutex);
va_start(ap, fmt);
(void) vsprintf(sd_log_buf, fmt, ap);
va_end(ap);
scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
mutex_exit(&sd_log_mutex);
}
#ifdef SD_FAULT_INJECTION
_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
/* Also capture the message in the per-unit fault-injection log. */
if (un->sd_injection_mask & component) {
mutex_enter(&sd_log_mutex);
va_start(ap, fmt);
(void) vsprintf(sd_log_buf, fmt, ap);
va_end(ap);
sd_injection_log(sd_log_buf, un);
mutex_exit(&sd_log_mutex);
}
#endif
}
/*
 * Log a TRACE-level debug message for the given component; same
 * gating and sd_log_buf/sd_log_mutex protocol as sd_log_err(), but
 * keyed on SD_LOGMASK_TRACE.
 */
static void
sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
{
va_list ap;
dev_info_t *dev;
ASSERT(un != NULL);
dev = SD_DEVINFO(un);
ASSERT(dev != NULL);
if ((sd_component_mask & component) &&
(sd_level_mask & SD_LOGMASK_TRACE) &&
((sd_debug_un == NULL) || (sd_debug_un == un))) {
mutex_enter(&sd_log_mutex);
va_start(ap, fmt);
(void) vsprintf(sd_log_buf, fmt, ap);
va_end(ap);
scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
mutex_exit(&sd_log_mutex);
}
#ifdef SD_FAULT_INJECTION
_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
/* Also capture the message in the per-unit fault-injection log. */
if (un->sd_injection_mask & component) {
mutex_enter(&sd_log_mutex);
va_start(ap, fmt);
(void) vsprintf(sd_log_buf, fmt, ap);
va_end(ap);
sd_injection_log(sd_log_buf, un);
mutex_exit(&sd_log_mutex);
}
#endif
}
/*
 * sdprobe: DDI probe(9E) entry point.  Decides whether this driver
 * should claim the device node.  Self-identifying nodes are accepted
 * without probing; nodes already holding soft state are deferred.
 * Otherwise a (cached) SCSI probe is issued and the INQUIRY device
 * type decides the verdict: direct-access and CD-ROM devices are
 * claimed, optical devices are claimed subject to the
 * "optical-device-bind" property, everything else is rejected.
 */
static int
sdprobe(dev_info_t *devi)
{
	struct scsi_device *devp;
	int instance = ddi_get_instance(devi);
	int rval = DDI_PROBE_PARTIAL;

	/* Self-identifying devices never need a probe. */
	if (ddi_dev_is_sid(devi) == DDI_SUCCESS)
		return (DDI_PROBE_DONTCARE);

	devp = ddi_get_driver_private(devi);
	if (devp == NULL)
		return (DDI_PROBE_FAILURE);

	/* Existing soft state means this instance is already in use. */
	if (ddi_get_soft_state(sd_state, instance) != NULL)
		return (DDI_PROBE_PARTIAL);

	if (sd_scsi_probe_with_cache(devp, NULL_FUNC) == SCSIPROBE_EXISTS) {
		switch (devp->sd_inq->inq_dtype) {
		case DTYPE_DIRECT:
		case DTYPE_RODIRECT:
			rval = DDI_PROBE_SUCCESS;
			break;
		case DTYPE_OPTICAL:
			/*
			 * Bind to optical devices unless the
			 * "optical-device-bind" property says otherwise;
			 * the property is looked up once and cached.
			 */
			if (sd_dtype_optical_bind < 0) {
				sd_dtype_optical_bind = ddi_prop_get_int
				    (DDI_DEV_T_ANY, devi, 0,
				    "optical-device-bind", 1);
			}
			rval = (sd_dtype_optical_bind == 0) ?
			    DDI_PROBE_FAILURE : DDI_PROBE_SUCCESS;
			break;
		case DTYPE_NOTPRESENT:
		default:
			rval = DDI_PROBE_FAILURE;
			break;
		}
	}
	scsi_unprobe(devp);
	return (rval);
}
/*
 * sdinfo: DDI getinfo(9E) entry point.  Maps a dev_t to either its
 * dev_info node (DDI_INFO_DEVT2DEVINFO) or its instance number
 * (DDI_INFO_DEVT2INSTANCE).
 */
static int
sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	struct sd_lun *un;
	dev_t dev;

	if (infocmd == DDI_INFO_DEVT2DEVINFO) {
		dev = (dev_t)arg;
		un = ddi_get_soft_state(sd_state, SDUNIT(dev));
		if (un == NULL)
			return (DDI_FAILURE);
		*result = (void *)SD_DEVINFO(un);
		return (DDI_SUCCESS);
	}

	if (infocmd == DDI_INFO_DEVT2INSTANCE) {
		/* Instance is derivable from the minor alone. */
		dev = (dev_t)arg;
		*result = (void *)(uintptr_t)SDUNIT(dev);
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}
/*
 * sd_prop_op: prop_op(9E) entry point.  Once the unit is attached
 * (soft state exists) property requests are routed through cmlb so
 * partition-aware properties resolve correctly; before that, the
 * framework default ddi_prop_op() is used.
 */
static int
sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp)
{
	struct sd_lun *un;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(dip));
	if (un != NULL) {
		return (cmlb_prop_op(un->un_cmlbhandle, dev, dip, prop_op,
		    mod_flags, name, valuep, lengthp, SDPART(dev),
		    (void *)SD_PATH_DIRECT));
	}

	/* Not attached yet: fall back to the framework default. */
	return (ddi_prop_op(dev, dip, prop_op, mod_flags, name, valuep,
	    lengthp));
}
/*
 * Initialize the per-HBA SCSI probe-result cache: an empty list
 * guarded by sd_scsi_probe_cache_mutex.
 */
static void
sd_scsi_probe_cache_init(void)
{
	sd_scsi_probe_cache_head = NULL;
	mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL);
}
/*
 * Tear down the SCSI probe-result cache: free every list entry,
 * clear the head pointer and destroy the guarding mutex.  Called
 * only from module unload, so no locking is needed for the walk.
 */
static void
sd_scsi_probe_cache_fini(void)
{
	struct sd_scsi_probe_cache *cp = sd_scsi_probe_cache_head;

	while (cp != NULL) {
		struct sd_scsi_probe_cache *next = cp->next;

		kmem_free(cp, sizeof (struct sd_scsi_probe_cache));
		cp = next;
	}
	sd_scsi_probe_cache_head = NULL;
	mutex_destroy(&sd_scsi_probe_cache_mutex);
}
/*
 * Invalidate the probe cache by marking every target slot in every
 * per-HBA entry as SCSIPROBE_EXISTS, forcing the next probe of any
 * target to go to the hardware.
 */
static void
sd_scsi_clear_probe_cache(void)
{
	struct sd_scsi_probe_cache *cp;
	int tgt;

	mutex_enter(&sd_scsi_probe_cache_mutex);
	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
		for (tgt = 0; tgt < NTARGETS_WIDE; tgt++)
			cp->cache[tgt] = SCSIPROBE_EXISTS;
	}
	mutex_exit(&sd_scsi_probe_cache_mutex);
}
/*
 * Probe the device, consulting a per-HBA cache of previous probe
 * results so that targets already known to be absent are not
 * re-probed for every LUN.  Targets outside [0, NTARGETS_WIDE) are
 * probed directly without caching.  LUN 0 always resets its target's
 * cache slot, so each target gets at least one real probe.
 * Returns a SCSIPROBE_* result code.
 */
static int
sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
{
struct sd_scsi_probe_cache *cp;
dev_info_t *pdip = ddi_get_parent(devp->sd_dev);
int lun, tgt;
lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
SCSI_ADDR_PROP_LUN, 0);
tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
SCSI_ADDR_PROP_TARGET, -1);
/* Out-of-range target: bypass the cache entirely. */
if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
return (scsi_probe(devp, waitfn));
}
/* Find (or create) the cache entry for this HBA (parent dip). */
mutex_enter(&sd_scsi_probe_cache_mutex);
for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
if (cp->pdip == pdip) {
break;
}
}
if (cp == NULL) {
int i;
cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
KM_SLEEP);
cp->pdip = pdip;
cp->next = sd_scsi_probe_cache_head;
sd_scsi_probe_cache_head = cp;
for (i = 0; i < NTARGETS_WIDE; i++) {
cp->cache[i] = SCSIPROBE_EXISTS;
}
}
mutex_exit(&sd_scsi_probe_cache_mutex);
/*
 * NOTE(review): cp->cache[tgt] is read and written below without
 * holding sd_scsi_probe_cache_mutex.  Entries are never freed while
 * the module is loaded, but concurrent probes of the same target
 * could race on the cached value — confirm this is acceptable.
 */
/* LUN 0 forces a fresh probe of the target. */
if (lun == 0) {
cp->cache[tgt] = SCSIPROBE_EXISTS;
}
/* Negative-cache hit: the target already failed to respond. */
if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
return (SCSIPROBE_NORESP);
}
/* Issue the real probe and remember its result. */
return (cp->cache[tgt] = scsi_probe(devp, waitfn));
}
/*
 * Initialize the per-HBA target/LUN attach-count list: empty list
 * guarded by sd_scsi_target_lun_mutex.
 */
static void
sd_scsi_target_lun_init(void)
{
	sd_scsi_target_lun_head = NULL;
	mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
}
/*
 * Tear down the target/LUN attach-count list: free every entry,
 * clear the head and destroy the mutex.  Runs only at module unload,
 * so the walk is unlocked by design.
 */
static void
sd_scsi_target_lun_fini(void)
{
	struct sd_scsi_hba_tgt_lun *cp = sd_scsi_target_lun_head;

	while (cp != NULL) {
		struct sd_scsi_hba_tgt_lun *next = cp->next;

		kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
		cp = next;
	}
	sd_scsi_target_lun_head = NULL;
	mutex_destroy(&sd_scsi_target_lun_mutex);
}
/*
 * Return the number of LUNs accounted for on the given target of the
 * HBA identified by dip, or -1 if the target index is out of range
 * or no accounting entry exists for that HBA.
 */
static int
sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
{
struct sd_scsi_hba_tgt_lun *cp;
if ((target < 0) || (target >= NTARGETS_WIDE)) {
return (-1);
}
mutex_enter(&sd_scsi_target_lun_mutex);
for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
if (cp->pdip == dip) {
break;
}
}
mutex_exit(&sd_scsi_target_lun_mutex);
if (cp == NULL) {
return (-1);
}
/*
 * NOTE(review): the count is read after the mutex is dropped.
 * Entries are never freed while the module is loaded, but the value
 * may already be stale when the caller uses it — confirm intended.
 */
return (cp->nlun[target]);
}
/*
 * Adjust the attached-LUN count for the given target on the HBA
 * identified by dip: increment on SD_SCSI_LUN_ATTACH, decrement
 * otherwise.  An accounting entry is created on first attach for an
 * HBA; on detach with no existing entry the call is a no-op.
 * The caller must pass a target in [0, NTARGETS_WIDE).
 */
static void
sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
{
	struct sd_scsi_hba_tgt_lun *cp;

	mutex_enter(&sd_scsi_target_lun_mutex);

	for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == dip) {
			break;
		}
	}

	if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
		/* First attach for this HBA: create its accounting entry. */
		cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
		    KM_SLEEP);
		cp->pdip = dip;
		cp->next = sd_scsi_target_lun_head;
		sd_scsi_target_lun_head = cp;
	}

	/*
	 * Fix: update the count while still holding
	 * sd_scsi_target_lun_mutex.  The previous code performed the
	 * increment/decrement after mutex_exit(), so two concurrent
	 * attach/detach operations on the same target could race and
	 * lose an update (nlun[target]++/-- is not atomic).
	 */
	if (cp != NULL) {
		if (flag == SD_SCSI_LUN_ATTACH) {
			cp->nlun[target]++;
		} else {
			cp->nlun[target]--;
		}
	}

	mutex_exit(&sd_scsi_target_lun_mutex);
}
/*
 * Spin up the unit with START STOP UNIT, then refresh the cached
 * INQUIRY data.  Returns 0 on success, EACCES if the start command
 * reported a reservation conflict, or EIO if the INQUIRY fails.
 */
static int
sd_spin_up_unit(sd_ssc_t *ssc)
{
size_t resid = 0;
int has_conflict = FALSE;
uchar_t *bufaddr;
int status;
struct sd_lun *un;
ASSERT(ssc != NULL);
un = ssc->ssc_un;
ASSERT(un != NULL);
/*
 * A start failure is not immediately fatal: the assessment is
 * ignored and the INQUIRY below is still attempted, but a
 * reservation conflict (EACCES) is remembered and reported last.
 */
status = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
SD_TARGET_START, SD_PATH_DIRECT);
if (status != 0) {
if (status == EACCES)
has_conflict = TRUE;
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
}
/* Re-read INQUIRY data into a zero-filled scratch buffer. */
bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid)
!= 0) {
kmem_free(bufaddr, SUN_INQSIZE);
sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
return (EIO);
}
/*
 * Only overwrite the cached inquiry data if at least the minimum
 * amount was returned; the zeroed tail pads short responses.
 */
if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
}
kmem_free(bufaddr, SUN_INQSIZE);
if (has_conflict == TRUE) {
return (EACCES);
}
return (0);
}
/*
 * Enable descriptor-format sense data (D_SENSE) on the device by
 * reading the Control mode page with MODE SENSE, setting the d_sense
 * bit, and writing it back with MODE SELECT.  Failures are logged
 * and the command assessment is ignored; the device simply keeps
 * reporting fixed-format sense.
 */
static void
sd_enable_descr_sense(sd_ssc_t *ssc)
{
uchar_t *header;
struct mode_control_scsi3 *ctrl_bufp;
size_t buflen;
size_t bd_len;
int status;
struct sd_lun *un;
ASSERT(ssc != NULL);
un = ssc->ssc_un;
ASSERT(un != NULL);
/* Header + one block descriptor + the control mode page. */
buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
sizeof (struct mode_control_scsi3);
header = kmem_zalloc(buflen, KM_SLEEP);
status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
MODEPAGE_CTRL_MODE, SD_PATH_DIRECT);
if (status != 0) {
SD_ERROR(SD_LOG_COMMON, un,
"sd_enable_descr_sense: mode sense ctrl page failed\n");
goto eds_exit;
}
/*
 * Locate the control page behind whatever block descriptor the
 * device returned; mode-data length must be zeroed for MODE SELECT.
 */
bd_len = ((struct mode_header *)header)->bdesc_length;
((struct mode_header *)header)->length = 0;
ctrl_bufp = (struct mode_control_scsi3 *)
(header + MODE_HEADER_LENGTH + bd_len);
/* Sanity-check the page length before touching its fields. */
if (ctrl_bufp->mode_page.length <
sizeof (struct mode_control_scsi3) - 2) {
SD_ERROR(SD_LOG_COMMON, un,
"sd_enable_descr_sense: enable D_SENSE failed\n");
goto eds_exit;
}
/* PS must be clear for MODE SELECT; then request descriptor sense. */
ctrl_bufp->mode_page.ps = 0;
ctrl_bufp->d_sense = 1;
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header,
buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT);
if (status != 0) {
SD_INFO(SD_LOG_COMMON, un,
"sd_enable_descr_sense: mode select ctrl page failed\n");
} else {
kmem_free(header, buflen);
return;
}
eds_exit:
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
kmem_free(header, buflen);
}
/*
 * Taskq callback: re-attempt enabling descriptor sense for a unit.
 * Builds a temporary ssc context around the unit, runs
 * sd_enable_descr_sense(), and releases the context.
 */
static void
sd_reenable_dsense_task(void *arg)
{
	struct sd_lun *un = arg;

	ASSERT(un != NULL);

	sd_ssc_t *ssc = sd_ssc_init(un);
	sd_enable_descr_sense(ssc);
	sd_ssc_fini(ssc);
}
/*
 * Determine the MMC (CD/DVD) capabilities of the device and set the
 * corresponding un_f_* flags: un_f_mmc_cap, un_f_cfg_cdda,
 * un_f_dvdram_writable_device, and (possibly) un_f_mmc_gesn_polling.
 * Uses the CD capabilities mode page and, when needed, GET
 * CONFIGURATION feature queries.  All command assessments are
 * ignored — a failure just leaves the capability flags FALSE.
 */
static void
sd_set_mmc_caps(sd_ssc_t *ssc)
{
struct mode_header_grp2 *sense_mhp;
uchar_t *sense_page;
caddr_t buf;
int bd_len;
int status;
struct uscsi_cmd com;
int rtn;
uchar_t *out_data_rw, *out_data_hd;
uchar_t *rqbuf_rw, *rqbuf_hd;
uchar_t *out_data_gesn;
int gesn_len;
struct sd_lun *un;
ASSERT(ssc != NULL);
un = ssc->ssc_un;
ASSERT(un != NULL);
/* Default to no MMC capabilities until proven otherwise. */
un->un_f_mmc_cap = FALSE;
un->un_f_dvdram_writable_device = FALSE;
un->un_f_cfg_cdda = FALSE;
/*
 * A device that answers the CD capabilities mode page is treated
 * as MMC-capable; a failure here means "not an MMC device".
 */
buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
if (status != 0) {
kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
return;
}
un->un_f_mmc_cap = TRUE;
/*
 * If GESN-based media polling is configured, verify the device
 * actually supports media-class event notification; otherwise
 * fall back by clearing un_f_mmc_gesn_polling.
 */
if (un->un_f_mmc_gesn_polling) {
gesn_len = SD_GESN_HEADER_LEN + SD_GESN_MEDIA_DATA_LEN;
out_data_gesn = kmem_zalloc(gesn_len, KM_SLEEP);
rtn = sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(ssc,
out_data_gesn, gesn_len, 1 << SD_GESN_MEDIA_CLASS);
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
if ((rtn != 0) || !sd_gesn_media_data_valid(out_data_gesn)) {
un->un_f_mmc_gesn_polling = FALSE;
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_set_mmc_caps: gesn not supported "
"%d %x %x %x %x\n", rtn,
out_data_gesn[0], out_data_gesn[1],
out_data_gesn[2], out_data_gesn[3]);
}
kmem_free(out_data_gesn, gesn_len);
}
/* Locate the capabilities page past any block descriptor. */
sense_mhp = (struct mode_header_grp2 *)buf;
bd_len = (sense_mhp->bdesc_length_hi << 8) |
sense_mhp->bdesc_length_lo;
if (bd_len > MODE_BLK_DESC_LENGTH) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"sd_set_mmc_caps: Mode Sense returned "
"invalid block descriptor length\n");
kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
return;
}
sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 +
bd_len);
/* Byte 5 bit 0: CDDA read support; byte 3 bit 5: DVD-RAM write. */
un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE;
un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE;
if (un->un_f_dvdram_writable_device == TRUE) {
kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
return;
}
/*
 * If any read/write capability bits are reported, the mode page
 * was authoritative; no need to consult GET CONFIGURATION.
 */
if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
return;
}
kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
/*
 * The mode page reported nothing writable: query the
 * Random Writable and Hardware Defect Management features to
 * detect DVD-RAM-style writability.
 */
out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
RANDOM_WRITABLE, SD_PATH_STANDARD);
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
if (rtn != 0) {
kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
kmem_free(rqbuf_rw, SENSE_LENGTH);
return;
}
out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD);
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
if (rtn == 0) {
/* Both features current => device is DVD-RAM writable. */
if ((out_data_rw[9] & RANDOM_WRITABLE) &&
(out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
un->un_f_dvdram_writable_device = TRUE;
}
}
kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
kmem_free(rqbuf_rw, SENSE_LENGTH);
kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
kmem_free(rqbuf_hd, SENSE_LENGTH);
}
/*
 * Determine whether the inserted medium is writable and record the
 * answer in un->un_f_mmc_writable_media.
 *
 * Locking contract: the caller must hold SD_MUTEX(un); the mutex is
 * dropped around each SCSI command and re-acquired before every
 * return, so it is held again on exit.
 *
 * Strategy: first try GET CONFIGURATION (profile 0x12, DVD-RAM,
 * implies writable); then fall back to the CD capabilities mode
 * page; finally probe the Random Writable and Hardware Defect
 * Management features.
 */
static void
sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag)
{
struct uscsi_cmd com;
uchar_t *out_data;
uchar_t *rqbuf;
int rtn;
uchar_t *out_data_rw, *out_data_hd;
uchar_t *rqbuf_rw, *rqbuf_hd;
struct mode_header_grp2 *sense_mhp;
uchar_t *sense_page;
caddr_t buf;
int bd_len;
int status;
struct sd_lun *un;
ASSERT(ssc != NULL);
un = ssc->ssc_un;
ASSERT(un != NULL);
ASSERT(mutex_owned(SD_MUTEX(un)));
/* Assume non-writable until a probe below proves otherwise. */
un->un_f_mmc_writable_media = FALSE;
mutex_exit(SD_MUTEX(un));
out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH,
out_data, SD_PROFILE_HEADER_LEN, path_flag);
if (rtn != 0)
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
mutex_enter(SD_MUTEX(un));
if (rtn == 0) {
/* Current profile bytes 6-7 == 0x0012 => DVD-RAM (writable). */
if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
un->un_f_mmc_writable_media = TRUE;
kmem_free(out_data, SD_PROFILE_HEADER_LEN);
kmem_free(rqbuf, SENSE_LENGTH);
return;
}
}
kmem_free(out_data, SD_PROFILE_HEADER_LEN);
kmem_free(rqbuf, SENSE_LENGTH);
/* Fall back to the CD capabilities mode page. */
mutex_exit(SD_MUTEX(un));
buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag);
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
mutex_enter(SD_MUTEX(un));
if (status != 0) {
kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
return;
}
/* Locate the capabilities page past any block descriptor. */
sense_mhp = (struct mode_header_grp2 *)buf;
bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
if (bd_len > MODE_BLK_DESC_LENGTH) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"sd_check_for_writable_cd: Mode Sense returned "
"invalid block descriptor length\n");
kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
return;
}
sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
/*
 * If any read/write capability bits are set, the mode page was
 * authoritative and the medium is not writable by this device.
 */
if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
return;
}
kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
/*
 * Last resort: query the Random Writable and Hardware Defect
 * Management features; both current => writable medium.
 */
mutex_exit(SD_MUTEX(un));
out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
RANDOM_WRITABLE, path_flag);
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
if (rtn != 0) {
kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
kmem_free(rqbuf_rw, SENSE_LENGTH);
mutex_enter(SD_MUTEX(un));
return;
}
out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
HARDWARE_DEFECT_MANAGEMENT, path_flag);
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
mutex_enter(SD_MUTEX(un));
if (rtn == 0) {
/* Byte 10 bit 0 is the "current" bit for each feature. */
if ((out_data_rw[9] & RANDOM_WRITABLE) &&
(out_data_rw[10] & 0x1) &&
(out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
(out_data_hd[10] & 0x1)) {
un->un_f_mmc_writable_media = TRUE;
}
}
kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
kmem_free(rqbuf_rw, SENSE_LENGTH);
kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
kmem_free(rqbuf_hd, SENSE_LENGTH);
}
/*
 * Load per-unit configuration: entries from the sd-config-list
 * property (sd.conf) take precedence; the built-in table is used
 * only when no matching file entry is found.
 */
static void
sd_read_unit_properties(struct sd_lun *un)
{
	ASSERT(un != NULL);

	if (sd_process_sdconf_file(un) == SD_FAILURE)
		sd_process_sdconf_table(un);
}
/*
 * Parse the "sd-config-list" property for this unit.  The property
 * is a flat list of {vendor-id, data} string pairs; a matching
 * vendor-id selects either an inline name:value list (decoded by
 * sd_nvpair_str_decode) or the name of a version-1 integer-array
 * data property whose tunables are then applied.
 * Returns SD_SUCCESS if any matching entry was applied, else
 * SD_FAILURE (caller then falls back to the built-in table).
 */
static int
sd_process_sdconf_file(struct sd_lun *un)
{
char **config_list = NULL;
uint_t nelements;
char *vidptr;
int vidlen;
char *dnlist_ptr;
char *dataname_ptr;
char *dataname_lasts;
int *data_list = NULL;
uint_t data_list_len;
int rval = SD_FAILURE;
int i;
ASSERT(un != NULL);
if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un),
DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list,
&config_list, &nelements) != DDI_PROP_SUCCESS) {
return (SD_FAILURE);
}
/* The list must contain complete {vid, data} pairs. */
if (nelements & 1) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"sd-config-list should show as pairs of strings.\n");
if (config_list)
ddi_prop_free(config_list);
return (SD_FAILURE);
}
/*
 * NOTE(review): i is int while nelements is uint_t; fine for any
 * realistic property size, but a signed/unsigned compare.
 */
for (i = 0; i < nelements; i += 2) {
vidptr = config_list[i];
vidlen = (int)strlen(vidptr);
if (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS) {
continue;
}
dnlist_ptr = config_list[i + 1];
/* A ':' marks the inline name:value format. */
if (strchr(dnlist_ptr, ':') != NULL) {
sd_nvpair_str_decode(un, dnlist_ptr);
} else {
/* Otherwise: whitespace-separated data property names. */
for (dataname_ptr = strtok_r(dnlist_ptr, " \t",
&dataname_lasts); dataname_ptr != NULL;
dataname_ptr = strtok_r(NULL, " \t",
&dataname_lasts)) {
int version;
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_process_sdconf_file: disk:%s, "
"data:%s\n", vidptr, dataname_ptr);
if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
SD_DEVINFO(un), 0, dataname_ptr, &data_list,
&data_list_len) != DDI_PROP_SUCCESS) {
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_process_sdconf_file: data "
"property (%s) has no value\n",
dataname_ptr);
continue;
}
/* data_list[0] = version, [1] = flags, [2..] = values */
version = data_list[0];
if (version == SD_CONF_VERSION_1) {
sd_tunables values;
if (sd_chk_vers1_data(un, data_list[1],
&data_list[2], data_list_len,
dataname_ptr) == SD_SUCCESS) {
sd_get_tunables_from_conf(un,
data_list[1], &data_list[2],
&values);
sd_set_vers1_properties(un,
data_list[1], &values);
rval = SD_SUCCESS;
} else {
rval = SD_FAILURE;
}
} else {
scsi_log(SD_DEVINFO(un), sd_label,
CE_WARN, "data property %s version "
"0x%x is invalid.",
dataname_ptr, version);
rval = SD_FAILURE;
}
ddi_prop_free(data_list);
}
}
}
if (config_list) {
ddi_prop_free(config_list);
}
return (rval);
}
/*
 * Decode an inline sd-config-list entry of the form
 * "name1:value1, name2:value2, ..." and apply each pair via
 * sd_set_properties().  Whitespace around names and values is
 * stripped; pairs missing a name or value are logged and skipped.
 * The input string is modified in place (strtok_r).
 */
static void
sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str)
{
	char *nv, *name, *value, *token;
	char *nv_lasts, *v_lasts, *x_lasts;

	for (nv = strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL;
	    nv = strtok_r(NULL, ",", &nv_lasts)) {
		/*
		 * Fix: guard against NULL tokens before trimming.  The
		 * previous code passed a possibly-NULL token straight to
		 * strtok_r(), whose NULL first argument means "continue
		 * the previous tokenization" via x_lasts — so a pair
		 * with no ':' value could yield a bogus non-NULL value
		 * left over from the preceding pair instead of being
		 * rejected below.
		 */
		token = strtok_r(nv, ":", &v_lasts);
		name = (token == NULL) ? NULL :
		    strtok_r(token, " \t", &x_lasts);
		token = strtok_r(NULL, ":", &v_lasts);
		value = (token == NULL) ? NULL :
		    strtok_r(token, " \t", &x_lasts);
		if (name == NULL || value == NULL) {
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_nvpair_str_decode: "
			    "name or value is not valid!\n");
		} else {
			sd_set_properties(un, name, value);
		}
	}
}
/*
 * sd_set_properties: apply one name/value tunable (decoded from the
 * sd-config-list nvpair format) to the per-LUN soft state.
 *
 * Boolean-valued properties accept the strings "true"/"false" (case
 * insensitive); numeric properties are parsed with ddi_strtol().  A value
 * that fails to parse is logged via the value_invalid label and ignored.
 *
 * Most branches return immediately once their property is set.  The
 * throttle-max, throttle-min, rmw-type and physical-block-size branches
 * deliberately do NOT return: they fall through the remaining (failing)
 * name comparisons so the throttle range validation near the end of the
 * function runs after they are applied.
 */
static void
sd_set_properties(struct sd_lun *un, char *name, char *value)
{
	char	*endptr = NULL;
	long	val = 0;

	if (strcasecmp(name, "cache-nonvolatile") == 0) {
		if (strcasecmp(value, "true") == 0) {
			un->un_f_suppress_cache_flush = TRUE;
		} else if (strcasecmp(value, "false") == 0) {
			un->un_f_suppress_cache_flush = FALSE;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "suppress_cache_flush flag set to %d\n",
		    un->un_f_suppress_cache_flush);
		return;
	}

	if (strcasecmp(name, "controller-type") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_ctype = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "ctype set to %d\n", un->un_ctype);
		return;
	}

	if (strcasecmp(name, "delay-busy") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			/* value is in nanoseconds; convert to clock ticks */
			un->un_busy_timeout = drv_usectohz(val / 1000);
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "busy_timeout set to %d\n", un->un_busy_timeout);
		return;
	}

	/* Note the inversion: "disksort true" clears the disabled flag. */
	if (strcasecmp(name, "disksort") == 0) {
		if (strcasecmp(value, "true") == 0) {
			un->un_f_disksort_disabled = FALSE;
		} else if (strcasecmp(value, "false") == 0) {
			un->un_f_disksort_disabled = TRUE;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "disksort disabled flag set to %d\n",
		    un->un_f_disksort_disabled);
		return;
	}

	/* Also inverted: "power-condition true" clears the disabled flag. */
	if (strcasecmp(name, "power-condition") == 0) {
		if (strcasecmp(value, "true") == 0) {
			un->un_f_power_condition_disabled = FALSE;
		} else if (strcasecmp(value, "false") == 0) {
			un->un_f_power_condition_disabled = TRUE;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "power condition disabled flag set to %d\n",
		    un->un_f_power_condition_disabled);
		return;
	}

	if (strcasecmp(name, "timeout-releasereservation") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_reserve_release_time = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "reservation release timeout set to %d\n",
		    un->un_reserve_release_time);
		return;
	}

	if (strcasecmp(name, "reset-lun") == 0) {
		if (strcasecmp(value, "true") == 0) {
			un->un_f_lun_reset_enabled = TRUE;
		} else if (strcasecmp(value, "false") == 0) {
			un->un_f_lun_reset_enabled = FALSE;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "lun reset enabled flag set to %d\n",
		    un->un_f_lun_reset_enabled);
		return;
	}

	if (strcasecmp(name, "retries-busy") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_busy_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "busy retry count set to %d\n", un->un_busy_retry_count);
		return;
	}

	if (strcasecmp(name, "retries-timeout") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "timeout retry count set to %d\n", un->un_retry_count);
		return;
	}

	if (strcasecmp(name, "retries-notready") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_notready_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "notready retry count set to %d\n",
		    un->un_notready_retry_count);
		return;
	}

	if (strcasecmp(name, "retries-reset") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_reset_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "reset retry count set to %d\n",
		    un->un_reset_retry_count);
		return;
	}

	/* No return: fall through so the throttle range check below runs. */
	if (strcasecmp(name, "throttle-max") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_saved_throttle = un->un_throttle = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "throttle set to %d\n", un->un_throttle);
	}

	/* No return: fall through so the throttle range check below runs. */
	if (strcasecmp(name, "throttle-min") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_min_throttle = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "min throttle set to %d\n", un->un_min_throttle);
	}

	if (strcasecmp(name, "rmw-type") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_f_rmw_type = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "RMW type set to %d\n", un->un_f_rmw_type);
	}

	if (strcasecmp(name, "physical-block-size") == 0) {
		/*
		 * Only accept a power of two that is at least as large as
		 * both the target and system block sizes.
		 */
		if (ddi_strtol(value, &endptr, 0, &val) == 0 &&
		    ISP2(val) && val >= un->un_tgt_blocksize &&
		    val >= un->un_sys_blocksize) {
			un->un_phy_blocksize = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "physical block size set to %d\n", un->un_phy_blocksize);
	}

	if (strcasecmp(name, "retries-victim") == 0) {
		if (ddi_strtol(value, &endptr, 0, &val) == 0) {
			un->un_victim_retry_count = val;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "victim retry count set to %d\n",
		    un->un_victim_retry_count);
		return;
	}

	/*
	 * Validate the throttle settings (possibly just modified above);
	 * fall back to the driver-wide defaults if they are inconsistent.
	 */
	if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle > un->un_throttle)) {
		un->un_saved_throttle = un->un_throttle = sd_max_throttle;
		un->un_min_throttle = sd_min_throttle;
	}

	if (strcasecmp(name, "mmc-gesn-polling") == 0) {
		if (strcasecmp(value, "true") == 0) {
			un->un_f_mmc_gesn_polling = TRUE;
		} else if (strcasecmp(value, "false") == 0) {
			un->un_f_mmc_gesn_polling = FALSE;
		} else {
			goto value_invalid;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
		    "mmc-gesn-polling set to %d\n",
		    un->un_f_mmc_gesn_polling);
	}

	return;

value_invalid:
	SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
	    "value of prop %s is invalid\n", name);
}
/*
 * sd_get_tunables_from_conf: unpack a version-1 sd-config-list data
 * property list into an sd_tunables structure.
 *
 * flags is the bit mask word from the data list; bit i set means that
 * data_list[i] holds the value for the tunable associated with that bit.
 * Only bits at or below the highest set bit are examined; values is
 * zeroed first so unset tunables read as 0.
 */
static void
sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
    sd_tunables *values)
{
	int i;
	int mask;

	bzero(values, sizeof (sd_tunables));

	for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
		mask = 1 << i;
		if (mask > flags) {
			/* No higher bits can be set; we are done. */
			break;
		}

		switch (mask & flags) {
		case 0:
			/* This bit is not set; skip its slot. */
			continue;
		case SD_CONF_BSET_THROTTLE:
			values->sdt_throttle = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: throttle = %d\n",
			    values->sdt_throttle);
			break;
		case SD_CONF_BSET_CTYPE:
			values->sdt_ctype = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: ctype = %d\n",
			    values->sdt_ctype);
			break;
		case SD_CONF_BSET_NRR_COUNT:
			values->sdt_not_rdy_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
			    values->sdt_not_rdy_retries);
			break;
		case SD_CONF_BSET_BSY_RETRY_COUNT:
			values->sdt_busy_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: busy_retries = %d\n",
			    values->sdt_busy_retries);
			break;
		case SD_CONF_BSET_RST_RETRIES:
			values->sdt_reset_retries = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: reset_retries = %d\n",
			    values->sdt_reset_retries);
			break;
		case SD_CONF_BSET_RSV_REL_TIME:
			values->sdt_reserv_rel_time = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
			    values->sdt_reserv_rel_time);
			break;
		case SD_CONF_BSET_MIN_THROTTLE:
			values->sdt_min_throttle = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: min_throttle = %d\n",
			    values->sdt_min_throttle);
			break;
		case SD_CONF_BSET_DISKSORT_DISABLED:
			values->sdt_disk_sort_dis = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
			    values->sdt_disk_sort_dis);
			break;
		case SD_CONF_BSET_LUN_RESET_ENABLED:
			values->sdt_lun_reset_enable = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: lun_reset_enable = %d"
			    "\n", values->sdt_lun_reset_enable);
			break;
		case SD_CONF_BSET_CACHE_IS_NV:
			values->sdt_suppress_cache_flush = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: "
			    "suppress_cache_flush = %d"
			    "\n", values->sdt_suppress_cache_flush);
			break;
		case SD_CONF_BSET_PC_DISABLED:
			/*
			 * Store into the power-condition field; this
			 * previously clobbered sdt_disk_sort_dis instead,
			 * leaving sdt_power_condition_dis unset.
			 */
			values->sdt_power_condition_dis = data_list[i];
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_get_tunables_from_conf: power_condition_dis = "
			    "%d\n", values->sdt_power_condition_dis);
			break;
		default:
			break;
		}
	}
}
/*
 * sd_process_sdconf_table: scan the built-in sd_disk_table for the first
 * entry whose device id matches this unit's inquiry data, and apply that
 * entry's version-1 flags/properties to the unit.
 */
static void
sd_process_sdconf_table(struct sd_lun *un)
{
	char	*device_id;
	int	i;

	ASSERT(un != NULL);

	for (i = 0; i < sd_disk_table_size; i++) {
		device_id = sd_disk_table[i].device_id;
		if (sd_sdconf_id_match(un, device_id,
		    (int)strlen(device_id)) != SD_SUCCESS) {
			continue;
		}
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_process_sdconf_table: disk %s\n", device_id);
		sd_set_vers1_properties(un,
		    sd_disk_table[i].flags,
		    sd_disk_table[i].properties);
		/* Only the first matching entry is applied. */
		break;
	}
}
/*
 * sd_sdconf_id_match: decide whether the config-list/table id string "id"
 * (of length idlen) matches this unit's inquiry vid/pid data.
 *
 * Three strategies are tried in order:
 *   1. direct case-insensitive prefix comparison against the inquiry data,
 *   2. the blank-tolerant comparison (sd_blank_cmp),
 *   3. for ids of the form "*pid*", a substring search within inq_pid.
 *
 * Returns SD_SUCCESS on a match, SD_FAILURE otherwise.
 */
static int
sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
{
	struct scsi_inquiry	*sd_inq;
	int			rval;

	ASSERT(un != NULL);
	sd_inq = un->un_sd->sd_inq;
	ASSERT(id != NULL);

	/* Strategy 1: straight case-insensitive comparison. */
	if (strncasecmp(sd_inq->inq_vid, id, idlen) == 0) {
		return (SD_SUCCESS);
	}

	/* Strategy 2: comparison that tolerates embedded blanks. */
	rval = sd_blank_cmp(un, id, idlen);
	if (rval == SD_SUCCESS) {
		return (rval);
	}

	/* Strategy 3: "*pid*" wildcard substring match against inq_pid. */
	if ((id[0] == '*') && (id[idlen - 1] == '*')) {
		char	*pidptr = &id[1];
		int	pidstrlen = idlen - 2;
		int	limit = sizeof (SD_INQUIRY(un)->inq_pid) - pidstrlen;
		int	i;

		if (limit < 0) {
			/* Pattern is longer than the pid field itself. */
			return (SD_FAILURE);
		}
		for (i = 0; i < limit; i++) {
			if (bcmp(&SD_INQUIRY(un)->inq_pid[i],
			    pidptr, pidstrlen) == 0) {
				return (SD_SUCCESS);
			}
		}
	}

	return (rval);
}
/*
 * sd_blank_cmp: compare the id string against the unit's concatenated
 * inquiry vid+pid fields, ignoring runs of blanks on both sides and
 * comparing case-insensitively.  Only attempted when the id is framed by
 * blanks (leading and trailing space); otherwise the loop is skipped and
 * the final test fails.
 *
 * Returns SD_SUCCESS when the id string is exhausted exactly as the
 * vid+pid bytes run out, SD_FAILURE otherwise.
 */
static int
sd_blank_cmp(struct sd_lun *un, char *id, int idlen)
{
	char *p1;
	char *p2;
	int cnt;
	/* cnt counts the remaining bytes of the fixed-size vid+pid fields */
	cnt = sizeof (SD_INQUIRY(un)->inq_vid) +
	    sizeof (SD_INQUIRY(un)->inq_pid);

	ASSERT(un != NULL);
	p2 = un->un_sd->sd_inq->inq_vid;
	ASSERT(id != NULL);
	p1 = id;

	if ((id[0] == ' ') && (id[idlen - 1] == ' ')) {
		/*
		 * Alternate between skipping blank runs on each side and
		 * consuming runs of matching (case-folded) characters.
		 */
		for (;;) {
			/* skip blanks in the id string */
			while ((*p1 != '\0') && (*p1 == ' ')) {
				p1++;
			}
			/* skip blanks in the inquiry data */
			while ((cnt != 0) && (*p2 == ' ')) {
				p2++;
				cnt--;
			}
			/* stop on exhaustion or first mismatch */
			if ((cnt == 0) ||
			    (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) {
				break;
			}
			/* consume the matching run */
			while ((cnt > 0) &&
			    (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) {
				p1++;
				p2++;
				cnt--;
			}
		}
	}

	/* Match only if both the id and the inquiry bytes ran out together. */
	return (((*p1 == '\0') && (cnt == 0)) ? SD_SUCCESS : SD_FAILURE);
}
/*
 * sd_chk_vers1_data: sanity-check a version-1 sd-config-list data
 * property list before its values are applied.
 *
 * flags is the bit mask word (prop_list[0] of the version-1 layout),
 * prop_list points at the property values, list_len is the total number
 * of ints in the data property (version + flag word + values), and
 * dataname_ptr names the property for diagnostics.
 *
 * Returns SD_SUCCESS when the list is well formed, SD_FAILURE (with a
 * warning logged) otherwise.
 */
static int
sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
    int list_len, char *dataname_ptr)
{
	int i;
	int mask = 1;
	int index = 0;

	ASSERT(un != NULL);

	/* Check for a NULL property name and list. */
	if (dataname_ptr == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_chk_vers1_data: NULL data property name.");
		return (SD_FAILURE);
	}
	if (prop_list == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_chk_vers1_data: %s NULL data property list.",
		    dataname_ptr);
		return (SD_FAILURE);
	}

	/* Display a warning if undefined bits are used in the flag word. */
	if (flags & ~SD_CONF_BIT_MASK) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_chk_vers1_data: invalid bits 0x%x in data list %s. "
		    "Properties not set.",
		    (flags & ~SD_CONF_BIT_MASK), dataname_ptr);
		return (SD_FAILURE);
	}

	/*
	 * Count how many flag bits are set; each set bit must have a
	 * corresponding value in the list.  The mask must advance to
	 * bit (i + 1) at the end of iteration i: advancing it to
	 * (1 << i) would test bit 0 twice and never test the highest
	 * bit, undercounting the required list length.
	 */
	for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
		if (flags & mask) {
			index++;
		}
		mask = 1 << (i + 1);
	}

	/* The list must hold the version word, the flag word and values. */
	if (list_len < (index + 2)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_chk_vers1_data: "
		    "Data property list %s size is incorrect. "
		    "Properties not set.", dataname_ptr);
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: "
		    "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS);
		return (SD_FAILURE);
	}

	return (SD_SUCCESS);
}
/*
 * sd_set_vers1_properties: apply the version-1 flag word and associated
 * tunable values (from sd-config-list data or the built-in disk table)
 * to the per-LUN soft state.
 *
 * Bits in flags that need a value read it from prop_list; bits that are
 * pure booleans only set a flag in the unit structure.  After all bits
 * are processed, the resulting throttle settings are validated and reset
 * to the driver defaults if inconsistent.
 */
static void
sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list)
{
	ASSERT(un != NULL);

	/* Boolean flag bits: each simply latches a feature flag. */
	if (flags & SD_CONF_BSET_NOCACHE) {
		un->un_f_opt_disable_cache = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: caching disabled flag set\n");
	}
	if (flags & SD_CONF_BSET_PLAYMSF_BCD) {
		un->un_f_cfg_playmsf_bcd = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: playmsf_bcd set\n");
	}
	if (flags & SD_CONF_BSET_READSUB_BCD) {
		un->un_f_cfg_readsub_bcd = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: readsub_bcd set\n");
	}
	if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) {
		un->un_f_cfg_read_toc_trk_bcd = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: read_toc_trk_bcd set\n");
	}
	if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) {
		un->un_f_cfg_read_toc_addr_bcd = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: read_toc_addr_bcd set\n");
	}
	if (flags & SD_CONF_BSET_NO_READ_HEADER) {
		un->un_f_cfg_no_read_header = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: no_read_header set\n");
	}
	if (flags & SD_CONF_BSET_READ_CD_XD4) {
		un->un_f_cfg_read_cd_xd4 = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: read_cd_xd4 set\n");
	}
	if (flags & SD_CONF_BSET_FAB_DEVID) {
		un->un_f_opt_fab_devid = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: fab_devid bit set\n");
	}

	/* Valued bits below here read their value out of prop_list. */
	if (flags & SD_CONF_BSET_THROTTLE) {
		ASSERT(prop_list != NULL);
		un->un_saved_throttle = un->un_throttle =
		    prop_list->sdt_throttle;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: throttle set to %d\n",
		    prop_list->sdt_throttle);
	}
	if (flags & SD_CONF_BSET_NRR_COUNT) {
		ASSERT(prop_list != NULL);
		/* A zero not-ready retry count is ignored. */
		if (prop_list->sdt_not_rdy_retries) {
			un->un_notready_retry_count =
			    prop_list->sdt_not_rdy_retries;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_set_vers1_properties: not ready retry count"
			    " set to %d\n", un->un_notready_retry_count);
		}
	}
	if (flags & SD_CONF_BSET_CTYPE) {
		ASSERT(prop_list != NULL);
		/* Only the known controller types are accepted. */
		switch (prop_list->sdt_ctype) {
		case CTYPE_CDROM:
			un->un_ctype = prop_list->sdt_ctype;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_set_vers1_properties: ctype set to "
			    "CTYPE_CDROM\n");
			break;
		case CTYPE_CCS:
			un->un_ctype = prop_list->sdt_ctype;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_set_vers1_properties: ctype set to "
			    "CTYPE_CCS\n");
			break;
		case CTYPE_ROD:
			un->un_ctype = prop_list->sdt_ctype;
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_set_vers1_properties: ctype set to "
			    "CTYPE_ROD\n");
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sd_set_vers1_properties: Could not set "
			    "invalid ctype value (%d)",
			    prop_list->sdt_ctype);
		}
	}
	if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) {
		ASSERT(prop_list != NULL);
		un->un_busy_retry_count =
		    prop_list->sdt_busy_retries;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: "
		    "busy retry count set to %d\n",
		    un->un_busy_retry_count);
	}
	if (flags & SD_CONF_BSET_RST_RETRIES) {
		ASSERT(prop_list != NULL);
		un->un_reset_retry_count =
		    prop_list->sdt_reset_retries;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: "
		    "reset retry count set to %d\n",
		    un->un_reset_retry_count);
	}
	if (flags & SD_CONF_BSET_RSV_REL_TIME) {
		ASSERT(prop_list != NULL);
		un->un_reserve_release_time =
		    prop_list->sdt_reserv_rel_time;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: "
		    "reservation release timeout set to %d\n",
		    un->un_reserve_release_time);
	}
	if (flags & SD_CONF_BSET_TUR_CHECK) {
		un->un_f_cfg_tur_check = TRUE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: tur queue check set\n");
	}
	if (flags & SD_CONF_BSET_MIN_THROTTLE) {
		un->un_min_throttle = prop_list->sdt_min_throttle;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: min throttle set to %d\n",
		    un->un_min_throttle);
	}
	if (flags & SD_CONF_BSET_DISKSORT_DISABLED) {
		un->un_f_disksort_disabled =
		    (prop_list->sdt_disk_sort_dis != 0) ?
		    TRUE : FALSE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: disksort disabled "
		    "flag set to %d\n",
		    prop_list->sdt_disk_sort_dis);
	}
	if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) {
		un->un_f_lun_reset_enabled =
		    (prop_list->sdt_lun_reset_enable != 0) ?
		    TRUE : FALSE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: lun reset enabled "
		    "flag set to %d\n",
		    prop_list->sdt_lun_reset_enable);
	}
	if (flags & SD_CONF_BSET_CACHE_IS_NV) {
		un->un_f_suppress_cache_flush =
		    (prop_list->sdt_suppress_cache_flush != 0) ?
		    TRUE : FALSE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: suppress_cache_flush "
		    "flag set to %d\n",
		    prop_list->sdt_suppress_cache_flush);
	}
	if (flags & SD_CONF_BSET_PC_DISABLED) {
		un->un_f_power_condition_disabled =
		    (prop_list->sdt_power_condition_dis != 0) ?
		    TRUE : FALSE;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_set_vers1_properties: power_condition_disabled "
		    "flag set to %d\n",
		    prop_list->sdt_power_condition_dis);
	}

	/*
	 * Validate the throttle values; fall back to the driver-wide
	 * defaults when the configured values are out of range or
	 * inconsistent with each other.
	 */
	if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
	    (un->un_min_throttle > un->un_throttle)) {
		un->un_saved_throttle = un->un_throttle = sd_max_throttle;
		un->un_min_throttle = sd_min_throttle;
	}
}
/*
 * sd_get_physical_geometry: retrieve the device's physical geometry by
 * issuing MODE SENSE for page 3 (format parameters) and page 4 (rigid
 * disk geometry), and fill in pgeom_p from the results.
 *
 *   capacity  - device capacity (blocks) from a prior READ CAPACITY
 *   lbasize   - logical block size; 0 selects a default (2048 for CD,
 *               else the system block size)
 *   path_flag - SD_PATH_* routing for the mode sense commands
 *
 * Returns 0 on success, EIO otherwise.  CD devices, SATA attachments and
 * SPC-3+ CCS devices are skipped entirely (EIO) since pages 3/4 are not
 * meaningful/supported there.
 */
static int
sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p,
    diskaddr_t capacity, int lbasize, int path_flag)
{
	struct mode_format	*page3p;
	struct mode_geometry	*page4p;
	struct mode_header	*headerp;
	int	sector_size;
	int	nsect;
	int	nhead;
	int	ncyl;
	int	intrlv;
	int	spc;
	diskaddr_t	modesense_capacity;
	int	rpm;
	int	bd_len;
	int	mode_header_length;
	uchar_t	*p3bufp;
	uchar_t	*p4bufp;
	int	cdbsize;
	int	ret = EIO;
	sd_ssc_t *ssc;
	int	status;

	ASSERT(un != NULL);

	/* Pick a default block size when the caller did not supply one. */
	if (lbasize == 0) {
		if (ISCD(un)) {
			lbasize = 2048;
		} else {
			lbasize = un->un_sys_blocksize;
		}
	}
	pgeom_p->g_secsize = (unsigned short)lbasize;

	/*
	 * Devices for which mode sense pages 3/4 are not applicable:
	 * CDs, SATA attachments, and CCS devices at SPC-3 or later.
	 */
	if (ISCD(un) ||
	    un->un_interconnect_type == SD_INTERCONNECT_SATA ||
	    (un->un_ctype == CTYPE_CCS && SD_INQUIRY(un)->inq_ansi >= 5))
		return (ret);

	/* ATAPI devices need group-2 CDBs for mode sense. */
	cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0;

	/*
	 * Retrieve MODE SENSE page 3 - Format Device Page
	 */
	p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP);
	ssc = sd_ssc_init(un);
	status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp,
	    SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag);
	if (status != 0) {
		SD_ERROR(SD_LOG_COMMON, un,
		    "sd_get_physical_geometry: mode sense page 3 failed\n");
		goto page3_exit;
	}

	/*
	 * Determine the header length and block descriptor length from
	 * the mode header; ATAPI uses the group-2 header layout.  The
	 * header length computed here is reused for the page 4 parse
	 * below.
	 */
	headerp = (struct mode_header *)p3bufp;
	if (un->un_f_cfg_is_atapi == TRUE) {
		struct mode_header_grp2 *mhp =
		    (struct mode_header_grp2 *)headerp;
		mode_header_length = MODE_HEADER_LENGTH_GRP2;
		bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
	} else {
		mode_header_length = MODE_HEADER_LENGTH;
		bd_len = ((struct mode_header *)headerp)->bdesc_length;
	}

	if (bd_len > MODE_BLK_DESC_LENGTH) {
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
		    "sd_get_physical_geometry: received unexpected bd_len "
		    "of %d, page3\n", bd_len);
		status = EIO;
		goto page3_exit;
	}

	page3p = (struct mode_format *)
	    ((caddr_t)headerp + mode_header_length + bd_len);

	if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) {
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
		    "sd_get_physical_geometry: mode sense pg3 code mismatch "
		    "%d\n", page3p->mode_page.code);
		status = EIO;
		goto page3_exit;
	}

	/*
	 * Use the sector size reported by the device, rounded down to a
	 * multiple of the system block size; fall back to the system
	 * block size when the device reports zero.
	 */
	sector_size = BE_16(page3p->data_bytes_sect);
	if (sector_size == 0) {
		sector_size = un->un_sys_blocksize;
	} else {
		sector_size &= ~(un->un_sys_blocksize - 1);
	}

	nsect = BE_16(page3p->sect_track);
	intrlv = BE_16(page3p->interleave);

	SD_INFO(SD_LOG_COMMON, un,
	    "sd_get_physical_geometry: Format Parameters (page 3)\n");
	SD_INFO(SD_LOG_COMMON, un,
	    "   mode page: %d; nsect: %d; sector size: %d;\n",
	    page3p->mode_page.code, nsect, sector_size);
	SD_INFO(SD_LOG_COMMON, un,
	    "   interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv,
	    BE_16(page3p->track_skew),
	    BE_16(page3p->cylinder_skew));

	sd_ssc_assessment(ssc, SD_FMT_STANDARD);

	/*
	 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page
	 */
	p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP);
	status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp,
	    SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag);
	if (status != 0) {
		SD_ERROR(SD_LOG_COMMON, un,
		    "sd_get_physical_geometry: mode sense page 4 failed\n");
		goto page4_exit;
	}

	/*
	 * Extract the block descriptor length again; mode_header_length
	 * keeps its page-3 value (both senses use the same CDB group).
	 */
	headerp = (struct mode_header *)p4bufp;
	if (un->un_f_cfg_is_atapi == TRUE) {
		struct mode_header_grp2 *mhp =
		    (struct mode_header_grp2 *)headerp;
		bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
	} else {
		bd_len = ((struct mode_header *)headerp)->bdesc_length;
	}

	if (bd_len > MODE_BLK_DESC_LENGTH) {
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
		    "sd_get_physical_geometry: received unexpected bd_len of "
		    "%d, page4\n", bd_len);
		status = EIO;
		goto page4_exit;
	}

	page4p = (struct mode_geometry *)
	    ((caddr_t)headerp + mode_header_length + bd_len);

	if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) {
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
		    "sd_get_physical_geometry: mode sense pg4 code mismatch "
		    "%d\n", page4p->mode_page.code);
		status = EIO;
		goto page4_exit;
	}

	/* Cylinder count is a 24-bit big-endian quantity split over bytes. */
	nhead = (int)page4p->heads;
	spc = nhead * nsect;
	ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb;
	rpm = BE_16(page4p->rpm);
	modesense_capacity = spc * ncyl;

	SD_INFO(SD_LOG_COMMON, un,
	    "sd_get_physical_geometry: Geometry Parameters (page 4)\n");
	SD_INFO(SD_LOG_COMMON, un,
	    "   cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm);
	SD_INFO(SD_LOG_COMMON, un,
	    "   computed capacity(h*s*c): %d;\n", modesense_capacity);
	SD_INFO(SD_LOG_COMMON, un, "   pgeom_p: %p; read cap: %d\n",
	    (void *)pgeom_p, capacity);

	/*
	 * Only accept the mode-sense geometry when it covers at least the
	 * READ CAPACITY size; the surplus cylinders become alternates.
	 */
	if (modesense_capacity >= capacity) {
		SD_INFO(SD_LOG_COMMON, un,
		    "sd_get_physical_geometry: adjusting acyl; "
		    "old: %d; new: %d\n", pgeom_p->g_acyl,
		    (modesense_capacity - capacity + spc - 1) / spc);
		if (sector_size != 0) {
			pgeom_p->g_secsize = (unsigned short)sector_size;
		}
		pgeom_p->g_nsect = (unsigned short)nsect;
		pgeom_p->g_nhead = (unsigned short)nhead;
		pgeom_p->g_capacity = capacity;
		pgeom_p->g_acyl =
		    (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc;
		pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl;
	}

	pgeom_p->g_rpm = (unsigned short)rpm;
	pgeom_p->g_intrlv = (unsigned short)intrlv;
	ret = 0;

	SD_INFO(SD_LOG_COMMON, un,
	    "sd_get_physical_geometry: mode sense geometry:\n");
	SD_INFO(SD_LOG_COMMON, un,
	    "   nsect: %d; sector size: %d; interlv: %d\n",
	    nsect, sector_size, intrlv);
	SD_INFO(SD_LOG_COMMON, un,
	    "   nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n",
	    nhead, ncyl, rpm, modesense_capacity);
	SD_INFO(SD_LOG_COMMON, un,
	    "sd_get_physical_geometry: (cached)\n");
	SD_INFO(SD_LOG_COMMON, un,
	    "   ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n",
	    pgeom_p->g_ncyl, pgeom_p->g_acyl,
	    pgeom_p->g_nhead, pgeom_p->g_nsect);
	SD_INFO(SD_LOG_COMMON, un,
	    "   lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n",
	    pgeom_p->g_secsize, pgeom_p->g_capacity,
	    pgeom_p->g_intrlv, pgeom_p->g_rpm);

	sd_ssc_assessment(ssc, SD_FMT_STANDARD);

page4_exit:
	kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH);

page3_exit:
	kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH);

	/*
	 * On failure, classify the error for FMA: an ILLEGAL REQUEST
	 * sense (device simply lacks the page) is treated as a benign
	 * compromise; other EIO cases get a full status check.
	 */
	if (status != 0) {
		if (status == EIO) {
			uint8_t *sensep;
			int senlen;

			sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
			senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
			    ssc->ssc_uscsi_cmd->uscsi_rqresid);
			if (senlen > 0 &&
			    scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
				sd_ssc_assessment(ssc,
				    SD_FMT_IGNORE_COMPROMISE);
			} else {
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			}
		} else {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}
	}

	sd_ssc_fini(ssc);
	return (ret);
}
/*
 * sd_get_virtual_geometry: obtain a HBA-derived geometry for the device
 * by programming the sector size and total sector count into the
 * transport and reading back its "geometry" capability.
 *
 * Returns 0 on success with lgeom_p filled in, EINVAL if the transport
 * does not provide a geometry.
 */
static int
sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p,
    diskaddr_t capacity, int lbasize)
{
	uint_t	geombuf;
	int	spc;

	ASSERT(un != NULL);

	/* Prime the transport with this unit's size parameters. */
	(void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1);
	(void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1);

	geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1);
	if (geombuf == (-1)) {
		return (EINVAL);
	}

	/* Upper 16 bits hold the head count, lower 16 sectors/track. */
	lgeom_p->g_nhead = (geombuf >> 16) & 0xffff;
	lgeom_p->g_nsect = geombuf & 0xffff;
	lgeom_p->g_secsize = un->un_sys_blocksize;
	lgeom_p->g_capacity = capacity;
	lgeom_p->g_acyl = 0;

	/* Derive the cylinder count; guard against a zero geometry. */
	spc = lgeom_p->g_nhead * lgeom_p->g_nsect;
	lgeom_p->g_ncyl = (spc == 0) ? 0 : lgeom_p->g_capacity / spc;

	SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n");
	return (0);
}
/*
 * sd_update_block_info: record a newly discovered block size and/or
 * capacity in the unit structure.  A zero argument means "no new
 * information" for that field.  The error kstat capacity tracks the
 * largest byte capacity ever observed.
 */
static void
sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
{
	if (lbasize != 0) {
		un->un_tgt_blocksize = lbasize;
		un->un_f_tgt_blocksize_is_valid = TRUE;
		/* Fixed media adopts the target block size system-wide. */
		if (!un->un_f_has_removable_media) {
			un->un_sys_blocksize = lbasize;
		}
	}

	if (capacity == 0) {
		return;
	}

	un->un_blockcount = capacity;
	un->un_f_blockcount_is_valid = TRUE;

	if (un->un_errstats != NULL) {
		struct sd_errstats	*stp;
		uint64_t		nbytes;

		nbytes = capacity * un->un_sys_blocksize;
		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		/* Only ever grow the recorded capacity. */
		if (stp->sd_capacity.value.ui64 < nbytes) {
			stp->sd_capacity.value.ui64 = nbytes;
		}
	}
}
/*
 * sd_parse_blk_limits_vpd: decode the Block Limits VPD page (0xB0) into
 * un->un_blk_lim.  A NULL vpd_pg, or a page too short for a given group
 * of fields, selects permissive defaults for that group.
 */
static void
sd_parse_blk_limits_vpd(struct sd_lun *un, uchar_t *vpd_pg)
{
	sd_blk_limits_t	*lim = &un->un_blk_lim;
	unsigned	page_len = 0;

	/* Bytes 2-3 of the page carry the page length. */
	if (vpd_pg != NULL) {
		page_len = BE_IN16(&vpd_pg[2]);
	}

	/* Transfer length fields require at least 0x10 bytes of payload. */
	if (page_len >= 0x10) {
		lim->lim_opt_xfer_len_gran = BE_IN16(&vpd_pg[6]);
		lim->lim_max_xfer_len = BE_IN32(&vpd_pg[8]);
		lim->lim_opt_xfer_len = BE_IN32(&vpd_pg[12]);

		/* A reported zero means "no limit". */
		if (lim->lim_max_xfer_len == 0) {
			lim->lim_max_xfer_len = UINT32_MAX;
		}
		if (lim->lim_opt_xfer_len == 0) {
			lim->lim_opt_xfer_len = UINT32_MAX;
		}
	} else {
		lim->lim_opt_xfer_len_gran = 0;
		lim->lim_max_xfer_len = UINT32_MAX;
		lim->lim_opt_xfer_len = UINT32_MAX;
	}

	/* UNMAP/WRITE SAME fields require the full 0x3c-byte payload. */
	if (page_len >= 0x3c) {
		lim->lim_max_pfetch_len = BE_IN32(&vpd_pg[16]);
		lim->lim_max_unmap_lba_cnt = BE_IN32(&vpd_pg[20]);
		lim->lim_max_unmap_descr_cnt = BE_IN32(&vpd_pg[24]);
		lim->lim_opt_unmap_gran = BE_IN32(&vpd_pg[28]);

		/* High bit of byte 32 validates the 31-bit alignment. */
		if (vpd_pg[32] & 0x80) {
			lim->lim_unmap_gran_align =
			    ((vpd_pg[32] & 0x7f) << 24) |
			    (vpd_pg[33] << 16) |
			    (vpd_pg[34] << 8) | vpd_pg[35];
		} else {
			lim->lim_unmap_gran_align = 0;
		}
		lim->lim_max_write_same_len = BE_IN64(&vpd_pg[36]);
	} else {
		lim->lim_max_pfetch_len = UINT32_MAX;
		lim->lim_max_unmap_lba_cnt = UINT32_MAX;
		lim->lim_max_unmap_descr_cnt = SD_UNMAP_MAX_DESCR;
		lim->lim_opt_unmap_gran = 0;
		lim->lim_unmap_gran_align = 0;
		lim->lim_max_write_same_len = UINT64_MAX;
	}
}
/*
 * sd_setup_blk_limits: read the Block Limits VPD page (0xB0), if the
 * device advertises it, and install the resulting limits in the unit.
 * On any fetch failure the page is discarded and defaults are used.
 */
static void
sd_setup_blk_limits(sd_ssc_t *ssc)
{
	struct sd_lun	*un = ssc->ssc_un;
	uchar_t		*inqB0 = NULL;
	size_t		resid = 0;

	if (un->un_vpd_page_mask & SD_VPD_BLK_LIMITS_PG) {
		inqB0 = kmem_zalloc(MAX_INQUIRY_SIZE, KM_SLEEP);
		if (sd_send_scsi_INQUIRY(ssc, inqB0, MAX_INQUIRY_SIZE,
		    0x01, 0xB0, &resid) != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			kmem_free(inqB0, MAX_INQUIRY_SIZE);
			inqB0 = NULL;
		}
	}

	/* A NULL page makes the parser fall back to defaults. */
	sd_parse_blk_limits_vpd(un, inqB0);

	if (inqB0 != NULL) {
		kmem_free(inqB0, MAX_INQUIRY_SIZE);
	}
}
/*
 * sd_register_devid: obtain and register a device id for this unit.
 *
 * The id is taken from the first available source:
 *   1. one already supplied by the transport (ddi_devid_get),
 *   2. a fabricated id (when un_f_opt_fab_devid is set), read from or
 *      written to the reserved on-disk block,
 *   3. one encoded from the standard inquiry plus VPD pages 0x80/0x83
 *      (ddi_devid_scsi_encode), falling back to fabrication.
 *
 * As a side effect, VPD page 0x80 (unit serial number) is used to set
 * the INQUIRY_SERIAL_NO property when it is not already present.
 *
 * Must be called with SD_MUTEX held; the mutex is dropped around the
 * SCSI commands and reacquired afterwards.  reservation_flag indicates
 * whether the target is reserved (fabrication writes are then skipped).
 */
static void
sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag)
{
	int		rval = 0;
	uchar_t		*inq80 = NULL;
	size_t		inq80_len = MAX_INQUIRY_SIZE;
	size_t		inq80_resid = 0;
	uchar_t		*inq83 = NULL;
	size_t		inq83_len = MAX_INQUIRY_SIZE;
	size_t		inq83_resid = 0;
	int		dlen, len;
	char		*sn;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT((SD_DEVINFO(un)) == devi);

	/* Fetch the VPD pages the device supports, if any. */
	if (sd_check_vpd_page_support(ssc) == 0) {
		/* collect page 80 data if available */
		if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {
			mutex_exit(SD_MUTEX(un));
			inq80 = kmem_zalloc(inq80_len, KM_SLEEP);

			rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len,
			    0x01, 0x80, &inq80_resid);

			if (rval != 0) {
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				kmem_free(inq80, inq80_len);
				inq80 = NULL;
				inq80_len = 0;
			} else if (ddi_prop_exists(
			    DDI_DEV_T_NONE, SD_DEVINFO(un),
			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
			    INQUIRY_SERIAL_NO) == 0) {
				/*
				 * Publish the serial number property
				 * from the page 0x80 payload (byte 3 is
				 * the serial length, data starts at 4).
				 * Leading blanks are skipped; an all
				 * blank serial is not published.
				 */
				dlen = inq80_len - inq80_resid;
				len = (size_t)inq80[3];
				if ((dlen >= 4) && ((len + 4) <= dlen)) {
					sn = (char *)&inq80[4];
					sn[len] = 0;
					while (*sn && (*sn == ' '))
						sn++;
					if (*sn) {
						(void) ddi_prop_update_string(
						    DDI_DEV_T_NONE,
						    SD_DEVINFO(un),
						    INQUIRY_SERIAL_NO, sn);
					}
				}
			}
			mutex_enter(SD_MUTEX(un));
		}

		/* collect page 83 data if available */
		if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
			mutex_exit(SD_MUTEX(un));
			inq83 = kmem_zalloc(inq83_len, KM_SLEEP);

			rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len,
			    0x01, 0x83, &inq83_resid);

			if (rval != 0) {
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				kmem_free(inq83, inq83_len);
				inq83 = NULL;
				inq83_len = 0;
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/* Source 1: a devid already provided by the transport. */
	if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) {
		ASSERT(un->un_devid);
		un->un_f_devid_transport_defined = TRUE;
		goto cleanup;
	}

	/*
	 * Source 2: fabricated devid.  Read it from the reserved disk
	 * block; create and write a new one only when reading fails and
	 * the target is not reserved by another host.
	 */
	if (un->un_f_opt_fab_devid == TRUE) {
		if ((sd_get_devid(ssc) == EINVAL) &&
		    (reservation_flag != SD_TARGET_IS_RESERVED)) {
			(void) sd_create_devid(ssc);
		}

		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: Devid Fabricated\n");
		}
		goto cleanup;
	}

	/* Source 3: encode from inquiry and VPD pages 0x80/0x83. */
	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
	    (char *)ddi_driver_name(SD_DEVINFO(un)),
	    (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
	    inq80, inq80_len - inq80_resid, inq83, inq83_len -
	    inq83_resid, &un->un_devid) == DDI_SUCCESS) {
		(void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);
	} else {
		/*
		 * Encoding failed; fall back to fabrication and remember
		 * the choice so future attaches take the fabricated path.
		 */
		if (sd_get_devid(ssc) == EINVAL) {
			(void) sd_create_devid(ssc);
		}
		un->un_f_opt_fab_devid = TRUE;

		if (un->un_devid != NULL) {
			(void) ddi_devid_register(SD_DEVINFO(un),
			    un->un_devid);
			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_register_devid: devid fabricated using "
			    "ddi framework\n");
		}
	}

cleanup:
	/* Release the VPD page buffers. */
	if (inq80 != NULL) {
		kmem_free(inq80, inq80_len);
	}
	if (inq83 != NULL) {
		kmem_free(inq83, inq83_len);
	}
}
/*
 * sd_get_devid: read the fabricated device id from its reserved block on
 * the disk and, if it validates, install a copy in un->un_devid.
 *
 * Returns 0 on success (or when a devid is already present), EINVAL when
 * the on-disk data is absent/corrupt, or the error from the read.
 *
 * Must be called with SD_MUTEX held; the mutex is dropped around the
 * cmlb lookup and the SCSI read, and is held again on return.
 */
static int
sd_get_devid(sd_ssc_t *ssc)
{
	struct dk_devid		*dkdevid;
	ddi_devid_t		tmpid;
	uint_t			*ip;
	size_t			sz;
	diskaddr_t		blk;
	int			status;
	int			chksum;
	int			i;
	size_t			buffer_size;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n",
	    un);

	/* Nothing to do when a devid is already installed. */
	if (un->un_devid != NULL) {
		return (0);
	}

	/* Locate the reserved devid block via cmlb. */
	mutex_exit(SD_MUTEX(un));
	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
	    (void *)SD_PATH_DIRECT) != 0) {
		mutex_enter(SD_MUTEX(un));
		return (EINVAL);
	}
	mutex_enter(SD_MUTEX(un));

	/*
	 * Read and verify device id, stored in the reserved cylinders at
	 * the end of the disk.  The read is sized to the target block
	 * granularity.
	 */
	buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid));

	mutex_exit(SD_MUTEX(un));
	dkdevid = kmem_alloc(buffer_size, KM_SLEEP);

	/* Read the devid block. */
	status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk,
	    SD_PATH_DIRECT);
	if (status != 0) {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto error;
	}

	/* Validate the revision of the on-disk structure. */
	if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
	    (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
		status = EINVAL;
		goto error;
	}

	/*
	 * Verify the checksum: XOR of all but the last int of the
	 * DEV_BSIZE block must equal the stored checksum.
	 */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Compare the checksums. */
	if (DKD_GETCHKSUM(dkdevid) != chksum) {
		status = EINVAL;
		goto error;
	}

	/* Validate the device id itself. */
	if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
		status = EINVAL;
		goto error;
	}

	/* Install a heap copy of the devid in the soft state. */
	sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
	tmpid = kmem_alloc(sz, KM_SLEEP);

	mutex_enter(SD_MUTEX(un));

	un->un_devid = tmpid;
	bcopy(&dkdevid->dkd_devid, un->un_devid, sz);

	kmem_free(dkdevid, buffer_size);

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un);

	return (status);
error:
	/* Reacquire the mutex before returning, per the entry contract. */
	mutex_enter(SD_MUTEX(un));
	kmem_free(dkdevid, buffer_size);
	return (status);
}
/*
 * sd_create_devid: fabricate a new device id for this unit and persist
 * it to the reserved on-disk block.
 *
 * Returns the new devid (also stored in un->un_devid), or NULL when
 * either fabrication or the on-disk write fails.  On a write failure
 * the fabricated id is freed and un->un_devid is cleared.
 */
static ddi_devid_t
sd_create_devid(sd_ssc_t *ssc)
{
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	/* Ask the DDI to fabricate an id; give up if it cannot. */
	if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL,
	    &un->un_devid) == DDI_FAILURE) {
		return (NULL);
	}

	/* Persist it; discard the id when the disk write fails. */
	if (sd_write_deviceid(ssc) != 0) {
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	return (un->un_devid);
}
/*
 * sd_write_deviceid: write un->un_devid to the reserved devid block on
 * the disk, wrapped in a dk_devid structure with revision and checksum.
 *
 * Returns 0 on success, -1 when the devid block cannot be located, or
 * the error from the SCSI write.
 *
 * Must be called with SD_MUTEX held; the mutex is dropped around the
 * cmlb lookup and the write, and is held again on return.
 */
static int
sd_write_deviceid(sd_ssc_t *ssc)
{
	struct dk_devid		*dkdevid;
	uchar_t			*buf;
	diskaddr_t		blk;
	uint_t			*ip, chksum;
	int			status;
	int			i;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* Locate the reserved devid block via cmlb. */
	mutex_exit(SD_MUTEX(un));
	if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
	    (void *)SD_PATH_DIRECT) != 0) {
		mutex_enter(SD_MUTEX(un));
		return (-1);
	}

	/* Allocate the buffer and fill in the revision. */
	buf = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);
	dkdevid = (struct dk_devid *)buf;

	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;

	/* Copy in the device id (taken while holding the mutex). */
	mutex_enter(SD_MUTEX(un));
	bcopy(un->un_devid, &dkdevid->dkd_devid,
	    ddi_devid_sizeof(un->un_devid));
	mutex_exit(SD_MUTEX(un));

	/*
	 * Calculate the checksum: XOR of all but the last int of the
	 * DEV_BSIZE block (the last slot holds the checksum itself).
	 */
	chksum = 0;
	ip = (uint_t *)dkdevid;
	for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
	    i++) {
		chksum ^= ip[i];
	}

	/* Fill in the checksum. */
	DKD_FORMCHKSUM(chksum, dkdevid);

	/* Write the reserved sector. */
	status = sd_send_scsi_WRITE(ssc, buf, un->un_sys_blocksize, blk,
	    SD_PATH_DIRECT);
	if (status != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	kmem_free(buf, un->un_sys_blocksize);

	mutex_enter(SD_MUTEX(un));
	return (status);
}
/*
 * sd_check_vpd_page_support: issue an INQUIRY for VPD page 0x00 (the
 * supported-pages page) and record which pages the device implements in
 * un->un_vpd_page_mask.
 *
 * Returns 0 on success, -1 when the device does not implement VPD pages
 * (or the inquiry failed).
 *
 * Must be called with SD_MUTEX held; the mutex is dropped around the
 * SCSI command and reacquired afterwards.
 */
static int
sd_check_vpd_page_support(sd_ssc_t *ssc)
{
	uchar_t *page_list	= NULL;
	uchar_t page_length	= 0xff;	/* Use max possible length */
	uchar_t evpd		= 0x01;	/* Set the EVPD bit */
	uchar_t page_code	= 0x00;	/* Supported VPD Pages */
	int	rval		= 0;
	int	counter;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	mutex_exit(SD_MUTEX(un));

	/*
	 * We'll set the page length to the maximum to save figuring it
	 * out with an additional call.
	 */
	page_list = kmem_zalloc(page_length, KM_SLEEP);

	rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd,
	    page_code, NULL);

	if (rval != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Walk the returned page code list (which starts at offset 4 and
	 * is sorted ascending) and latch a mask bit for each page of
	 * interest.  The walk stops at the first code above 0xB1 or at
	 * the end of the list.
	 *
	 * NOTE(review): the upper bound is page_list[VPD_PAGE_LENGTH] +
	 * VPD_HEAD_OFFSET with "<=", which for a device reporting the
	 * maximum length could index at or just past the 0xff-byte
	 * buffer — confirm the bound against VPD_HEAD_OFFSET's value.
	 */
	if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) {
		/* Loop to process all the VPD page length parsed */
		counter = 4;
		while ((page_list[counter] <= 0xB1) &&
		    (counter <= (page_list[VPD_PAGE_LENGTH] +
		    VPD_HEAD_OFFSET))) {
			switch (page_list[counter]) {
			case 0x00:
				un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG;
				break;
			case 0x80:
				un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG;
				break;
			case 0x81:
				un->un_vpd_page_mask |= SD_VPD_OPERATING_PG;
				break;
			case 0x82:
				un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG;
				break;
			case 0x83:
				un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG;
				break;
			case 0x86:
				un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG;
				break;
			case 0xB0:
				un->un_vpd_page_mask |= SD_VPD_BLK_LIMITS_PG;
				break;
			case 0xB1:
				un->un_vpd_page_mask |= SD_VPD_DEV_CHARACTER_PG;
				break;
			}
			counter++;
		}
	} else {
		rval = -1;
		SD_INFO(SD_LOG_ATTACH_DETACH, un,
		    "sd_check_vpd_page_support: This drive does not implement "
		    "VPD pages.\n");
	}

	kmem_free(page_list, page_length);
	return (rval);
}
/*
 * Initialize Power Management for the unit during attach.
 *
 * Decides whether PM is enabled for this unit (un->un_f_pm_is_enabled),
 * selects which START/STOP CYCLE log page to use for cycle-count
 * accounting, and creates the "pm-components" property via
 * sd_create_pm_components().
 *
 * Arguments: ssc - sd_ssc_t for the unit; devi - the unit's devinfo node
 *
 * Context: SD_MUTEX must not be held.
 */
static void
sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi)
{
uint_t log_page_size;
uchar_t *log_page_data;
int rval = 0;
struct sd_lun *un;
ASSERT(ssc != NULL);
un = ssc->ssc_un;
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
/* Tell the PM framework this device needs suspend/resume handling. */
(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
"pm-hardware-state", "needs-suspend-resume");
/*
 * Fast path: the unit is already flagged as pm-supported.  Enable PM
 * unconditionally and probe which power-change command style works:
 * power conditions if supported, otherwise plain START/STOP.
 */
if (un->un_f_pm_supported) {
un->un_f_start_stop_supported = TRUE;
if (un->un_f_power_condition_supported) {
rval = sd_send_scsi_START_STOP_UNIT(ssc,
SD_POWER_CONDITION, SD_TARGET_ACTIVE,
SD_PATH_DIRECT);
if (rval != 0) {
/* Device rejected power conditions; fall back. */
un->un_f_power_condition_supported = FALSE;
}
}
if (!un->un_f_power_condition_supported) {
rval = sd_send_scsi_START_STOP_UNIT(ssc,
SD_START_STOP, SD_TARGET_START, SD_PATH_DIRECT);
}
if (rval != 0) {
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
un->un_f_start_stop_supported = FALSE;
}
un->un_f_pm_is_enabled = TRUE;
(void) sd_create_pm_components(devi, un);
/* Use LOG SENSE cycle counts only if the page is implemented. */
if (un->un_f_log_sense_supported) {
rval = sd_log_page_supported(ssc,
START_STOP_CYCLE_PAGE);
if (rval == 1) {
un->un_start_stop_cycle_page =
START_STOP_CYCLE_PAGE;
} else {
un->un_f_log_sense_supported = FALSE;
un->un_f_pm_log_sense_smart = FALSE;
}
}
return;
}
/* Without LOG SENSE there is no cycle accounting: leave PM disabled. */
if (!un->un_f_log_sense_supported) {
un->un_power_level = SD_SPINDLE_ON;
un->un_f_pm_is_enabled = FALSE;
return;
}
rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE);
#ifdef SDDEBUG
if (sd_force_pm_supported) {
/* Debug override: pretend the standard page is supported. */
rval = 1;
}
#endif
if (rval == -1) {
/* Could not read the supported-pages list: disable PM. */
un->un_power_level = SD_SPINDLE_ON;
un->un_f_pm_is_enabled = FALSE;
} else if (rval == 0) {
/* Standard page absent; try the vendor-unique variant. */
if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) {
un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
un->un_f_pm_is_enabled = TRUE;
} else {
un->un_power_level = SD_SPINDLE_ON;
un->un_f_pm_is_enabled = FALSE;
}
} else {
un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
un->un_f_pm_is_enabled = TRUE;
}
/*
 * Verify the chosen cycle-counter page can actually be read before
 * creating the pm-components property; back out and disable PM if
 * the LOG SENSE fails.
 */
if (un->un_f_pm_is_enabled == TRUE) {
log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
log_page_size, un->un_start_stop_cycle_page,
0x01, 0, SD_PATH_DIRECT);
if (rval != 0) {
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
}
#ifdef SDDEBUG
if (sd_force_pm_supported) {
/* Debug override: treat the LOG SENSE as successful. */
rval = 0;
}
#endif
if (rval == 0) {
(void) sd_create_pm_components(devi, un);
} else {
un->un_power_level = SD_SPINDLE_ON;
un->un_f_pm_is_enabled = FALSE;
}
kmem_free(log_page_data, log_page_size);
}
}
/*
 * Create the "pm-components" property describing the unit's power
 * levels (the power-condition set when supported, otherwise the simpler
 * start/stop pair), then raise the device to full power if it was spun
 * up at attach.  If the property cannot be created, PM is disabled for
 * the unit.
 *
 * Context: SD_MUTEX must not be held on entry; all locks are released
 * on exit.
 */
static void
sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
{
ASSERT(!mutex_owned(SD_MUTEX(un)));
if (un->un_f_power_condition_supported) {
/* Power-condition style: 5 strings from sd_pwr_pc. */
if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
"pm-components", sd_pwr_pc.pm_comp, 5)
!= DDI_PROP_SUCCESS) {
un->un_power_level = SD_SPINDLE_ACTIVE;
un->un_f_pm_is_enabled = FALSE;
return;
}
} else {
/* Start/stop style: 3 strings from sd_pwr_ss. */
if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
"pm-components", sd_pwr_ss.pm_comp, 3)
!= DDI_PROP_SUCCESS) {
un->un_power_level = SD_SPINDLE_ON;
un->un_f_pm_is_enabled = FALSE;
return;
}
}
/*
 * If the device was spun up during attach, record the active level
 * with un_pm_count = 0; otherwise record the stopped level with
 * un_pm_count = -1 (the low-power marker used elsewhere in this
 * driver's PM bookkeeping).  Note the nesting order: SD_MUTEX is
 * taken before un_pm_mutex.
 */
if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0,
SD_PM_STATE_ACTIVE(un)) == DDI_SUCCESS)) {
mutex_enter(SD_MUTEX(un));
un->un_power_level = SD_PM_STATE_ACTIVE(un);
mutex_enter(&un->un_pm_mutex);
un->un_pm_count = 0;
} else {
mutex_enter(SD_MUTEX(un));
un->un_power_level = SD_PM_STATE_STOPPED(un);
mutex_enter(&un->un_pm_mutex);
un->un_pm_count = -1;
}
mutex_exit(&un->un_pm_mutex);
mutex_exit(SD_MUTEX(un));
}
/*
 * DDI_SUSPEND handler: quiesce the unit in preparation for a system
 * suspend.  Waits (bounded by sd_wait_cmds_complete seconds) for
 * in-transport commands to drain, suspends the media watch thread,
 * cancels all pending timeouts, and removes fibre-channel insert/remove
 * event handlers.
 *
 * Return: DDI_SUCCESS when quiesced (or already suspended);
 * DDI_FAILURE when the device holds/wants a reservation, is waiting
 * for resources or mid power transition, or commands do not drain in
 * time.
 */
static int
sd_ddi_suspend(dev_info_t *devi)
{
struct sd_lun *un;
clock_t wait_cmds_complete;
un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
if (un == NULL) {
return (DDI_FAILURE);
}
SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");
mutex_enter(SD_MUTEX(un));
/* Already suspended: nothing to do. */
if (un->un_state == SD_STATE_SUSPENDED) {
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
"device already suspended, exiting\n");
return (DDI_SUCCESS);
}
/* Never suspend a unit involved in a reservation (HA usage). */
if (un->un_resvd_status &
(SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
"device in use by HA, exiting\n");
return (DDI_FAILURE);
}
/* Refuse while waiting for resources or mid power transition. */
if ((un->un_state == SD_STATE_RWAIT) ||
(un->un_state == SD_STATE_PM_CHANGING)) {
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
"device in resource wait state, exiting\n");
return (DDI_FAILURE);
}
un->un_save_state = un->un_last_state;
New_state(un, SD_STATE_SUSPENDED);
/*
 * Wait up to sd_wait_cmds_complete seconds for commands in transport
 * to drain; on timeout, restore the previous state and fail.
 */
wait_cmds_complete = ddi_get_lbolt() +
(sd_wait_cmds_complete * drv_usectohz(1000000));
while (un->un_ncmds_in_transport != 0) {
if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
wait_cmds_complete) == -1) {
Restore_state(un);
un->un_last_state = un->un_save_state;
cv_broadcast(&un->un_suspend_cv);
mutex_exit(SD_MUTEX(un));
SD_ERROR(SD_LOG_IO_PM, un,
"sd_ddi_suspend: failed due to outstanding cmds\n");
SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
return (DDI_FAILURE);
}
}
/* Suspend the scsi_watch media-state monitor, if one is active. */
if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
opaque_t temp_token = un->un_swr_token;
mutex_exit(SD_MUTEX(un));
scsi_watch_suspend(temp_token);
mutex_enter(SD_MUTEX(un));
}
/*
 * Cancel every pending timeout.  Each follows the same pattern:
 * snapshot and clear the id while holding the lock that protects it,
 * then drop SD_MUTEX around untimeout() because the timeout handler
 * may itself need SD_MUTEX.
 */
if (un->un_reset_throttle_timeid != NULL) {
timeout_id_t temp_id = un->un_reset_throttle_timeid;
un->un_reset_throttle_timeid = NULL;
mutex_exit(SD_MUTEX(un));
(void) untimeout(temp_id);
mutex_enter(SD_MUTEX(un));
}
if (un->un_dcvb_timeid != NULL) {
timeout_id_t temp_id = un->un_dcvb_timeid;
un->un_dcvb_timeid = NULL;
mutex_exit(SD_MUTEX(un));
(void) untimeout(temp_id);
mutex_enter(SD_MUTEX(un));
}
/* un_pm_timeid is protected by un_pm_mutex rather than SD_MUTEX. */
mutex_enter(&un->un_pm_mutex);
if (un->un_pm_timeid != NULL) {
timeout_id_t temp_id = un->un_pm_timeid;
un->un_pm_timeid = NULL;
mutex_exit(&un->un_pm_mutex);
mutex_exit(SD_MUTEX(un));
(void) untimeout(temp_id);
mutex_enter(SD_MUTEX(un));
} else {
mutex_exit(&un->un_pm_mutex);
}
if (un->un_rmw_msg_timeid != NULL) {
timeout_id_t temp_id = un->un_rmw_msg_timeid;
un->un_rmw_msg_timeid = NULL;
mutex_exit(SD_MUTEX(un));
(void) untimeout(temp_id);
mutex_enter(SD_MUTEX(un));
}
if (un->un_retry_timeid != NULL) {
timeout_id_t temp_id = un->un_retry_timeid;
un->un_retry_timeid = NULL;
mutex_exit(SD_MUTEX(un));
(void) untimeout(temp_id);
mutex_enter(SD_MUTEX(un));
/*
 * Put any pending retry command back at the head of the wait
 * queue so it is reissued after resume.
 */
if (un->un_retry_bp != NULL) {
un->un_retry_bp->av_forw = un->un_waitq_headp;
un->un_waitq_headp = un->un_retry_bp;
if (un->un_waitq_tailp == NULL) {
un->un_waitq_tailp = un->un_retry_bp;
}
un->un_retry_bp = NULL;
un->un_retry_statp = NULL;
}
}
if (un->un_direct_priority_timeid != NULL) {
timeout_id_t temp_id = un->un_direct_priority_timeid;
un->un_direct_priority_timeid = NULL;
mutex_exit(SD_MUTEX(un));
(void) untimeout(temp_id);
mutex_enter(SD_MUTEX(un));
}
/* Remove insert/remove event handlers registered for FC devices. */
if (un->un_f_is_fibre == TRUE) {
if (un->un_insert_event != NULL) {
mutex_exit(SD_MUTEX(un));
(void) ddi_remove_event_handler(un->un_insert_cb_id);
mutex_enter(SD_MUTEX(un));
un->un_insert_event = NULL;
}
if (un->un_remove_event != NULL) {
mutex_exit(SD_MUTEX(un));
(void) ddi_remove_event_handler(un->un_remove_cb_id);
mutex_enter(SD_MUTEX(un));
un->un_remove_event = NULL;
}
}
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");
return (DDI_SUCCESS);
}
/*
 * DDI_RESUME handler: restore the state saved by sd_ddi_suspend(),
 * optionally spin the device back up, wake any waiters, resume the
 * media watch thread, and restart queued I/O.
 *
 * Return: DDI_SUCCESS, or DDI_FAILURE if no soft state exists for the
 * instance.
 */
static int
sd_ddi_resume(dev_info_t *devi)
{
struct sd_lun *un;
un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
if (un == NULL) {
return (DDI_FAILURE);
}
SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");
mutex_enter(SD_MUTEX(un));
/* Undo the New_state() performed at suspend time. */
Restore_state(un);
un->un_last_state = un->un_save_state;
un->un_throttle = un->un_saved_throttle;
if (un->un_f_attach_spinup) {
/* Drop the mutex across the potentially blocking power call. */
mutex_exit(SD_MUTEX(un));
(void) pm_raise_power(SD_DEVINFO(un), 0,
SD_PM_STATE_ACTIVE(un));
mutex_enter(SD_MUTEX(un));
}
/* Wake threads blocked on suspend or state-change transitions. */
cv_broadcast(&un->un_suspend_cv);
cv_broadcast(&un->un_state_cv);
if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
scsi_watch_resume(un->un_swr_token);
}
/* Restart xbuf processing and any commands left on the wait queue. */
ddi_xbuf_dispatch(un->un_xbuf_attr);
sd_start_cmds(un, NULL);
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");
return (DDI_SUCCESS);
}
/*
 * Record a power-level transition in the driver's PM bookkeeping.
 *
 * Arguments:
 *   un    - softstate for the unit
 *   level - the new power level
 *   flag  - SD_PM_STATE_CHANGE for a normal transition, or
 *           SD_PM_STATE_ROLLBACK to force the level back after a
 *           failed power command
 *
 * Return: DDI_SUCCESS, or DDI_FAILURE when a transition to a
 * non-I/O-capable level is disallowed (PM disabled, or the unit
 * holds/wants a reservation).
 *
 * Context: SD_MUTEX must not be held on entry.
 */
static int
sd_pm_state_change(struct sd_lun *un, int level, int flag)
{
ASSERT(un != NULL);
SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: entry\n");
ASSERT(!mutex_owned(SD_MUTEX(un)));
mutex_enter(SD_MUTEX(un));
if (flag == SD_PM_STATE_ROLLBACK || SD_PM_IS_IO_CAPABLE(un, level)) {
/*
 * Entering an I/O-capable level (or rolling back a failed
 * power-down): adopt the level, and if the unit was marked in
 * low power, bump un_pm_count back up.  The ASSERT documents
 * that -1 is the only legal low-power count, so the increment
 * must land on 0.
 */
un->un_power_level = level;
ASSERT(!mutex_owned(&un->un_pm_mutex));
mutex_enter(&un->un_pm_mutex);
if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
un->un_pm_count++;
ASSERT(un->un_pm_count == 0);
}
mutex_exit(&un->un_pm_mutex);
} else {
/*
 * Powering down: disallowed while PM is disabled or while a
 * reservation is held/wanted/lost (HA configurations).
 */
if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
(SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_POWER, un,
"sd_pm_state_change: exiting\n");
return (DDI_FAILURE);
}
SD_INFO(SD_LOG_POWER, un, "sd_pm_state_change: "
"un_ncmds_in_driver=%ld\n", un->un_ncmds_in_driver);
/* Record low power only if the unit is truly quiescent. */
if ((un->un_ncmds_in_driver == 0) &&
(un->un_state != SD_STATE_RWAIT)) {
mutex_enter(&un->un_pm_mutex);
un->un_pm_count = -1;
mutex_exit(&un->un_pm_mutex);
un->un_power_level = level;
}
}
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: exit\n");
return (DDI_SUCCESS);
}
/*
 * Periodic timeout that marks the device idle to the PM framework once
 * it has been free of commands and PM holds for at least
 * sd_pm_idletime seconds; otherwise it re-arms itself to check again
 * in 300ms.
 */
static void
sd_pm_idletimeout_handler(void *arg)
{
const hrtime_t idletime = sd_pm_idletime * NANOSEC;
struct sd_lun *un = arg;
mutex_enter(SD_MUTEX(un));
mutex_enter(&un->un_pm_mutex);
/* A NULL timeout id means this timeout was cancelled: bail out. */
if (un->un_pm_idle_timeid == NULL) {
mutex_exit(&un->un_pm_mutex);
mutex_exit(SD_MUTEX(un));
return;
}
/* Idle only when quiet long enough, no commands, and no PM holds. */
if (((gethrtime() - un->un_pm_idle_time) > idletime) &&
(un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
/* Reset the buf/uscsi I/O chains to their attach-time defaults. */
if (un->un_f_non_devbsize_supported) {
un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
} else {
un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
}
un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
SD_TRACE(SD_LOG_IO_PM, un,
"sd_pm_idletimeout_handler: idling device\n");
(void) pm_idle_component(SD_DEVINFO(un), 0);
un->un_pm_idle_timeid = NULL;
} else {
/* Not idle yet: check again in 300ms. */
un->un_pm_idle_timeid =
timeout(sd_pm_idletimeout_handler, un,
(drv_usectohz((clock_t)300000)));
}
mutex_exit(&un->un_pm_mutex);
mutex_exit(SD_MUTEX(un));
}
/*
 * One-shot timeout set by sdpower() when pm_trans_check() reported that
 * it is too soon to power cycle.  Releases the busy hold taken when the
 * timeout was scheduled, so the PM framework may retry the transition.
 */
static void
sd_pm_timeout_handler(void *arg)
{
	struct sd_lun	*un = (struct sd_lun *)arg;

	/* Balance the pm_busy_component() taken when this was scheduled. */
	(void) pm_idle_component(SD_DEVINFO(un), 0);

	/* The timeout has fired: clear its id under the pm mutex. */
	mutex_enter(&un->un_pm_mutex);
	un->un_pm_timeid = NULL;
	mutex_exit(&un->un_pm_mutex);
}
/*
 * power(9E) entry point: raise or lower the power level of component 0
 * (the only component) of this disk.
 *
 * Return: DDI_SUCCESS on a completed transition; DDI_FAILURE when the
 * transition is rejected (queued commands, device off-line/suspended,
 * cycle-count threshold not yet met) or the power command fails.
 */
static int
sdpower(dev_info_t *devi, int component, int level)
{
struct sd_lun *un;
int instance;
int rval = DDI_SUCCESS;
uint_t i, log_page_size, maxcycles, ncycles;
uchar_t *log_page_data;
int log_sense_page;
int medium_present;
time_t intvlp;
struct pm_trans_data sd_pm_tran_data;
uchar_t save_state = SD_STATE_NORMAL;
int sval;
uchar_t state_before_pm;
sd_ssc_t *ssc;
int last_power_level = SD_SPINDLE_UNINIT;
instance = ddi_get_instance(devi);
/* Only component 0 exists; reject unknown units and invalid levels. */
if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
!SD_PM_IS_LEVEL_VALID(un, level) || component != 0) {
return (DDI_FAILURE);
}
ssc = sd_ssc_init(un);
SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);
mutex_enter(SD_MUTEX(un));
SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
un->un_ncmds_in_driver);
/* Refuse to leave an I/O-capable level while commands are queued. */
if ((!SD_PM_IS_IO_CAPABLE(un, level)) &&
(un->un_ncmds_in_driver != 0)) {
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO_PM, un,
"sdpower: exit, device has queued cmds.\n");
goto sdpower_failed;
}
/* No power transitions while off-line or suspended. */
if ((un->un_state == SD_STATE_OFFLINE) ||
(un->un_state == SD_STATE_SUSPENDED)) {
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO_PM, un,
"sdpower: exit, device is off-line.\n");
goto sdpower_failed;
}
/* Mark the unit mid-transition; restored before every return path. */
state_before_pm = un->un_state;
un->un_state = SD_STATE_PM_CHANGING;
mutex_exit(SD_MUTEX(un));
/*
 * Before stopping the spindle, read the start/stop cycle counter log
 * page and let pm_trans_check() decide whether another power cycle
 * is advisable yet.
 */
if (SD_PM_STOP_MOTOR_NEEDED(un, level) &&
un->un_f_log_sense_supported) {
log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
mutex_enter(SD_MUTEX(un));
log_sense_page = un->un_start_stop_cycle_page;
mutex_exit(SD_MUTEX(un));
rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
if (rval != 0) {
if (rval == EIO)
sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
else
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
}
#ifdef SDDEBUG
if (sd_force_pm_supported) {
/* Debug override: ignore the LOG SENSE failure. */
rval = 0;
}
#endif
if (rval != 0) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"Log Sense Failed\n");
kmem_free(log_page_data, log_page_size);
mutex_enter(SD_MUTEX(un));
un->un_state = state_before_pm;
cv_broadcast(&un->un_suspend_cv);
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO_PM, un,
"sdpower: exit, Log Sense Failed.\n");
goto sdpower_failed;
}
/* Big-endian 32-bit cycle counts at fixed offsets in the page. */
maxcycles =
(log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
(log_page_data[0x1E] << 8) | log_page_data[0x1F];
ncycles =
(log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
(log_page_data[0x26] << 8) | log_page_data[0x27];
/* Package the counts in the format pm_trans_check() expects. */
if (un->un_f_pm_log_sense_smart) {
sd_pm_tran_data.un.smart_count.allowed = maxcycles;
sd_pm_tran_data.un.smart_count.consumed = ncycles;
sd_pm_tran_data.un.smart_count.flag = 0;
sd_pm_tran_data.format = DC_SMART_FORMAT;
} else {
sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;
sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;
for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
log_page_data[8+i];
}
sd_pm_tran_data.un.scsi_cycles.flag = 0;
sd_pm_tran_data.format = DC_SCSI_FORMAT;
}
kmem_free(log_page_data, log_page_size);
rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
#ifdef SDDEBUG
if (sd_force_pm_supported) {
/* Debug override: force "OK to power cycle". */
rval = 1;
}
#endif
switch (rval) {
case 0:
/*
 * Not yet advisable to power cycle: arrange for a
 * busy->idle transition after intvlp seconds so PM
 * retries later, then fail this request.
 */
mutex_enter(&un->un_pm_mutex);
if (un->un_pm_timeid == NULL) {
un->un_pm_timeid =
timeout(sd_pm_timeout_handler,
un, intvlp * drv_usectohz(1000000));
mutex_exit(&un->un_pm_mutex);
(void) pm_busy_component(SD_DEVINFO(un), 0);
} else {
mutex_exit(&un->un_pm_mutex);
}
mutex_enter(SD_MUTEX(un));
un->un_state = state_before_pm;
cv_broadcast(&un->un_suspend_cv);
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
"trans check Failed, not ok to power cycle.\n");
goto sdpower_failed;
case -1:
/* pm_trans_check() itself failed. */
mutex_enter(SD_MUTEX(un));
un->un_state = state_before_pm;
cv_broadcast(&un->un_suspend_cv);
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO_PM, un,
"sdpower: exit, trans check command Failed.\n");
goto sdpower_failed;
}
}
/*
 * Powering down: save the current state so it can be rolled back if
 * the STOP command fails, and update the PM bookkeeping first.
 */
if (!SD_PM_IS_IO_CAPABLE(un, level)) {
mutex_enter(SD_MUTEX(un));
save_state = un->un_last_state;
last_power_level = un->un_power_level;
ASSERT(un->un_ncmds_in_driver == 0);
mutex_exit(SD_MUTEX(un));
if ((rval = sd_pm_state_change(un, level, SD_PM_STATE_CHANGE))
== DDI_FAILURE) {
mutex_enter(SD_MUTEX(un));
un->un_state = state_before_pm;
un->un_power_level = last_power_level;
cv_broadcast(&un->un_suspend_cv);
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO_PM, un,
"sdpower: exit, PM suspend Failed.\n");
goto sdpower_failed;
}
}
/*
 * When raising to an I/O-capable level, probe for media first; an
 * ENXIO result on removable media indicates no medium is present.
 */
medium_present = TRUE;
if (SD_PM_IS_IO_CAPABLE(un, level)) {
sval = sd_send_scsi_TEST_UNIT_READY(ssc,
SD_DONT_RETRY_TUR | SD_BYPASS_PM);
if (sval != 0)
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
}
/* Issue the actual power-change command to the target. */
if (un->un_f_power_condition_supported) {
char *pm_condition_name[] = {"STOPPED", "STANDBY",
"IDLE", "ACTIVE"};
SD_TRACE(SD_LOG_IO_PM, un,
"sdpower: sending \'%s\' power condition",
pm_condition_name[level]);
sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
sd_pl2pc[level], SD_PATH_DIRECT);
} else {
SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
((level == SD_SPINDLE_ON) ? "START" : "STOP"));
sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
((level == SD_SPINDLE_ON) ? SD_TARGET_START :
SD_TARGET_STOP), SD_PATH_DIRECT);
}
if (sval != 0) {
if (sval == EIO)
sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
else
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
}
/* ENXIO from a removable-media device means the medium is absent. */
if ((sval == ENXIO) && un->un_f_has_removable_media) {
medium_present = FALSE;
}
if (!SD_PM_IS_IO_CAPABLE(un, level)) {
/*
 * Power-down path: if the STOP failed with a medium present,
 * roll the PM state back; otherwise stop the media watch
 * thread, which cannot run against a stopped device.
 */
if ((medium_present == TRUE) && (sval != 0)) {
rval = DDI_FAILURE;
(void) sd_pm_state_change(un, last_power_level,
SD_PM_STATE_ROLLBACK);
mutex_enter(SD_MUTEX(un));
un->un_last_state = save_state;
mutex_exit(SD_MUTEX(un));
} else if (un->un_f_monitor_media_state) {
mutex_enter(SD_MUTEX(un));
un->un_f_watcht_stopped = FALSE;
if (un->un_swr_token != NULL) {
opaque_t temp_token = un->un_swr_token;
un->un_f_watcht_stopped = TRUE;
un->un_swr_token = NULL;
mutex_exit(SD_MUTEX(un));
(void) scsi_watch_request_terminate(temp_token,
SCSI_WATCH_TERMINATE_ALL_WAIT);
} else {
mutex_exit(SD_MUTEX(un));
}
}
} else {
/*
 * Power-up path: on success record the new level and restart
 * the media watch thread if it was stopped at power-down.
 */
if ((sval != 0) && medium_present) {
rval = DDI_FAILURE;
} else {
(void) sd_pm_state_change(un, level,
SD_PM_STATE_CHANGE);
if (un->un_f_monitor_media_state) {
mutex_enter(SD_MUTEX(un));
if (un->un_f_watcht_stopped == TRUE) {
opaque_t temp_token;
un->un_f_watcht_stopped = FALSE;
mutex_exit(SD_MUTEX(un));
temp_token =
sd_watch_request_submit(un);
mutex_enter(SD_MUTEX(un));
un->un_swr_token = temp_token;
}
mutex_exit(SD_MUTEX(un));
}
}
}
/* Restore the pre-transition state and wake any waiters. */
mutex_enter(SD_MUTEX(un));
un->un_state = state_before_pm;
cv_broadcast(&un->un_suspend_cv);
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);
sd_ssc_fini(ssc);
return (rval);
sdpower_failed:
sd_ssc_fini(ssc);
return (DDI_FAILURE);
}
/*
 * attach(9E) entry point for the sd driver.
 *
 * DDI_ATTACH is delegated to sd_unit_attach() and DDI_RESUME to
 * sd_ddi_resume(); any other command fails.
 */
static int
sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	if (cmd == DDI_ATTACH)
		return (sd_unit_attach(devi));
	if (cmd == DDI_RESUME)
		return (sd_ddi_resume(devi));
	return (DDI_FAILURE);
}
/*
 * detach(9E) entry point for the sd driver.
 *
 * DDI_DETACH is delegated to sd_unit_detach() and DDI_SUSPEND to
 * sd_ddi_suspend(); any other command fails.
 */
static int
sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	if (cmd == DDI_DETACH)
		return (sd_unit_detach(devi));
	if (cmd == DDI_SUSPEND)
		return (sd_ddi_suspend(devi));
	return (DDI_FAILURE);
}
/*
 * Block until no command-completion callback is executing for this
 * unit.  The callback count is sampled under SD_MUTEX; while it is
 * nonzero the mutex is dropped and the thread sleeps briefly so the
 * callback can run to completion.
 */
static void
sd_sync_with_callback(struct sd_lun *un)
{
	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));
	ASSERT(un->un_in_callback >= 0);

	for (;;) {
		if (un->un_in_callback == 0)
			break;
		/* Give the in-flight callback a chance to finish. */
		mutex_exit(SD_MUTEX(un));
		delay(2);
		mutex_enter(SD_MUTEX(un));
	}

	mutex_exit(SD_MUTEX(un));
}
static int
sd_unit_attach(dev_info_t *devi)
{
struct scsi_device *devp;
struct sd_lun *un;
char *variantp;
char name_str[48];
int reservation_flag = SD_TARGET_IS_UNRESERVED;
int instance;
int rval;
int wc_enabled;
int wc_changeable;
int tgt;
uint64_t capacity;
uint_t lbasize = 0;
dev_info_t *pdip = ddi_get_parent(devi);
int offbyone = 0;
int geom_label_valid = 0;
sd_ssc_t *ssc;
int status;
struct sd_fm_internal *sfip = NULL;
int max_xfer_size;
devp = ddi_get_driver_private(devi);
tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
SCSI_ADDR_PROP_TARGET, -1);
(void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1);
(void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
(void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1);
if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
(void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1);
}
if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
goto probe_failed;
}
switch (devp->sd_inq->inq_dtype) {
case DTYPE_DIRECT:
break;
case DTYPE_RODIRECT:
break;
case DTYPE_OPTICAL:
break;
case DTYPE_NOTPRESENT:
default:
goto probe_failed;
}
instance = ddi_get_instance(devp->sd_dev);
if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) {
goto probe_failed;
}
if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
panic("sd_unit_attach: NULL soft state on instance:0x%x",
instance);
}
un->un_sd = devp;
devp->sd_private = (opaque_t)un;
#ifdef SDDEBUG
SD_TRACE(SD_LOG_ATTACH_DETACH, un,
"%s_unit_attach: un:0x%p instance:%d\n",
ddi_driver_name(devi), un, instance);
#endif
switch (devp->sd_inq->inq_dtype) {
case DTYPE_RODIRECT:
un->un_node_type = DDI_NT_CD_CHAN;
un->un_ctype = CTYPE_CDROM;
break;
case DTYPE_OPTICAL:
un->un_node_type = DDI_NT_BLOCK_CHAN;
un->un_ctype = CTYPE_ROD;
break;
default:
un->un_node_type = DDI_NT_BLOCK_CHAN;
un->un_ctype = CTYPE_CCS;
break;
}
un->un_f_is_fibre = TRUE;
switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
case INTERCONNECT_SSA:
un->un_interconnect_type = SD_INTERCONNECT_SSA;
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
break;
case INTERCONNECT_PARALLEL:
un->un_f_is_fibre = FALSE;
un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
break;
case INTERCONNECT_SAS:
un->un_f_is_fibre = FALSE;
un->un_interconnect_type = SD_INTERCONNECT_SAS;
un->un_node_type = DDI_NT_BLOCK_SAS;
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un);
break;
case INTERCONNECT_SATA:
un->un_f_is_fibre = FALSE;
un->un_interconnect_type = SD_INTERCONNECT_SATA;
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
break;
case INTERCONNECT_FIBRE:
un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
break;
case INTERCONNECT_FABRIC:
un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
un->un_node_type = DDI_NT_BLOCK_FABRIC;
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
break;
default:
un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
if (!SD_IS_PARALLEL_SCSI(un)) {
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p Assuming "
"INTERCONNECT_FIBRE\n", un);
} else {
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p Assuming "
"INTERCONNECT_PARALLEL\n", un);
un->un_f_is_fibre = FALSE;
}
break;
}
if (un->un_f_is_fibre == TRUE) {
if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
SCSI_VERSION_3) {
switch (un->un_interconnect_type) {
case SD_INTERCONNECT_FIBRE:
case SD_INTERCONNECT_SSA:
un->un_node_type = DDI_NT_BLOCK_WWN;
break;
default:
break;
}
}
}
if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
goto alloc_rqs_failed;
}
un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
un->un_notready_retry_count =
ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
: DISK_NOT_READY_RETRY_COUNT(un);
un->un_busy_retry_count = un->un_retry_count;
un->un_reset_retry_count = (un->un_retry_count / 2);
un->un_victim_retry_count = (2 * un->un_retry_count);
un->un_reserve_release_time = 5;
un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
un->un_partial_dma_supported = 1;
if (un->un_f_is_fibre == TRUE) {
un->un_f_allow_bus_device_reset = TRUE;
} else {
if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
"allow-bus-device-reset", 1) != 0) {
un->un_f_allow_bus_device_reset = TRUE;
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p Bus device reset "
"enabled\n", un);
} else {
un->un_f_allow_bus_device_reset = FALSE;
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p Bus device reset "
"disabled\n", un);
}
}
if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) {
un->un_f_cfg_is_atapi = TRUE;
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p Atapi device\n", un);
}
if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant",
&variantp) == DDI_PROP_SUCCESS) {
if (strcmp(variantp, "atapi") == 0) {
un->un_f_cfg_is_atapi = TRUE;
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p Atapi device\n", un);
}
ddi_prop_free(variantp);
}
un->un_cmd_timeout = SD_IO_TIME;
un->un_busy_timeout = SD_BSY_TIMEOUT;
un->un_state = SD_STATE_NORMAL;
un->un_last_state = SD_STATE_NORMAL;
un->un_throttle = sd_max_throttle;
un->un_saved_throttle = sd_max_throttle;
un->un_min_throttle = sd_min_throttle;
if (un->un_f_is_fibre == TRUE) {
un->un_f_use_adaptive_throttle = TRUE;
} else {
un->un_f_use_adaptive_throttle = FALSE;
}
cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
un->un_mediastate = DKIO_NONE;
un->un_specified_mediastate = DKIO_NONE;
cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);
cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);
un->un_power_level = SD_SPINDLE_UNINIT;
cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL);
un->un_f_wcc_inprog = 0;
un->un_f_disksort_disabled = FALSE;
un->un_f_rmw_type = SD_RMW_TYPE_DEFAULT;
un->un_f_enable_rmw = FALSE;
un->un_f_mmc_gesn_polling = TRUE;
un->un_phy_blocksize = DEV_BSIZE;
sd_read_unit_properties(un);
SD_TRACE(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p property configuration complete.\n", un);
if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
-1) != -1) {
un->un_f_is_hotpluggable = TRUE;
}
sd_set_unit_attributes(un, devi);
un->un_f_blockcount_is_valid = FALSE;
un->un_f_tgt_blocksize_is_valid = FALSE;
un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
un->un_blockcount = 0;
sd_init_cdb_limits(un);
if (un->un_f_non_devbsize_supported) {
un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
} else {
un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
}
un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;
un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);
if (ISCD(un)) {
un->un_additional_codes = sd_additional_codes;
} else {
un->un_additional_codes = NULL;
}
un->un_stats = kstat_create(sd_label, instance,
NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
if (un->un_stats != NULL) {
un->un_stats->ks_lock = SD_MUTEX(un);
kstat_install(un->un_stats);
}
SD_TRACE(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p un_stats created\n", un);
un->un_unmapstats_ks = kstat_create(sd_label, instance, "unmapstats",
"misc", KSTAT_TYPE_NAMED, sizeof (*un->un_unmapstats) /
sizeof (kstat_named_t), 0);
if (un->un_unmapstats_ks) {
un->un_unmapstats = un->un_unmapstats_ks->ks_data;
kstat_named_init(&un->un_unmapstats->us_cmds,
"commands", KSTAT_DATA_UINT64);
kstat_named_init(&un->un_unmapstats->us_errs,
"errors", KSTAT_DATA_UINT64);
kstat_named_init(&un->un_unmapstats->us_extents,
"extents", KSTAT_DATA_UINT64);
kstat_named_init(&un->un_unmapstats->us_bytes,
"bytes", KSTAT_DATA_UINT64);
kstat_install(un->un_unmapstats_ks);
} else {
cmn_err(CE_NOTE, "!Cannot create unmap kstats for disk %d",
instance);
}
sd_create_errstats(un, instance);
if (un->un_errstats == NULL) {
goto create_errstats_failed;
}
SD_TRACE(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p errstats created\n", un);
if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) {
int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) ||
(devp->sd_inq->inq_ansi == 5)) &&
devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque;
un->un_tagflags = 0;
if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag &&
(un->un_f_arq_enabled == TRUE)) {
if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing",
1, 1) == 1) {
un->un_tagflags = FLAG_STAG;
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p tag queueing "
"enabled\n", un);
} else if (scsi_ifgetcap(SD_ADDRESS(un),
"untagged-qing", 0) == 1) {
un->un_f_opt_queueing = TRUE;
un->un_saved_throttle = un->un_throttle =
min(un->un_throttle, 3);
} else {
un->un_f_opt_queueing = FALSE;
un->un_saved_throttle = un->un_throttle = 1;
}
} else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0)
== 1) && (un->un_f_arq_enabled == TRUE)) {
un->un_f_opt_queueing = TRUE;
un->un_saved_throttle = un->un_throttle =
min(un->un_throttle, 3);
} else {
un->un_f_opt_queueing = FALSE;
un->un_saved_throttle = un->un_throttle = 1;
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p no tag queueing\n", un);
}
if (SD_IS_SERIAL(un)) {
un->un_max_xfer_size =
ddi_getprop(DDI_DEV_T_ANY, devi, 0,
sd_max_xfer_size, SD_MAX_XFER_SIZE);
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p max transfer "
"size=0x%x\n", un, un->un_max_xfer_size);
}
if (SD_IS_PARALLEL_SCSI(un) &&
(devp->sd_inq->inq_rdf == RDF_SCSI2) &&
(devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) {
if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
1, 1) == 1) {
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p Wide Transfer "
"enabled\n", un);
}
if (un->un_saved_throttle == sd_max_throttle) {
un->un_max_xfer_size =
ddi_getprop(DDI_DEV_T_ANY, devi, 0,
sd_max_xfer_size, SD_MAX_XFER_SIZE);
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p max transfer "
"size=0x%x\n", un, un->un_max_xfer_size);
}
} else {
if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
0, 1) == 1) {
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p "
"Wide Transfer disabled\n", un);
}
}
} else {
un->un_tagflags = FLAG_STAG;
un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY,
devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE);
}
if (un->un_f_lun_reset_enabled) {
if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) {
SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
"un:0x%p lun_reset capability set\n", un);
} else {
SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
"un:0x%p lun-reset capability not set\n", un);
}
}
max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1);
if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) {
un->un_max_xfer_size = max_xfer_size;
if (un->un_partial_dma_supported == 0)
un->un_partial_dma_supported = 1;
}
if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
DDI_PROP_DONTPASS, "buf_break", 0) == 1) {
if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr,
un->un_max_xfer_size) == 1) {
un->un_buf_breakup_supported = 1;
SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
"un:0x%p Buf breakup enabled\n", un);
}
}
if (un->un_partial_dma_supported == 1) {
un->un_pkt_flags = PKT_DMA_PARTIAL;
} else {
un->un_pkt_flags = 0;
}
ssc = sd_ssc_init(un);
scsi_fm_init(devp);
un->un_fm_private =
kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP);
sfip = (struct sd_fm_internal *)un->un_fm_private;
sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd;
sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo;
sfip->fm_ssc.ssc_un = un;
if (ISCD(un) ||
un->un_f_has_removable_media ||
devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) {
sfip->fm_log_level = SD_FM_LOG_NSUP;
} else {
int fm_scsi_log;
fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0);
if (fm_scsi_log)
sfip->fm_log_level = SD_FM_LOG_EREPORT;
else
sfip->fm_log_level = SD_FM_LOG_SILENT;
}
status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
if (status != 0) {
if (status == EACCES)
reservation_flag = SD_TARGET_IS_RESERVED;
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
}
if (un->un_f_descr_format_supported) {
switch (sd_spin_up_unit(ssc)) {
case 0:
SD_TRACE(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p spin-up successful\n", un);
status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
&lbasize, SD_PATH_DIRECT);
switch (status) {
case 0: {
if (capacity > DK_MAX_BLOCKS) {
if ((capacity + 1) >
SD_GROUP1_MAX_ADDRESS) {
sd_enable_descr_sense(ssc);
}
}
sd_update_block_info(un, lbasize, capacity);
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p capacity = %ld "
"blocks; lbasize= %ld.\n", un,
un->un_blockcount, un->un_tgt_blocksize);
break;
}
case EINVAL:
scsi_log(SD_DEVINFO(un),
sd_label, CE_WARN,
"disk capacity is too large "
"for current cdb length");
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
goto spinup_failed;
case EACCES:
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p "
"sd_send_scsi_READ_CAPACITY "
"returned reservation conflict\n", un);
reservation_flag = SD_TARGET_IS_RESERVED;
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
break;
default:
if (status == EIO)
sd_ssc_assessment(ssc,
SD_FMT_STATUS_CHECK);
else
sd_ssc_assessment(ssc,
SD_FMT_IGNORE);
break;
}
break;
case EACCES:
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p spin-up reservation "
"conflict.\n", un);
reservation_flag = SD_TARGET_IS_RESERVED;
break;
default:
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p spin-up failed.", un);
goto spinup_failed;
}
}
if (ISCD(un)) {
sd_set_mmc_caps(ssc);
}
(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
DDI_KERNEL_IOCTL, NULL, 0);
(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
"ddi-failfast-supported", NULL, 0);
mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
sd_setup_pm(ssc, devi);
if (un->un_f_pm_is_enabled == FALSE) {
if (un->un_f_non_devbsize_supported) {
un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;
} else {
un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM;
}
un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
}
sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
sd_retry_on_reservation_conflict);
if (sd_retry_on_reservation_conflict != 0) {
sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
sd_retry_on_reservation_conflict);
}
if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
"qfull-retries", -1)) != -1) {
(void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
rval, 1);
}
if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
"qfull-retry-interval", -1)) != -1) {
(void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval",
rval, 1);
}
ddi_report_dev(devi);
un->un_mediastate = DKIO_NONE;
sd_check_bdc_vpd(ssc);
sd_check_emulation_mode(ssc);
cmlb_alloc_handle(&un->un_cmlbhandle);
#if defined(__x86)
if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable &&
(lbasize == un->un_sys_blocksize))
offbyone = CMLB_OFF_BY_ONE;
#endif
if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype,
VOID2BOOLEAN(un->un_f_has_removable_media != 0),
VOID2BOOLEAN(un->un_f_is_hotpluggable != 0),
un->un_node_type, offbyone, un->un_cmlbhandle,
(void *)SD_PATH_DIRECT) != 0) {
goto cmlb_attach_failed;
}
geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0,
(void *)SD_PATH_DIRECT) == 0) ? 1: 0;
mutex_enter(SD_MUTEX(un));
if (un->un_f_devid_supported) {
sd_register_devid(ssc, devi, reservation_flag);
}
mutex_exit(SD_MUTEX(un));
if (un->un_f_opt_disable_cache == TRUE) {
if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) !=
0) {
SD_ERROR(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p Could not disable "
"caching", un);
goto devid_failed;
}
}
(void) sd_get_write_cache_enabled(ssc, &wc_enabled);
sd_get_write_cache_changeable(ssc, &wc_changeable);
mutex_enter(SD_MUTEX(un));
un->un_f_write_cache_enabled = (wc_enabled != 0);
un->un_f_cache_mode_changeable = (wc_changeable != 0);
mutex_exit(SD_MUTEX(un));
if ((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR &&
un->un_tgt_blocksize != DEV_BSIZE) ||
un->un_f_enable_rmw) {
if (!(un->un_wm_cache)) {
(void) snprintf(name_str, sizeof (name_str),
"%s%d_cache",
ddi_driver_name(SD_DEVINFO(un)),
ddi_get_instance(SD_DEVINFO(un)));
un->un_wm_cache = kmem_cache_create(
name_str, sizeof (struct sd_w_map),
8, sd_wm_cache_constructor,
sd_wm_cache_destructor, NULL,
(void *)un, NULL, 0);
if (!(un->un_wm_cache)) {
goto wm_cache_failed;
}
}
}
sd_get_nv_sup(ssc);
status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL);
switch (status) {
case 0:
un->un_reservation_type = SD_SCSI3_RESERVATION;
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p SCSI-3 reservations\n", un);
break;
case ENOTSUP:
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p SCSI-2 reservations\n", un);
un->un_reservation_type = SD_SCSI2_RESERVATION;
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
break;
default:
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p default SCSI3 reservations\n", un);
un->un_reservation_type = SD_SCSI3_RESERVATION;
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
break;
}
if (un->un_f_pkstats_enabled && geom_label_valid) {
sd_set_pstats(un);
SD_TRACE(SD_LOG_IO_PARTITION, un,
"sd_unit_attach: un:0x%p pstats created and set\n", un);
}
sd_set_errstats(un);
SD_TRACE(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p errstats set\n", un);
sd_setup_blk_limits(ssc);
if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) {
sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH);
}
SD_TRACE(SD_LOG_ATTACH_DETACH, un,
"sd_unit_attach: un:0x%p exit success\n", un);
sd_ssc_fini(ssc);
return (DDI_SUCCESS);
wm_cache_failed:
devid_failed:
ddi_remove_minor_node(devi, NULL);
cmlb_attach_failed:
(void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
(void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
}
if (un->un_f_is_fibre == FALSE) {
(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
}
spinup_failed:
sd_ssc_fini(ssc);
mutex_enter(SD_MUTEX(un));
kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));
if (un->un_direct_priority_timeid != NULL) {
timeout_id_t temp_id = un->un_direct_priority_timeid;
un->un_direct_priority_timeid = NULL;
mutex_exit(SD_MUTEX(un));
(void) untimeout(temp_id);
mutex_enter(SD_MUTEX(un));
}
if (un->un_startstop_timeid != NULL) {
timeout_id_t temp_id = un->un_startstop_timeid;
un->un_startstop_timeid = NULL;
mutex_exit(SD_MUTEX(un));
(void) untimeout(temp_id);
mutex_enter(SD_MUTEX(un));
}
if (un->un_reset_throttle_timeid != NULL) {
timeout_id_t temp_id = un->un_reset_throttle_timeid;
un->un_reset_throttle_timeid = NULL;
mutex_exit(SD_MUTEX(un));
(void) untimeout(temp_id);
mutex_enter(SD_MUTEX(un));
}
if (un->un_rmw_msg_timeid != NULL) {
timeout_id_t temp_id = un->un_rmw_msg_timeid;
un->un_rmw_msg_timeid = NULL;
mutex_exit(SD_MUTEX(un));
(void) untimeout(temp_id);
mutex_enter(SD_MUTEX(un));
}
if (un->un_retry_timeid != NULL) {
timeout_id_t temp_id = un->un_retry_timeid;
un->un_retry_timeid = NULL;
mutex_exit(SD_MUTEX(un));
(void) untimeout(temp_id);
mutex_enter(SD_MUTEX(un));
}
if (un->un_dcvb_timeid != NULL) {
timeout_id_t temp_id = un->un_dcvb_timeid;
un->un_dcvb_timeid = NULL;
mutex_exit(SD_MUTEX(un));
(void) untimeout(temp_id);
mutex_enter(SD_MUTEX(un));
}
mutex_exit(SD_MUTEX(un));
ASSERT(un->un_ncmds_in_transport == 0);
ASSERT(un->un_ncmds_in_driver == 0);
sd_sync_with_callback(un);
if (un->un_errstats != NULL) {
kstat_delete(un->un_errstats);
un->un_errstats = NULL;
}
create_errstats_failed:
if (un->un_stats != NULL) {
kstat_delete(un->un_stats);
un->un_stats = NULL;
}
ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
ddi_xbuf_attr_destroy(un->un_xbuf_attr);
ddi_prop_remove_all(devi);
cv_destroy(&un->un_state_cv);
sd_free_rqs(un);
alloc_rqs_failed:
devp->sd_private = NULL;
bzero(un, sizeof (struct sd_lun));
#ifndef XPV_HVM_DRIVER
ddi_soft_state_free(sd_state, instance);
#endif
probe_failed:
scsi_unprobe(devp);
return (DDI_FAILURE);
}
/*
 * Function:	sd_unit_detach
 *
 * Description: Performs DDI_DETACH processing for sddetach(). Refuses the
 *		detach if the unit still has commands in the driver or any
 *		layered opens outstanding, then tears down everything
 *		sd_unit_attach() set up: reservations, pending timeouts,
 *		scsi_watch requests, PM state, transport capabilities,
 *		cmlb/FM/devid/kstat/xbuf resources, and finally the soft
 *		state itself.
 *
 *   Arguments: devi - the device's dev_info structure
 *
 * Return Code: DDI_SUCCESS or DDI_FAILURE
 *
 *     Context: Kernel thread context
 *
 * Note on the repeated untimeout() pattern below: untimeout() may block on
 * a handler that is already running and that may need SD_MUTEX, so each
 * timeout id is snapshotted and cleared under the mutex, then the mutex is
 * dropped around the untimeout() call to avoid deadlock.
 */
static int
sd_unit_detach(dev_info_t *devi)
{
	struct scsi_device	*devp;
	struct sd_lun		*un;
	int			i;
	int			tgt;
	dev_t			dev;
	dev_info_t		*pdip = ddi_get_parent(devi);
	int			instance = ddi_get_instance(devi);

	/*
	 * Fail the detach if there is no soft state or if any commands are
	 * still being processed by the driver on this unit.
	 */
	devp = ddi_get_driver_private(devi);
	if ((devp == NULL) ||
	    ((un = (struct sd_lun *)devp->sd_private) == NULL) ||
	    (un->un_ncmds_in_driver != 0)) {
		return (DDI_FAILURE);
	}

	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un);

	/* Target id is needed to update the per-target LUN count at the end. */
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    SCSI_ADDR_PROP_TARGET, -1);

	dev = sd_make_device(SD_DEVINFO(un));

#ifndef lint
	_NOTE(COMPETING_THREADS_NOW);
#endif

	mutex_enter(SD_MUTEX(un));

	/* Any layered (OTYP_LYR) opens still outstanding block the detach. */
	for (i = 0; i < NDKMAP; i++) {
		if (un->un_ocmap.lyropen[i] != 0) {
			goto err_notclosed;
		}
	}

	/*
	 * Likewise fail if commands are in the transport, retries or
	 * direct-priority restarts are pending, or the unit is waiting for
	 * transport resources.
	 */
	if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) ||
	    (un->un_direct_priority_timeid != NULL) ||
	    (un->un_state == SD_STATE_RWAIT)) {
		mutex_exit(SD_MUTEX(un));
		SD_ERROR(SD_LOG_ATTACH_DETACH, un,
		    "sd_dr_detach: Detach failure due to outstanding cmds\n");
		goto err_stillbusy;
	}

	/*
	 * If we hold a (non-lost) SCSI-2 reservation, release it before
	 * tearing down; failure to release is logged but not fatal.
	 */
	if ((un->un_resvd_status & SD_RESERVE) &&
	    !(un->un_resvd_status & SD_LOST_RESERVE)) {
		mutex_exit(SD_MUTEX(un));
		if (sd_reserve_release(dev, SD_RELEASE) != 0) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot release reservation \n");
		}
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/* Cancel each pending timeout using the snapshot/drop-mutex pattern. */
	mutex_enter(SD_MUTEX(un));

	if (un->un_resvd_timeid != NULL) {
		timeout_id_t temp_id = un->un_resvd_timeid;
		un->un_resvd_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_reset_throttle_timeid != NULL) {
		timeout_id_t temp_id = un->un_reset_throttle_timeid;
		un->un_reset_throttle_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_startstop_timeid != NULL) {
		timeout_id_t temp_id = un->un_startstop_timeid;
		un->un_startstop_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_rmw_msg_timeid != NULL) {
		timeout_id_t temp_id = un->un_rmw_msg_timeid;
		un->un_rmw_msg_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	if (un->un_dcvb_timeid != NULL) {
		timeout_id_t temp_id = un->un_dcvb_timeid;
		un->un_dcvb_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/* Remove any pending reservation-reclaim requests for this device. */
	sd_rmv_resv_reclaim_req(dev);

	mutex_enter(SD_MUTEX(un));

	/* Cancel any pending direct-priority retry (e.g. START UNIT retry). */
	if (un->un_direct_priority_timeid != NULL) {
		timeout_id_t temp_id = un->un_direct_priority_timeid;
		un->un_direct_priority_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);
		mutex_enter(SD_MUTEX(un));
	}

	/*
	 * Terminate the MHD watch thread, if active. NOWAIT termination can
	 * fail if a callback is in flight, in which case the detach fails.
	 */
	if (un->un_mhd_token != NULL) {
		mutex_exit(SD_MUTEX(un));
		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token));
		if (scsi_watch_request_terminate(un->un_mhd_token,
		    SCSI_WATCH_TERMINATE_NOWAIT)) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel mhd watch request\n");
			goto err_stillbusy;
		}
		mutex_enter(SD_MUTEX(un));
		un->un_mhd_token = NULL;
	}

	/* Same for the state-watch (removable media) request. */
	if (un->un_swr_token != NULL) {
		mutex_exit(SD_MUTEX(un));
		_NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token));
		if (scsi_watch_request_terminate(un->un_swr_token,
		    SCSI_WATCH_TERMINATE_NOWAIT)) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel swr watch request\n");
			goto err_stillbusy;
		}
		mutex_enter(SD_MUTEX(un));
		un->un_swr_token = NULL;
	}

	mutex_exit(SD_MUTEX(un));

	/* Cancel the reset-notification callback registered for MHD. */
	(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
	    sd_mhd_reset_notify_cb, (caddr_t)un);

	/* From here on only this thread touches the unit. */
	_NOTE(NO_COMPETING_THREADS_NOW);

	/*
	 * Cancel PM timeouts and balance the pm_busy_component() counts they
	 * represent; if neither was pending, lower the device power instead.
	 */
	mutex_enter(&un->un_pm_mutex);
	if (un->un_pm_idle_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_idle_timeid;
		un->un_pm_idle_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);
		(void) untimeout(temp_id);
		(void) pm_idle_component(SD_DEVINFO(un), 0);
		mutex_enter(&un->un_pm_mutex);
	}

	if (un->un_pm_timeid != NULL) {
		timeout_id_t temp_id = un->un_pm_timeid;
		un->un_pm_timeid = NULL;
		mutex_exit(&un->un_pm_mutex);
		(void) untimeout(temp_id);
		(void) pm_idle_component(SD_DEVINFO(un), 0);
	} else {
		mutex_exit(&un->un_pm_mutex);
		if ((un->un_f_pm_is_enabled == TRUE) &&
		    (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un))
		    != DDI_SUCCESS)) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Lower power request failed, ignoring.\n");
			/*
			 * The power-down may have scheduled a fresh pm
			 * timeout; re-check and cancel it as well.
			 */
			mutex_enter(&un->un_pm_mutex);
			if (un->un_pm_timeid != NULL) {
				timeout_id_t temp_id = un->un_pm_timeid;
				un->un_pm_timeid = NULL;
				mutex_exit(&un->un_pm_mutex);
				(void) untimeout(temp_id);
				(void) pm_idle_component(SD_DEVINFO(un), 0);
			} else {
				mutex_exit(&un->un_pm_mutex);
			}
		}
	}

	/*
	 * Undo the transport capabilities sd_unit_attach() enabled.
	 * tagged-qing is per-target, so only disable it when this is the
	 * last LUN on the target (our own LUN is still counted here).
	 */
	(void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
	(void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
	if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
	}
	if (un->un_f_is_fibre == FALSE) {
		(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
	}

	/* Fibre channel: remove insert/remove event handlers, if registered. */
	if (un->un_f_is_fibre == TRUE) {
		if ((un->un_insert_event != NULL) &&
		    (ddi_remove_event_handler(un->un_insert_cb_id) !=
		    DDI_SUCCESS)) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel insert event\n");
			goto err_remove_event;
		}
		un->un_insert_event = NULL;

		if ((un->un_remove_event != NULL) &&
		    (ddi_remove_event_handler(un->un_remove_cb_id) !=
		    DDI_SUCCESS)) {
			SD_ERROR(SD_LOG_ATTACH_DETACH, un,
			    "sd_dr_detach: Cannot cancel remove event\n");
			goto err_remove_event;
		}
		un->un_remove_event = NULL;
	}

	/* Wait for any outstanding scsi_transport() callbacks to drain. */
	sd_sync_with_callback(un);

	/* Release cmlb, FM, devid, cache, kstat and xbuf resources. */
	cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
	cmlb_free_handle(&un->un_cmlbhandle);

	scsi_fm_fini(devp);
	kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));

	if (un->un_f_devid_transport_defined == FALSE)
		ddi_devid_unregister(devi);
	if (un->un_devid) {
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	if (un->un_wm_cache != NULL) {
		kmem_cache_destroy(un->un_wm_cache);
		un->un_wm_cache = NULL;
	}

	if (un->un_stats != NULL) {
		kstat_delete(un->un_stats);
		un->un_stats = NULL;
	}
	if (un->un_unmapstats != NULL) {
		kstat_delete(un->un_unmapstats_ks);
		un->un_unmapstats_ks = NULL;
		un->un_unmapstats = NULL;
	}
	if (un->un_errstats != NULL) {
		kstat_delete(un->un_errstats);
		un->un_errstats = NULL;
	}

	if (un->un_f_pkstats_enabled) {
		for (i = 0; i < NSDMAP; i++) {
			if (un->un_pstats[i] != NULL) {
				kstat_delete(un->un_pstats[i]);
				un->un_pstats[i] = NULL;
			}
		}
	}

	ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
	ddi_xbuf_attr_destroy(un->un_xbuf_attr);

	ddi_prop_remove_all(devi);

	/* Destroy synchronization primitives and free the request sense pkt. */
	mutex_destroy(&un->un_pm_mutex);
	cv_destroy(&un->un_pm_busy_cv);
	cv_destroy(&un->un_wcc_cv);
	cv_destroy(&un->un_state_cv);
	cv_destroy(&un->un_suspend_cv);
	cv_destroy(&un->un_disk_busy_cv);

	sd_free_rqs(un);

	/* Finally drop the soft state and unprobe the device. */
	devp->sd_private = NULL;
	bzero(un, sizeof (struct sd_lun));

	ddi_soft_state_free(sd_state, instance);
	scsi_unprobe(devp);

	/* Now that this LUN is gone, update the target's LUN count. */
	if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) {
		sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH);
	}

	return (DDI_SUCCESS);

err_notclosed:
	mutex_exit(SD_MUTEX(un));

err_stillbusy:
	_NOTE(NO_COMPETING_THREADS_NOW);

err_remove_event:
	SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n");
	return (DDI_FAILURE);
}
static void
sd_create_errstats(struct sd_lun *un, int instance)
{
struct sd_errstats *stp;
char kstatmodule_err[KSTAT_STRLEN];
char kstatname[KSTAT_STRLEN];
int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t));
ASSERT(un != NULL);
if (un->un_errstats != NULL) {
return;
}
(void) snprintf(kstatmodule_err, sizeof (kstatmodule_err),
"%serr", sd_label);
(void) snprintf(kstatname, sizeof (kstatname),
"%s%d,err", sd_label, instance);
un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname,
"device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT);
if (un->un_errstats == NULL) {
SD_ERROR(SD_LOG_ATTACH_DETACH, un,
"sd_create_errstats: Failed kstat_create\n");
return;
}
stp = (struct sd_errstats *)un->un_errstats->ks_data;
kstat_named_init(&stp->sd_softerrs, "Soft Errors",
KSTAT_DATA_UINT32);
kstat_named_init(&stp->sd_harderrs, "Hard Errors",
KSTAT_DATA_UINT32);
kstat_named_init(&stp->sd_transerrs, "Transport Errors",
KSTAT_DATA_UINT32);
kstat_named_init(&stp->sd_vid, "Vendor",
KSTAT_DATA_CHAR);
kstat_named_init(&stp->sd_pid, "Product",
KSTAT_DATA_CHAR);
kstat_named_init(&stp->sd_revision, "Revision",
KSTAT_DATA_CHAR);
kstat_named_init(&stp->sd_serial, "Serial No",
KSTAT_DATA_CHAR);
kstat_named_init(&stp->sd_capacity, "Size",
KSTAT_DATA_ULONGLONG);
kstat_named_init(&stp->sd_rq_media_err, "Media Error",
KSTAT_DATA_UINT32);
kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready",
KSTAT_DATA_UINT32);
kstat_named_init(&stp->sd_rq_nodev_err, "No Device",
KSTAT_DATA_UINT32);
kstat_named_init(&stp->sd_rq_recov_err, "Recoverable",
KSTAT_DATA_UINT32);
kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request",
KSTAT_DATA_UINT32);
kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis",
KSTAT_DATA_UINT32);
un->un_errstats->ks_private = un;
un->un_errstats->ks_update = nulldev;
kstat_install(un->un_errstats);
}
/*
 * Function:	sd_set_errstats
 *
 * Description: Fills in the static values of the unit's error kstat
 *		(vendor, product, revision, serial number, capacity) from
 *		the INQUIRY data, and resets the error counters when the
 *		kstat data was persisted from a different physical device.
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 *     Context: Kernel thread context
 */
static void
sd_set_errstats(struct sd_lun *un)
{
	struct	sd_errstats	*stp;
	char			*sn;

	ASSERT(un != NULL);
	ASSERT(un->un_errstats != NULL);
	stp = (struct sd_errstats *)un->un_errstats->ks_data;
	ASSERT(stp != NULL);

	/*
	 * INQUIRY fields are space-padded, fixed-width and not
	 * NUL-terminated; the kstat char values are fixed-width buffers,
	 * so bounded strncpy of the exact field widths is intentional here.
	 */
	(void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
	(void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
	(void) strncpy(stp->sd_revision.value.c,
	    un->un_sd->sd_inq->inq_revision, 4);

	/*
	 * The kstat may be KSTAT_FLAG_PERSISTENT data from a previously
	 * attached device. If this device does not look like the same one
	 * (not a SUN-branded product, or its serial differs from the saved
	 * serial), clear the accumulated error counters. This comparison
	 * must happen BEFORE the serial number is (re)copied below.
	 * NOTE(review): inq_pid[9] appears to index the "SUN" marker within
	 * the product id for Sun-qualified drives -- confirm against the
	 * firmware labeling convention.
	 */
	if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
	    (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
	    sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
		stp->sd_softerrs.value.ui32 = 0;
		stp->sd_harderrs.value.ui32 = 0;
		stp->sd_transerrs.value.ui32 = 0;
		stp->sd_rq_media_err.value.ui32 = 0;
		stp->sd_rq_ntrdy_err.value.ui32 = 0;
		stp->sd_rq_nodev_err.value.ui32 = 0;
		stp->sd_rq_recov_err.value.ui32 = 0;
		stp->sd_rq_illrq_err.value.ui32 = 0;
		stp->sd_rq_pfa_err.value.ui32 = 0;
	}

	/*
	 * Record the serial number: from the INQUIRY data for SUN-branded
	 * devices, otherwise from the devinfo "inquiry-serial-no" property
	 * (if present).
	 */
	if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
		bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
		    sizeof (SD_INQUIRY(un)->inq_serial));
	} else {
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, SD_DEVINFO(un),
		    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
		    INQUIRY_SERIAL_NO, &sn) == DDI_SUCCESS) {
			(void) strlcpy(stp->sd_serial.value.c, sn,
			    sizeof (stp->sd_serial.value.c));
			ddi_prop_free(sn);
		}
	}

	/* Capacity in bytes, or 0 when the block count is not yet known. */
	if (un->un_f_blockcount_is_valid != TRUE) {
		stp->sd_capacity.value.ui64 = 0;
	} else {
		stp->sd_capacity.value.ui64 = (uint64_t)
		    ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
	}
}
/*
 * Function:	sd_set_pstats
 *
 * Description: Creates and installs the per-partition I/O kstats
 *		("<label><instance>,<partname>") for every partition that
 *		has a non-zero size and no kstat yet.
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 *     Context: Kernel thread context
 */
static void
sd_set_pstats(struct sd_lun *un)
{
	char		ksname[KSTAT_STRLEN];
	int		inst;
	int		part;
	diskaddr_t	part_nblks = 0;
	char		*part_name = NULL;

	ASSERT(un != NULL);

	inst = ddi_get_instance(SD_DEVINFO(un));

	for (part = 0; part < NSDMAP; part++) {
		/* Skip partitions cmlb cannot describe. */
		if (cmlb_partinfo(un->un_cmlbhandle, part,
		    &part_nblks, NULL, &part_name, NULL,
		    (void *)SD_PATH_DIRECT) != 0)
			continue;

		mutex_enter(SD_MUTEX(un));

		/* Skip empty partitions and ones already instrumented. */
		if (un->un_pstats[part] != NULL || part_nblks == 0) {
			mutex_exit(SD_MUTEX(un));
			continue;
		}

		(void) snprintf(ksname, sizeof (ksname), "%s%d,%s",
		    sd_label, inst, part_name);

		un->un_pstats[part] = kstat_create(sd_label, inst, ksname,
		    "partition", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
		if (un->un_pstats[part] != NULL) {
			/* Serialize kstat updates with the unit mutex. */
			un->un_pstats[part]->ks_lock = SD_MUTEX(un);
			kstat_install(un->un_pstats[part]);
		}

		mutex_exit(SD_MUTEX(un));
	}
}
/*
 * Helpers for the caching mode page (MODE SENSE/SELECT) routines below.
 * ATAPI devices require the group 1 (10-byte) CDBs and the group 2 mode
 * parameter header; all other devices use group 0 CDBs and the legacy
 * mode header. SDC_BUFLEN sizes a buffer large enough for the header, one
 * block descriptor, and the SCSI-3 caching mode page.
 */
#define	SDC_CDB_GROUP(un) ((un->un_f_cfg_is_atapi == TRUE) ? \
	CDB_GROUP1 : CDB_GROUP0)
#define	SDC_HDRLEN(un) ((un->un_f_cfg_is_atapi == TRUE) ? \
	MODE_HEADER_LENGTH_GRP2 : MODE_HEADER_LENGTH)
#define	SDC_BUFLEN(un) (SDC_HDRLEN(un) + MODE_BLK_DESC_LENGTH + \
	sizeof (struct mode_cache_scsi3))
/*
 * Function:	sd_get_caching_mode_page
 *
 * Description: Issues a MODE SENSE for the caching mode page (0x08) and
 *		validates the returned data.
 *
 *   Arguments: ssc		- ssc contains pointer to driver soft state
 *		page_control	- page control field of the CDB
 *				  (MODEPAGE_CURRENT, MODEPAGE_CHANGEABLE, ...)
 *		header		- (out) on success, a KM_SLEEP-allocated
 *				  buffer of SDC_BUFLEN(un) bytes holding the
 *				  mode header, block descriptor(s) and page;
 *				  the CALLER must kmem_free() it. On failure
 *				  it is freed here and set to NULL.
 *		bdlen		- (out) block descriptor length parsed from
 *				  the header; 0 on failure.
 *
 * Return Code: 0 on success, errno from MODE SENSE, or EIO on a bad
 *		block descriptor length.
 *
 *     Context: Kernel thread context
 */
static int
sd_get_caching_mode_page(sd_ssc_t *ssc, uchar_t page_control, uchar_t **header,
    int *bdlen)
{
	struct sd_lun	    *un = ssc->ssc_un;
	struct mode_caching *mode_caching_page;
	size_t		    buflen = SDC_BUFLEN(un);
	int		    hdrlen = SDC_HDRLEN(un);
	int		    rval;

	/* Clear any pending unit attention before the MODE SENSE. */
	if (sd_send_scsi_TEST_UNIT_READY(ssc, 0) != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);

	/*
	 * Allocate memory for the retrieved mode page and its headers.
	 * Returned mode page data is transferred into this buffer.
	 */
	*header = kmem_zalloc(buflen, KM_SLEEP);

	rval = sd_send_scsi_MODE_SENSE(ssc, SDC_CDB_GROUP(un), *header, buflen,
	    page_control | MODEPAGE_CACHING, SD_PATH_DIRECT);
	if (rval != 0) {
		SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, "%s: Mode Sense Failed\n",
		    __func__);
		goto mode_sense_failed;
	}

	/*
	 * Determine size of Block Descriptors in order to locate the mode
	 * page data. ATAPI devices return a mode header grp2 which splits
	 * the length field across two bytes.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		struct mode_header_grp2 *mhp =
		    (struct mode_header_grp2 *)(*header);
		*bdlen = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
	} else {
		*bdlen = ((struct mode_header *)(*header))->bdesc_length;
	}

	/* A descriptor longer than we allocated for would overrun the page. */
	if (*bdlen > MODE_BLK_DESC_LENGTH) {
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
		    "%s: Mode Sense returned invalid block descriptor length\n",
		    __func__);
		rval = EIO;
		goto mode_sense_failed;
	}

	/* Sanity check: the page at the computed offset must be 0x08. */
	mode_caching_page = (struct mode_caching *)(*header + hdrlen + *bdlen);
	if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
		    "%s: Mode Sense caching page code mismatch %d\n",
		    __func__, mode_caching_page->mode_page.code);
		rval = EIO;
	}

mode_sense_failed:
	/* On any failure, release the buffer so the caller never frees it. */
	if (rval != 0) {
		kmem_free(*header, buflen);
		*header = NULL;
		*bdlen = 0;
	}
	return (rval);
}
/*
 * Function:	sd_cache_control
 *
 * Description: Reads the current caching mode page and, only if the
 *		requested read-cache-disable (RCD) or write-cache-enable
 *		(WCE) settings differ from the device's current settings,
 *		issues a MODE SELECT to apply them.
 *
 *   Arguments: ssc	 - ssc contains pointer to driver soft state
 *		rcd_flag - SD_CACHE_ENABLE / SD_CACHE_DISABLE /
 *			   SD_CACHE_NOACTION for the read cache
 *		wce_flag - same, for the write cache
 *
 * Return Code: 0 on success (or when no change was needed), otherwise the
 *		errno from the MODE SENSE/SELECT.
 *
 *     Context: Kernel thread context
 */
static int
sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag)
{
	struct sd_lun	    *un = ssc->ssc_un;
	struct mode_caching *mode_caching_page;
	uchar_t		    *header;
	size_t		    buflen = SDC_BUFLEN(un);
	int		    hdrlen = SDC_HDRLEN(un);
	int		    bdlen;
	int		    rval;

	rval = sd_get_caching_mode_page(ssc, MODEPAGE_CURRENT, &header, &bdlen);
	switch (rval) {
	case 0:
		/* Locate the page data within the returned buffer. */
		mode_caching_page = (struct mode_caching *)(header + hdrlen +
		    bdlen);

		/*
		 * Check the relevant bits of the caching page to see if the
		 * requested state already matches; skip the MODE SELECT if so.
		 */
		if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) ||
		    (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) ||
		    (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) ||
		    (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) {
			size_t sbuflen;
			uchar_t save_pg;

			/*
			 * Construct select buffer length based on the
			 * length of the sense data returned.
			 */
			sbuflen = hdrlen + bdlen + sizeof (struct mode_page) +
			    (int)mode_caching_page->mode_page.length;

			/* Set the caching page bits as requested. */
			if (rcd_flag == SD_CACHE_ENABLE)
				mode_caching_page->rcd = 0;
			else if (rcd_flag == SD_CACHE_DISABLE)
				mode_caching_page->rcd = 1;

			if (wce_flag == SD_CACHE_ENABLE)
				mode_caching_page->wce = 1;
			else if (wce_flag == SD_CACHE_DISABLE)
				mode_caching_page->wce = 0;

			/*
			 * Save the page if the device indicates it is
			 * savable (PS bit); the PS bit itself is reserved
			 * in MODE SELECT data and must be cleared.
			 */
			save_pg = mode_caching_page->mode_page.ps ?
			    SD_SAVE_PAGE : SD_DONTSAVE_PAGE;

			/* Clear reserved bits before mode select. */
			mode_caching_page->mode_page.ps = 0;

			/*
			 * Clear out mode header for mode select. The block
			 * descriptor length is re-set afterward since bzero
			 * wipes the whole header.
			 */
			bzero(header, hdrlen);

			if (un->un_f_cfg_is_atapi == TRUE) {
				struct mode_header_grp2 *mhp =
				    (struct mode_header_grp2 *)header;
				mhp->bdesc_length_hi = bdlen >> 8;
				mhp->bdesc_length_lo = (uchar_t)bdlen & 0xff;
			} else {
				((struct mode_header *)header)->bdesc_length =
				    bdlen;
			}

			sd_ssc_assessment(ssc, SD_FMT_IGNORE);

			/* Issue mode select to change the cache settings. */
			rval = sd_send_scsi_MODE_SELECT(ssc, SDC_CDB_GROUP(un),
			    header, sbuflen, save_pg, SD_PATH_DIRECT);
		}
		kmem_free(header, buflen);
		break;
	case EIO:
		sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		break;
	default:
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		break;
	}

	return (rval);
}
/*
 * Function:	sd_get_write_cache_enabled
 *
 * Description: Reads the WCE (write cache enable) bit from the device's
 *		current caching mode page.
 *
 *   Arguments: ssc	   - ssc contains pointer to driver soft state
 *		is_enabled - (out) WCE bit; defaults to TRUE so that a
 *			     device we cannot query is treated as having
 *			     a write cache (the conservative assumption
 *			     for cache flushing).
 *
 * Return Code: 0 on success, otherwise the errno from the MODE SENSE.
 *
 *     Context: Kernel thread context
 */
static int
sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled)
{
	struct sd_lun	    *un = ssc->ssc_un;
	struct mode_caching *mode_caching_page;
	uchar_t		    *header;
	size_t		    buflen = SDC_BUFLEN(un);
	int		    hdrlen = SDC_HDRLEN(un);
	int		    bdlen;
	int		    rval;

	/* In case of error, flag as enabled (safer to over-flush). */
	*is_enabled = TRUE;

	rval = sd_get_caching_mode_page(ssc, MODEPAGE_CURRENT, &header, &bdlen);
	switch (rval) {
	case 0:
		mode_caching_page = (struct mode_caching *)(header + hdrlen +
		    bdlen);
		*is_enabled = mode_caching_page->wce;
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		kmem_free(header, buflen);
		break;
	case EIO: {
		/*
		 * Some disks do not support the caching mode page; they
		 * reply to MODE SENSE with CHECK CONDITION / ILLEGAL
		 * REQUEST. Treat that as a benign, compromised assessment
		 * rather than a device fault.
		 */
		uint8_t *sensep;
		int senlen;

		sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
		senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
		    ssc->ssc_uscsi_cmd->uscsi_rqresid);

		if (senlen > 0 &&
		    scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
		} else {
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		}
		break;
	}
	default:
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		break;
	}

	return (rval);
}
/*
 * Function:	sd_get_write_cache_changeable
 *
 * Description: Reads the WCE bit from the CHANGEABLE caching mode page to
 *		determine whether the device allows its write cache setting
 *		to be modified.
 *
 *   Arguments: ssc	      - ssc contains pointer to driver soft state
 *		is_changeable - (out) WCE-changeable indication; defaults
 *				to TRUE when the page cannot be read.
 *
 *     Context: Kernel thread context
 */
static void
sd_get_write_cache_changeable(sd_ssc_t *ssc, int *is_changeable)
{
	struct sd_lun	    *un = ssc->ssc_un;
	struct mode_caching *pg;
	uchar_t		    *hdr;
	size_t		    buflen = SDC_BUFLEN(un);
	int		    hdrlen = SDC_HDRLEN(un);
	int		    bdlen;
	int		    rval;

	/* Assume changeable until the device tells us otherwise. */
	*is_changeable = TRUE;

	rval = sd_get_caching_mode_page(ssc, MODEPAGE_CHANGEABLE, &hdr,
	    &bdlen);
	if (rval == 0) {
		pg = (struct mode_caching *)(hdr + hdrlen + bdlen);
		*is_changeable = pg->wce;
		kmem_free(hdr, buflen);
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
	} else if (rval == EIO) {
		sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
	} else {
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
}
/*
 * Function:	sd_get_nv_sup
 *
 * Description: Determines whether the device has a non-volatile cache by
 *		reading the NV_SUP bit from the Extended INQUIRY VPD page
 *		(0x86). If it does, issues a SYNCHRONIZE CACHE with the
 *		SYNC_NV bit (FLUSH_VOLATILE) so only the volatile cache is
 *		flushed going forward.
 *
 *   Arguments: ssc - ssc contains pointer to driver soft state
 *
 *     Context: Kernel thread context
 */
static void
sd_get_nv_sup(sd_ssc_t *ssc)
{
	int		rval		= 0;
	uchar_t		*inq86		= NULL;
	size_t		inq86_len	= MAX_INQUIRY_SIZE;
	size_t		inq86_resid	= 0;
	struct		dk_callback *dkc;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * Be conservative: assume no NV cache. When cache flushes are
	 * suppressed entirely, there is nothing more to determine.
	 */
	un->un_f_sync_nv_supported = FALSE;

	/*
	 * If either sd.conf or internal disk table
	 * specifies cache flush be suppressed, then
	 * we don't bother checking NV_SUP bit.
	 */
	if (un->un_f_suppress_cache_flush == TRUE) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	/* Only query page 0x86 when the device advertises it. */
	if (sd_check_vpd_page_support(ssc) == 0 &&
	    un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
		mutex_exit(SD_MUTEX(un));
		/* collect page 86 data if available */
		inq86 = kmem_zalloc(inq86_len, KM_SLEEP);

		rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
		    0x01, 0x86, &inq86_resid);

		/* Byte 6 of page 0x86 carries the NV_SUP bit. */
		if (rval == 0 && (inq86_len - inq86_resid > 6)) {
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_get_nv_sup: \
successfully get VPD page: %x \
PAGE LENGTH: %x BYTE 6: %x\n",
			    inq86[1], inq86[3], inq86[6]);

			mutex_enter(SD_MUTEX(un));
			/*
			 * check the value of NV_SUP bit: only if the device
			 * reports NV_SUP bit to be 1, the
			 * un_f_sync_nv_supported bit will be set to true.
			 */
			if (inq86[6] & SD_VPD_NV_SUP) {
				un->un_f_sync_nv_supported = TRUE;
			}
			mutex_exit(SD_MUTEX(un));
		} else if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		kmem_free(inq86, inq86_len);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * Send a SYNC CACHE command to check whether
	 * SYNC_NV bit is supported. This command should have
	 * un_f_sync_nv_supported set to correct value.
	 */
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_sync_nv_supported) {
		mutex_exit(SD_MUTEX(un));
		dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
		dkc->dkc_flag = FLUSH_VOLATILE;
		(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);

		/* Clear any unit attention raised by the flush. */
		rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
		if (rval != 0)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);

		kmem_free(dkc, sizeof (struct dk_callback));
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \
un_f_suppress_cache_flush is set to %d\n",
	    un->un_f_suppress_cache_flush);
}
/*
 * Function:	sd_make_device
 *
 * Description: Builds the base (partition 0) dev_t for the given devinfo
 *		node from the driver major and the instance number shifted
 *		into the unit field of the minor number.
 *
 *   Arguments: devi - the device's dev_info structure
 *
 * Return Code: dev_t for the device
 */
static dev_t
sd_make_device(dev_info_t *devi)
{
	major_t	drv_major;
	minor_t	unit_minor;

	drv_major = ddi_driver_major(devi);
	unit_minor = ddi_get_instance(devi) << SDUNIT_SHIFT;

	return (makedevice(drv_major, unit_minor));
}
/*
 * Function:	sd_pm_entry
 *
 * Description: Called at the start of a command path to mark the device
 *		busy for power management and, if necessary, power it up.
 *		un_pm_count tracks in-flight PM references: a negative
 *		value means the device is powered down; 0 means powered up
 *		but idle; >0 means busy. Each successful sd_pm_entry()
 *		must be balanced by sd_pm_exit().
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 * Return Code: DDI_SUCCESS, or DDI_FAILURE if the power-up failed.
 *
 *     Context: Kernel thread context; must NOT hold SD_MUTEX or
 *		un_pm_mutex on entry.
 */
static int
sd_pm_entry(struct sd_lun *un)
{
	int return_status = DDI_SUCCESS;

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");

	/* PM disabled: nothing to account for; treat as success. */
	if (un->un_f_pm_is_enabled == FALSE) {
		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_entry: exiting, PM not enabled\n");
		return (return_status);
	}

	/*
	 * Serialize the power state transition: only one thread at a time
	 * may run the busy/raise-power sequence below.
	 */
	mutex_enter(&un->un_pm_mutex);
	while (un->un_pm_busy == TRUE) {
		cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
	}
	un->un_pm_busy = TRUE;

	/* First reference (count <= 0): mark component busy with the fwk. */
	if (un->un_pm_count < 1) {
		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");

		/*
		 * Indicate we are now busy so the framework won't attempt to
		 * power down the device. This call will only fail if either
		 * we passed a bad component number or the device has no
		 * components, neither of which should ever happen.
		 */
		mutex_exit(&un->un_pm_mutex);
		return_status = pm_busy_component(SD_DEVINFO(un), 0);
		ASSERT(return_status == DDI_SUCCESS);

		mutex_enter(&un->un_pm_mutex);

		/* Negative count means the device is powered down: raise. */
		if (un->un_pm_count < 0) {
			mutex_exit(&un->un_pm_mutex);

			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_entry: power up component\n");

			/* pm_raise_power will cause first command to run. */
			return_status = pm_raise_power(SD_DEVINFO(un), 0,
			    SD_PM_STATE_ACTIVE(un));

			mutex_enter(&un->un_pm_mutex);

			if (return_status != DDI_SUCCESS) {
				/*
				 * Power up failed; undo the busy mark and
				 * the count increment applied at function
				 * exit so the state stays consistent.
				 */
				SD_TRACE(SD_LOG_IO_PM, un,
				    "sd_pm_entry: power up failed,"
				    " idle the component\n");
				(void) pm_idle_component(SD_DEVINFO(un), 0);
				un->un_pm_count--;
			} else {
				/*
				 * Device is powered up; count was bumped to
				 * 0 by the power-change callback.
				 */
				ASSERT(un->un_pm_count == 0);
			}
		}

		if (return_status == DDI_SUCCESS) {
			/*
			 * The device is now powered, so commands no longer
			 * need the PM iostart/iodone layers: switch both
			 * chains to their NO_PM variants.
			 */
			mutex_exit(&un->un_pm_mutex);
			mutex_enter(SD_MUTEX(un));
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_entry: changing uscsi_chain_type from %d\n",
			    un->un_uscsi_chain_type);

			if (un->un_f_non_devbsize_supported) {
				un->un_buf_chain_type =
				    SD_CHAIN_INFO_RMMEDIA_NO_PM;
			} else {
				un->un_buf_chain_type =
				    SD_CHAIN_INFO_DISK_NO_PM;
			}
			un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;

			SD_TRACE(SD_LOG_IO_PM, un,
			    " changed uscsi_chain_type to %d\n",
			    un->un_uscsi_chain_type);
			mutex_exit(SD_MUTEX(un));
			mutex_enter(&un->un_pm_mutex);

			/*
			 * Arm the idle timer (with a matching busy mark) so
			 * the device can be idled again after inactivity.
			 */
			if (un->un_pm_idle_timeid == NULL) {
				/* 300 ms. */
				un->un_pm_idle_timeid =
				    timeout(sd_pm_idletimeout_handler, un,
				    (drv_usectohz((clock_t)300000)));
				/*
				 * Include an extra call to busy which keeps
				 * the device busy with-respect-to the PM
				 * layer until the timer fires, at which
				 * time it'll get the extra idle call.
				 */
				(void) pm_busy_component(SD_DEVINFO(un), 0);
			}
		}
	}
	un->un_pm_busy = FALSE;
	/* Next thread is now free to enter. */
	cv_signal(&un->un_pm_busy_cv);
	un->un_pm_count++;

	SD_TRACE(SD_LOG_IO_PM, un,
	    "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count);

	mutex_exit(&un->un_pm_mutex);

	return (return_status);
}
/*
 * Function:	sd_pm_exit
 *
 * Description: Drops one PM reference taken by sd_pm_entry(). When the
 *		reference count reaches zero the component is marked idle
 *		with the PM framework so it may be powered down.
 *
 *   Arguments: un - driver soft state (unit) structure
 *
 *     Context: Kernel thread context; must NOT hold SD_MUTEX or
 *		un_pm_mutex on entry.
 */
static void
sd_pm_exit(struct sd_lun *un)
{
	int	went_idle = 0;

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n");

	/* With PM disabled, sd_pm_entry() took no reference to release. */
	if (un->un_f_pm_is_enabled == TRUE) {
		mutex_enter(&un->un_pm_mutex);
		un->un_pm_count--;

		SD_TRACE(SD_LOG_IO_PM, un,
		    "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);

		ASSERT(un->un_pm_count >= 0);
		went_idle = (un->un_pm_count == 0);
		mutex_exit(&un->un_pm_mutex);

		if (went_idle) {
			SD_TRACE(SD_LOG_IO_PM, un,
			    "sd_pm_exit: idle component\n");
			(void) pm_idle_component(SD_DEVINFO(un), 0);
		}
	}

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
}
/*
 * Function:	sdopen
 *
 * Description: Driver's open(9e) entry point. Validates the open type,
 *		enforces exclusive-open and write-protect semantics,
 *		verifies media readiness (unless FNDELAY/FNONBLOCK), records
 *		the open in un_ocmap, and checks for LUN expansion.
 *
 *   Arguments: dev_p  - pointer to the device number
 *		flag   - open flags (FEXCL, FWRITE, FNDELAY, FNONBLOCK, ...)
 *		otyp   - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
 *		cred_p - user credential pointer (unused)
 *
 * Return Code: 0 (DDI_SUCCESS) on success; EINVAL, ENXIO, EIO, EROFS or
 *		EBUSY on failure.
 *
 *     Context: Kernel thread context
 */
static int
sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
{
	struct sd_lun	*un;
	int		nodelay;
	int		part;
	uint64_t	partmask;
	int		instance;
	dev_t		dev;
	int		rval = EIO;
	diskaddr_t	nblks = 0;
	diskaddr_t	label_cap;

	/* Validate the open type. */
	if (otyp >= OTYPCNT) {
		return (EINVAL);
	}

	dev = *dev_p;
	instance = SDUNIT(dev);

	/*
	 * No soft state for this instance: the device is gone. Clear the
	 * probe cache so a subsequent open re-probes the hardware.
	 */
	if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
		sd_scsi_clear_probe_cache();
		return (ENXIO);
	}

	nodelay  = (flag & (FNDELAY | FNONBLOCK));
	part	 = SDPART(dev);
	partmask = 1 << part;

	mutex_enter(SD_MUTEX(un));

	/*
	 * For a normal (blocking) open, wait out any suspend or PM state
	 * change, then take a PM reference so the device is powered up for
	 * the readiness checks below.
	 */
	if (!nodelay) {
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}

		mutex_exit(SD_MUTEX(un));
		if (sd_pm_entry(un) != DDI_SUCCESS) {
			rval = EIO;
			SD_ERROR(SD_LOG_OPEN_CLOSE, un,
			    "sdopen: sd_pm_entry failed\n");
			goto open_failed_with_pm;
		}
		mutex_enter(SD_MUTEX(un));
	}

	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
	if (otyp == OTYP_LYR) {
		SD_TRACE(SD_LOG_OPEN_CLOSE, un,
		    "sdopen: exclopen=%x, flag=%x, un_ocmap.lyropen=%x\n",
		    un->un_exclopen, flag, un->un_ocmap.lyropen[part]);
	} else {
		SD_TRACE(SD_LOG_OPEN_CLOSE, un,
		    "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
		    un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
	}

	/* Partition already held exclusively by another open. */
	if (un->un_exclopen & (partmask)) {
		goto excl_open_fail;
	}

	/*
	 * FEXCL: refuse if ANY open (layered or regular, any type) of this
	 * partition already exists.
	 */
	if (flag & FEXCL) {
		int i;
		if (un->un_ocmap.lyropen[part]) {
			goto excl_open_fail;
		}
		for (i = 0; i < (OTYPCNT - 1); i++) {
			if (un->un_ocmap.regopen[i] & (partmask)) {
				goto excl_open_fail;
			}
		}
	}

	/*
	 * Check write-protect status for writable opens (blocking opens
	 * only, since sr_check_wp() may issue I/O).
	 */
	if (un->un_f_chk_wp_open) {
		if ((flag & FWRITE) && (!nodelay)) {
			mutex_exit(SD_MUTEX(un));
			if (un->un_f_dvdram_writable_device == FALSE) {
				/* CD-ROMs and WP media are never writable. */
				if (ISCD(un) || sr_check_wp(dev)) {
					rval = EROFS;
					mutex_enter(SD_MUTEX(un));
					SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
					    "write to cd or write protected media\n");
					goto open_fail;
				}
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	if (!nodelay) {
		/*
		 * Blocking open: verify the device is ready and has a valid
		 * label, and (for disks) that the partition is non-empty.
		 */
		sd_ssc_t	*ssc;

		mutex_exit(SD_MUTEX(un));
		ssc = sd_ssc_init(un);
		rval = sd_ready_and_valid(ssc, part);
		sd_ssc_fini(ssc);
		mutex_enter(SD_MUTEX(un));

		nblks = 0;
		if (rval == SD_READY_VALID && (!ISCD(un))) {
			/* if cmlb_partinfo fails, nblks remains 0 */
			mutex_exit(SD_MUTEX(un));
			(void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
			    NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
		}

		if ((rval != SD_READY_VALID) ||
		    (!ISCD(un) && nblks <= 0)) {
			rval = un->un_f_has_removable_media ? ENXIO : EIO;
			SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
			    "device not ready or invalid disk block value\n");
			goto open_fail;
		}
#if defined(__x86)
	} else {
		/*
		 * x86 nodelay open on a fully-closed device: invalidate the
		 * cached geometry so it is re-read on the next access.
		 */
		uchar_t *cp;

		/*
		 * x86 requires special nodelay handling, so that p0 is
		 * always defined and accessible.
		 * Invalidate geometry only if device is not already open.
		 */
		cp = &un->un_ocmap.chkd[0];
		while (cp < &un->un_ocmap.chkd[OCSIZE]) {
			if (*cp != (uchar_t)0) {
				break;
			}
			cp++;
		}
		if (cp == &un->un_ocmap.chkd[OCSIZE]) {
			mutex_exit(SD_MUTEX(un));
			cmlb_invalidate(un->un_cmlbhandle,
			    (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
		}
#endif
	}

	/* Record the open in the open/close map. */
	if (otyp == OTYP_LYR) {
		un->un_ocmap.lyropen[part]++;
	} else {
		un->un_ocmap.regopen[otyp] |= partmask;
	}

	/* Set up open and exclusive open flags */
	if (flag & FEXCL) {
		un->un_exclopen |= (partmask);
	}

	/*
	 * If the lun is EFI labeled and lun capacity is greater than the
	 * capacity contained in the label, log a sys-event to notify the
	 * interested module.
	 * To avoid an infinite loop of logging sys-event, we only log the
	 * event when the lun is not opened in NDELAY mode. The event handler
	 * should open the lun in NDELAY mode.
	 */
	if (!nodelay) {
		mutex_exit(SD_MUTEX(un));
		if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
		    (void*)SD_PATH_DIRECT) == 0) {
			mutex_enter(SD_MUTEX(un));
			if (un->un_f_blockcount_is_valid &&
			    un->un_blockcount > label_cap &&
			    un->un_f_expnevent == B_FALSE) {
				un->un_f_expnevent = B_TRUE;
				mutex_exit(SD_MUTEX(un));
				sd_log_lun_expansion_event(un,
				    (nodelay ? KM_NOSLEEP : KM_SLEEP));
				mutex_enter(SD_MUTEX(un));
			}
		} else {
			mutex_enter(SD_MUTEX(un));
		}
	}

	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: "
	    "open of part %d type %d\n", part, otyp);

	mutex_exit(SD_MUTEX(un));
	if (!nodelay) {
		/* Balance the sd_pm_entry() taken above. */
		sd_pm_exit(un);
	}

	SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n");
	return (DDI_SUCCESS);

excl_open_fail:
	SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n");
	rval = EBUSY;

open_fail:
	mutex_exit(SD_MUTEX(un));

	/*
	 * On a failed open we must exit the pm management.
	 */
	if (!nodelay) {
		sd_pm_exit(un);
	}
open_failed_with_pm:

	return (rval);
}
/*
 * sdclose - close(9E) entry point.
 *
 * Decrements the open bookkeeping for the given partition/open-type and,
 * on the last close of the unit, restores the saved throttle, optionally
 * flushes the device write cache, re-allows media removal, and releases
 * the write-map cache.
 *
 * Returns 0 on success, ENXIO for a bad open type or unattached unit, or
 * an errno from the cache-sync / door-unlock commands.
 */
static int
sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
{
	struct sd_lun *un;
	uchar_t *cp;
	int part;
	int nodelay;
	int rval = 0;

	/* Open types outside the ocmap bookkeeping range are invalid. */
	if (otyp >= OTYPCNT) {
		return (ENXIO);
	}
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}
	part = SDPART(dev);
	nodelay = flag & (FNDELAY | FNONBLOCK);
	SD_TRACE(SD_LOG_OPEN_CLOSE, un,
	    "sdclose: close of part %d type %d\n", part, otyp);
	mutex_enter(SD_MUTEX(un));
	/* Wait out any in-progress power-state transition. */
	while (un->un_state == SD_STATE_PM_CHANGING) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}
	/* Drop any exclusive-open claim held on this partition. */
	if (un->un_exclopen & (1 << part)) {
		un->un_exclopen &= ~(1 << part);
	}
	/* Update the per-type open map. */
	if (otyp == OTYP_LYR) {
		un->un_ocmap.lyropen[part] -= 1;
	} else {
		un->un_ocmap.regopen[otyp] &= ~(1 << part);
	}
	/*
	 * Scan the combined open map (chkd overlays lyropen/regopen); if
	 * every byte is clear this was the last close of the unit.
	 */
	cp = &un->un_ocmap.chkd[0];
	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
		if (*cp != '\0') {
			break;
		}
		cp++;
	}
	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
		SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
		/* Undo any throttle adjustment made while open. */
		un->un_throttle = un->un_saved_throttle;
		if (un->un_state == SD_STATE_OFFLINE) {
			if (un->un_f_is_fibre == FALSE) {
				scsi_log(SD_DEVINFO(un), sd_label,
				    CE_WARN, "offline\n");
			}
			/* Device went away: drop the cached label. */
			mutex_exit(SD_MUTEX(un));
			cmlb_invalidate(un->un_cmlbhandle,
			    (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
		} else {
			/*
			 * Flush the write cache if the device supports it
			 * and a write may have dirtied it (or the device is
			 * a writable DVD-RAM class device).
			 */
			if ((un->un_f_sync_cache_supported &&
			    un->un_f_sync_cache_required) ||
			    un->un_f_dvdram_writable_device == TRUE) {
				mutex_exit(SD_MUTEX(un));
				if (sd_pm_entry(un) == DDI_SUCCESS) {
					rval =
					    sd_send_scsi_SYNCHRONIZE_CACHE(un,
					    NULL);
					/* Not supported is not an error. */
					if (rval == ENOTSUP) {
						rval = 0;
					} else if (rval != 0) {
						rval = EIO;
					}
					sd_pm_exit(un);
				} else {
					rval = EIO;
				}
				mutex_enter(SD_MUTEX(un));
			}
			/*
			 * Re-allow media removal now that no one has the
			 * device open.
			 */
			if (un->un_f_doorlock_supported) {
				mutex_exit(SD_MUTEX(un));
				if (sd_pm_entry(un) == DDI_SUCCESS) {
					sd_ssc_t *ssc;
					ssc = sd_ssc_init(un);
					rval = sd_send_scsi_DOORLOCK(ssc,
					    SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
					if (rval != 0)
						sd_ssc_assessment(ssc,
						    SD_FMT_IGNORE);
					sd_ssc_fini(ssc);
					sd_pm_exit(un);
					/*
					 * A failed unlock on a CD opened
					 * with FNDELAY maps to ENXIO.
					 */
					if (ISCD(un) && (rval != 0) &&
					    (nodelay != 0)) {
						rval = ENXIO;
					}
				} else {
					rval = EIO;
				}
				mutex_enter(SD_MUTEX(un));
			}
			if (un->un_f_has_removable_media) {
				sr_ejected(un);
			}
			/*
			 * Destroy the write-map cache only when no commands
			 * remain in flight.
			 */
			if ((un->un_wm_cache != NULL) &&
			    (un->un_ncmds_in_driver == 0)) {
				kmem_cache_destroy(un->un_wm_cache);
				un->un_wm_cache = NULL;
			}
		}
	}
	mutex_exit(SD_MUTEX(un));
	return (rval);
}
/*
 * sd_ready_and_valid - check that the unit is ready and its label/geometry
 * is valid for the given partition, (re)reading capacity and validating
 * the label as needed.
 *
 * Returns SD_READY_VALID on success, SD_NOT_READY_VALID when the device
 * is not ready or the partition is unusable, SD_RESERVED_BY_OTHERS on a
 * reservation conflict, or ENOMEM if the RMW write-map cache cannot be
 * created.  Must be called without SD_MUTEX held; drops/reacquires it
 * around every SCSI command.
 */
static int
sd_ready_and_valid(sd_ssc_t *ssc, int part)
{
	struct sd_errstats *stp;
	uint64_t capacity;
	uint_t lbasize;
	int rval = SD_READY_VALID;
	char name_str[48];
	boolean_t is_valid;
	struct sd_lun *un;
	int status;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));
	if (un->un_f_has_removable_media) {
		/*
		 * Removable media: confirm the media is present and, if the
		 * label or block info went stale, re-read the capacity.
		 */
		mutex_exit(SD_MUTEX(un));
		status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
		if (status != 0) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			goto done;
		}
		is_valid = SD_IS_VALID_LABEL(un);
		mutex_enter(SD_MUTEX(un));
		if (!is_valid ||
		    (un->un_f_blockcount_is_valid == FALSE) ||
		    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
			/* Re-read capacity/blocksize from the device. */
			mutex_exit(SD_MUTEX(un));
			status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
			    &lbasize, SD_PATH_DIRECT);
			if (status != 0) {
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				cmlb_invalidate(un->un_cmlbhandle,
				    (void *)SD_PATH_DIRECT);
				mutex_enter(SD_MUTEX(un));
				rval = SD_NOT_READY_VALID;
				goto done;
			} else {
				mutex_enter(SD_MUTEX(un));
				sd_update_block_info(un, lbasize, capacity);
			}
		}
		/*
		 * For an unlabeled CD, probe whether the media is
		 * writable.
		 */
		if (!is_valid && ISCD(un)) {
			sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
		}
	} else {
		/*
		 * Fixed media: a TUR failure here is only assessed, not
		 * fatal; the SD_STATE_NORMAL check below decides.
		 */
		mutex_exit(SD_MUTEX(un));
		status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
		if (status != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}
		mutex_enter(SD_MUTEX(un));
	}
	/*
	 * Non-DEV_BSIZE devices (or forced RMW) need the write-map cache
	 * for read-modify-write range locking; create it lazily.
	 */
	if (((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR ||
	    un->un_f_non_devbsize_supported) &&
	    un->un_tgt_blocksize != DEV_BSIZE) ||
	    un->un_f_enable_rmw) {
		if (!(un->un_wm_cache)) {
			(void) snprintf(name_str, sizeof (name_str),
			    "%s%d_cache",
			    ddi_driver_name(SD_DEVINFO(un)),
			    ddi_get_instance(SD_DEVINFO(un)));
			un->un_wm_cache = kmem_cache_create(
			    name_str, sizeof (struct sd_w_map),
			    8, sd_wm_cache_constructor,
			    sd_wm_cache_destructor, NULL,
			    (void *)un, NULL, 0);
			if (!(un->un_wm_cache)) {
				rval = ENOMEM;
				goto done;
			}
		}
	}
	if (un->un_state == SD_STATE_NORMAL) {
		/*
		 * Re-check readiness; EACCES indicates another initiator
		 * holds a reservation, anything else means offline.
		 */
		int err;

		mutex_exit(SD_MUTEX(un));
		err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
		mutex_enter(SD_MUTEX(un));
		if (err != 0) {
			mutex_exit(SD_MUTEX(un));
			cmlb_invalidate(un->un_cmlbhandle,
			    (void *)SD_PATH_DIRECT);
			mutex_enter(SD_MUTEX(un));
			if (err == EACCES) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "reservation conflict\n");
				rval = SD_RESERVED_BY_OTHERS;
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			} else {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "drive offline\n");
				rval = SD_NOT_READY_VALID;
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			}
			goto done;
		}
	}
	/*
	 * Validate the label and the requested partition unless a FORMAT
	 * command is currently rewriting the media.
	 */
	if (un->un_f_format_in_progress == FALSE) {
		mutex_exit(SD_MUTEX(un));
		(void) cmlb_validate(un->un_cmlbhandle, 0,
		    (void *)SD_PATH_DIRECT);
		if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL,
		    NULL, (void *) SD_PATH_DIRECT) != 0) {
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}
		if (un->un_f_pkstats_enabled) {
			sd_set_pstats(un);
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_ready_and_valid: un:0x%p pstats created and "
			    "set\n", un);
		}
		mutex_enter(SD_MUTEX(un));
	}
	/* Prevent media removal while the device is in use. */
	if (un->un_f_doorlock_supported) {
		mutex_exit(SD_MUTEX(un));
		status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
		    SD_PATH_DIRECT);
		if ((status != 0) && ISCD(un)) {
			/* Lock failure is fatal only for CDs. */
			rval = SD_NOT_READY_VALID;
			mutex_enter(SD_MUTEX(un));
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
			goto done;
		} else if (status != 0)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		mutex_enter(SD_MUTEX(un));
	}
	/* Media is present and usable; wake any DKIOCSTATE waiters. */
	un->un_mediastate = DKIO_INSERTED;
	cv_broadcast(&un->un_state_cv);
	rval = SD_READY_VALID;
done:
	/*
	 * Record the capacity in the error kstats the first time a valid
	 * block count is known.  SD_MUTEX is held on every path here.
	 */
	if (un->un_errstats != NULL) {
		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		if ((stp->sd_capacity.value.ui64 == 0) &&
		    (un->un_f_blockcount_is_valid == TRUE)) {
			stp->sd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_blockcount *
			    un->un_sys_blocksize);
		}
	}
	mutex_exit(SD_MUTEX(un));
	return (rval);
}
/*
 * sdmin - minphys routine passed to physio()/aphysio().
 *
 * Clamps the transfer size of a raw I/O request to the unit's maximum,
 * unless the HBA advertises that it breaks up large transfers itself.
 */
static void
sdmin(struct buf *bp)
{
	struct sd_lun *lunp =
	    ddi_get_soft_state(sd_state, SDUNIT(bp->b_edev));

	ASSERT(lunp != NULL);

	if (!lunp->un_buf_breakup_supported &&
	    bp->b_bcount > lunp->un_max_xfer_size) {
		bp->b_bcount = lunp->un_max_xfer_size;
	}
}
/*
 * sdread - read(9E) entry point: raw (character-device) read via physio.
 *
 * Returns 0 on success, ENXIO for an unattached unit, EIO if the unit
 * cannot be made ready/valid, EINVAL for misaligned offset/length, or
 * the physio() result.
 */
static int
sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
{
	struct sd_lun *un = NULL;
	int secmask;
	int err = 0;
	sd_ssc_t *ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * If the label is not yet valid (and this is not a CD), try to
	 * bring the unit ready/valid before allowing the transfer.
	 */
	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		/* Block while the unit is suspended or changing PM state. */
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		/* Count this thread so detach/suspend can drain us. */
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));

		ssc = sd_ssc_init(un);
		if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
			err = EIO;
		} else {
			err = 0;
		}
		sd_ssc_fini(ssc);

		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
		if (err != 0)
			return (err);
	}

	/*
	 * Require alignment to the target block size when RMW is disabled
	 * and misaligned I/O must fail; otherwise DEV_BSIZE suffices.
	 */
	if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
	    !un->un_f_enable_rmw)
		secmask = un->un_tgt_blocksize - 1;
	else
		secmask = DEV_BSIZE - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdread: file offset not modulo %d\n",
		    secmask + 1);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdread: transfer length not modulo %d\n",
		    secmask + 1);
		err = EINVAL;
	} else {
		err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
	}
	return (err);
}
/*
 * sdwrite - write(9E) entry point: raw (character-device) write via physio.
 * Mirrors sdread(); see there for the ready/valid and alignment logic.
 */
static int
sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
{
	struct sd_lun *un = NULL;
	int secmask;
	int err = 0;
	sd_ssc_t *ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/* Make the unit ready/valid first if it has no usable label. */
	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		/* Count this thread so detach/suspend can drain us. */
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));

		ssc = sd_ssc_init(un);
		if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
			err = EIO;
		} else {
			err = 0;
		}
		sd_ssc_fini(ssc);

		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
		if (err != 0)
			return (err);
	}

	/* Choose the alignment requirement (see sdread). */
	if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
	    !un->un_f_enable_rmw)
		secmask = un->un_tgt_blocksize - 1;
	else
		secmask = DEV_BSIZE - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdwrite: file offset not modulo %d\n",
		    secmask + 1);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdwrite: transfer length not modulo %d\n",
		    secmask + 1);
		err = EINVAL;
	} else {
		err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
	}
	return (err);
}
/*
 * sdaread - aread(9E) entry point: asynchronous raw read via aphysio.
 * Mirrors sdread(); see there for the ready/valid and alignment logic.
 */
static int
sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
	struct sd_lun *un = NULL;
	struct uio *uio = aio->aio_uio;
	int secmask;
	int err = 0;
	sd_ssc_t *ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/* Make the unit ready/valid first if it has no usable label. */
	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		/* Count this thread so detach/suspend can drain us. */
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));

		ssc = sd_ssc_init(un);
		if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
			err = EIO;
		} else {
			err = 0;
		}
		sd_ssc_fini(ssc);

		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
		if (err != 0)
			return (err);
	}

	/* Choose the alignment requirement (see sdread). */
	if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
	    !un->un_f_enable_rmw)
		secmask = un->un_tgt_blocksize - 1;
	else
		secmask = DEV_BSIZE - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdaread: file offset not modulo %d\n",
		    secmask + 1);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdaread: transfer length not modulo %d\n",
		    secmask + 1);
		err = EINVAL;
	} else {
		err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
	}
	return (err);
}
/*
 * sdawrite - awrite(9E) entry point: asynchronous raw write via aphysio.
 * Mirrors sdread(); see there for the ready/valid and alignment logic.
 */
static int
sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
{
	struct sd_lun *un = NULL;
	struct uio *uio = aio->aio_uio;
	int secmask;
	int err = 0;
	sd_ssc_t *ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/* Make the unit ready/valid first if it has no usable label. */
	if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
		mutex_enter(SD_MUTEX(un));
		while ((un->un_state == SD_STATE_SUSPENDED) ||
		    (un->un_state == SD_STATE_PM_CHANGING)) {
			cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
		}
		/* Count this thread so detach/suspend can drain us. */
		un->un_ncmds_in_driver++;
		mutex_exit(SD_MUTEX(un));

		ssc = sd_ssc_init(un);
		if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
			err = EIO;
		} else {
			err = 0;
		}
		sd_ssc_fini(ssc);

		mutex_enter(SD_MUTEX(un));
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		mutex_exit(SD_MUTEX(un));
		if (err != 0)
			return (err);
	}

	/* Choose the alignment requirement (see sdread). */
	if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
	    !un->un_f_enable_rmw)
		secmask = un->un_tgt_blocksize - 1;
	else
		secmask = DEV_BSIZE - 1;

	if (uio->uio_loffset & ((offset_t)(secmask))) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdawrite: file offset not modulo %d\n",
		    secmask + 1);
		err = EINVAL;
	} else if (uio->uio_iov->iov_len & (secmask)) {
		SD_ERROR(SD_LOG_READ_WRITE, un,
		    "sdawrite: transfer length not modulo %d\n",
		    secmask + 1);
		err = EINVAL;
	} else {
		err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio);
	}
	return (err);
}
/*
 * Parameters for the driver-wide task queues, created in sd_taskq_create()
 * and destroyed in sd_taskq_delete().
 */
#define SD_TASKQ_NUMTHREADS 8
#define SD_TASKQ_MINALLOC 256
#define SD_TASKQ_MAXALLOC 256
/* General-purpose driver task queue; stable after sd_taskq_create(). */
static taskq_t *sd_tq = NULL;
_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))
/* Tunable copies of the min/max preallocated taskq entry counts. */
static int sd_taskq_minalloc = SD_TASKQ_MINALLOC;
static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
/*
 * Single-threaded "rmw" task queue (named %s_rmw_taskq at creation);
 * presumably serializes read-modify-write related work — confirm against
 * the sd_wmr_tq dispatch sites.
 */
#define SD_WMR_TASKQ_NUMTHREADS 1
static taskq_t *sd_wmr_tq = NULL;
_NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))
/*
 * sd_taskq_create - create the driver-wide task queues.
 *
 * Builds the general queue (sd_tq) and the single-threaded rmw queue
 * (sd_wmr_tq), both prepopulated and running just below maxsyspri.
 * Must only be called once, before either queue exists.
 */
static void
sd_taskq_create(void)
{
	char qname[TASKQ_NAMELEN];

	ASSERT(sd_tq == NULL);
	ASSERT(sd_wmr_tq == NULL);

	(void) snprintf(qname, sizeof (qname), "%s_drv_taskq", sd_label);
	sd_tq = taskq_create(qname, SD_TASKQ_NUMTHREADS,
	    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
	    TASKQ_PREPOPULATE);

	(void) snprintf(qname, sizeof (qname), "%s_rmw_taskq", sd_label);
	sd_wmr_tq = taskq_create(qname, SD_WMR_TASKQ_NUMTHREADS,
	    (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
	    TASKQ_PREPOPULATE);
}
/*
 * sd_taskq_delete - destroy both driver-wide task queues.
 * Counterpart of sd_taskq_create(); both queues must exist.
 */
static void
sd_taskq_delete(void)
{
	ASSERT(sd_tq != NULL);
	ASSERT(sd_wmr_tq != NULL);

	taskq_destroy(sd_wmr_tq);
	sd_wmr_tq = NULL;

	taskq_destroy(sd_tq);
	sd_tq = NULL;
}
/*
 * sdstrategy - strategy(9E) entry point for block I/O.
 *
 * Validates the unit, accounts the command in un_ncmds_in_driver, and
 * hands the buf to the xbuf queueing layer.  Always returns 0; failures
 * are delivered through bioerror()/biodone().
 */
static int
sdstrategy(struct buf *bp)
{
	struct sd_lun *un;

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	if (un == NULL) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}
	/* No new I/O once a crash dump is in progress. */
	if (un->un_state == SD_STATE_DUMPING) {
		bioerror(bp, ENXIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));
	/* Block while the unit is suspended or changing PM state. */
	while ((un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}
	un->un_ncmds_in_driver++;
	/*
	 * ATAPI devices need the buffer mapped in; drop the mutex around
	 * bp_mapin() since it may sleep.
	 */
	if (un->un_f_cfg_is_atapi == TRUE) {
		mutex_exit(SD_MUTEX(un));
		bp_mapin(bp);
		mutex_enter(SD_MUTEX(un));
	}
	SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);
	/* Any write may dirty the cache; remember to flush on close. */
	if (bp->b_flags & B_WRITE)
		un->un_f_sync_cache_required = TRUE;
	mutex_exit(SD_MUTEX(un));

	return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
}
/*
 * sd_xbuf_strategy - xbuf-layer callback that launches a buf(9S) request.
 *
 * Initializes the xbuf for the normal buf I/O chain and kicks off the
 * first iostart stage.  'arg' is the owning soft-state pointer.
 */
static void
sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
{
	struct sd_lun *un = (struct sd_lun *)arg;
	struct sd_xbuf *xbp = (struct sd_xbuf *)xp;

	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	sd_xbuf_init(un, bp, xbp, SD_CHAIN_BUFIO, NULL);
	SD_BEGIN_IOSTART(xbp->xb_chain_iostart, un, bp);
}
/*
 * sd_xbuf_init - initialize an xbuf for a new request and select the
 * iostart/iodone chain it will traverse.
 *
 * chain_type selects between the buf, uscsi, direct, and priority chains;
 * pktinfop carries the uscsi_cmd for USCSI requests (NULL otherwise).
 * On return bp->b_private points at the xbuf and the buf flags/resid are
 * reset for a fresh submission.
 */
static void
sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    uchar_t chain_type, void *pktinfop)
{
	int index;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);

	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
	    bp, chain_type);

	xp->xb_un = un;
	xp->xb_pktp = NULL;
	xp->xb_pktinfo = pktinfop;
	/* Preserve the caller's b_private; restored at iodone. */
	xp->xb_private = bp->b_private;
	xp->xb_blkno = (daddr_t)bp->b_blkno;

	switch (chain_type) {
	case SD_CHAIN_NULL:
		/* FALLTHROUGH: NULL is treated as ordinary buf I/O. */
	case SD_CHAIN_BUFIO:
		index = un->un_buf_chain_type;
		/*
		 * Fixed-media devices whose target block size differs from
		 * DEV_BSIZE (or with forced RMW) may need the multi-sector
		 * read-modify-write chain for misaligned requests.
		 */
		if ((!un->un_f_has_removable_media) &&
		    (un->un_tgt_blocksize != 0) &&
		    (un->un_tgt_blocksize != DEV_BSIZE ||
		    un->un_f_enable_rmw)) {
			int secmask = 0, blknomask = 0;
			if (un->un_f_enable_rmw) {
				blknomask =
				    (un->un_phy_blocksize / DEV_BSIZE) - 1;
				secmask = un->un_phy_blocksize - 1;
			} else {
				blknomask =
				    (un->un_tgt_blocksize / DEV_BSIZE) - 1;
				secmask = un->un_tgt_blocksize - 1;
			}
			/* Misaligned start block or byte count? */
			if ((bp->b_lblkno & (blknomask)) ||
			    (bp->b_bcount & (secmask))) {
				if ((un->un_f_rmw_type !=
				    SD_RMW_TYPE_RETURN_ERROR) ||
				    un->un_f_enable_rmw) {
					if (un->un_f_pm_is_enabled == FALSE)
						index =
						    SD_CHAIN_INFO_MSS_DSK_NO_PM;
					else
						index =
						    SD_CHAIN_INFO_MSS_DISK;
				}
			}
		}
		break;
	case SD_CHAIN_USCSI:
		index = un->un_uscsi_chain_type;
		break;
	case SD_CHAIN_DIRECT:
		index = un->un_direct_chain_type;
		break;
	case SD_CHAIN_DIRECT_PRIORITY:
		index = un->un_priority_chain_type;
		break;
	default:
		/* Programming error: no valid chain, so no valid index. */
		panic("sd_xbuf_init: illegal chain type!");
	}

	xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
	xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;

	/* Reset all per-attempt state for a fresh submission. */
	xp->xb_pkt_flags = 0;
	xp->xb_dma_resid = 0;
	xp->xb_retry_count = 0;
	xp->xb_victim_retry_count = 0;
	xp->xb_ua_retry_count = 0;
	xp->xb_nr_retry_count = 0;
	xp->xb_sense_bp = NULL;
	xp->xb_sense_status = 0;
	xp->xb_sense_state = 0;
	xp->xb_sense_resid = 0;
	xp->xb_ena = 0;

	bp->b_private = xp;
	bp->b_flags &= ~(B_DONE | B_ERROR);
	bp->b_resid = 0;
	bp->av_forw = NULL;
	bp->av_back = NULL;
	bioerror(bp, 0);

	SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n");
}
/*
 * sd_uscsi_strategy - strategy routine for USCSI commands.
 *
 * Allocates an xbuf (sized for the request's sense-buffer length), selects
 * the chain implied by the caller's path flags, and starts the command.
 * Always returns 0; failures are delivered via bioerror()/biodone().
 */
static int
sd_uscsi_strategy(struct buf *bp)
{
	struct sd_lun *un;
	struct sd_uscsi_info *uip;
	struct sd_xbuf *xp;
	uchar_t chain_type;
	uchar_t cmd;

	ASSERT(bp != NULL);

	un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
	if (un == NULL) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);

	/* b_private carries the sd_uscsi_info set up by the sender. */
	ASSERT(bp->b_private != NULL);
	uip = (struct sd_uscsi_info *)bp->b_private;
	cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0];

	mutex_enter(SD_MUTEX(un));
	/* ATAPI devices need the buffer mapped in (may sleep). */
	if (un->un_f_cfg_is_atapi == TRUE) {
		mutex_exit(SD_MUTEX(un));
		bp_mapin(bp);
		mutex_enter(SD_MUTEX(un));
	}
	un->un_ncmds_in_driver++;
	SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);

	/*
	 * A data-out command (other than MODE SELECT) may dirty the write
	 * cache; remember to flush it on last close.
	 */
	if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) &&
	    (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1))
		un->un_f_sync_cache_required = TRUE;

	mutex_exit(SD_MUTEX(un));

	/* Map the caller's path flag to an I/O chain. */
	switch (uip->ui_flags) {
	case SD_PATH_DIRECT:
		chain_type = SD_CHAIN_DIRECT;
		break;
	case SD_PATH_DIRECT_PRIORITY:
		chain_type = SD_CHAIN_DIRECT_PRIORITY;
		break;
	default:
		chain_type = SD_CHAIN_USCSI;
		break;
	}

	/*
	 * Size the xbuf for the request's sense length: the trailing
	 * SENSE_LENGTH bytes are grown to MAX_SENSE_LENGTH if needed.
	 */
	if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
	    SENSE_LENGTH) {
		xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
		    MAX_SENSE_LENGTH, KM_SLEEP);
	} else {
		xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
	}

	sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);

	/* Use the index obtained within xbuf_init */
	SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);

	SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);
	return (0);
}
/*
 * sd_send_scsi_cmd - send a USCSI command on behalf of a dev_t.
 *
 * Convenience wrapper: looks up the unit, wraps the command in a
 * temporary ssc context, and forwards to sd_ssc_send().
 * Returns ENXIO for an unattached unit, else sd_ssc_send()'s result.
 */
static int
sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag)
{
	struct sd_lun *un = ddi_get_soft_state(sd_state, SDUNIT(dev));
	sd_ssc_t *ssc;
	int status;

	if (un == NULL)
		return (ENXIO);

	ssc = sd_ssc_init(un);
	status = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag);
	sd_ssc_fini(ssc);

	return (status);
}
/*
 * sd_ssc_init - allocate and initialize an ssc (sense-command context).
 *
 * Allocates the context, an embedded uscsi_cmd, and an sd_uscsi_info,
 * binding them to the given unit.  Sleeps for memory; never fails.
 * The caller must release the context with sd_ssc_fini().
 */
static sd_ssc_t *
sd_ssc_init(struct sd_lun *un)
{
	sd_ssc_t *ssc;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP);
	ssc->ssc_un = un;
	ssc->ssc_uscsi_cmd = scsi_uscsi_alloc();
	ssc->ssc_uscsi_info = kmem_zalloc(sizeof (struct sd_uscsi_info),
	    KM_SLEEP);

	return (ssc);
}
/*
 * sd_ssc_fini - release an ssc context allocated by sd_ssc_init().
 *
 * Frees the embedded uscsi_cmd, the sd_uscsi_info, and the context
 * itself.  The caller must not use ssc afterwards.
 *
 * Fix: the original ended with "ssc = NULL;" — a dead store to the
 * by-value parameter with no effect on the caller; removed, and a
 * precondition ASSERT added instead.
 */
static void
sd_ssc_fini(sd_ssc_t *ssc)
{
	ASSERT(ssc != NULL);

	scsi_uscsi_free(ssc->ssc_uscsi_cmd);
	if (ssc->ssc_uscsi_info != NULL) {
		kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info));
		ssc->ssc_uscsi_info = NULL;
	}
	kmem_free(ssc, sizeof (sd_ssc_t));
}
/*
 * sd_ssc_send - issue a USCSI command through an ssc context.
 *
 * Copies in the caller's uscsi_cmd, tracks FORMAT commands via
 * un_f_format_in_progress, runs the command through the uscsi strategy
 * path, and copies results back out.  Sets SSC_FLAGS_NEED_ASSESSMENT so
 * the caller is expected to follow up with sd_ssc_assessment().
 *
 * Returns 0 on success, ECANCELED if the device is in low power and the
 * command requested PM failfast, or an errno from copyin/command
 * execution.
 */
static int
sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag,
    enum uio_seg dataspace, int path_flag)
{
	struct sd_uscsi_info *uip;
	struct uscsi_cmd *uscmd;
	struct sd_lun *un;
	dev_t dev;

	int format = 0;
	int rval;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	uscmd = ssc->ssc_uscsi_cmd;
	ASSERT(uscmd != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * A prior command on this ssc was never assessed; log it (debug)
	 * and reset the flags before reuse.
	 */
	if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
		if (uscmd->uscsi_cdb != NULL) {
			SD_INFO(SD_LOG_SDTEST, un,
			    "sd_ssc_send is missing the alternative "
			    "sd_ssc_assessment when running command 0x%x.\n",
			    uscmd->uscsi_cdb[0]);
		}
		ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
	}
	ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT;

	/* Refuse PM-failfast commands while the device is powered down. */
	mutex_enter(SD_MUTEX(un));
	mutex_enter(&un->un_pm_mutex);
	if ((uscmd->uscsi_flags & USCSI_PMFAILFAST) &&
	    SD_DEVICE_IS_IN_LOW_POWER(un)) {
		SD_TRACE(SD_LOG_IO, un, "sd_ssc_send:"
		    "un:0x%p is in low power\n", un);
		mutex_exit(&un->un_pm_mutex);
		mutex_exit(SD_MUTEX(un));
		return (ECANCELED);
	}
	mutex_exit(&un->un_pm_mutex);
	mutex_exit(SD_MUTEX(un));

#ifdef SDDEBUG
	switch (dataspace) {
	case UIO_USERSPACE:
		SD_TRACE(SD_LOG_IO, un,
		    "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un);
		break;
	case UIO_SYSSPACE:
		SD_TRACE(SD_LOG_IO, un,
		    "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un);
		break;
	default:
		SD_TRACE(SD_LOG_IO, un,
		    "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un);
		break;
	}
#endif

	/* Populate uscmd from the caller's command (may update uscmd). */
	rval = scsi_uscsi_copyin((intptr_t)incmd, flag,
	    SD_ADDRESS(un), &uscmd);
	if (rval != 0) {
		SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: "
		    "scsi_uscsi_alloc_and_copyin failed\n", un);
		return (rval);
	}

	/*
	 * Mark FORMAT UNIT in progress so label validation is skipped
	 * while the media is being rewritten (see sd_ready_and_valid).
	 */
	if ((uscmd->uscsi_cdb != NULL) &&
	    (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) {
		mutex_enter(SD_MUTEX(un));
		un->un_f_format_in_progress = TRUE;
		mutex_exit(SD_MUTEX(un));
		format = 1;
	}

	uip = ssc->ssc_uscsi_info;
	uip->ui_flags = path_flag;
	uip->ui_cmdp = uscmd;

	/* Priority-path commands bypass normal retry/recovery. */
	if (path_flag == SD_PATH_DIRECT_PRIORITY) {
		uscmd->uscsi_flags |= USCSI_DIAGNOSE;
	}
	/* NOINTR is not supported on this path. */
	uscmd->uscsi_flags &= ~USCSI_NOINTR;

	dev = SD_GET_DEV(un);
	rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd,
	    sd_uscsi_strategy, NULL, uip);

	/* Record that a command was actually issued for assessment. */
	ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED;

#ifdef SDDEBUG
	SD_INFO(SD_LOG_IO, un, "sd_ssc_send: "
	    "uscsi_status: 0x%02x  uscsi_resid:0x%x\n",
	    uscmd->uscsi_status, uscmd->uscsi_resid);
	if (uscmd->uscsi_bufaddr != NULL) {
		SD_INFO(SD_LOG_IO, un, "sd_ssc_send: "
		    "uscmd->uscsi_bufaddr: 0x%p  uscmd->uscsi_buflen:%d\n",
		    uscmd->uscsi_bufaddr, uscmd->uscsi_buflen);
		if (dataspace == UIO_SYSSPACE) {
			SD_DUMP_MEMORY(un, SD_LOG_IO,
			    "data", (uchar_t *)uscmd->uscsi_bufaddr,
			    uscmd->uscsi_buflen, SD_LOG_HEX);
		}
	}
#endif

	if (format == 1) {
		mutex_enter(SD_MUTEX(un));
		un->un_f_format_in_progress = FALSE;
		mutex_exit(SD_MUTEX(un));
	}

	/* Copy status/resid/sense back to the caller's command. */
	(void) scsi_uscsi_copyout((intptr_t)incmd, uscmd);

	return (rval);
}
/*
 * sd_ssc_print - emit a generic SCSI error message for the last command
 * run through the given ssc context, at the given severity.
 *
 * NOTE(review): only emits when the unit's FM log level is
 * SD_FM_LOG_EREPORT — presumably the non-ereport modes report through a
 * different path; confirm against the SD_FM_LOG consumers.
 */
static void
sd_ssc_print(sd_ssc_t *ssc, int sd_severity)
{
	struct uscsi_cmd *ucmdp;
	struct scsi_device *devp;
	dev_info_t *devinfo;
	uchar_t *sensep;
	int senlen;
	union scsi_cdb *cdbp;
	uchar_t com;
	extern struct scsi_key_strings scsi_cmds[];

	ASSERT(ssc != NULL);
	ASSERT(ssc->ssc_un != NULL);

	if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT)
		return;
	ucmdp = ssc->ssc_uscsi_cmd;
	devp = SD_SCSI_DEVP(ssc->ssc_un);
	devinfo = SD_DEVINFO(ssc->ssc_un);
	ASSERT(ucmdp != NULL);
	ASSERT(devp != NULL);
	ASSERT(devinfo != NULL);
	/* Valid sense bytes = requested length minus residual. */
	sensep = (uint8_t *)ucmdp->uscsi_rqbuf;
	senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid;
	cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb;

	/* Nothing to report without a CDB. */
	if (cdbp == NULL)
		return;
	/* A zero-length sense buffer means no sense data was returned. */
	if (senlen == 0)
		sensep = NULL;
	com = cdbp->scc_cmd;
	scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com,
	    scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL);
}
/*
 * sd_ssc_assessment - record the caller's judgement of the last command
 * sent through this ssc context and post FM telemetry accordingly.
 *
 * Pairs one-to-one with sd_ssc_send(): the NEED_ASSESSMENT flag set by
 * sd_ssc_send() is consumed here, and mismatched pairings are logged
 * (debug builds) and reset.
 */
static void
sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess)
{
	int senlen = 0;
	struct uscsi_cmd *ucmdp = NULL;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ucmdp = ssc->ssc_uscsi_cmd;
	ASSERT(ucmdp != NULL);

	/* Consume the pending-assessment marker, or report the mismatch. */
	if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
		ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT;
	} else {
		if (ucmdp->uscsi_cdb != NULL) {
			SD_INFO(SD_LOG_SDTEST, un,
			    "sd_ssc_assessment is missing the "
			    "alternative sd_ssc_send when running 0x%x, "
			    "or there are superfluous sd_ssc_assessment for "
			    "the same sd_ssc_send.\n",
			    ucmdp->uscsi_cdb[0]);
		}
		ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
		return;
	}

	/*
	 * No command was actually issued (e.g. copyin failed); just print
	 * informationally.
	 */
	if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) {
		sd_ssc_print(ssc, SCSI_ERR_INFO);
		return;
	} else {
		ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED;
	}

	/* Diagnostic (USCSI_DIAGNOSE) commands are not assessed. */
	if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) {
		ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
		return;
	}

	switch (tp_assess) {
	case SD_FMT_IGNORE:
	case SD_FMT_IGNORE_COMPROMISE:
		break;
	case SD_FMT_STATUS_CHECK:
		/* Command failed in a way the caller considers fatal. */
		sd_ssc_post(ssc, SD_FM_DRV_FATAL);
		break;
	case SD_FMT_STANDARD:
		/*
		 * Post a notice only if valid auto-request sense data
		 * actually accompanied the command.
		 */
		senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen -
		    ssc->ssc_uscsi_cmd->uscsi_rqresid;
		if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) &&
		    (un->un_f_arq_enabled == TRUE) &&
		    senlen > 0 &&
		    ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) {
			sd_ssc_post(ssc, SD_FM_DRV_NOTICE);
		}
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
		    "sd_ssc_assessment got wrong "
		    "sd_type_assessment %d.\n", tp_assess);
		break;
	}

	ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
}
/*
 * sd_ssc_post - translate a driver assessment into a message severity,
 * print the error, and post the corresponding FM ereport.
 *
 * CD-ROMs and removable-media units are excluded: their flags are simply
 * reset and nothing is posted.
 */
static void
sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess)
{
	struct sd_lun *un;
	int severity;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	if (ISCD(un) || un->un_f_has_removable_media) {
		ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
		return;
	}

	/* Map the driver assessment onto a scsi_log severity. */
	if (sd_assess == SD_FM_DRV_FATAL)
		severity = SCSI_ERR_FATAL;
	else if (sd_assess == SD_FM_DRV_RECOVERY)
		severity = SCSI_ERR_RECOVERED;
	else if (sd_assess == SD_FM_DRV_RETRY)
		severity = SCSI_ERR_RETRYABLE;
	else if (sd_assess == SD_FM_DRV_NOTICE)
		severity = SCSI_ERR_INFO;
	else
		severity = SCSI_ERR_UNKNOWN;

	sd_ssc_print(ssc, severity);
	sd_ssc_ereport_post(ssc, sd_assess);
}
/*
 * sd_ssc_set_info - record additional flags and a formatted message in
 * the ssc context, and (for invalid-data conditions on units whose FM
 * logging is unsupported) log the message immediately.
 *
 * comp > 0 selects the SD_ERROR debug component; comp == 0 logs via
 * scsi_log; comp < 0 suppresses immediate logging.
 *
 * Fix: ssc_info was previously passed as the *format* argument to
 * SD_ERROR/scsi_log; since it is itself rendered from arbitrary format
 * arguments, any '%' in the text would be re-interpreted.  Pass it
 * through a literal "%s" format instead.
 */
static void
sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...)
{
	va_list ap;

	ASSERT(ssc != NULL);
	ASSERT(ssc->ssc_un != NULL);

	ssc->ssc_flags |= ssc_flags;
	va_start(ap, fmt);
	(void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap);
	va_end(ap);

	if (ssc_flags & SSC_FLAGS_INVALID_DATA) {
		if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) {
			if (comp > 0) {
				SD_ERROR(comp, ssc->ssc_un, "%s",
				    ssc->ssc_info);
			} else if (comp == 0) {
				scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label,
				    CE_WARN, "%s", ssc->ssc_info);
			}
		}
	}
}
/*
 * sd_buf_iodone - terminal iodone stage for buf(9S) I/O.
 *
 * When the xbuf layer confirms this buf's processing is complete,
 * updates the PM idle timestamp and the in-driver command count, then
 * completes the buf with biodone().
 */
static void
sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n");

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/* Only account completion once the xbuf layer releases the buf. */
	if (ddi_xbuf_done(bp, un->un_xbuf_attr)) {
		mutex_enter(SD_MUTEX(un));
		/* Mark the device idle for power management. */
		un->un_pm_idle_time = gethrtime();
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		SD_INFO(SD_LOG_IO, un,
		    "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
		    un->un_ncmds_in_driver);
		mutex_exit(SD_MUTEX(un));
	}

	biodone(bp);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
}
/*
 * sd_uscsi_iodone - terminal iodone stage for USCSI commands.
 *
 * Restores the caller's b_private, updates PM/command accounting, frees
 * the xbuf allocated in sd_uscsi_strategy() (matching its variable
 * sense-buffer sizing), and completes the buf.
 */
static void
sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");

	/* Put back the sd_uscsi_info the strategy routine consumed. */
	bp->b_private = xp->xb_private;

	mutex_enter(SD_MUTEX(un));
	/* Mark the device idle for power management. */
	un->un_pm_idle_time = gethrtime();
	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
	    un->un_ncmds_in_driver);
	mutex_exit(SD_MUTEX(un));

	/* Free size must mirror the allocation in sd_uscsi_strategy(). */
	if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen >
	    SENSE_LENGTH) {
		kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH +
		    MAX_SENSE_LENGTH);
	} else {
		kmem_free(xp, sizeof (struct sd_xbuf));
	}

	biodone(bp);

	SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
}
/*
 * sd_mapblockaddr_iostart - iostart stage that maps a partition-relative
 * block address to an absolute target address.
 *
 * Validates the label (re-probing if necessary), checks the request
 * against the partition bounds, converts system-block addresses to
 * target-block addresses for aligned fixed-blocksize I/O, and clones the
 * buf for transfers that run past the end of the partition so only the
 * in-range portion proceeds down the chain.
 */
static void
sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	diskaddr_t nblocks;
	daddr_t blocknum;
	size_t requested_nblocks;
	size_t available_nblocks;
	int partition;
	diskaddr_t partition_offset;
	struct sd_xbuf *xp;
	int secmask = 0, blknomask = 0;
	ushort_t is_aligned = TRUE;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	partition = SDPART(bp->b_edev);

	/* No valid label: try to make the unit ready/valid first. */
	if (!SD_IS_VALID_LABEL(un)) {
		sd_ssc_t *ssc;
		ssc = sd_ssc_init(un);
		if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) {
			if (!un->un_f_has_removable_media) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "i/o to invalid geometry\n");
			}
			bioerror(bp, EIO);
			bp->b_resid = bp->b_bcount;
			SD_BEGIN_IODONE(index, un, bp);
			sd_ssc_fini(ssc);
			return;
		}
		sd_ssc_fini(ssc);
	}

	nblocks = 0;
	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
	    &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT);

	/* Alignment masks in DEV_BSIZE units and in bytes, respectively. */
	if (un->un_f_enable_rmw) {
		blknomask = (un->un_phy_blocksize / DEV_BSIZE) - 1;
		secmask = un->un_phy_blocksize - 1;
	} else {
		blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
		secmask = un->un_tgt_blocksize - 1;
	}

	if ((bp->b_lblkno & (blknomask)) || (bp->b_bcount & (secmask))) {
		is_aligned = FALSE;
	}

	if (!(NOT_DEVBSIZE(un)) || un->un_f_enable_rmw) {
		/*
		 * Fixed-blocksize (or forced-RMW) device: aligned requests
		 * are converted to target blocks here; misaligned writes
		 * either fail (RETURN_ERROR policy) or go through RMW with
		 * a rate-limited performance warning.
		 */
		if (is_aligned) {
			xp->xb_blkno = SD_SYS2TGTBLOCK(un, xp->xb_blkno);
		} else {
			/* Misaligned reads are always allowed through. */
			if (bp->b_flags & B_READ) {
			} else if (!un->un_f_enable_rmw &&
			    un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) {
				bp->b_flags |= B_ERROR;
				goto error_exit;
			} else if (un->un_f_rmw_type == SD_RMW_TYPE_DEFAULT) {
				mutex_enter(SD_MUTEX(un));
				/*
				 * Print the RMW warning at most once per
				 * timeout window; otherwise just count.
				 */
				if (!un->un_f_enable_rmw &&
				    un->un_rmw_msg_timeid == NULL) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "I/O request is not "
					    "aligned with %d disk sector size. "
					    "It is handled through Read Modify "
					    "Write but the performance is "
					    "very low.\n",
					    un->un_tgt_blocksize);
					un->un_rmw_msg_timeid =
					    timeout(sd_rmw_msg_print_handler,
					    un, SD_RMW_MSG_PRINT_TIMEOUT);
				} else {
					un->un_rmw_incre_count ++;
				}
				mutex_exit(SD_MUTEX(un));
			}

			/*
			 * For misaligned requests the partition bounds are
			 * converted to system blocks so the range check
			 * below compares like units.
			 */
			nblocks = SD_TGT2SYSBLOCK(un, nblocks);
			partition_offset = SD_TGT2SYSBLOCK(un,
			    partition_offset);
		}
	}

	blocknum = xp->xb_blkno;

	/*
	 * Request starting exactly at the end of the partition: EOF
	 * convention — full residual, no error.
	 */
	if (blocknum == nblocks) {
		goto error_exit;
	}

	/* Out-of-range start or non-DEV_BSIZE-multiple byte count. */
	if ((blocknum < 0) || (blocknum >= nblocks) ||
	    ((bp->b_bcount & (DEV_BSIZE - 1)) != 0)) {
		bp->b_flags |= B_ERROR;
		goto error_exit;
	}

	/* Requested length in the same block units as nblocks above. */
	if ((!NOT_DEVBSIZE(un)) && is_aligned) {
		requested_nblocks = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
	} else {
		requested_nblocks = SD_BYTES2SYSBLOCKS(bp->b_bcount);
	}

	available_nblocks = (size_t)(nblocks - blocknum);
	ASSERT(nblocks >= blocknum);

	if (requested_nblocks > available_nblocks) {
		/*
		 * Transfer runs past the end of the partition: clone the
		 * buf down to the in-range byte count; the original is
		 * completed (with the residual) by the iodone stage.
		 */
		size_t resid;

		if ((!NOT_DEVBSIZE(un)) && is_aligned) {
			resid = SD_TGTBLOCKS2BYTES(un,
			    (offset_t)(requested_nblocks - available_nblocks));
		} else {
			resid = SD_SYSBLOCKS2BYTES(
			    (offset_t)(requested_nblocks - available_nblocks));
		}

		size_t count = bp->b_bcount - resid;

		ASSERT(bp->b_bcount >= resid);
		bp = sd_bioclone_alloc(bp, count, blocknum,
		    (int (*)(struct buf *))(uintptr_t)sd_mapblockaddr_iodone);
		xp = SD_GET_XBUF(bp);
		ASSERT(xp != NULL);
	}

	ASSERT(bp->b_resid == 0);

	/* Translate to an absolute (whole-device) block address. */
	xp->xb_blkno += partition_offset;

	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp);
	return;

error_exit:
	bp->b_resid = bp->b_bcount;
	SD_BEGIN_IODONE(index, un, bp);
	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp);
}
/*
 * sd_mapblockaddr_iodone - iodone counterpart of sd_mapblockaddr_iostart.
 *
 * If the buf was cloned (partial in-partition transfer), propagates the
 * residual and error to the original buf, frees the clone, and continues
 * the iodone chain with the original.
 */
static void
sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp);

	/*
	 * A b_iodone of sd_mapblockaddr_iodone identifies a clone created
	 * by sd_bioclone_alloc() in the iostart stage.
	 */
	if ((uintptr_t)bp->b_iodone == (uintptr_t)sd_mapblockaddr_iodone) {
		struct sd_xbuf *xp;
		struct buf *obp;

		xp = SD_GET_XBUF(bp);
		ASSERT(xp != NULL);
		/* The clone's xb_private holds the original buf. */
		obp = (struct buf *)xp->xb_private;
		ASSERT(obp != NULL);

		/* Original resid = untransferred bytes of the original. */
		obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid);
		bioerror(obp, bp->b_error);

		sd_bioclone_free(bp);
		bp = obp;
	}

	SD_NEXT_IODONE(index, un, bp);

	SD_TRACE(SD_LOG_IO_PARTITION, un,
	    "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp);
}
/*
 *    Function: sd_mapblocksize_iostart
 *
 * Description: Convert between system block size (DEV_BSIZE) and target
 *		block size when they differ (or when read-modify-write is
 *		forced via un_f_enable_rmw). For requests not aligned to the
 *		target (or physical) block size, allocate a shadow buf for a
 *		READ of the covering target blocks; the completion side
 *		(sd_mapblocksize_iodone) then copies data and, for writes,
 *		re-dispatches the modified blocks.
 *
 *   Arguments: index - index into the iostart chain for this layer
 *		un    - softstate for the target
 *		bp    - buf to process
 */
static void
sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_mapblocksize_info	*bsp;
	struct sd_xbuf			*xp;
	offset_t first_byte;
	daddr_t	start_block, end_block;
	daddr_t	request_bytes;
	ushort_t is_aligned = FALSE;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);

	/*
	 * For a non-writable CD, a write request is an error: the RMW read
	 * phase cannot be followed by a write.
	 */
	if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
	    (un->un_f_mmc_writable_media == FALSE)) {
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	/*
	 * No translation needed when target blocksize already matches the
	 * system blocksize (and RMW is not forced), or for a zero-length
	 * request.
	 */
	if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
	    (bp->b_bcount == 0)) {
		goto done;
	}

#if defined(__x86)
	/* We do not support non-block-aligned transfers for ROD devices */
	ASSERT(!ISROD(un));
#endif

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
	    un->un_tgt_blocksize, DEV_BSIZE);
	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "request start block:0x%x\n", xp->xb_blkno);
	SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
	    "request len:0x%x\n", bp->b_bcount);

	/*
	 * Allocate per-request bookkeeping and chain it in front of any
	 * existing xb_private (restored by the iodone side).
	 */
	bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
	bsp->mbs_oprivate = xp->xb_private;
	xp->xb_private = bsp;

	/*
	 * Compute the covering [start_block, end_block) range, rounding the
	 * start down and the end up to a block boundary. With RMW enabled
	 * the physical blocksize governs the rounding; xb_blkno stays in
	 * system-block units in that case, otherwise it becomes a target
	 * block number.
	 */
	first_byte = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
	if (un->un_f_enable_rmw) {
		start_block = xp->xb_blkno =
		    (first_byte / un->un_phy_blocksize) *
		    (un->un_phy_blocksize / DEV_BSIZE);
		end_block = ((first_byte + bp->b_bcount +
		    un->un_phy_blocksize - 1) / un->un_phy_blocksize) *
		    (un->un_phy_blocksize / DEV_BSIZE);
	} else {
		start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
		end_block = (first_byte + bp->b_bcount +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;
	}

	/* Number of bytes the covering request will transfer */
	request_bytes = (end_block - start_block) * un->un_tgt_blocksize;

	/* The request is aligned iff both offset and length are on blocks */
	if (un->un_f_enable_rmw) {
		if (((first_byte % un->un_phy_blocksize) == 0) &&
		    ((bp->b_bcount % un->un_phy_blocksize) == 0)) {
			is_aligned = TRUE;
		}
	} else {
		if (((first_byte % un->un_tgt_blocksize) == 0) &&
		    ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
			is_aligned = TRUE;
		}
	}

	/*
	 * Writes take a range lock so overlapping RMW sequences cannot
	 * interleave; unaligned writes use the full RMW lock type.
	 */
	if ((bp->b_flags & B_READ) == 0) {
		bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
		    (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
	}

	if (is_aligned == FALSE) {
		/*
		 * Unaligned: issue a READ of the covering blocks through a
		 * shadow buf. The shadow gets its own bookkeeping struct;
		 * the wmap ownership moves to the shadow so the iodone side
		 * (running on the shadow) can unlock it.
		 */
		struct sd_mapblocksize_info	*shadow_bsp;
		struct sd_xbuf	*shadow_xp;
		struct buf	*shadow_bp;

		shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
		    xp->xb_blkno,
		    (int (*)(struct buf *))(uintptr_t)sd_mapblocksize_iodone);

		shadow_xp = SD_GET_XBUF(shadow_bp);

		shadow_xp->xb_private = shadow_bsp =
		    kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);

		/*
		 * Byte offset of the caller's data within the covering
		 * block range; must be inside the first block.
		 */
		if (un->un_f_enable_rmw) {
			bsp->mbs_copy_offset = (ssize_t)(first_byte -
			    ((offset_t)xp->xb_blkno * un->un_sys_blocksize));
			ASSERT((bsp->mbs_copy_offset >= 0) &&
			    (bsp->mbs_copy_offset < un->un_phy_blocksize));
		} else {
			bsp->mbs_copy_offset = (ssize_t)(first_byte -
			    ((offset_t)xp->xb_blkno * un->un_tgt_blocksize));
			ASSERT((bsp->mbs_copy_offset >= 0) &&
			    (bsp->mbs_copy_offset < un->un_tgt_blocksize));
		}

		shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset;
		shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index;
		shadow_bsp->mbs_wmp = bsp->mbs_wmp;
		bsp->mbs_wmp = NULL;
		shadow_bsp->mbs_orig_bp = bp;

		/* Transport the shadow instead of the original */
		bp = shadow_bp;
	}

	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno);
	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: tgt request len:0x%x\n",
	    request_bytes);
	SD_INFO(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp);

done:
	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp);
}
/*
 *    Function: sd_mapblocksize_iodone
 *
 * Description: Completion side of sd_mapblocksize_iostart(). For shadow
 *		bufs (b_iodone == this function): on a completed READ, copy
 *		the caller's slice out of the shadow into the original buf;
 *		for the READ phase of a read-modify-write, copy the caller's
 *		data INTO the shadow, flip it to a WRITE, and redispatch it
 *		via the sd_wmr taskq. For non-shadow (aligned) bufs, just
 *		unhook the bookkeeping struct and continue the chain.
 *
 *   Arguments: index - index into the iodone chain for this layer
 *		un    - softstate for the target
 *		bp    - buf being completed (possibly a shadow)
 */
static void
sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_mapblocksize_info	*bsp;
	struct sd_xbuf	*xp;
	struct sd_xbuf	*orig_xp;	/* sd_xbuf for the original buf */
	struct buf	*orig_bp;	/* ptr to the original buf */
	offset_t	shadow_end;
	offset_t	request_end;
	offset_t	shadow_start;
	ssize_t		copy_offset;
	size_t		copy_length;
	size_t		shortfall;
	uint_t		is_write;	/* TRUE if this is a WRITE request */
	uint_t		has_wmap;	/* TRUE if this request has a wmap */

	ASSERT(un != NULL);
	ASSERT(bp != NULL);

	SD_TRACE(SD_LOG_IO_RMMEDIA, un,
	    "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp);

	/* Nothing was done on the iostart side for these cases */
	if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
	    (bp->b_bcount == 0)) {
		goto exit;
	}

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/* xb_private holds the bookkeeping struct installed at iostart */
	bsp = xp->xb_private;

	is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE;
	has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE;

	if (is_write) {
		/*
		 * A completed WRITE releases its range lock; either the
		 * aligned SIMPLE lock or the RMW lock held by the write
		 * phase of a read-modify-write.
		 */
		sd_range_unlock(un, bsp->mbs_wmp);
		bsp->mbs_wmp = NULL;
	}

	if ((uintptr_t)bp->b_iodone != (uintptr_t)sd_mapblocksize_iodone) {
		/* Not a shadow buf: aligned request, just clean up */
		goto done;
	}

	orig_bp = bsp->mbs_orig_bp;
	ASSERT(orig_bp != NULL);
	orig_xp = SD_GET_XBUF(orig_bp);
	ASSERT(orig_xp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (!is_write && has_wmap) {
		/*
		 * READ phase of a read-modify-write failed (or was short):
		 * do not proceed to the write phase. Fail the original
		 * request and release the RMW range lock.
		 */
		if ((bp->b_resid != 0) || (bp->b_error != 0)) {
			orig_bp->b_resid = orig_bp->b_bcount;
			bioerror(orig_bp, bp->b_error);
			sd_range_unlock(un, bsp->mbs_wmp);
			goto freebuf_done;	/* Still need to free up resources */
		}
	}

	/*
	 * Compute how much of the caller's request the shadow transfer
	 * actually covered, in byte offsets from the start of the device.
	 * Under forced RMW, xb_blkno is in system blocks; otherwise it is
	 * in target blocks.
	 */
	if (un->un_f_enable_rmw) {
		shadow_start = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
	} else {
		shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
	}
	shadow_end = shadow_start + bp->b_bcount - bp->b_resid;

	/* Offset of the caller's data within the shadow buffer */
	copy_offset = bsp->mbs_copy_offset;
	if (un->un_f_enable_rmw) {
		ASSERT((copy_offset >= 0) &&
		    (copy_offset < un->un_phy_blocksize));
	} else {
		ASSERT((copy_offset >= 0) &&
		    (copy_offset < un->un_tgt_blocksize));
	}

	copy_length = orig_bp->b_bcount;
	request_end = shadow_start + copy_offset + orig_bp->b_bcount;

	/*
	 * Set resid on the original based on any shortfall of the shadow
	 * transfer, clamping to b_bcount, and shorten the copy accordingly.
	 */
	if (shadow_end >= request_end) {
		/* Whole request satisfied */
		orig_bp->b_resid = 0;
	} else {
		shortfall = (size_t)(request_end - shadow_end);
		if (shortfall > orig_bp->b_bcount) {
			/* Shadow transfer ended before the request began */
			orig_bp->b_resid = orig_bp->b_bcount;
		} else {
			orig_bp->b_resid = shortfall;
		}
		ASSERT(copy_length >= orig_bp->b_resid);
		copy_length -= orig_bp->b_resid;
	}

	/* Propagate the error status (if any) to the original buf */
	bioerror(orig_bp, bp->b_error);

	if (is_write) {
		goto freebuf_done;	/* No data copying for a WRITE */
	}

	if (has_wmap) {
		/*
		 * READ phase of a read-modify-write: overlay the caller's
		 * data onto the freshly-read blocks, convert the shadow to
		 * a WRITE, and hand it to the read-modify-write taskq for
		 * redispatch. If the dispatch fails, fail the original.
		 */
		bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset,
		    copy_length);

		bp->b_flags &= ~((int)B_READ);	/* now a WRITE request */

		if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp,
		    KM_NOSLEEP) != TASKQID_INVALID) {
			/* Dispatched; completion resumes when WRITE finishes */
			return;
		}
		bioerror(orig_bp, EIO);
		orig_bp->b_resid = orig_bp->b_bcount;
	} else {
		/* Plain READ: copy the caller's slice out of the shadow */
		bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr,
		    copy_length);
	}

freebuf_done:
	/* Free the shadow buf and its bookkeeping; resume with original */
	sd_shadow_buf_free(bp);
	kmem_free(bsp, sizeof (struct sd_mapblocksize_info));
	bp = orig_bp;
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	ASSERT(xp == orig_xp);
	bsp = xp->xb_private;
	ASSERT(bsp != NULL);

done:
	/* Restore xb_private saved at iostart and free the bookkeeping */
	xp->xb_private = bsp->mbs_oprivate;
	kmem_free(bsp, sizeof (struct sd_mapblocksize_info));

exit:
	SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp),
	    "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp);

	SD_NEXT_IODONE(index, un, bp);
}
/*
 *    Function: sd_checksum_iostart
 *
 * Description: Placeholder iostart layer for checksumming of buf(9S) I/O;
 *		currently a pure passthrough to the next layer in the chain.
 */
static void
sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IOSTART(index, un, bp);
}
/*
 *    Function: sd_checksum_iodone
 *
 * Description: Placeholder iodone layer for checksumming of buf(9S) I/O;
 *		currently a pure passthrough to the next layer in the chain.
 */
static void
sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IODONE(index, un, bp);
}
/*
 *    Function: sd_checksum_uscsi_iostart
 *
 * Description: Placeholder iostart layer for checksumming of USCSI
 *		commands; currently a pure passthrough to the next layer.
 */
static void
sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IOSTART(index, un, bp);
}
/*
 *    Function: sd_checksum_uscsi_iodone
 *
 * Description: Placeholder iodone layer for checksumming of USCSI
 *		commands; currently a pure passthrough to the next layer.
 */
static void
sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	SD_NEXT_IODONE(index, un, bp);
}
/*
 *    Function: sd_pm_iostart
 *
 * Description: Power-management iostart layer. Ensures the device is
 *		powered up (via sd_pm_entry()) before passing the command
 *		down the chain; if power-up fails, the command is failed
 *		with EIO and completed through the iodone chain.
 */
static void
sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");

	if (sd_pm_entry(un) != DDI_SUCCESS) {
		/*
		 * Could not power up the device; fail the command and run
		 * it through the completion chain.
		 */
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
		return;
	}

	SD_NEXT_IOSTART(index, un, bp);

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
}
/*
 *    Function: sd_pm_iodone
 *
 * Description: Power-management iodone layer. Drops the PM busy reference
 *		taken by sd_pm_iostart() (only when PM is enabled for this
 *		unit) and continues the iodone chain.
 */
static void
sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(&un->un_pm_mutex));

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");

	/*
	 * After attach the following flag is only read, so don't
	 * take the penalty of acquiring a mutex for it.
	 */
	if (un->un_f_pm_is_enabled == TRUE) {
		sd_pm_exit(un);
	}

	SD_NEXT_IODONE(index, un, bp);

	SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
}
/*
 *    Function: sd_core_iostart
 *
 * Description: Terminal iostart layer: hands the buf to the core transport
 *		machinery. Direct-priority commands bypass the wait queue;
 *		everything else is queued (with disksort) and the queue is
 *		kicked. Failfast-flagged bufs are failed immediately with
 *		EIO while the unit is in the active failfast state.
 */
static void
sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);

	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	mutex_enter(SD_MUTEX(un));

	/*
	 * If we are currently in the failfast state, fail any new IO
	 * that has B_FAILFAST set, then return.
	 */
	if ((bp->b_flags & B_FAILFAST) &&
	    (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
		mutex_exit(SD_MUTEX(un));
		bioerror(bp, EIO);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	if (SD_IS_DIRECT_PRIORITY(xp)) {
		/*
		 * Priority command: transport it immediately, bypassing
		 * the wait queue (no waitq kstat update either).
		 */
		sd_start_cmds(un, bp);
	} else {
		/* Normal command: queue it and kick queue processing */
		sd_add_buf_to_waitq(un, bp);
		SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
		sd_start_cmds(un, NULL);
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp);
}
/*
 *    Function: sd_init_cdb_limits
 *
 * Description: Establish the min/max CDB group this unit may use, based on
 *		device class and the HBA's reported "max-cdb-length"
 *		capability, plus the status length and default timeouts.
 */
static void
sd_init_cdb_limits(struct sd_lun *un)
{
	int hba_cdb_limit;

	/*
	 * Use Group 1 (10-byte) CDBs by default; plain parallel-SCSI
	 * fixed disks may also use Group 0 (6-byte) CDBs.
	 */
	un->un_mincdb = SD_CDB_GROUP1;
	if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) &&
	    !un->un_f_has_removable_media) {
		un->un_mincdb = SD_CDB_GROUP0;
	}

	/*
	 * Map the HBA's max CDB length to a CDB group limit. If the HBA
	 * does not report one (<= 0), assume the largest is supported.
	 * NOTE: CDB groups ordered by byte size are
	 * GROUP0 (6) < GROUP1 (10) < GROUP5 (12) < GROUP4 (16), which is
	 * why the range checks below are not in group-number order.
	 */
	un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1);
	if (0 >= un->un_max_hba_cdb) {
		un->un_max_hba_cdb = CDB_GROUP4;
		hba_cdb_limit = SD_CDB_GROUP4;
	} else if (0 < un->un_max_hba_cdb &&
	    un->un_max_hba_cdb < CDB_GROUP1) {
		hba_cdb_limit = SD_CDB_GROUP0;
	} else if (CDB_GROUP1 <= un->un_max_hba_cdb &&
	    un->un_max_hba_cdb < CDB_GROUP5) {
		hba_cdb_limit = SD_CDB_GROUP1;
	} else if (CDB_GROUP5 <= un->un_max_hba_cdb &&
	    un->un_max_hba_cdb < CDB_GROUP4) {
		hba_cdb_limit = SD_CDB_GROUP5;
	} else {
		hba_cdb_limit = SD_CDB_GROUP4;
	}

	/*
	 * Removable media may require Group 5 CDBs; otherwise cap at the
	 * smaller of the HBA limit and Group 4.
	 */
	un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
	    min(hba_cdb_limit, SD_CDB_GROUP4);

	/* Status length depends on whether auto request sense is enabled */
	un->un_status_len = (int)((un->un_f_arq_enabled == TRUE)
	    ? sizeof (struct scsi_arq_status) : 1);

	/*
	 * Default command timeout. For CDs, un_cmd_timeout is presumably
	 * established elsewhere (NOTE(review): confirm against the caller),
	 * so it is only set here for non-CD devices; USCSI commands on CDs
	 * get double the command timeout.
	 */
	if (!ISCD(un))
		un->un_cmd_timeout = (ushort_t)sd_io_time;
	un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout;
}
/*
 *    Function: sd_initpkt_for_buf
 *
 * Description: Allocate and initialize a scsi_pkt for a buf(9S) read/write
 *		request via sd_setup_rw_pkt(). Drops SD_MUTEX while calling
 *		into the transport allocation path and reacquires it before
 *		returning.
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *		SD_PKT_ALLOC_FAILURE - transient; unit is put in RWAIT state
 *		SD_PKT_ALLOC_FAILURE_NO_DMA - DMA mapping failed (B_ERROR set)
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL - request cannot be
 *			expressed in any allowed CDB group (permanent)
 *
 *     Context: Called under SD_MUTEX; may block (callback is sdrunout).
 */
static int
sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct sd_xbuf	*xp;
	struct scsi_pkt *pktp = NULL;
	struct sd_lun	*un;
	size_t		blockcount;
	daddr_t		startblock;
	int		rval;
	int		cmd_flags;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp->b_resid == 0);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);

	mutex_exit(SD_MUTEX(un));

#if defined(__x86)	/* DMA resource management */
	/*
	 * If the pkt's DMA resources were freed earlier (SD_XB_DMA_FREED),
	 * reuse the existing pkt so scsi_init_pkt() only reallocates DMA.
	 */
	if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
		/*
		 * There is no scsi_pkt allocated for this buf. Call the
		 * initpkt function to allocate & init one.
		 */
		ASSERT(xp->xb_pktp != NULL);
		pktp = xp->xb_pktp;
	} else {
		pktp = NULL;
	}
#endif /* __x86 */

	startblock = xp->xb_blkno;	/* Absolute block num. */
	blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);

	cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);

	/*
	 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
	 * call scsi_init_pkt, and fill in the CDB.
	 */
	rval = sd_setup_rw_pkt(un, &pktp, bp,
	    cmd_flags, sdrunout, (caddr_t)un,
	    startblock, blockcount);

	if (rval == 0) {
		/*
		 * Success. With PKT_DMA_PARTIAL, a non-zero pkt_resid means
		 * only part of the transfer was mapped; stash the remainder
		 * in xb_dma_resid for sd_setup_next_xfer() and clear
		 * pkt_resid so completion accounting is per-chunk.
		 */
		if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
		    (pktp->pkt_resid != 0)) {
			xp->xb_dma_resid = pktp->pkt_resid;
			pktp->pkt_resid = 0;
		} else {
			xp->xb_dma_resid = 0;
		}

		pktp->pkt_flags = un->un_tagflags;
		pktp->pkt_time  = un->un_cmd_timeout;
		pktp->pkt_comp  = sdintr;

		pktp->pkt_private = bp;
		*pktpp = pktp;

		SD_TRACE(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);

#if defined(__x86)	/* DMA resource management */
		xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
#endif

		mutex_enter(SD_MUTEX(un));
		return (SD_PKT_ALLOC_SUCCESS);
	}

	/*
	 * SD_PKT_ALLOC_FAILURE is the only expected failure code here (the
	 * ASSERT documents that); on non-DEBUG kernels any other value is
	 * treated as CDB-too-small (see the else branch below).
	 */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	if (rval == SD_PKT_ALLOC_FAILURE) {
		/*
		 * Allocation failed; put the unit in RWAIT so the command
		 * is retried when resources become available (sdrunout).
		 * B_ERROR set by the transport indicates a DMA mapping
		 * failure rather than resource exhaustion.
		 */
		*pktpp = NULL;
		mutex_enter(SD_MUTEX(un));
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);

		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	} else {
		/*
		 * Request too large for any CDB group the unit allows;
		 * this is a permanent failure for this request.
		 */
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Request rejected: too large for CDB: "
		    "lba:0x%08lx len:0x%08lx\n", startblock, blockcount);
		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
		mutex_enter(SD_MUTEX(un));
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
	}
}
/*
 *    Function: sd_destroypkt_for_buf
 *
 * Description: Free the scsi_pkt(9S) associated with a buf(9S) command,
 *		along with its DMA and sense resources.
 */
static void
sd_destroypkt_for_buf(struct buf *bp)
{
	struct sd_lun	*un;
	struct scsi_pkt	*pktp;

	ASSERT(bp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);

	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);
	scsi_destroy_pkt(pktp);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
}
/*
 *    Function: sd_setup_rw_pkt
 *
 * Description: Select the smallest CDB group (between un_mincdb and
 *		un_maxcdb) that can address lba/blockcount, allocate the
 *		scsi_pkt via scsi_init_pkt(9F), and fill in the READ/WRITE
 *		CDB. With partial DMA, blockcount is trimmed to what was
 *		actually mapped (pkt_resid) before filling the CDB.
 *
 * Return Code: 0 on success;
 *		SD_PKT_ALLOC_FAILURE if scsi_init_pkt fails;
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL if no allowed CDB group
 *		can express the request.
 */
int
sd_setup_rw_pkt(struct sd_lun *un,
    struct scsi_pkt **pktpp, struct buf *bp, int flags,
    int (*callback)(caddr_t), caddr_t callback_arg,
    diskaddr_t lba, uint32_t blockcount)
{
	struct scsi_pkt *return_pktp;
	union scsi_cdb *cdbp;
	struct sd_cdbinfo *cp = NULL;
	int i;

	/*
	 * See which size CDB to use, based on the request and the unit's
	 * min/max CDB group. sd_cdbtab entries are ordered smallest group
	 * first, so the first fit is the smallest usable CDB.
	 */
	for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {

		/*
		 * Check lba and block count against sd_cdbtab limits:
		 * both the highest addressed block and the transfer length
		 * must fit in this group's fields.
		 */
		if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
		    (blockcount <= sd_cdbtab[i].sc_maxlen)) {

			cp = sd_cdbtab + i;

			/*
			 * Call scsi_init_pkt so we can fill in the
			 * CDB and target status. *pktpp may be non-NULL
			 * (pkt reuse after DMA free on x86).
			 */
			return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
			    bp, cp->sc_grpcode, un->un_status_len, 0,
			    flags, callback, callback_arg);

			if (return_pktp != NULL) {

				/*
				 * Return new value of pkt
				 */
				*pktpp = return_pktp;

				/*
				 * To be safe, zero the CDB insuring there is
				 * no leftover data from a previous command.
				 * (sc_grpcode is the CDB length in bytes.)
				 */
				bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);

				/*
				 * With partial DMA, only part of the request
				 * was mapped; shrink the block count to the
				 * mapped portion before building the CDB.
				 */
				if (return_pktp->pkt_resid != 0) {
					blockcount -=
					    SD_BYTES2TGTBLOCKS(un,
					    return_pktp->pkt_resid);
				}

				cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;

				/* Set command byte: READ/WRITE for group */
				cdbp->scc_cmd = cp->sc_grpmask |
				    ((bp->b_flags & B_READ) ?
				    SCMD_READ : SCMD_WRITE);

				SD_FILL_SCSI1_LUN(un, return_pktp);

				/*
				 * Fill in LBA and length, using the group's
				 * FORM macros.
				 */
				ASSERT((cp->sc_grpcode == CDB_GROUP1) ||
				    (cp->sc_grpcode == CDB_GROUP4) ||
				    (cp->sc_grpcode == CDB_GROUP0) ||
				    (cp->sc_grpcode == CDB_GROUP5));

				if (cp->sc_grpcode == CDB_GROUP1) {
					FORMG1ADDR(cdbp, lba);
					FORMG1COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP4) {
					FORMG4LONGADDR(cdbp, lba);
					FORMG4COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP0) {
					FORMG0ADDR(cdbp, lba);
					FORMG0COUNT(cdbp, blockcount);
					return (0);
				} else if (cp->sc_grpcode == CDB_GROUP5) {
					FORMG5ADDR(cdbp, lba);
					FORMG5COUNT(cdbp, blockcount);
					return (0);
				}

				/*
				 * It should be impossible to get here (the
				 * ASSERT above covers all groups). As a
				 * defensive measure, neutralize the CDB
				 * before returning failure.
				 */
				cdbp->scc_cmd = SCMD_TEST_UNIT_READY;
				return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
			} else {
				/* scsi_init_pkt could not allocate the pkt */
				return (SD_PKT_ALLOC_FAILURE);
			}
		}
	}

	/* No allowed CDB group can express this lba/blockcount */
	return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
}
/*
 *    Function: sd_setup_next_rw_pkt
 *
 * Description: Set up the next chunk of a partial-DMA transfer: re-invoke
 *		scsi_init_pkt(9F) on the existing pkt (cmdlen 0 means reuse
 *		the current CDB allocation) to map the next DMA window, then
 *		rewrite the LBA/count fields of the CDB in place. The CDB
 *		group is inferred from the existing command byte.
 *
 * Return Code: 0 on success;
 *		SD_PKT_ALLOC_FAILURE if scsi_init_pkt fails;
 *		SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL for an unrecognized
 *		CDB group (should be precluded by the ASSERT).
 */
int
sd_setup_next_rw_pkt(struct sd_lun *un,
    struct scsi_pkt *pktp, struct buf *bp,
    diskaddr_t lba, uint32_t blockcount)
{
	uchar_t com;
	union scsi_cdb *cdbp;
	uchar_t cdb_group_id;

	ASSERT(pktp != NULL);
	ASSERT(pktp->pkt_cdbp != NULL);

	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
	com = cdbp->scc_cmd;
	cdb_group_id = CDB_GROUPID(com);

	ASSERT((cdb_group_id == CDB_GROUPID_0) ||
	    (cdb_group_id == CDB_GROUPID_1) ||
	    (cdb_group_id == CDB_GROUPID_4) ||
	    (cdb_group_id == CDB_GROUPID_5));

	/*
	 * Move pkt to the next portion of the xfer.
	 * func is NULL_FUNC so we do not have to release the disk mutex.
	 */
	if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0,
	    NULL_FUNC, NULL) == pktp) {
		/* Success. Handle partial DMA: trim count to mapped size */
		if (pktp->pkt_resid != 0) {
			blockcount -=
			    SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
		}

		cdbp->scc_cmd = com;
		SD_FILL_SCSI1_LUN(un, pktp);
		if (cdb_group_id == CDB_GROUPID_1) {
			FORMG1ADDR(cdbp, lba);
			FORMG1COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_4) {
			FORMG4LONGADDR(cdbp, lba);
			FORMG4COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_0) {
			FORMG0ADDR(cdbp, lba);
			FORMG0COUNT(cdbp, blockcount);
			return (0);
		} else if (cdb_group_id == CDB_GROUPID_5) {
			FORMG5ADDR(cdbp, lba);
			FORMG5COUNT(cdbp, blockcount);
			return (0);
		}

		/* Unreachable given the ASSERT above */
		return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
	}

	/* New partial-DMA window allocation failed */
	return (SD_PKT_ALLOC_FAILURE);
}
/*
 *    Function: sd_initpkt_for_uscsi
 *
 * Description: Allocate and initialize a scsi_pkt for a USCSI command
 *		(xb_pktinfo holds the struct uscsi_cmd). Translates
 *		USCSI_* flags to FLAG_* pkt flags, copies in the caller's
 *		CDB, and sets the timeout. Requests with an oversized sense
 *		buffer use extended ARQ (PKT_XARQ). Partial DMA is never
 *		used for USCSI commands.
 *
 * Return Code: SD_PKT_ALLOC_SUCCESS
 *		SD_PKT_ALLOC_FAILURE - transient; unit put in RWAIT state
 *		SD_PKT_ALLOC_FAILURE_NO_DMA - DMA mapping failed
 *		SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL - transfer could not be
 *			fully mapped (partial DMA not allowed for USCSI)
 *
 *     Context: Called under SD_MUTEX.
 */
static int
sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	struct sd_lun	*un;
	uint32_t	flags = 0;

	ASSERT(bp != NULL);
	ASSERT(pktpp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* The pointer to the uscsi_cmd struct was saved in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);

	/*
	 * Allocate the scsi_pkt. A sense buffer larger than SENSE_LENGTH
	 * requires extended ARQ (PKT_XARQ) with a correspondingly larger
	 * status area. PKT_DMA_PARTIAL is masked off in both cases.
	 */
	if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
		    ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
		    - sizeof (struct scsi_extended_sense)), 0,
		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
		    sdrunout, (caddr_t)un);
	} else {
		pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
		    ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
		    sizeof (struct scsi_arq_status), 0,
		    (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
		    sdrunout, (caddr_t)un);
	}

	if (pktp == NULL) {
		/*
		 * Allocation failed: enter RWAIT so the command is retried
		 * when resources free up; B_ERROR distinguishes DMA mapping
		 * failure from resource exhaustion.
		 */
		*pktpp = NULL;
		New_state(un, SD_STATE_RWAIT);

		SD_ERROR(SD_LOG_IO_CORE, un,
		    "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);

		if ((bp->b_flags & B_ERROR) != 0) {
			return (SD_PKT_ALLOC_FAILURE_NO_DMA);
		}
		return (SD_PKT_ALLOC_FAILURE);
	}

	/*
	 * We do not do DMA breakup for USCSI commands, so a non-zero
	 * pkt_resid with data means the command cannot proceed.
	 */
	if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
	    (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
		scsi_destroy_pkt(pktp);
		SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
		    "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
		return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
	}

	/* Init the cdb from the given uscsi struct */
	(void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
	    uscmd->uscsi_cdb[0], 0, 0, 0);

	SD_FILL_SCSI1_LUN(un, pktp);

	/*
	 * Set up the optional USCSI flags. See the uscsi(4I) man page
	 * for a description of the possible flags.
	 */
	if (uscmd->uscsi_flags & USCSI_SILENT) {
		flags |= FLAG_SILENT;
	}

	if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
		flags |= FLAG_DIAGNOSE;
	}

	if (uscmd->uscsi_flags & USCSI_ISOLATE) {
		flags |= FLAG_ISOLATE;
	}

	if (un->un_f_is_fibre == FALSE) {
		if (uscmd->uscsi_flags & USCSI_RENEGOT) {
			flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
		}
	}

	/*
	 * Set the pkt flags here so we save time later.
	 */
	if (uscmd->uscsi_flags & USCSI_HEAD) {
		flags |= FLAG_HEAD;
	}

	if (uscmd->uscsi_flags & USCSI_NOINTR) {
		flags |= FLAG_NOINTR;
	}

	/*
	 * For tagged queueing, things get a bit complicated.
	 * Check first for head of queue and last for ordered queue.
	 * If neither head nor order, use the default driver tag flags.
	 */
	if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
		if (uscmd->uscsi_flags & USCSI_HTAG) {
			flags |= FLAG_HTAG;
		} else if (uscmd->uscsi_flags & USCSI_OTAG) {
			flags |= FLAG_OTAG;
		} else {
			flags |= un->un_tagflags & FLAG_TAGMASK;
		}
	}

	if (uscmd->uscsi_flags & USCSI_NODISCON) {
		flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
	}

	pktp->pkt_flags = flags;

	/* Transfer uscsi information to scsi_pkt */
	(void) scsi_uscsi_pktinit(uscmd, pktp);

	/* Copy the caller's CDB into the pkt... */
	bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);

	if (uscmd->uscsi_timeout == 0) {
		pktp->pkt_time = un->un_uscsi_timeout;
	} else {
		pktp->pkt_time = uscmd->uscsi_timeout;
	}

	/* need it later to identify uscsi request in sdintr */
	xp->xb_pkt_flags |= SD_XB_USCSICMD;

	xp->xb_sense_resid = uscmd->uscsi_rqresid;

	pktp->pkt_private = bp;
	pktp->pkt_comp = sdintr;
	*pktpp = pktp;

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);

	return (SD_PKT_ALLOC_SUCCESS);
}
/*
 *    Function: sd_destroypkt_for_uscsi
 *
 * Description: Completion cleanup for a USCSI scsi_pkt: copy status,
 *		residual, and (if requested) sense data back into the
 *		caller's uscsi_cmd, record pkt state/statistics in the
 *		sd_uscsi_info struct, and free the pkt.
 *
 *     Context: May be called under interrupt context.
 */
static void
sd_destroypkt_for_uscsi(struct buf *bp)
{
	struct uscsi_cmd *uscmd;
	struct sd_xbuf	*xp;
	struct scsi_pkt	*pktp;
	struct sd_lun	*un;
	struct sd_uscsi_info *suip;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);

	/* The pointer to the uscsi_cmd struct was saved in xb_pktinfo */
	uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
	ASSERT(uscmd != NULL);

	/* Save the status and the residual into the uscsi_cmd struct */
	uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
	uscmd->uscsi_resid  = bp->b_resid;

	/* Transfer scsi_pkt information to uscsi */
	(void) scsi_uscsi_pktfini(pktp, uscmd);

	/*
	 * If enabled, copy any saved sense data into the area specified
	 * by the uscsi command. The amount copied matches the size of
	 * the buffer allocated at pkt init time for this sense length.
	 */
	if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
	    (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
		/*
		 * Note: uscmd->uscsi_rqbuf should always point to a buffer
		 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
		 */
		uscmd->uscsi_rqstatus = xp->xb_sense_status;
		uscmd->uscsi_rqresid  = xp->xb_sense_resid;
		if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
			    MAX_SENSE_LENGTH);
		} else {
			bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
			    SENSE_LENGTH);
		}
	}

	/*
	 * The following assignments are for SCSI FMA: preserve pkt
	 * details for later fault analysis before the pkt is destroyed.
	 */
	ASSERT(xp->xb_private != NULL);
	suip = (struct sd_uscsi_info *)xp->xb_private;
	suip->ui_pkt_reason = pktp->pkt_reason;
	suip->ui_pkt_state = pktp->pkt_state;
	suip->ui_pkt_statistics = pktp->pkt_statistics;
	suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

	/* We are done with the scsi_pkt; free it now */
	ASSERT(SD_GET_PKTP(bp) != NULL);
	scsi_destroy_pkt(SD_GET_PKTP(bp));

	SD_TRACE(SD_LOG_IO_CORE, un,
	    "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
}
/*
 *    Function: sd_bioclone_alloc
 *
 * Description: Allocate a buf(9S) via bioclone(9F) that shares the given
 *		bp's data pages but covers only datalen bytes at blkno, and
 *		attach a copy of the original's sd_xbuf to it. The original
 *		bp is saved in the new xbuf's xb_private so the iodone side
 *		can find it.
 *
 *   Arguments: bp      - original buf to clone from
 *		datalen - byte count for the clone
 *		blkno   - starting block number for the clone
 *		func    - b_iodone routine for the clone
 *
 * Return Code: pointer to the newly-allocated clone buf (KM_SLEEP: never
 *		NULL).
 */
static struct buf *
sd_bioclone_alloc(struct buf *bp, size_t datalen, daddr_t blkno,
    int (*func)(struct buf *))
{
	struct	sd_lun	*un;
	struct	sd_xbuf	*xp;
	struct	sd_xbuf	*new_xp;
	struct	buf	*new_bp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
	    NULL, KM_SLEEP);

	new_bp->b_lblkno = blkno;

	/*
	 * Allocate an xbuf for the shadow bp and copy the contents of the
	 * original xbuf into it.
	 */
	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	bcopy(xp, new_xp, sizeof (struct sd_xbuf));

	/*
	 * The given bp is automatically saved in the xb_private member
	 * of the new xbuf. Callers are saved from having to do it.
	 */
	new_xp->xb_private = bp;

	new_bp->b_private  = new_xp;

	return (new_bp);
}
/*
 *    Function: sd_shadow_buf_alloc
 *
 * Description: Allocate a shadow buf with its OWN data buffer of datalen
 *		bytes (unlike sd_bioclone_alloc, which shares pages). Used
 *		by the block-size translation layer. On x86 the buffer is
 *		plain kernel memory on a raw buf; elsewhere it is a
 *		DMA-consistent buffer from scsi_alloc_consistent_buf(9F).
 *		A copy of the original's sd_xbuf is attached, with the
 *		original bp stashed in xb_private.
 *
 *   Arguments: bp      - original buf
 *		datalen - size of the shadow's data buffer
 *		bflags  - B_READ/B_WRITE direction for the shadow
 *		blkno   - starting block number for the shadow
 *		func    - b_iodone routine for the shadow
 *
 * Return Code: pointer to the new shadow buf (sleeping allocs: never NULL).
 */
static struct buf *
sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
    daddr_t blkno, int (*func)(struct buf *))
{
	struct	sd_lun	*un;
	struct	sd_xbuf	*xp;
	struct	sd_xbuf	*new_xp;
	struct	buf	*new_bp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/*
	 * Ensure the original buf's data is mapped into kernel address
	 * space so the iodone side can bcopy between the two buffers.
	 */
	if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
		bp_mapin(bp);
	}

	bflags &= (B_READ | B_WRITE);
#if defined(__x86)
	new_bp = getrbuf(KM_SLEEP);
	new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
	new_bp->b_bcount = datalen;
	new_bp->b_flags = bflags |
	    (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW));
#else
	new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
	    datalen, bflags, SLEEP_FUNC, NULL);
#endif
	new_bp->av_forw	= NULL;
	new_bp->av_back	= NULL;
	new_bp->b_dev	= bp->b_dev;
	new_bp->b_blkno	= blkno;
	new_bp->b_iodone = func;
	new_bp->b_edev	= bp->b_edev;
	new_bp->b_resid	= 0;

	/* We need to preserve the B_FAILFAST flag */
	if (bp->b_flags & B_FAILFAST) {
		new_bp->b_flags |= B_FAILFAST;
	}

	/*
	 * Allocate an xbuf for the shadow bp and copy the contents of the
	 * original xbuf into it.
	 */
	new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
	bcopy(xp, new_xp, sizeof (struct sd_xbuf));

	/* Need later to copy data between the shadow buf & original buf! */
	new_xp->xb_pkt_flags |= PKT_CONSISTENT;

	/*
	 * The given bp is automatically saved in the xb_private member
	 * of the new xbuf. Callers are saved from having to do it.
	 */
	new_xp->xb_private = bp;

	new_bp->b_private  = new_xp;

	return (new_bp);
}
/*
 *    Function: sd_bioclone_free
 *
 * Description: Free a buf allocated by sd_bioclone_alloc() and its
 *		attached sd_xbuf. bp_mapout() is called first in case the
 *		clone was mapped into kernel address space; b_iodone is
 *		cleared so freerbuf() does not see a stale callback.
 */
static void
sd_bioclone_free(struct buf *bp)
{
	struct sd_xbuf	*xp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	/*
	 * Call bp_mapout() before freeing the buf, in case a lower layer
	 * or bioclone left the buf mapped in.
	 */
	bp_mapout(bp);

	/*
	 * Null out b_iodone before freeing the bp, to ensure that the
	 * driver never gets confused by a stale value in this field.
	 */
	bp->b_iodone = NULL;

	freerbuf(bp);

	kmem_free(xp, sizeof (struct sd_xbuf));
}
/*
 *    Function: sd_shadow_buf_free
 *
 * Description: Free a shadow buf allocated by sd_shadow_buf_alloc() and
 *		its attached sd_xbuf, using the matching platform-specific
 *		free path (kmem/getrbuf on x86, consistent-buf elsewhere).
 */
static void
sd_shadow_buf_free(struct buf *bp)
{
	struct sd_xbuf	*xp;

	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

#if defined(__sparc)
	/*
	 * Call bp_mapout() before freeing the buf, in case a lower layer
	 * left the buf mapped in. (On x86 the data buffer is plain kmem,
	 * so no mapout is needed.)
	 */
	bp_mapout(bp);
#endif

	/*
	 * Null out b_iodone before freeing the bp, to ensure that the
	 * driver never gets confused by a stale value in this field.
	 */
	bp->b_iodone = NULL;

#if defined(__x86)
	kmem_free(bp->b_un.b_addr, bp->b_bcount);
	freerbuf(bp);
#else
	scsi_free_consistent_buf(bp);
#endif

	kmem_free(xp, sizeof (struct sd_xbuf));
}
/*
 *    Function: sd_print_transport_rejected_message
 *
 * Description: Log a message for a command rejected by scsi_transport().
 *		Messages are suppressed for FLAG_SILENT packets and when
 *		FMA logging is in use; repeated TRAN_FATAL_ERROR messages
 *		are only emitted on the first occurrence unless diagnostic
 *		logging (SD_LOGMASK_DIAG) is enabled.
 *
 *   Arguments: un   - softstate for the target
 *		xp   - xbuf of the rejected command
 *		code - scsi_transport() return code
 *
 *     Context: Called under SD_MUTEX.
 */
static void
sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
    int code)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(xp != NULL);

	/* Silent packets never log */
	if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) != 0)
		return;

	/* Only log via scsi_log when FMA logging is not supported */
	if (SD_FM_LOG(un) != SD_FM_LOG_NSUP)
		return;

	/*
	 * Suppress repeated fatal-error messages unless diagnostic
	 * logging is turned on.
	 */
	if (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
	    (code == TRAN_FATAL_ERROR) &&
	    (un->un_tran_fatal_count != 1))
		return;

	switch (code) {
	case TRAN_BADPKT:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "transport rejected bad packet\n");
		break;
	case TRAN_FATAL_ERROR:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "transport rejected fatal error\n");
		break;
	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "transport rejected (%d)\n", code);
		break;
	}
}
/*
 *    Function: sd_add_buf_to_waitq
 *
 * Description: Insert the given buf into the unit's wait queue. Unless
 *		disksort is disabled (or RMW is enabled), bufs are kept in
 *		a "one-way elevator" order: ascending block numbers with at
 *		most one wrap point, to minimize seeks.
 *
 *     Context: Called under SD_MUTEX.
 */
static void
sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
{
	struct buf *ap;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* If the queue is empty, add the buf as the only entry & return. */
	if (un->un_waitq_headp == NULL) {
		ASSERT(un->un_waitq_tailp == NULL);
		un->un_waitq_headp = un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	ASSERT(un->un_waitq_tailp != NULL);

	/*
	 * If sorting is disabled, just add the buf to the tail end of
	 * the wait queue and return.
	 */
	if (un->un_f_disksort_disabled || un->un_f_enable_rmw) {
		un->un_waitq_tailp->av_forw = bp;
		un->un_waitq_tailp = bp;
		bp->av_forw = NULL;
		return;
	}

	/*
	 * Sort thru the list of requests currently on the wait queue
	 * and add the new buf request at the appropriate position.
	 *
	 * The un->un_waitq_headp is an activity chain pointer on which
	 * we keep two queues, sorted in ascending block order. The first
	 * queue holds those requests which are positioned after
	 * the current SD_GET_BLKNO() (in the first request); the second holds
	 * requests which came in after their SD_GET_BLKNO() number was passed.
	 * Thus we implement a one way scan, retracting after reaching
	 * the end of the drive to the first request on the second
	 * queue, at which time it becomes the first queue.
	 * A one-way scan is natural because of the way UNIX read-ahead
	 * blocks are allocated.
	 */
	ap = un->un_waitq_headp;
	if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
		/*
		 * The new buf belongs in the second (wrapped) queue; scan
		 * forward to the wrap point, then find its sorted slot.
		 */
		while (ap->av_forw != NULL) {
			/*
			 * Look for an "inversion" in the (normally
			 * ascending) block numbers. This indicates
			 * the start of the second request list.
			 */
			if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
				/*
				 * Search the second request list for the
				 * first request at a larger block number.
				 * We go before that; however if there is
				 * no such request, we go at the end.
				 */
				do {
					if (SD_GET_BLKNO(bp) <
					    SD_GET_BLKNO(ap->av_forw)) {
						goto insert;
					}
					ap = ap->av_forw;
				} while (ap->av_forw != NULL);
				goto insert;		/* after last */
			}
			ap = ap->av_forw;
		}

		/*
		 * No inversions... we will go after the last, and
		 * be the first request in the second request list.
		 */
		goto insert;
	}

	/*
	 * Request is at/after the current request...
	 * sort in the first request list.
	 */
	while (ap->av_forw != NULL) {
		/*
		 * We want to go after the current request (1) if
		 * there is an inversion after it (i.e. it is the end
		 * of the first request list), or (2) if the next
		 * request is a larger block no. than our request.
		 */
		if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
		    (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
			goto insert;
		}
		ap = ap->av_forw;
	}

	/*
	 * Neither a second list nor a larger request, therefore
	 * we go at the end of the first list (which is the same
	 * as the end of the whole schebang).
	 */
insert:
	bp->av_forw = ap->av_forw;
	ap->av_forw = bp;

	/* Update the tail pointer if needed */
	if (ap == un->un_waitq_tailp) {
		un->un_waitq_tailp = bp;
	}
}
/*
 *    Function: sd_start_cmds
 *
 * Description: Remove and transport cmds from the driver queues.
 *
 *   Arguments: un - pointer to the unit (soft state) struct for the target.
 *
 *		immed_bp - ptr to a buf to be transported immediately. Only
 *		the immed_bp is transported; bufs on the waitq are not
 *		processed and the waitq is not touched. If immed_bp is
 *		NULL, then normal queue processing is performed.
 *
 *     Context: Called under SD_MUTEX; may be invoked from kernel thread
 *		context, interrupt context, or runout callback context.
 */
static void
sd_start_cmds(struct sd_lun *un, struct buf *immed_bp)
{
	struct	sd_xbuf	*xp;
	struct	buf	*bp;
	void	(*statp)(kstat_io_t *);
#if defined(__x86)	/* DMA resource management */
	void	(*saved_statp)(kstat_io_t *);
#endif
	int	rval;
	struct	sd_fm_internal *sfip = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_ncmds_in_transport >= 0);
	ASSERT(un->un_throttle >= 0);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n");

	do {
#if defined(__x86)	/* DMA resource management */
		saved_statp = NULL;
#endif

		/*
		 * If we are syncing or dumping, fail the command to
		 * avoid recursively calling back into scsi_transport().
		 */
		if ((un->un_state == SD_STATE_DUMPING) ||
		    (ddi_in_panic() && (un->un_in_callback > 1))) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_start_cmds: panicking\n");
			goto exit;
		}

		if ((bp = immed_bp) != NULL) {
			/*
			 * We have a bp that must be transported immediately.
			 * It's OK to transport the immed_bp here without
			 * checking the throttle limit because the immed_bp
			 * is always used in a retry/recovery case, so we
			 * know we're not at the throttle limit by virtue of
			 * the fact that to get here we must have already
			 * gotten a command back via sdintr().
			 */
			statp = kstat_runq_enter;
			if (bp == un->un_retry_bp) {
				ASSERT((un->un_retry_statp == NULL) ||
				    (un->un_retry_statp == kstat_waitq_enter) ||
				    (un->un_retry_statp ==
				    kstat_runq_back_to_waitq));
				/*
				 * If the waitq kstat was incremented when
				 * this retry bp was queued, transition the
				 * kstat from waitq to runq instead of a
				 * plain runq entry.
				 */
				if ((un->un_retry_statp == kstat_waitq_enter) ||
				    (un->un_retry_statp ==
				    kstat_runq_back_to_waitq)) {
					statp = kstat_waitq_to_runq;
				}
#if defined(__x86)	/* DMA resource management */
				saved_statp = un->un_retry_statp;
#endif
				un->un_retry_statp = NULL;

				SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
				    "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
				    "un_throttle:%d un_ncmds_in_transport:%d\n",
				    un, un->un_retry_bp, un->un_throttle,
				    un->un_ncmds_in_transport);
			} else {
				SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
				    "processing priority bp:0x%p\n", bp);
			}

		} else if ((bp = un->un_waitq_headp) != NULL) {
			/*
			 * A command on the waitq is ready to go, but do not
			 * send it if:
			 *
			 * (1) the throttle limit has been reached, or
			 * (2) a retry is pending, or
			 * (3) a START_STOP_UNIT callback pending, or
			 * (4) a callback for a SD_PATH_DIRECT_PRIORITY
			 *	command is pending.
			 *
			 * For all of these conditions, IO processing will
			 * restart after the condition is cleared.
			 */
			if (un->un_ncmds_in_transport >= un->un_throttle) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "throttle limit reached!\n");
				goto exit;
			}
			if (un->un_retry_bp != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, retry pending!\n");
				goto exit;
			}
			if (un->un_startstop_timeid != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "START_STOP pending!\n");
				goto exit;
			}
			if (un->un_direct_priority_timeid != NULL) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: exiting, "
				    "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
				goto exit;
			}

			/* Dequeue the command */
			un->un_waitq_headp = bp->av_forw;
			if (un->un_waitq_headp == NULL) {
				un->un_waitq_tailp = NULL;
			}
			bp->av_forw = NULL;
			statp = kstat_waitq_to_runq;
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_start_cmds: processing waitq bp:0x%p\n", bp);

		} else {
			/* No work to do so bail out now */
			SD_TRACE(SD_LOG_IO_CORE, un,
			    "sd_start_cmds: no more work, exiting!\n");
			goto exit;
		}

		/*
		 * Reset the state to normal. This is the mechanism by which
		 * the state transitions from either SD_STATE_RWAIT or
		 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
		 * If state is SD_STATE_PM_CHANGING then this command is
		 * part of the device power control and the state must
		 * not be put back to normal. Doing so would would
		 * allow new commands to proceed when they shouldn't,
		 * the device may be going off.
		 */
		if ((un->un_state != SD_STATE_SUSPENDED) &&
		    (un->un_state != SD_STATE_PM_CHANGING)) {
			New_state(un, SD_STATE_NORMAL);
		}

		xp = SD_GET_XBUF(bp);
		ASSERT(xp != NULL);

#if defined(__x86)	/* DMA resource management */
		/*
		 * Allocate the scsi_pkt if we need one, or attach DMA
		 * resources if we have a scsi_pkt that needs them. The
		 * latter should only occur for commands that are being
		 * retried.
		 */
		if ((xp->xb_pktp == NULL) ||
		    ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
#else
		if (xp->xb_pktp == NULL) {
#endif
			/*
			 * There is no scsi_pkt allocated for this buf. Call
			 * the initpkt function to allocate & init one.
			 *
			 * The scsi_init_pkt runout callback functionality is
			 * implemented as follows:
			 *
			 * 1) The initpkt function always calls
			 *    scsi_init_pkt(9F) with sdrunout specified as the
			 *    callback routine.
			 * 2) A successful packet allocation is initialized and
			 *    the I/O is transported.
			 * 3) The I/O associated with an allocation resource
			 *    failure is left on its queue to be retried via
			 *    runout or the next I/O.
			 */
			struct scsi_pkt	*pktp;
			int (*funcp)(struct buf *bp, struct scsi_pkt **pktp);

			ASSERT(bp != un->un_rqs_bp);

			funcp = sd_initpkt_map[xp->xb_chain_iostart];
			switch ((*funcp)(bp, &pktp)) {
			case  SD_PKT_ALLOC_SUCCESS:
				xp->xb_pktp = pktp;
				SD_TRACE(SD_LOG_IO_CORE, un,
				    "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n",
				    pktp);
				goto got_pkt;

			case SD_PKT_ALLOC_FAILURE:
				/*
				 * Temporary allocation failure; the cmd will
				 * be retried later.
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n");
#if defined(__x86)	/* DMA resource management */
				if (bp == immed_bp) {
					/*
					 * If SD_XB_DMA_FREED is clear, then
					 * this is a failure to allocate a
					 * scsi_pkt, and we must fail the
					 * command.
					 */
					if ((xp->xb_pkt_flags &
					    SD_XB_DMA_FREED) == 0) {
						break;
					}

					/*
					 * If this immediate command is NOT our
					 * un_retry_bp, then we must fail it.
					 */
					if (bp != un->un_retry_bp) {
						break;
					}

					/*
					 * We get here if this cmd is our
					 * un_retry_bp that was DMAFREED, but
					 * scsi_init_pkt() failed to reallocate
					 * DMA resources when we attempted to
					 * retry it. This can happen when an
					 * mpxio failover is in progress, but
					 * we don't want to just fail the
					 * command in this case.
					 *
					 * Use timeout(9F) to restart it after
					 * a 100ms delay.  We don't want to
					 * let sdrunout() restart it, because
					 * sdrunout() is just supposed to start
					 * commands that are sitting on the
					 * wait queue.  The un_retry_bp stays
					 * set until the command completes, but
					 * sdrunout can be called many times
					 * before that happens.
					 */
					if (un->un_retry_statp == NULL) {
						/*
						 * Restore the kstat pointer to
						 * keep kstat counts coherent
						 * when we do retry the command.
						 */
						un->un_retry_statp =
						    saved_statp;
					}

					if ((un->un_startstop_timeid == NULL) &&
					    (un->un_retry_timeid == NULL) &&
					    (un->un_direct_priority_timeid ==
					    NULL)) {

						un->un_retry_timeid =
						    timeout(
						    sd_start_retry_command,
						    un, SD_RESTART_TIMEOUT);
					}
					goto exit;
				}
#else
				if (bp == immed_bp) {
					break;	/* Just fail the command */
				}
#endif

				/* Add the buf back to the head of the waitq */
				bp->av_forw = un->un_waitq_headp;
				un->un_waitq_headp = bp;
				if (un->un_waitq_tailp == NULL) {
					un->un_waitq_tailp = bp;
				}
				goto exit;

			case SD_PKT_ALLOC_FAILURE_NO_DMA:
				/*
				 * HBA DMA resource failure. Fail the command
				 * and continue processing.
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: "
				    "SD_PKT_ALLOC_FAILURE_NO_DMA\n");
				break;

			case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL:
				/*
				 * Note:x86: Partial DMA mapping not supported
				 * for USCSI commands, and all the needed DMA
				 * resources were not allocated.
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: "
				    "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n");
				break;

			case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL:
				/*
				 * Note:x86: Request cannot fit into CDB based
				 * on lba and len.
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: "
				    "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n");
				break;

			default:
				/* Should NEVER get here! */
				panic("scsi_initpkt error");
				/*NOTREACHED*/
			}

			/*
			 * Fatal error in allocating a scsi_pkt for this buf.
			 * Update kstats & return the buf with an error code.
			 * We must use sd_return_failed_command_no_restart() to
			 * avoid a recursive call back into sd_start_cmds().
			 * However this also means that we must keep processing
			 * the waitq here in order to avoid stalling.
			 */
			if (statp == kstat_waitq_to_runq) {
				SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
			}
			sd_return_failed_command_no_restart(un, bp, EIO);
			if (bp == immed_bp) {
				/* immed_bp is gone by now, so clear this */
				immed_bp = NULL;
			}
			continue;
		}

got_pkt:
		if (bp == immed_bp) {
			/* goto the head of the class.... */
			xp->xb_pktp->pkt_flags |= FLAG_HEAD;
		}

		un->un_ncmds_in_transport++;
		SD_UPDATE_KSTATS(un, statp, bp);

		/*
		 * Call scsi_transport() to send the command to the target.
		 * NOTE: The mutex is dropped across the call; scsi_transport
		 * must never be called with SD_MUTEX held or a deadlock may
		 * occur via a recursive callback into this driver.
		 */
		SD_TRACE(SD_LOG_IO_CORE, un,
		    "sd_start_cmds: calling scsi_transport()\n");
		DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp);

		mutex_exit(SD_MUTEX(un));
		rval = scsi_transport(xp->xb_pktp);
		mutex_enter(SD_MUTEX(un));

		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_start_cmds: scsi_transport() returned %d\n", rval);

		switch (rval) {
		case TRAN_ACCEPT:
			/* Clear this with every pkt accepted by the HBA */
			un->un_tran_fatal_count = 0;
			break;	/* Success; try the next cmd (if any) */

		case TRAN_BUSY:
			un->un_ncmds_in_transport--;
			ASSERT(un->un_ncmds_in_transport >= 0);

			/*
			 * Don't retry request sense, the sense data
			 * is lost when another request is sent.
			 * Free up the rqs buf and retry
			 * the original failed cmd.  Update kstat.
			 */
			if (bp == un->un_rqs_bp) {
				SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
				bp = sd_mark_rqs_idle(un, xp);
				sd_retry_command(un, bp, SD_RETRIES_STANDARD,
				    NULL, NULL, EIO, un->un_busy_timeout / 500,
				    kstat_waitq_enter);
				goto exit;
			}

#if defined(__x86)	/* DMA resource management */
			/*
			 * Free the DMA resources for the  retried command and
			 * set SD_XB_DMA_FREED so the pkt's DMA resources are
			 * reallocated when the command is retried (fibre,
			 * non-USCSI, non-sense commands only).
			 */
			if ((un->un_f_is_fibre == TRUE) &&
			    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
			    ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) {
				scsi_dmafree(xp->xb_pktp);
				xp->xb_pkt_flags |= SD_XB_DMA_FREED;
			}
#endif

			if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) {
				/*
				 * Commands that are SD_PATH_DIRECT_PRIORITY
				 * are for error recovery situations. These do
				 * not use the normal command waitq, so if they
				 * get a TRAN_BUSY we cannot put them back onto
				 * the waitq for later retry. One possible
				 * problem is that there could already be some
				 * other command on un_retry_bp that is waiting
				 * for this one to complete, so we would be
				 * deadlocked if we put this command back onto
				 * the waitq for later retry (since un_retry_bp
				 * must complete before the driver gets back to
				 * commands on the waitq).
				 *
				 * To avoid deadlock we must schedule a callback
				 * that will restart this command after a set
				 * interval.  This should keep retrying for as
				 * long as the underlying transport keeps
				 * returning TRAN_BUSY (just like for other
				 * commands).  Use the same timeout interval as
				 * for the ordinary TRAN_BUSY retry.
				 */
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_start_cmds: scsi_transport() returned "
				    "TRAN_BUSY for DIRECT_PRIORITY cmd!\n");

				SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
				un->un_direct_priority_timeid =
				    timeout(sd_start_direct_priority_command,
				    bp, un->un_busy_timeout / 500);

				goto exit;
			}

			/*
			 * For TRAN_BUSY, we want to reduce the throttle value,
			 * unless we are retrying a command.
			 */
			if (bp != un->un_retry_bp) {
				sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY);
			}

			/*
			 * Set up the bp to be tried again 10 ms later.
			 * Note:x86: Is there a timeout value in the sd_lun
			 * for this condition?
			 */
			sd_set_retry_bp(un, bp, un->un_busy_timeout / 500,
			    kstat_runq_back_to_waitq);
			goto exit;

		case TRAN_FATAL_ERROR:
			un->un_tran_fatal_count++;
			/* FALLTHROUGH */

		case TRAN_BADPKT:
		default:
			un->un_ncmds_in_transport--;
			ASSERT(un->un_ncmds_in_transport >= 0);

			/*
			 * If this is our REQUEST SENSE command with a
			 * transport error, we must get back the pointers
			 * to the original buf, and mark the REQUEST
			 * SENSE command as "available".
			 */
			if (bp == un->un_rqs_bp) {
				bp = sd_mark_rqs_idle(un, xp);
				xp = SD_GET_XBUF(bp);
			} else {
				/*
				 * Legacy behavior: do not update transport
				 * error count for request sense commands.
				 */
				SD_UPDATE_ERRSTATS(un, sd_transerrs);
			}

			SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
			sd_print_transport_rejected_message(un, xp, rval);

			/*
			 * This command will be terminated by SD driver due
			 * to a fatal transport error. We should post
			 * ereport.io.scsi.cmd.disk.tran with driver-assessment
			 * of "fail" for any command to indicate this
			 * situation.
			 */
			if (xp->xb_ena > 0) {
				ASSERT(un->un_fm_private != NULL);
				sfip = un->un_fm_private;
				sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT;
				sd_ssc_extract_info(&sfip->fm_ssc, un,
				    xp->xb_pktp, bp, xp);
				sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
			}

			/*
			 * We must use sd_return_failed_command_no_restart() to
			 * avoid a recursive call back into sd_start_cmds().
			 * However this also means that we must keep processing
			 * the waitq here in order to avoid stalling.
			 */
			sd_return_failed_command_no_restart(un, bp, EIO);

			/*
			 * Notify any threads waiting in sd_ddi_suspend() that
			 * a command completion has occurred.
			 */
			if (un->un_state == SD_STATE_SUSPENDED) {
				cv_broadcast(&un->un_disk_busy_cv);
			}

			if (bp == immed_bp) {
				/* immed_bp is gone by now, so clear this */
				immed_bp = NULL;
			}
			break;
		}

	} while (immed_bp == NULL);

exit:
	ASSERT(mutex_owned(SD_MUTEX(un)));
	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n");
}
/*
 *    Function: sd_return_command
 *
 * Description: Returns a command to its originator (with or without an
 *		error).  Also starts commands waiting to be transported
 *		to the target.
 *
 *     Context: Called under SD_MUTEX; drops and reacquires it around the
 *		pkt destroy and iodone chain.
 */
static void
sd_return_command(struct sd_lun *un, struct buf *bp)
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	struct sd_fm_internal *sfip;

	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != un->un_rqs_bp);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	pktp = SD_GET_PKTP(bp);
	sfip = (struct sd_fm_internal *)un->un_fm_private;
	ASSERT(sfip != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n");

	/*
	 * Note: check for the "sdrestart failed" case.
	 *
	 * With partial DMA, an error-free command that still has a DMA
	 * residual means more windows remain: set up and retry the next
	 * transfer chunk instead of completing the buf.
	 */
	if ((un->un_partial_dma_supported == 1) &&
	    ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
	    (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
	    (xp->xb_pktp->pkt_resid == 0)) {

		if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
			/*
			 * Successfully set up next portion of cmd
			 * transfer, try sending it
			 */
			sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
			    NULL, NULL, 0, (clock_t)0, NULL);
			sd_start_cmds(un, NULL);
			return;	/* Note:x86: need a return here? */
		}
	}

	/*
	 * If this is the failfast bp, clear it from un_failfast_bp. This
	 * can happen if upon being re-tried the failfast bp either
	 * succeeded or encountered another error (possibly even a different
	 * error than the one that precipitated the failfast state, but in
	 * that case it would have had to exhaust retries as well). Regardless,
	 * this should not occur whenever the instance is in the active
	 * failfast state.
	 */
	if (bp == un->un_failfast_bp) {
		ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
		un->un_failfast_bp = NULL;
	}

	/*
	 * Clear the failfast state upon successful completion of ANY cmd.
	 */
	if (bp->b_error == 0) {
		un->un_failfast_state = SD_FAILFAST_INACTIVE;
		/*
		 * If this is a successful command, but used to be retried,
		 * we will take it as a recovered command and post an
		 * ereport with driver-assessment of "recovered".
		 */
		if (xp->xb_ena > 0) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY);
		}
	} else {
		/*
		 * If this is a failed non-USCSI command we will post an
		 * ereport with driver-assessment set accordingly("fail" or
		 * "fatal").
		 */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
			sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
		}
	}

	/*
	 * This is used if the command was retried one or more times. Show
	 * that we are done with it, and allow processing of the waitq to
	 * resume.
	 */
	if (bp == un->un_retry_bp) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_return_command: un:0x%p: "
		    "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
		un->un_retry_bp = NULL;
		un->un_retry_statp = NULL;
	}

	SD_UPDATE_RDWR_STATS(un, bp);
	SD_UPDATE_PARTITION_STATS(un, bp);

	switch (un->un_state) {
	case SD_STATE_SUSPENDED:
		/*
		 * Notify any threads waiting in sd_ddi_suspend() that
		 * a command completion has occurred.
		 */
		cv_broadcast(&un->un_disk_busy_cv);
		break;
	default:
		sd_start_cmds(un, NULL);
		break;
	}

	/* Return this command up the iodone chain to its originator. */
	mutex_exit(SD_MUTEX(un));

	(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
	xp->xb_pktp = NULL;

	SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);

	ASSERT(!mutex_owned(SD_MUTEX(un)));
	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
}
/*
 * Mark bp as failed with errcode and complete it through
 * sd_return_command(), which also restarts the wait queue.
 *
 * Context: caller must hold SD_MUTEX(un).
 */
static void
sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
{
	ASSERT(bp != NULL);
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: entry\n");

	/* Set the error, then hand the buf back like any other command. */
	SD_BIOERROR(bp, errcode);

	sd_return_command(un, bp);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_return_failed_command: exit\n");
}
/*
 * Fail bp back with errcode like sd_return_failed_command(), but never
 * call sd_start_cmds() afterwards, and tolerate bp having no transport
 * packet attached.
 *
 * Context: caller must hold SD_MUTEX(un); the mutex is dropped around
 * packet teardown and iodone processing and re-acquired before return.
 */
static void
sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
int errcode)
{
struct sd_xbuf *xp;
ASSERT(bp != NULL);
ASSERT(un != NULL);
ASSERT(mutex_owned(SD_MUTEX(un)));
xp = SD_GET_XBUF(bp);
ASSERT(xp != NULL);
ASSERT(errcode != 0);
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_return_failed_command_no_restart: entry\n");
SD_BIOERROR(bp, errcode);
/* This command is no longer the failfast candidate. */
if (bp == un->un_failfast_bp) {
ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
un->un_failfast_bp = NULL;
}
/* Clear the retry bookkeeping if this was the command being retried. */
if (bp == un->un_retry_bp) {
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_return_failed_command_no_restart: "
" un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
un->un_retry_bp = NULL;
un->un_retry_statp = NULL;
}
SD_UPDATE_RDWR_STATS(un, bp);
SD_UPDATE_PARTITION_STATS(un, bp);
/* Drop the mutex across packet teardown and the iodone chain. */
mutex_exit(SD_MUTEX(un));
/* bp may have reached here before a packet was ever attached. */
if (xp->xb_pktp != NULL) {
(*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
xp->xb_pktp = NULL;
}
SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
mutex_enter(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_return_failed_command_no_restart: exit\n");
}
/*
 * Core retry engine: depending on retry_check_flag and the per-command
 * retry counters, either requeue bp (immediately or after retry_delay
 * ticks) or fail it back with failure_code.
 *
 *   un               - softstate for the device
 *   bp               - the command to retry
 *   retry_check_flag - SD_RETRIES_* value; the SD_RETRIES_MASK bits pick
 *                      which retry counter is checked, and the
 *                      SD_RETRIES_ISOLATE / SD_RETRIES_FAILFAST bits
 *                      modify the policy
 *   user_funcp       - optional callback invoked with the outcome
 *                      (SD_IMMEDIATE_RETRY_ISSUED,
 *                      SD_DELAYED_RETRY_ISSUED or SD_NO_RETRY_ISSUED)
 *   user_arg         - opaque argument passed to user_funcp
 *   failure_code     - error code set on bp if the command is failed
 *   retry_delay      - 0 to request an immediate retry, otherwise the
 *                      number of ticks to wait before re-issuing
 *   statp            - kstat accounting function used when requeueing
 *
 * Context: caller must hold SD_MUTEX(un).
 */
static void
sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int code),
void *user_arg, int failure_code, clock_t retry_delay,
void (*statp)(kstat_io_t *))
{
struct sd_xbuf *xp;
struct scsi_pkt *pktp;
struct sd_fm_internal *sfip;
ASSERT(un != NULL);
ASSERT(mutex_owned(SD_MUTEX(un)));
ASSERT(bp != NULL);
xp = SD_GET_XBUF(bp);
ASSERT(xp != NULL);
pktp = SD_GET_PKTP(bp);
ASSERT(pktp != NULL);
sfip = (struct sd_fm_internal *)un->un_fm_private;
ASSERT(sfip != NULL);
SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
"sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);
/* No retries while the system is panicking; fail without the callback. */
if (ddi_in_panic()) {
goto fail_command_no_log;
}
/*
 * FLAG_DIAGNOSE commands are never retried; log the CDB and sense
 * data and fail the command.
 */
if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
"ERROR, retrying FLAG_DIAGNOSE command.\n");
sd_dump_memory(un, SD_LOG_IO, "CDB",
(uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
sd_dump_memory(un, SD_LOG_IO, "Sense Data",
(uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
goto fail_command;
}
switch (un->un_state) {
case SD_STATE_SUSPENDED:
case SD_STATE_DUMPING:
/*
 * While suspended or dumping, just put the command back at the
 * head of the wait queue to be re-issued later.
 */
bp->av_forw = un->un_waitq_headp;
un->un_waitq_headp = bp;
if (un->un_waitq_tailp == NULL) {
un->un_waitq_tailp = bp;
}
if (bp == un->un_retry_bp) {
un->un_retry_bp = NULL;
un->un_retry_statp = NULL;
}
SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
"exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
return;
default:
break;
}
/* FLAG_ISOLATE commands are not retried when isolation is requested. */
if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
goto fail_command;
}
}
/*
 * Failfast policy: while failfast is ACTIVE, B_FAILFAST commands are
 * failed immediately.  While INACTIVE, the first failing command
 * becomes the failfast candidate; if that same command fails again,
 * the state goes ACTIVE and the queue is flushed of B_FAILFAST bufs.
 */
if (retry_check_flag & SD_RETRIES_FAILFAST) {
if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
ASSERT(un->un_failfast_bp == NULL);
if (bp->b_flags & B_FAILFAST) {
goto fail_command;
}
} else {
if (un->un_failfast_bp == NULL) {
un->un_failfast_bp = bp;
} else if (un->un_failfast_bp == bp) {
un->un_failfast_state = SD_FAILFAST_ACTIVE;
un->un_failfast_bp = NULL;
sd_failfast_flushq(un);
if (bp->b_flags & B_FAILFAST) {
goto fail_command;
}
/* NOTE(review): empty else arm hidden from lint only. */
#if !defined(lint) && !defined(__lint)
} else {
#endif
}
}
} else {
/* Non-failfast retries reset the failfast state machine. */
un->un_failfast_state = SD_FAILFAST_INACTIVE;
}
/*
 * Check the retry counter selected by the policy bits; if exhausted,
 * fail the command.
 */
switch (retry_check_flag & SD_RETRIES_MASK) {
case SD_RETRIES_VICTIM:
/* Victim retries are tried first, then standard retries. */
if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
xp->xb_victim_retry_count++;
break;
}
/* FALLTHRU */
case SD_RETRIES_STANDARD:
if (xp->xb_retry_count >= un->un_retry_count) {
SD_TRACE(SD_LOG_IO_CORE, un,
"sd_retry_command: retries exhausted!\n");
/*
 * A short read/write that exhausted its retries still
 * reports the residual back to the caller.
 */
if ((pktp->pkt_reason == CMD_CMPLT) &&
(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
(pktp->pkt_resid != 0)) {
uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
SD_UPDATE_B_RESID(bp, pktp);
}
}
goto fail_command;
}
xp->xb_retry_count++;
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_retry_command: retry count:%d\n", xp->xb_retry_count);
break;
case SD_RETRIES_UA:
/* Unit Attention retries are bounded by the global tunable. */
if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"Unit Attention retries exhausted. "
"Check the target.\n");
goto fail_command;
}
xp->xb_ua_retry_count++;
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_retry_command: retry count:%d\n",
xp->xb_ua_retry_count);
break;
case SD_RETRIES_BUSY:
if (xp->xb_retry_count >= un->un_busy_retry_count) {
SD_TRACE(SD_LOG_IO_CORE, un,
"sd_retry_command: retries exhausted!\n");
goto fail_command;
}
xp->xb_retry_count++;
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_retry_command: retry count:%d\n", xp->xb_retry_count);
break;
case SD_RETRIES_NOCHECK:
default:
/* No retry count to check; always retry. */
break;
}
/* Retried commands go to the head of the transport queue. */
xp->xb_pktp->pkt_flags |= FLAG_HEAD;
/* Post retry telemetry for non-USCSI commands with a failure code. */
if (failure_code != 0) {
if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY);
}
}
if (retry_delay == 0) {
/*
 * Immediate retry: if at the throttle limit, convert it into a
 * delayed retry; otherwise issue it right away.
 */
if (un->un_ncmds_in_transport >= un->un_throttle) {
retry_delay = un->un_busy_timeout;
statp = kstat_waitq_enter;
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_retry_command: immed. retry hit "
"throttle!\n");
} else {
if (user_funcp != NULL) {
(*user_funcp)(un, bp, user_arg,
SD_IMMEDIATE_RETRY_ISSUED);
/* Make the possible callbacks visible to lock-lint. */
#ifdef __lock_lint
sd_print_incomplete_msg(un, bp, user_arg,
SD_IMMEDIATE_RETRY_ISSUED);
sd_print_cmd_incomplete_msg(un, bp, user_arg,
SD_IMMEDIATE_RETRY_ISSUED);
sd_print_sense_failed_msg(un, bp, user_arg,
SD_IMMEDIATE_RETRY_ISSUED);
#endif
}
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_retry_command: issuing immediate retry\n");
sd_start_cmds(un, bp);
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_retry_command exit\n");
return;
}
}
/* Delayed retry: notify the caller, then queue via sd_set_retry_bp(). */
if (user_funcp != NULL) {
(*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED);
}
sd_set_retry_bp(un, bp, retry_delay, statp);
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
return;
fail_command:
if (user_funcp != NULL) {
(*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED);
}
fail_command_no_log:
SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_retry_command: returning failed command\n");
sd_return_failed_command(un, bp, failure_code);
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
}
/*
 * Queue bp for a (possibly delayed) retry.  If no retry command is
 * pending, bp becomes un_retry_bp; a delayed retry arms a timeout that
 * re-issues it via sd_start_retry_command().  If another command already
 * owns the retry slot, bp is placed on the wait queue instead.
 *
 *   retry_delay - 0 for an immediate retry (issued by the caller),
 *                 otherwise ticks until the retry timeout fires
 *   statp       - kstat accounting function applied to bp (may be NULL)
 *
 * Context: caller must hold SD_MUTEX(un).
 */
static void
sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay,
void (*statp)(kstat_io_t *))
{
ASSERT(un != NULL);
ASSERT(mutex_owned(SD_MUTEX(un)));
ASSERT(bp != NULL);
SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
"sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp);
/* Claim the retry slot if it is free. */
if (un->un_retry_bp == NULL) {
ASSERT(un->un_retry_statp == NULL);
un->un_retry_bp = bp;
if (retry_delay == 0) {
/* Immediate retry: caller issues it; just record kstats. */
un->un_retry_statp = statp;
goto done;
}
}
if (un->un_retry_bp == bp) {
un->un_retry_statp = statp;
/*
 * Arm the retry timeout only if neither a start/stop nor a
 * direct-priority timeout is outstanding (those will restart
 * the queue themselves).
 */
if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) &&
(un->un_direct_priority_timeid == NULL)) {
un->un_retry_timeid =
timeout(sd_start_retry_command, un, retry_delay);
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_set_retry_bp: setting timeout: un: 0x%p"
" bp:0x%p un_retry_timeid:0x%p\n",
un, bp, un->un_retry_timeid);
}
} else {
/*
 * The retry slot is taken by another command: put bp on the
 * wait queue.  If the failfast candidate is at the head, insert
 * bp after it so the failfast command remains first.
 */
if ((un->un_failfast_bp != NULL) &&
(un->un_failfast_bp == un->un_waitq_headp)) {
bp->av_forw = un->un_waitq_headp->av_forw;
un->un_waitq_headp->av_forw = bp;
if (un->un_waitq_headp == un->un_waitq_tailp) {
un->un_waitq_tailp = bp;
}
} else {
/* Otherwise bp goes to the head of the wait queue. */
bp->av_forw = un->un_waitq_headp;
un->un_waitq_headp = bp;
if (un->un_waitq_tailp == NULL) {
un->un_waitq_tailp = bp;
}
}
/* A queued command is accounted as waiting by default. */
if (statp == NULL) {
statp = kstat_waitq_enter;
}
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_set_retry_bp: un:0x%p already delayed retry\n", un);
}
done:
if (statp != NULL) {
SD_UPDATE_KSTATS(un, statp, bp);
}
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_set_retry_bp: exit un:0x%p\n", un);
}
/*
 * timeout(9F) handler armed by sd_set_retry_bp(): re-issue the pending
 * retry command, if one is still pending when the timeout fires.
 *
 * Context: timeout callback; entered without SD_MUTEX(un) held.
 */
static void
sd_start_retry_command(void *arg)
{
	struct sd_lun *un = arg;
	struct buf *retry_bp;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_retry_command: entry\n");

	mutex_enter(SD_MUTEX(un));

	/* The timeout has fired, so its id is no longer valid. */
	un->un_retry_timeid = NULL;

	retry_bp = un->un_retry_bp;
	if (retry_bp != NULL) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
		    un, retry_bp);
		sd_start_cmds(un, retry_bp);
	}

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_retry_command: exit\n");
}
/*
 * timeout(9F) handler that rate-limits the "unaligned I/O handled via
 * Read Modify Write" warning: if any unaligned requests accumulated
 * since the last firing, log one summary message and re-arm; otherwise
 * let the timeout lapse.
 *
 * Context: timeout callback; entered without SD_MUTEX(un) held.
 */
static void
sd_rmw_msg_print_handler(void *arg)
{
struct sd_lun *un = arg;
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_rmw_msg_print_handler: entry\n");
mutex_enter(SD_MUTEX(un));
if (un->un_rmw_incre_count > 0) {
/* Summarize the interval's unaligned requests, then re-arm. */
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"%"PRIu64" I/O requests are not aligned with %d disk "
"sector size in %ld seconds. They are handled through "
"Read Modify Write but the performance is very low!\n",
un->un_rmw_incre_count, un->un_tgt_blocksize,
drv_hztousec(SD_RMW_MSG_PRINT_TIMEOUT) / 1000000);
un->un_rmw_incre_count = 0;
un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler,
un, SD_RMW_MSG_PRINT_TIMEOUT);
} else {
/* Quiet interval: stop the periodic message. */
un->un_rmw_msg_timeid = NULL;
}
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_rmw_msg_print_handler: exit\n");
}
/*
 * timeout(9F) handler: issue a previously deferred direct-priority
 * command once its delay has elapsed.
 *
 * Context: timeout callback; entered without SD_MUTEX(un) held.
 */
static void
sd_start_direct_priority_command(void *arg)
{
	struct buf *bp = arg;
	struct sd_lun *un;

	ASSERT(bp != NULL);

	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_direct_priority_command: entry\n");

	mutex_enter(SD_MUTEX(un));

	/* The timeout has fired, so its id is no longer valid. */
	un->un_direct_priority_timeid = NULL;

	sd_start_cmds(un, bp);

	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_start_direct_priority_command: exit\n");
}
/*
 * Issue the preallocated REQUEST SENSE command on behalf of the failed
 * command in bp.  If the sense resources are busy or commands are still
 * in flight, the original command is retried later (or failed outright
 * for FLAG_DIAGNOSE commands).
 *
 * Context: caller must hold SD_MUTEX(un).
 */
static void
sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
struct scsi_pkt *pktp)
{
ASSERT(bp != NULL);
ASSERT(un != NULL);
ASSERT(mutex_owned(SD_MUTEX(un)));
SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
"entry: buf:0x%p\n", bp);
/* No sense fetch while panicking, suspended or dumping: fail the cmd. */
if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
(un->un_state == SD_STATE_DUMPING)) {
sd_return_failed_command(un, bp, EIO);
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_send_request_sense_command: syncing/dumping, exit\n");
return;
}
/*
 * The sense packet is a single shared resource; if it is busy or
 * other commands are still in transport, defer.  FLAG_DIAGNOSE
 * commands cannot be retried and are failed instead.
 */
if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
NULL, NULL, 0, un->un_busy_timeout,
kstat_waitq_enter);
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_send_request_sense_command: "
"at full throttle, retrying exit\n");
} else {
sd_return_failed_command(un, bp, EIO);
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_send_request_sense_command: "
"at full throttle, non-retryable exit\n");
}
return;
}
/* Claim the sense resources for bp and issue the sense command. */
sd_mark_rqs_busy(un, bp);
sd_start_cmds(un, un->un_rqs_bp);
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_send_request_sense_command: exit\n");
}
static void
sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
{
struct sd_xbuf *sense_xp;
ASSERT(un != NULL);
ASSERT(bp != NULL);
ASSERT(mutex_owned(SD_MUTEX(un)));
ASSERT(un->un_sense_isbusy == 0);
SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
"buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);
sense_xp = SD_GET_XBUF(un->un_rqs_bp);
ASSERT(sense_xp != NULL);
SD_INFO(SD_LOG_IO, un,
"sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);
ASSERT(sense_xp->xb_pktp != NULL);
ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
== (FLAG_SENSING | FLAG_HEAD));
un->un_sense_isbusy = 1;
un->un_rqs_bp->b_resid = 0;
sense_xp->xb_pktp->pkt_resid = 0;
sense_xp->xb_pktp->pkt_reason = 0;
sense_xp->xb_sense_bp = bp;
bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);
((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;
if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) &&
((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance)
sense_xp->xb_pktp->pkt_path_instance =
((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance;
sense_xp->xb_retry_count = 0;
sense_xp->xb_victim_retry_count = 0;
sense_xp->xb_ua_retry_count = 0;
sense_xp->xb_nr_retry_count = 0;
sense_xp->xb_dma_resid = 0;
sense_xp->xb_sense_status = 0;
sense_xp->xb_sense_state = 0;
sense_xp->xb_sense_resid = 0;
bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));
SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
}
/*
 * Release the request-sense resources and return the original command
 * buf on whose behalf the sense was fetched.
 *
 * Context: caller must hold SD_MUTEX(un); un_sense_isbusy must be set.
 */
static struct buf *
sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
{
	struct buf *cmd_bp;

	ASSERT(un != NULL);
	ASSERT(sense_xp != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_sense_isbusy != 0);

	un->un_sense_isbusy = 0;

	cmd_bp = sense_xp->xb_sense_bp;
	sense_xp->xb_sense_bp = NULL;

	/* The original command is no longer waiting on sense data. */
	SD_GET_XBUF(cmd_bp)->xb_pktp->pkt_flags &= ~FLAG_SENSING;

	return (cmd_bp);
}
/*
 * Allocate the preallocated request-sense buf and packet for un, and
 * determine/enable the HBA's auto-request-sense (ARQ) capability.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if either allocation fails.
 *
 * Context: attach-time; SD_MUTEX(un) must NOT be held (may sleep).
 */
static int
sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
{
struct sd_xbuf *xp;
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
ASSERT(un->un_rqs_bp == NULL);
ASSERT(un->un_rqs_pktp == NULL);
/* DMA-consistent buffer to receive up to MAX_SENSE_LENGTH of sense. */
un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
if (un->un_rqs_bp == NULL) {
return (DDI_FAILURE);
}
un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);
if (un->un_rqs_pktp == NULL) {
sd_free_rqs(un);
return (DDI_FAILURE);
}
/* Build the REQUEST SENSE CDB once; the packet is reused for life. */
(void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0);
SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);
un->un_rqs_pktp->pkt_comp = sdintr;
un->un_rqs_pktp->pkt_time = sd_io_time;
/* Sense commands always go to the head of the queue. */
un->un_rqs_pktp->pkt_flags |=
(FLAG_SENSING | FLAG_HEAD);
/* Attach an xbuf (no iostart/iodone chaining) to the sense buf. */
xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
xp->xb_pktp = un->un_rqs_pktp;
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
un, xp, un->un_rqs_pktp, un->un_rqs_bp);
/* pkt_private lets sdintr() recover the buf from the packet. */
un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
ASSERT(un->un_rqs_bp->b_private == xp);
if (un->un_f_is_fibre == TRUE) {
/* Fibre channel transports are treated as always having ARQ. */
un->un_f_arq_enabled = TRUE;
} else {
#if defined(__x86)
/* On x86, attempt to enable ARQ before querying the capability. */
(void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
#endif
/* Query "auto-rqsense": 0 = supported/off, 1 = on, else none. */
switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
case 0:
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_alloc_rqs: HBA supports ARQ\n");
if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
== 1) {
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_alloc_rqs: ARQ enabled\n");
un->un_f_arq_enabled = TRUE;
} else {
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_alloc_rqs: failed ARQ enable\n");
un->un_f_arq_enabled = FALSE;
}
break;
case 1:
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_alloc_rqs: ARQ already enabled\n");
un->un_f_arq_enabled = TRUE;
break;
default:
SD_INFO(SD_LOG_ATTACH_DETACH, un,
"sd_alloc_rqs: HBA does not support ARQ\n");
un->un_f_arq_enabled = FALSE;
break;
}
}
return (DDI_SUCCESS);
}
/*
 * Free the preallocated request-sense packet, xbuf and buffer for un.
 * Safe to call with any subset of the resources allocated (also used on
 * the sd_alloc_rqs() failure path).
 */
static void
sd_free_rqs(struct sd_lun *un)
{
	struct sd_xbuf *xp;

	ASSERT(un != NULL);

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");

	/* Tear down the packet first, then the xbuf and its buffer. */
	if (un->un_rqs_pktp != NULL) {
		scsi_destroy_pkt(un->un_rqs_pktp);
		un->un_rqs_pktp = NULL;
	}

	if (un->un_rqs_bp != NULL) {
		xp = SD_GET_XBUF(un->un_rqs_bp);
		if (xp != NULL) {
			kmem_free(xp, sizeof (struct sd_xbuf));
		}
		scsi_free_consistent_buf(un->un_rqs_bp);
		un->un_rqs_bp = NULL;
	}

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
}
/*
 * Reduce un_throttle in response to transport-busy or queue-full
 * conditions, and schedule sd_restore_throttle() to bring it back up.
 *
 *   throttle_type - SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
 *
 * Context: caller must hold SD_MUTEX(un).
 */
static void
sd_reduce_throttle(struct sd_lun *un, int throttle_type)
{
ASSERT(un != NULL);
ASSERT(mutex_owned(SD_MUTEX(un)));
ASSERT(un->un_ncmds_in_transport >= 0);
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
"entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
un, un->un_throttle, un->un_ncmds_in_transport);
if (un->un_throttle > 1) {
if (un->un_f_use_adaptive_throttle == TRUE) {
switch (throttle_type) {
case SD_THROTTLE_TRAN_BUSY:
/* Remember the pre-busy throttle for later restore. */
if (un->un_busy_throttle == 0) {
un->un_busy_throttle = un->un_throttle;
}
break;
case SD_THROTTLE_QFULL:
/* QFULL restores step back up gradually instead. */
un->un_busy_throttle = 0;
break;
default:
ASSERT(FALSE);
}
/* Throttle down to the number of commands in flight. */
if (un->un_ncmds_in_transport > 0) {
un->un_throttle = un->un_ncmds_in_transport;
}
} else {
/* Non-adaptive: in-flight count, or 1 if nothing in flight. */
if (un->un_ncmds_in_transport == 0) {
un->un_throttle = 1;
} else {
un->un_throttle = un->un_ncmds_in_transport;
}
}
}
/* Arm the restore timeout if one is not already pending. */
if (un->un_reset_throttle_timeid == NULL) {
un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
un, SD_THROTTLE_RESET_INTERVAL);
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_reduce_throttle: timeout scheduled!\n");
}
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
"exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
}
/*
 * timeout(9F) handler armed by sd_reduce_throttle(): raise un_throttle
 * back toward un_saved_throttle and restart queued commands.
 *
 * Adaptive mode: restore the remembered pre-busy value if one was saved
 * (TRAN_BUSY case); otherwise (QFULL case) step the throttle up by ~10%
 * per firing, re-arming until un_saved_throttle is reached.
 * Non-adaptive mode: restore un_saved_throttle directly.
 *
 * Context: timeout callback; entered without SD_MUTEX(un) held.
 */
static void
sd_restore_throttle(void *arg)
{
struct sd_lun *un = arg;
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
mutex_enter(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
"entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);
un->un_reset_throttle_timeid = NULL;
if (un->un_f_use_adaptive_throttle == TRUE) {
if (un->un_busy_throttle > 0) {
/* TRAN_BUSY: restore the remembered value in one step. */
un->un_throttle = un->un_busy_throttle;
un->un_busy_throttle = 0;
} else {
/* QFULL: ramp up gradually (if enabled by the tunable). */
short throttle;
if (sd_qfull_throttle_enable) {
throttle = un->un_throttle +
max((un->un_throttle / 10), 1);
un->un_throttle =
(throttle < un->un_saved_throttle) ?
throttle : un->un_saved_throttle;
/* Not fully restored yet: keep stepping up. */
if (un->un_throttle < un->un_saved_throttle) {
un->un_reset_throttle_timeid =
timeout(sd_restore_throttle,
un,
SD_QFULL_THROTTLE_RESET_INTERVAL);
}
}
}
/* Never leave the throttle below the minimum. */
if (un->un_throttle < un->un_min_throttle) {
un->un_throttle = un->un_saved_throttle;
}
} else {
SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
"restoring limit from 0x%x to 0x%x\n",
un->un_throttle, un->un_saved_throttle);
un->un_throttle = un->un_saved_throttle;
}
SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
"sd_restore_throttle: calling sd_start_cmds!\n");
sd_start_cmds(un, NULL);
SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
"sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
un, un->un_throttle);
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
}
/*
 * Resource-shortage callback (presumably registered via the scsi_init_pkt
 * callback argument — confirm at call sites): kick the wait queue now
 * that resources may be available again.  Always returns 1.
 *
 * Context: callback context; entered without SD_MUTEX(un) held.
 */
static int
sdrunout(caddr_t arg)
{
	struct sd_lun *un;

	un = (struct sd_lun *)arg;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");

	mutex_enter(SD_MUTEX(un));
	sd_start_cmds(un, NULL);
	mutex_exit(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
	return (1);
}
/*
 * Command-completion callback (installed as pkt_comp) for all packets
 * issued by this driver, including the preallocated request-sense
 * packet.  Classifies the completion — transport failure, auto request
 * sense, sense-packet completion, clean completion, or SCSI status /
 * pkt_reason error — and dispatches to the matching handler.
 *
 * Context: completion callback; entered without SD_MUTEX(un) held;
 * acquires and releases it here.
 */
static void
sdintr(struct scsi_pkt *pktp)
{
struct buf *bp;
struct sd_xbuf *xp;
struct sd_lun *un;
size_t actual_len;
sd_ssc_t *sscp;
ASSERT(pktp != NULL);
/* pkt_private was set to the associated buf when the pkt was built. */
bp = (struct buf *)pktp->pkt_private;
ASSERT(bp != NULL);
xp = SD_GET_XBUF(bp);
ASSERT(xp != NULL);
ASSERT(xp->xb_pktp != NULL);
un = SD_GET_UN(bp);
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
#ifdef SD_FAULT_INJECTION
/* Allow fault injection to rewrite this completion for testing. */
SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n");
sd_faultinjection(pktp);
#endif
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p,"
" xp:0x%p, un:0x%p\n", bp, xp, un);
mutex_enter(SD_MUTEX(un));
ASSERT(un->un_fm_private != NULL);
sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
ASSERT(sscp != NULL);
/* One fewer command in flight; note we are inside the callback. */
un->un_ncmds_in_transport--;
ASSERT(un->un_ncmds_in_transport >= 0);
un->un_in_callback++;
SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
#ifdef SDDEBUG
if (bp == un->un_retry_bp) {
SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: "
"un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n",
un, un->un_retry_bp, un->un_ncmds_in_transport);
}
#endif
/*
 * Device has disappeared: log once per transition, wake media-state
 * waiters, and fail the command (recovering the original command if
 * this was the request-sense packet).
 */
if (pktp->pkt_reason == CMD_DEV_GONE) {
if (un->un_last_pkt_reason != CMD_DEV_GONE) {
un->un_last_pkt_reason = CMD_DEV_GONE;
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"Command failed to complete...Device is gone\n");
}
if (un->un_mediastate != DKIO_DEV_GONE) {
un->un_mediastate = DKIO_DEV_GONE;
cv_broadcast(&un->un_state_cv);
}
if (bp == un->un_rqs_bp) {
bp = sd_mark_rqs_idle(un, xp);
}
sd_return_failed_command(un, bp, EIO);
goto exit;
}
if (pktp->pkt_state & STATE_XARQ_DONE) {
SD_TRACE(SD_LOG_COMMON, un,
"sdintr: extra sense data received. pkt=%p\n", pktp);
}
/*
 * Auto request sense completed along with the command: the sense
 * data is already in the arq status area of the packet.
 */
if ((pktp->pkt_state & STATE_ARQ_DONE) &&
(un->un_f_arq_enabled == TRUE)) {
if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
/*
 * FLAG_DIAGNOSE: copy the sense into the xbuf and fail
 * the command without retry processing.
 */
struct scsi_arq_status *asp;
asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
xp->xb_sense_status =
*((uchar_t *)(&(asp->sts_rqpkt_status)));
xp->xb_sense_state = asp->sts_rqpkt_state;
xp->xb_sense_resid = asp->sts_rqpkt_resid;
if (pktp->pkt_state & STATE_XARQ_DONE) {
/* Extended sense: up to MAX_SENSE_LENGTH bytes. */
actual_len = MAX_SENSE_LENGTH -
xp->xb_sense_resid;
bcopy(&asp->sts_sensedata, xp->xb_sense_data,
MAX_SENSE_LENGTH);
} else {
if (xp->xb_sense_resid > SENSE_LENGTH) {
actual_len = MAX_SENSE_LENGTH -
xp->xb_sense_resid;
} else {
actual_len = SENSE_LENGTH -
xp->xb_sense_resid;
}
/* Rescale the resid against the USCSI rqlen. */
if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
if ((((struct uscsi_cmd *)
(xp->xb_pktinfo))->uscsi_rqlen) >
actual_len) {
xp->xb_sense_resid =
(((struct uscsi_cmd *)
(xp->xb_pktinfo))->
uscsi_rqlen) - actual_len;
} else {
xp->xb_sense_resid = 0;
}
}
bcopy(&asp->sts_sensedata, xp->xb_sense_data,
SENSE_LENGTH);
}
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: arq done and FLAG_DIAGNOSE set\n");
sd_return_failed_command(un, bp, EIO);
goto exit;
}
#if (defined(__x86))
/*
 * NOTE(review): frees DMA resources early on x86 fibre paths
 * before error handling — presumably to release scarce DMA
 * resources; confirm against sd_start_cmds/partial-DMA logic.
 */
if ((un->un_f_is_fibre == TRUE) &&
((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
((pktp->pkt_flags & FLAG_SENSING) == 0)) {
scsi_dmafree(pktp);
xp->xb_pkt_flags |= SD_XB_DMA_FREED;
}
#endif
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: arq done, sd_handle_auto_request_sense\n");
sd_handle_auto_request_sense(un, bp, xp, pktp);
goto exit;
}
/* This is the completion of the preallocated REQUEST SENSE packet. */
if (pktp->pkt_flags & FLAG_SENSING) {
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: sd_handle_request_sense\n");
sd_handle_request_sense(un, bp, xp, pktp);
goto exit;
}
/*
 * Fast path: clean completion.  Complete here when there is no
 * residual, the opcode is not a read/write, or it is a USCSI
 * command; a short read/write falls through to error processing.
 */
if ((pktp->pkt_reason == CMD_CMPLT) &&
(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) {
if (pktp->pkt_resid == 0) {
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: returning command for resid == 0\n");
} else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) &&
((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) {
SD_UPDATE_B_RESID(bp, pktp);
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: returning command for resid != 0\n");
} else if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
SD_UPDATE_B_RESID(bp, pktp);
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: returning uscsi command\n");
} else {
goto not_successful;
}
sd_return_command(un, bp);
un->un_in_callback--;
ASSERT(un->un_in_callback >= 0);
mutex_exit(SD_MUTEX(un));
return;
}
not_successful:
#if (defined(__x86))
/* Same early DMA free as in the ARQ path above. */
if ((un->un_f_is_fibre == TRUE) &&
((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
((pktp->pkt_flags & FLAG_SENSING) == 0)) {
scsi_dmafree(pktp);
xp->xb_pkt_flags |= SD_XB_DMA_FREED;
}
#endif
/*
 * FLAG_DIAGNOSE: minimal processing — fetch sense on a check
 * condition, otherwise fail the command.
 */
if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n");
if ((pktp->pkt_reason == CMD_CMPLT) &&
(SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) {
sd_send_request_sense_command(un, bp, pktp);
} else {
sd_return_failed_command(un, bp, EIO);
}
goto exit;
}
/* Dispatch on pkt_reason, then on SCSI status for CMD_CMPLT. */
switch (pktp->pkt_reason) {
case CMD_CMPLT:
switch (SD_GET_PKT_STATUS(pktp)) {
case STATUS_GOOD:
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: STATUS_GOOD \n");
sd_pkt_status_good(un, bp, xp, pktp);
break;
case STATUS_CHECK:
case STATUS_TERMINATED:
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: STATUS_TERMINATED | STATUS_CHECK\n");
sd_pkt_status_check_condition(un, bp, xp, pktp);
break;
case STATUS_BUSY:
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: STATUS_BUSY\n");
sd_pkt_status_busy(un, bp, xp, pktp);
break;
case STATUS_RESERVATION_CONFLICT:
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: STATUS_RESERVATION_CONFLICT\n");
sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
break;
case STATUS_QFULL:
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: STATUS_QFULL\n");
sd_pkt_status_qfull(un, bp, xp, pktp);
break;
case STATUS_MET:
case STATUS_INTERMEDIATE:
case STATUS_SCSI2:
case STATUS_INTERMEDIATE_MET:
case STATUS_ACA_ACTIVE:
/* Statuses this driver does not use: log and fail. */
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"Unexpected SCSI status received: 0x%x\n",
SD_GET_PKT_STATUS(pktp));
if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
0, "stat-code");
}
sd_return_failed_command(un, bp, EIO);
break;
default:
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"Invalid SCSI status received: 0x%x\n",
SD_GET_PKT_STATUS(pktp));
if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
0, "stat-code");
}
sd_return_failed_command(un, bp, EIO);
break;
}
break;
case CMD_INCOMPLETE:
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: CMD_INCOMPLETE\n");
sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp);
break;
case CMD_TRAN_ERR:
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: CMD_TRAN_ERR\n");
sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp);
break;
case CMD_RESET:
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: CMD_RESET \n");
sd_pkt_reason_cmd_reset(un, bp, xp, pktp);
break;
case CMD_ABORTED:
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: CMD_ABORTED \n");
sd_pkt_reason_cmd_aborted(un, bp, xp, pktp);
break;
case CMD_TIMEOUT:
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: CMD_TIMEOUT\n");
sd_pkt_reason_cmd_timeout(un, bp, xp, pktp);
break;
case CMD_UNX_BUS_FREE:
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: CMD_UNX_BUS_FREE \n");
sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp);
break;
case CMD_TAG_REJECT:
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: CMD_TAG_REJECT\n");
sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp);
break;
default:
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sdintr: default\n");
if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON,
0, "pkt-reason");
}
sd_pkt_reason_default(un, bp, xp, pktp);
break;
}
exit:
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n");
un->un_in_callback--;
ASSERT(un->un_in_callback >= 0);
mutex_exit(SD_MUTEX(un));
}
/*
 * Message-logging callback used with sd_retry_command() for incomplete
 * commands: logs "incomplete <cmd>- retrying/giving up" unless the
 * packet requested silence.
 *
 *   arg  - command name string ("read"/"write" etc.)
 *   code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED or
 *          SD_NO_RETRY_ISSUED
 *
 * Context: caller must hold SD_MUTEX(un).
 */
static void
sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	struct scsi_pkt *pktp;
	char *msgp;
	char *cmdp = arg;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(arg != NULL);

	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	/* Any retry outcome reports "retrying"; everything else gives up. */
	if ((code == SD_DELAYED_RETRY_ISSUED) ||
	    (code == SD_IMMEDIATE_RETRY_ISSUED)) {
		msgp = "retrying";
	} else {
		msgp = "giving up";
	}

	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "incomplete %s- %s\n", cmdp, msgp);
	}
}
static void
sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
char *cmdp;
ASSERT(un != NULL);
ASSERT(mutex_owned(SD_MUTEX(un)));
ASSERT(bp != NULL);
ASSERT(xp != NULL);
ASSERT(pktp != NULL);
ASSERT(pktp->pkt_reason == CMD_CMPLT);
ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
ASSERT(pktp->pkt_resid != 0);
SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");
SD_UPDATE_ERRSTATS(un, sd_harderrs);
switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
case SCMD_READ:
cmdp = "read";
break;
case SCMD_WRITE:
cmdp = "write";
break;
default:
SD_UPDATE_B_RESID(bp, pktp);
sd_return_command(un, bp);
SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
return;
}
sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
cmdp, EIO, (clock_t)0, NULL);
SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
}
/*
 * Process the completion of the preallocated REQUEST SENSE packet: copy
 * the sense data into the original command's xbuf, release the sense
 * resources, and either decode the sense for the original command or
 * (for FLAG_DIAGNOSE commands) dump it and fail the command.
 *
 *   sense_bp/sense_xp/sense_pktp - the request-sense buf/xbuf/packet
 *
 * Context: caller must hold SD_MUTEX(un).
 */
static void
sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp,
struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp)
{
struct buf *cmd_bp;
struct sd_xbuf *cmd_xp;
struct scsi_pkt *cmd_pktp;
size_t actual_len;
ASSERT(un != NULL);
ASSERT(mutex_owned(SD_MUTEX(un)));
ASSERT(sense_bp != NULL);
ASSERT(sense_xp != NULL);
ASSERT(sense_pktp != NULL);
/* Only the preallocated sense resources ever arrive here. */
ASSERT(sense_pktp == un->un_rqs_pktp);
ASSERT(sense_bp == un->un_rqs_bp);
ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) ==
(FLAG_SENSING | FLAG_HEAD));
ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) &
FLAG_SENSING) == FLAG_SENSING);
/* Recover the original command the sense was fetched for. */
cmd_bp = sense_xp->xb_sense_bp;
cmd_xp = SD_GET_XBUF(cmd_bp);
cmd_pktp = SD_GET_PKTP(cmd_bp);
/*
 * The sense command itself failed in the transport: retry the
 * original command (unless it is FLAG_DIAGNOSE, which proceeds with
 * whatever sense was gathered).
 */
if (sense_pktp->pkt_reason != CMD_CMPLT) {
SD_UPDATE_ERRSTATS(un, sd_harderrs);
if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
cmd_bp = sd_mark_rqs_idle(un, sense_xp);
sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD,
NULL, NULL, EIO, (clock_t)0, NULL);
return;
}
}
/* Transfer sense status/state and data into the command's xbuf. */
cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp);
cmd_xp->xb_sense_state = sense_pktp->pkt_state;
actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid;
if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) &&
(((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen >
SENSE_LENGTH)) {
/* USCSI asked for extended sense: copy the full buffer. */
bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
MAX_SENSE_LENGTH);
cmd_xp->xb_sense_resid = sense_pktp->pkt_resid;
} else {
bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
SENSE_LENGTH);
if (actual_len < SENSE_LENGTH) {
cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len;
} else {
cmd_xp->xb_sense_resid = 0;
}
}
/* The sense resources are free for the next user. */
(void) sd_mark_rqs_idle(un, sense_xp);
if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
/* Normal path: validate and decode the sense data. */
if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) ==
SD_SENSE_DATA_IS_VALID) {
sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp);
}
} else {
/* FLAG_DIAGNOSE: dump the CDB and sense, then fail. */
SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB",
(uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data",
(uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
sd_return_failed_command(un, cmd_bp, EIO);
}
}
/*
 * Process sense data delivered via auto request sense (ARQ): copy the
 * sense from the packet's arq status area into the command's xbuf and,
 * if valid, decode it.  If the ARQ sense fetch itself failed, reset the
 * target and retry the command.
 *
 * Context: caller must hold SD_MUTEX(un).
 */
static void
sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
struct scsi_arq_status *asp;
size_t actual_len;
ASSERT(un != NULL);
ASSERT(mutex_owned(SD_MUTEX(un)));
ASSERT(bp != NULL);
ASSERT(xp != NULL);
ASSERT(pktp != NULL);
/* The preallocated request-sense packet never takes the ARQ path. */
ASSERT(pktp != un->un_rqs_pktp);
ASSERT(bp != un->un_rqs_bp);
asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
/* The embedded sense fetch failed: reset the target and retry. */
if (asp->sts_rqpkt_reason != CMD_CMPLT) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"auto request sense failed (reason=%s)\n",
scsi_rname(asp->sts_rqpkt_reason));
sd_reset_target(un, pktp);
sd_retry_command(un, bp, SD_RETRIES_STANDARD,
NULL, NULL, EIO, (clock_t)0, NULL);
return;
}
/* Transfer sense status/state and data into the command's xbuf. */
xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status)));
xp->xb_sense_state = asp->sts_rqpkt_state;
xp->xb_sense_resid = asp->sts_rqpkt_resid;
if (xp->xb_sense_state & STATE_XARQ_DONE) {
/* Extended sense: up to MAX_SENSE_LENGTH bytes arrived. */
actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
bcopy(&asp->sts_sensedata, xp->xb_sense_data,
MAX_SENSE_LENGTH);
} else {
if (xp->xb_sense_resid > SENSE_LENGTH) {
actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
} else {
actual_len = SENSE_LENGTH - xp->xb_sense_resid;
}
/* Rescale the resid against the USCSI caller's rqlen. */
if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
if ((((struct uscsi_cmd *)
(xp->xb_pktinfo))->uscsi_rqlen) > actual_len) {
xp->xb_sense_resid = (((struct uscsi_cmd *)
(xp->xb_pktinfo))->uscsi_rqlen) -
actual_len;
} else {
xp->xb_sense_resid = 0;
}
}
bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH);
}
if (sd_validate_sense_data(un, bp, xp, actual_len) ==
SD_SENSE_DATA_IS_VALID) {
sd_decode_sense(un, bp, xp, pktp);
}
}
/*
 * Message-logging callback used with sd_retry_command() on sense-failure
 * paths: logs the message in arg when no further retry was issued.
 *
 *   arg  - message string to log; may be NULL (nothing is logged)
 *   code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED or
 *          SD_NO_RETRY_ISSUED; only SD_NO_RETRY_ISSUED logs
 *
 * Context: caller must hold SD_MUTEX(un).
 */
static void
sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg,
int code)
{
char *msgp = arg;
ASSERT(un != NULL);
ASSERT(mutex_owned(SD_MUTEX(un)));
ASSERT(bp != NULL);
if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) {
/*
 * Log msgp via "%s" rather than as the format string itself,
 * so a '%' in a message can never be misinterpreted as a
 * conversion directive (non-literal format string hazard).
 */
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "%s", msgp);
}
}
/*
 * Validate the sense data fetched for a command (via ARQ or an explicit
 * REQUEST SENSE).  First checks the status of the request-sense operation
 * itself, then sanity-checks the returned sense bytes (minimum length,
 * extended-sense class, known response-code format).
 *
 * Returns SD_SENSE_DATA_IS_VALID if the data may be decoded; otherwise
 * SD_SENSE_DATA_IS_INVALID, in which case the command has already been
 * retried or failed as appropriate.  Called with SD_MUTEX held.
 */
static int
sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    size_t actual_len)
{
	struct scsi_extended_sense *esp;
	struct scsi_pkt *pktp;
	char *msgp = NULL;
	sd_ssc_t *sscp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(bp != un->un_rqs_bp);
	ASSERT(xp != NULL);
	ASSERT(un->un_fm_private != NULL);

	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);

	sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
	ASSERT(sscp != NULL);

	/*
	 * Check the status of the RQS command (auto or manual).
	 */
	switch (xp->xb_sense_status & STATUS_MASK) {
	case STATUS_GOOD:
		break;
	case STATUS_RESERVATION_CONFLICT:
		sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
		return (SD_SENSE_DATA_IS_INVALID);
	case STATUS_BUSY:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Busy Status on REQUEST SENSE\n");
		sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL,
		    NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
		return (SD_SENSE_DATA_IS_INVALID);
	case STATUS_QFULL:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "QFULL Status on REQUEST SENSE\n");
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL,
		    NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
		return (SD_SENSE_DATA_IS_INVALID);
	case STATUS_CHECK:
	case STATUS_TERMINATED:
		msgp = "Check Condition on REQUEST SENSE\n";
		goto sense_failed;
	default:
		msgp = "Not STATUS_GOOD on REQUEST_SENSE\n";
		goto sense_failed;
	}

	/* Any sense data at all? */
	if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
	    (actual_len == 0)) {
		msgp = "Request Sense couldn't get sense data\n";
		goto sense_failed;
	}

	if (actual_len < SUN_MIN_SENSE_LENGTH) {
		msgp = "Not enough sense information\n";
		/* Mark the ssc_flags for detecting invalid sense data. */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
			    "sense-data");
		}
		goto sense_failed;
	}

	/*
	 * We require the extended sense data class; anything else is
	 * undecodable and treated as fatal.
	 */
	esp = (struct scsi_extended_sense *)xp->xb_sense_data;
	if (esp->es_class != CLASS_EXTENDED_SENSE) {
		if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
			static char tmp[8];
			static char buf[148];
			char *p = (char *)(xp->xb_sense_data);
			int i;

			mutex_enter(&sd_sense_mutex);
			(void) strcpy(buf, "undecodable sense information:");
			/*
			 * Bug fix: bound the dump loop by the space left in
			 * buf (reserving room for the "-(assumed fatal)\n"
			 * suffix plus NUL), not only by actual_len.  The
			 * old loop appended up to actual_len 5-byte " 0xXX"
			 * entries, which can overflow the static 148-byte
			 * buffer once actual_len exceeds ~20 bytes.
			 */
			for (i = 0; i < actual_len; i++) {
				(void) sprintf(tmp, " 0x%x", *(p++) & 0xff);
				if (strlen(buf) + strlen(tmp) +
				    sizeof ("-(assumed fatal)\n") >
				    sizeof (buf)) {
					break;
				}
				(void) strcpy(&buf[strlen(buf)], tmp);
			}
			i = strlen(buf);
			(void) strcpy(&buf[i], "-(assumed fatal)\n");

			if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
				scsi_log(SD_DEVINFO(un), sd_label,
				    CE_WARN, buf);
			}
			mutex_exit(&sd_sense_mutex);
		}

		/* Mark the ssc_flags for detecting invalid sense data. */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
			    "sense-data");
		}

		/* Undecodable sense class is treated as a fatal failure. */
		sd_return_failed_command(un, bp, EIO);
		return (SD_SENSE_DATA_IS_INVALID);
	}

	/* Only known response-code formats are accepted. */
	if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
	    (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
	    (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
	    (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
	    (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
		/* Mark the ssc_flags for detecting invalid sense data. */
		if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
			sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
			    "sense-data");
		}
		goto sense_failed;
	}

	return (SD_SENSE_DATA_IS_VALID);

sense_failed:
	/*
	 * The request sense failed (for whatever reason): retry the
	 * original command, logging "msgp" if no retry is issued.
	 */
	sd_retry_command(un, bp, SD_RETRIES_STANDARD,
	    sd_print_sense_failed_msg, msgp, EIO,
	    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL);

	return (SD_SENSE_DATA_IS_INVALID);
}
/*
 * Dispatch to the handler routine for the sense key found in the sense
 * data attached to this command.  Each handler decides whether to retry,
 * fail, or complete the command.  Called with SD_MUTEX held.
 */
static void
sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	uint8_t *sdp;
	uint8_t key;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	/* The dedicated request-sense bp never comes through here. */
	ASSERT(bp != un->un_rqs_bp);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	sdp = xp->xb_sense_data;
	key = scsi_sense_key(sdp);

	switch (key) {
	case KEY_NO_SENSE:
		sd_sense_key_no_sense(un, bp, xp, pktp);
		break;
	case KEY_RECOVERABLE_ERROR:
		sd_sense_key_recoverable_error(un, sdp, bp, xp, pktp);
		break;
	case KEY_NOT_READY:
		sd_sense_key_not_ready(un, sdp, bp, xp, pktp);
		break;
	case KEY_MEDIUM_ERROR:
	case KEY_HARDWARE_ERROR:
		sd_sense_key_medium_or_hardware_error(un, sdp, bp, xp, pktp);
		break;
	case KEY_ILLEGAL_REQUEST:
		sd_sense_key_illegal_request(un, bp, xp, pktp);
		break;
	case KEY_UNIT_ATTENTION:
		sd_sense_key_unit_attention(un, sdp, bp, xp, pktp);
		break;
	case KEY_WRITE_PROTECT:
	case KEY_VOLUME_OVERFLOW:
	case KEY_MISCOMPARE:
		/* These keys always fail the command. */
		sd_sense_key_fail_command(un, bp, xp, pktp);
		break;
	case KEY_BLANK_CHECK:
		sd_sense_key_blank_check(un, bp, xp, pktp);
		break;
	case KEY_ABORTED_COMMAND:
		sd_sense_key_aborted_command(un, bp, xp, pktp);
		break;
	case KEY_VENDOR_UNIQUE:
	case KEY_COPY_ABORTED:
	case KEY_EQUAL:
	case KEY_RESERVED:
	default:
		/* Keys with no dedicated handling go to the default path. */
		sd_sense_key_default(un, sdp, bp, xp, pktp);
		break;
	}
}
/* Size of the scratch buffer used by sd_dump_memory() for one log line. */
#define SD_DUMP_MEMORY_BUF_SIZE 256

/* Per-byte output formats for sd_dump_memory(): [0] hex, [1] character. */
static char *sd_dump_format_string[] = {
		" 0x%02x",
		" %c"
};
/*
 * Dump "len" bytes of "data" to the system log via scsi_log(), one or
 * more lines each prefixed by "title".  "comp" is the SD_LOG_* component
 * mask for this dump; "fmt" selects hex (SD_LOG_HEX) or character
 * (SD_LOG_CHAR) output.  Output is produced only when sd_error_level is
 * SCSI_ERR_ALL (and, under SDDEBUG, when the level/component masks also
 * match).
 */
static void
sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
    int len, int fmt)
{
	int i, j;
	int avail_count;	/* entries that fit on one output line */
	int start_offset;
	int end_offset;
	size_t entry_len;	/* formatted width of a single data byte */
	char *bufp;
	char *local_buf;
	char *format_string;

	ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));

#ifdef SDDEBUG
	/* Under debug, the level and component masks must also match. */
	if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
	    (sd_error_level != SCSI_ERR_ALL)) {
		return;
	}
	if (((sd_component_mask & comp) == 0) ||
	    (sd_error_level != SCSI_ERR_ALL)) {
		return;
	}
#else
	if (sd_error_level != SCSI_ERR_ALL) {
		return;
	}
#endif

	local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
	bufp = local_buf;

	if (fmt == SD_LOG_HEX) {
		format_string = sd_dump_format_string[0];
	} else /* SD_LOG_CHAR */ {
		format_string = sd_dump_format_string[1];
	}

	/*
	 * Format the first byte once to learn the (fixed) entry width,
	 * then compute how many entries fit on a line after the title,
	 * colon, newline, and NUL.
	 */
	(void) sprintf(bufp, format_string, data[0]);
	entry_len = strlen(bufp);
	avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;

	j = 0;
	while (j < len) {
		/* Emit one "title: <entries...>\n" line per iteration. */
		bufp = local_buf;
		bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
		start_offset = j;
		end_offset = start_offset + avail_count;
		(void) sprintf(bufp, "%s:", title);
		bufp += strlen(bufp);
		for (i = start_offset; ((i < end_offset) && (j < len));
		    i++, j++) {
			(void) sprintf(bufp, format_string, data[i]);
			bufp += entry_len;
		}
		(void) sprintf(bufp, "\n");
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
	}
	kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
}
/*
 * Log a message (and dump the failed CDB and sense data) for a command
 * that received sense data.  "arg" points to a struct sd_sense_info
 * carrying the severity and PFA (predictive failure analysis) flag;
 * "code" is the SD_*_RETRY_ISSUED disposition from sd_retry_command().
 * Called with SD_MUTEX held.
 */
static void
sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	uint8_t *sensep;
	daddr_t request_blkno;
	diskaddr_t err_blkno;
	int severity;
	int pfa_flag;
	extern struct scsi_key_strings scsi_cmds[];

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);
	ASSERT(arg != NULL);

	severity = ((struct sd_sense_info *)(arg))->ssi_severity;
	pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;

	/* Any issued retry downgrades the severity to "retryable". */
	if ((code == SD_DELAYED_RETRY_ISSUED) ||
	    (code == SD_IMMEDIATE_RETRY_ISSUED)) {
		severity = SCSI_ERR_RETRYABLE;
	}

	request_blkno = xp->xb_blkno;

	/*
	 * Extract the error block number from the sense info field when
	 * present; otherwise fall back to the requested block.
	 */
	sensep = xp->xb_sense_data;
	if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
	    (uint64_t *)&err_blkno)) {
		if ((SD_IS_BUFIO(xp) == FALSE) &&
		    ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
			request_blkno = err_blkno;
		}
	} else {
		err_blkno = (diskaddr_t)request_blkno;
	}

	/* These dumps only log when sd_error_level == SCSI_ERR_ALL. */
	sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
	    (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
	sd_dump_memory(un, SD_LOG_IO, "Sense Data",
	    (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);

	/* PFA conditions are always reported; others may be suppressed. */
	if (pfa_flag == FALSE) {
		/* FLAG_SILENT suppresses non-PFA messages entirely. */
		if ((pktp->pkt_flags & FLAG_SILENT) != 0) {
			return;
		}

		/* Buf I/O below the configured error level is suppressed. */
		if ((SD_IS_BUFIO(xp) == TRUE) &&
		    (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
		    (severity < sd_error_level))) {
			return;
		}
	}

	/*
	 * Log via scsi_vu_errmsg() when FMA is not handling reporting, or
	 * for fully recovered errors (recoverable with no residual).
	 */
	if (SD_FM_LOG(un) == SD_FM_LOG_NSUP ||
	    ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) &&
	    (pktp->pkt_resid == 0))) {
		scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity,
		    request_blkno, err_blkno, scsi_cmds,
		    (struct scsi_extended_sense *)sensep,
		    un->un_additional_codes, NULL);
	}
}
static void
sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
struct scsi_pkt *pktp)
{
struct sd_sense_info si;
ASSERT(un != NULL);
ASSERT(mutex_owned(SD_MUTEX(un)));
ASSERT(bp != NULL);
ASSERT(xp != NULL);
ASSERT(pktp != NULL);
si.ssi_severity = SCSI_ERR_FATAL;
si.ssi_pfa_flag = FALSE;
SD_UPDATE_ERRSTATS(un, sd_softerrs);
sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
&si, EIO, (clock_t)0, NULL);
}
/*
 * Handle a command that returned sense data with the RECOVERABLE ERROR
 * key: complete the command if all data transferred, otherwise retry.
 * ASC 0x5D is treated as a predictive failure (PFA) report when
 * sd_report_pfa is enabled.  Called with SD_MUTEX held.
 */
static void
sd_sense_key_recoverable_error(struct sd_lun *un, uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * ASC/ASCQ 0x00/0x1D: complete the command without any message
	 * or error accounting (presumably "ATA pass-through information
	 * available" — informational only; confirm against SPC tables).
	 */
	if (asc == 0x00 && ascq == 0x1D) {
		sd_return_command(un, bp);
		return;
	}

	/*
	 * ASC 0x5D: failure prediction threshold exceeded — report as a
	 * PFA event rather than a recovered error when enabled.
	 */
	if ((asc == 0x5D) && (sd_report_pfa != 0)) {
		SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
		si.ssi_severity = SCSI_ERR_INFO;
		si.ssi_pfa_flag = TRUE;
	} else {
		SD_UPDATE_ERRSTATS(un, sd_softerrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_recov_err);
		si.ssi_severity = SCSI_ERR_RECOVERED;
		si.ssi_pfa_flag = FALSE;
	}

	/* No residual: the command fully succeeded despite the sense. */
	if (pktp->pkt_resid == 0) {
		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
		sd_return_command(un, bp);
		return;
	}

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}
/*
 * Handle a command that returned sense data with the NOT READY key.
 * Depending on the ASC/ASCQ this may retry the command (with or without
 * first issuing a START STOP UNIT to spin the device up), reset the
 * target, or fail the command outright.  Called with SD_MUTEX held.
 */
static void
sd_sense_key_not_ready(struct sd_lun *un, uint8_t *sense_datap, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	/*
	 * Update error stats for a CD with no media (ASC 0x3A) or on any
	 * not-ready retry after the first attempt.
	 */
	if ((ISCD(un) && (asc == 0x3A)) ||
	    (xp->xb_nr_retry_count > 0)) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
	}

	/* Out of not-ready retries: fail the command. */
	if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
		/*
		 * Removable media with ASC/ASCQ 0x04/0x04+ gets maximum
		 * message severity so the failure is always reported.
		 */
		if (un->un_f_has_removable_media && (asc == 0x04) &&
		    (ascq >= 0x04)) {
			si.ssi_severity = SCSI_ERR_ALL;
		}
		goto fail_command;
	}

	switch (asc) {
	case 0x04:	/* logical unit not ready (per log message below) */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "logical unit not ready, resetting disk\n");
		}

		switch (ascq) {
		case 0x00:
			/*
			 * Cause not reportable: possibly reset the target
			 * before retrying.  Fibre devices reset based on
			 * any prior not-ready retry; others wait until the
			 * reset retry count is exceeded.  Either way the
			 * reset is skipped if a start/stop is pending.
			 */
			if (un->un_f_is_fibre == TRUE) {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_nr_retry_count > 0)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			} else {
				if (((sd_level_mask & SD_LOGMASK_DIAG) ||
				    (xp->xb_nr_retry_count >
				    un->un_reset_retry_count)) &&
				    (un->un_startstop_timeid == NULL)) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN, "logical unit not ready, "
					    "resetting disk\n");
					sd_reset_target(un, pktp);
				}
			}
			break;

		case 0x01:
			/* Becoming ready: just retry after the delay. */
			goto do_retry;

		case 0x02:
			/* Falls out of the switch to issue START STOP. */
			break;

		case 0x03:
			/* Not recoverable by retrying: fail now. */
			goto fail_command;

		case 0x04:
		case 0x05:
		case 0x06:
		case 0x07:
		case 0x08:
		default:
			/*
			 * Removable media: fail with maximum severity;
			 * fixed disks fall through to the START STOP path.
			 */
			if (un->un_f_has_removable_media) {
				si.ssi_severity = SCSI_ERR_ALL;
				goto fail_command;
			}
			break;
		}

		/* Never send START STOP for a START STOP command itself. */
		if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
			break;
		}

		/* A restart (START STOP) is already outstanding. */
		if (un->un_startstop_timeid != NULL) {
			SD_INFO(SD_LOG_ERROR, un,
			    "sd_sense_key_not_ready: restart already issued to"
			    " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
			    ddi_get_instance(SD_DEVINFO(un)));
			break;
		}

		/*
		 * Schedule a START STOP UNIT to spin the device up, then
		 * requeue this command; it will be retried once the
		 * start/stop completes.
		 */
		un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
		    un, un->un_busy_timeout / 2);
		xp->xb_nr_retry_count++;
		sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
		return;

	case 0x05:	/* unit does not respond to selection */
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "unit does not respond to selection\n");
		}
		break;

	case 0x3A:	/* medium not present (caddy not inserted) */
		if (sd_error_level >= SCSI_ERR_FATAL) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Caddy not inserted in drive\n");
		}
		/* Mark the media ejected and wake anyone waiting on it. */
		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		cv_broadcast(&un->un_state_cv);
		goto fail_command;

	default:
		if (sd_error_level < SCSI_ERR_RETRYABLE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
			    "Unit not Ready. Additional sense code 0x%x\n",
			    asc);
		}
		break;
	}

do_retry:
	/*
	 * Retry the command; SD_RETRIES_NOCHECK because the not-ready
	 * retry count (not the ordinary retry count) governs here.
	 */
	xp->xb_nr_retry_count++;
	si.ssi_severity = SCSI_ERR_RETRYABLE;
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
	    &si, EIO, un->un_busy_timeout, NULL);
	return;

fail_command:
	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}
/*
 * Handle a command that returned sense data with the MEDIUM ERROR or
 * HARDWARE ERROR key: count the error, possibly reset the LUN/target
 * once the reset retry count is reached, then retry.  Called with
 * SD_MUTEX held (dropped and reacquired around the reset).
 */
static void
sd_sense_key_medium_or_hardware_error(struct sd_lun *un, uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;
	uint8_t sense_key = scsi_sense_key(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	if (sense_key == KEY_MEDIUM_ERROR) {
		SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
	}

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	/*
	 * On exactly the reset-retry-count'th attempt, try a device
	 * reset before retrying: LUN reset first if enabled, then a
	 * target reset if the LUN reset failed (or wasn't attempted).
	 * scsi_reset() must be called without SD_MUTEX held.
	 */
	if ((un->un_reset_retry_count != 0) &&
	    (xp->xb_retry_count == un->un_reset_retry_count)) {
		mutex_exit(SD_MUTEX(un));
		if (un->un_f_allow_bus_device_reset == TRUE) {
			int reset_retval = 0;
			if (un->un_f_lun_reset_enabled == TRUE) {
				SD_TRACE(SD_LOG_IO_CORE, un,
				    "sd_sense_key_medium_or_hardware_"
				    "error: issuing RESET_LUN\n");
				reset_retval = scsi_reset(SD_ADDRESS(un),
				    RESET_LUN);
			}
			if (reset_retval == 0) {
				SD_TRACE(SD_LOG_IO_CORE, un,
				    "sd_sense_key_medium_or_hardware_"
				    "error: issuing RESET_TARGET\n");
				(void) scsi_reset(SD_ADDRESS(un),
				    RESET_TARGET);
			}
		}
		mutex_enter(SD_MUTEX(un));
	}

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}
static void
sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
struct sd_sense_info si;
ASSERT(un != NULL);
ASSERT(mutex_owned(SD_MUTEX(un)));
ASSERT(bp != NULL);
ASSERT(xp != NULL);
ASSERT(pktp != NULL);
SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err);
si.ssi_severity = SCSI_ERR_INFO;
si.ssi_pfa_flag = FALSE;
sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
sd_return_failed_command(un, bp, EIO);
}
/*
 * Handle a command that returned sense data with the UNIT ATTENTION key.
 * Depending on the ASC this may flag a lost reservation, dispatch a
 * media-change or capacity-change task, and/or retry the command.
 * Called with SD_MUTEX held.
 */
static void
sd_sense_key_unit_attention(struct sd_lun *un, uint8_t *sense_datap,
    struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	int retry_check_flag = SD_RETRIES_UA;
	boolean_t kstat_updated = B_FALSE;
	struct sd_sense_info si;
	uint8_t asc = scsi_sense_asc(sense_datap);
	uint8_t ascq = scsi_sense_ascq(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;

	switch (asc) {
	case 0x5D:
		/* Failure-prediction (PFA) report, when enabled. */
		if (sd_report_pfa != 0) {
			SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
			si.ssi_pfa_flag = TRUE;
			retry_check_flag = SD_RETRIES_STANDARD;
			goto do_retry;
		}

		break;

	case 0x29:
		/*
		 * Presumably power-on / reset / bus-device-reset UA
		 * (confirm against SPC ASC tables).  If we held a
		 * reservation, mark it lost and wanted again.
		 */
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
		}

		/*
		 * Large devices may need descriptor sense re-enabled
		 * after a reset; do it from taskq context.
		 */
		if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) {
			if (taskq_dispatch(sd_tq, sd_reenable_dsense_task,
			    un, KM_NOSLEEP) == TASKQID_INVALID) {
				SD_ERROR(SD_LOG_ERROR, un,
				    "sd_sense_key_unit_attention: "
				    "Could not dispatch "
				    "sd_reenable_dsense_task\n");
			}
		}
		/* FALLTHROUGH */
	case 0x28:
		/*
		 * Medium may have changed (sd_media_change_task).
		 * Non-removable devices just fall out to the retry path.
		 */
		if (!un->un_f_has_removable_media) {
			break;
		}

		/*
		 * Dispatch the media-change handling to taskq context;
		 * if that fails, the command is failed immediately.
		 */
		if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
		    KM_NOSLEEP) == TASKQID_INVALID) {
			SD_UPDATE_ERRSTATS(un, sd_harderrs);
			SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
			si.ssi_severity = SCSI_ERR_FATAL;
			sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
			sd_return_failed_command(un, bp, EIO);
		}
		/*
		 * NOTE(review): this assignment is a dead store — the
		 * function returns immediately and kstat_updated is only
		 * tested further below.
		 */
		kstat_updated = B_TRUE;
		return;

	default:
		break;
	}

	/*
	 * Capacity/parameter or reported-LUNs changes (ASC/ASCQ 0x2A/09,
	 * 0x2A/01, 0x3F/0E) trigger a capacity re-read via
	 * sd_target_change_task.
	 */
	if (((asc == 0x2a) && (ascq == 0x09)) ||
	    ((asc == 0x2a) && (ascq == 0x01)) ||
	    ((asc == 0x3f) && (ascq == 0x0e))) {
		if (taskq_dispatch(sd_tq, sd_target_change_task, un,
		    KM_NOSLEEP) == TASKQID_INVALID) {
			SD_ERROR(SD_LOG_ERROR, un,
			    "sd_sense_key_unit_attention: "
			    "Could not dispatch sd_target_change_task\n");
		}
	}

	/*
	 * Update kstat if we haven't done that.
	 */
	if (!kstat_updated) {
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
	}

do_retry:
	sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
	    EIO, SD_UA_RETRY_DELAY, NULL);
}
static void
sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
struct scsi_pkt *pktp)
{
struct sd_sense_info si;
ASSERT(un != NULL);
ASSERT(mutex_owned(SD_MUTEX(un)));
ASSERT(bp != NULL);
ASSERT(xp != NULL);
ASSERT(pktp != NULL);
si.ssi_severity = SCSI_ERR_FATAL;
si.ssi_pfa_flag = FALSE;
sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
sd_return_failed_command(un, bp, EIO);
}
/*
 * Handle a command that returned sense data with the BLANK CHECK key:
 * fail the command without retrying.  Removable-media devices get
 * maximum message severity so the failure is always reported.
 * Called with SD_MUTEX held.
 */
static void
sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	struct sd_sense_info si;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	if (un->un_f_has_removable_media) {
		si.ssi_severity = SCSI_ERR_ALL;
	} else {
		si.ssi_severity = SCSI_ERR_FATAL;
	}
	si.ssi_pfa_flag = FALSE;

	sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
	sd_return_failed_command(un, bp, EIO);
}
static void
sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
struct sd_sense_info si;
ASSERT(un != NULL);
ASSERT(mutex_owned(SD_MUTEX(un)));
ASSERT(bp != NULL);
ASSERT(xp != NULL);
ASSERT(pktp != NULL);
si.ssi_severity = SCSI_ERR_FATAL;
si.ssi_pfa_flag = FALSE;
SD_UPDATE_ERRSTATS(un, sd_harderrs);
sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
&si, EIO, drv_usectohz(100000), NULL);
}
/*
 * Default handler for sense keys that have no dedicated handler
 * (vendor unique, copy aborted, equal, reserved, and anything else):
 * count a hard error, log the key, and retry.  Called with SD_MUTEX
 * held.
 */
static void
sd_sense_key_default(struct sd_lun *un, uint8_t *sense_datap, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	struct sd_sense_info si;
	uint8_t sense_key = scsi_sense_key(sense_datap);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	/* sense_key indexes the global sense_keys[] name table. */
	if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "Unhandled Sense Key '%s'\n", sense_keys[sense_key]);
	}

	si.ssi_severity = SCSI_ERR_FATAL;
	si.ssi_pfa_flag = FALSE;

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
	    &si, EIO, (clock_t)0, NULL);
}
/*
 * Log a transport-failure retry/give-up message for a command.  "flag"
 * is the SD_*_RETRY_ISSUED disposition from sd_retry_command().  The
 * message is suppressed while suspended, in low power, or for silent
 * pkts, and repeated identical pkt_reasons are not re-logged on retries
 * (unless sd_error_level is SCSI_ERR_ALL).  Called with SD_MUTEX held;
 * briefly takes un_pm_mutex.
 */
static void
sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag)
{
	struct sd_xbuf *xp;
	struct scsi_pkt *pktp;
	char *reasonp;
	char *msgp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	pktp = SD_GET_PKTP(bp);
	ASSERT(pktp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);

	ASSERT(!mutex_owned(&un->un_pm_mutex));
	mutex_enter(&un->un_pm_mutex);
	/* Suppress logging while suspended, powered down, or silent. */
	if ((un->un_state == SD_STATE_SUSPENDED) ||
	    (SD_DEVICE_IS_IN_LOW_POWER(un)) ||
	    (pktp->pkt_flags & FLAG_SILENT)) {
		mutex_exit(&un->un_pm_mutex);
		goto update_pkt_reason;
	}
	mutex_exit(&un->un_pm_mutex);

	switch (flag) {
	case SD_NO_RETRY_ISSUED:
		msgp = "giving up";
		break;
	case SD_IMMEDIATE_RETRY_ISSUED:
	case SD_DELAYED_RETRY_ISSUED:
		/*
		 * Don't re-log the same reason on every retry (and never
		 * log from panic or while offline).
		 */
		if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) ||
		    ((pktp->pkt_reason == un->un_last_pkt_reason) &&
		    (sd_error_level != SCSI_ERR_ALL))) {
			return;
		}
		msgp = "retrying command";
		break;
	default:
		goto update_pkt_reason;
	}

	/* Report parity errors specially; otherwise name the pkt_reason. */
	reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" :
	    scsi_rname(pktp->pkt_reason));

	/* Only log directly when FMA is not handling error reporting. */
	if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "SCSI transport failed: reason '%s': %s\n", reasonp, msgp);
	}

update_pkt_reason:
	/*
	 * Remember the reason so repeats can be suppressed; only record
	 * it on the first failure or on non-CMD_CMPLT reasons.
	 */
	if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) {
		un->un_last_pkt_reason = pktp->pkt_reason;
	}
}
/*
 * Message callback for CMD_INCOMPLETE handling.  When no retry will be
 * issued, log "disk not responding to selection" (unless the device is
 * being removed) and transition to the OFFLINE state; otherwise defer
 * to the generic retry-message routine.  Called with SD_MUTEX held.
 */
static void
sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg,
    int code)
{
	dev_info_t *dip;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);

	switch (code) {
	case SD_NO_RETRY_ISSUED:
		if (un->un_state != SD_STATE_OFFLINE) {
			/*
			 * Suppress the message when the device is being
			 * detached after removal — the selection failure
			 * is expected then.
			 */
			dip = un->un_sd->sd_dev;
			if (!(DEVI_IS_DETACHING(dip) &&
			    DEVI_IS_DEVICE_REMOVED(dip))) {
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "disk not responding to selection\n");
			}
			New_state(un, SD_STATE_OFFLINE);
		}
		break;

	case SD_DELAYED_RETRY_ISSUED:
	case SD_IMMEDIATE_RETRY_ISSUED:
	default:
		sd_print_retry_msg(un, bp, arg, code);
		break;
	}
}
/*
 * Handle a command that completed with pkt_reason CMD_INCOMPLETE:
 * possibly reset the target, mark the command failfast-eligible when
 * the target was never selected, then retry.  Called with SD_MUTEX
 * held.
 */
static void
sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	int retry_flags = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * Reset the target unless the transport stopped exactly at the
	 * got-bus state.
	 */
	if (pktp->pkt_state != STATE_GOT_BUS) {
		SD_UPDATE_ERRSTATS(un, sd_transerrs);
		sd_reset_target(un, pktp);
	}

	/* Selection never completed: eligible for failfast handling. */
	if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) {
		retry_flags |= SD_RETRIES_FAILFAST;
	}

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	sd_retry_command(un, bp, retry_flags,
	    sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}
/*
 * Handle a command that completed with pkt_reason CMD_TRAN_ERR: count
 * a hard error, reset the target for non-parity failures past the
 * got-bus state, then retry.  Called with SD_MUTEX held.
 */
static void
sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);

	/* Parity errors do not warrant a reset; others past got-bus do. */
	if (((pktp->pkt_statistics & STAT_PERR) == 0) &&
	    (pktp->pkt_state != STATE_GOT_BUS)) {
		SD_UPDATE_ERRSTATS(un, sd_transerrs);
		sd_reset_target(un, pktp);
	}

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}
/*
 * Handle a command that completed with pkt_reason CMD_RESET: count a
 * transport error, reset the target, and retry as a "victim" of the
 * reset.  Called with SD_MUTEX held.
 */
static void
sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/* SD_RETRIES_VICTIM: the command was collateral of the reset. */
	sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}
/*
 * Handle a command that completed with pkt_reason CMD_ABORTED: same
 * disposition as CMD_RESET — count a transport error, reset the
 * target, and retry as a victim.  Called with SD_MUTEX held.
 */
static void
sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}
/*
 * Handle a command that completed with pkt_reason CMD_TIMEOUT: count a
 * transport error, reset the target, and retry with failfast
 * eligibility.  Called with SD_MUTEX held.
 */
static void
sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/* Timeouts are failfast-eligible, unlike most other reasons. */
	sd_retry_command(un, bp,
	    (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}
/*
 * Handle a command that completed with pkt_reason CMD_UNX_BUS_FREE:
 * count a hard error and retry.  Parity-error bus frees are retried
 * silently (no message callback).  Called with SD_MUTEX held.
 */
static void
sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	void (*msgfunc)(struct sd_lun *, struct buf *, void *, int);

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	if ((pktp->pkt_statistics & STAT_PERR) == 0) {
		msgfunc = sd_print_retry_msg;
	} else {
		msgfunc = NULL;
	}

	sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
	    msgfunc, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}
/*
 * Handle a command that completed with pkt_reason CMD_TAG_REJECT: the
 * device rejected tagged queuing, so disable it (clearing un_tagflags,
 * lowering the throttle, and turning off the "tagged-qing" capability)
 * and retry the command untagged.  Called with SD_MUTEX held (dropped
 * and reacquired around scsi_ifsetcap()).
 */
static void
sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_harderrs);
	pktp->pkt_flags = 0;
	un->un_tagflags = 0;
	/* Devices that still do (untagged) queueing keep a small window. */
	if (un->un_f_opt_queueing == TRUE) {
		un->un_throttle = min(un->un_throttle, 3);
	} else {
		un->un_throttle = 1;
	}
	mutex_exit(SD_MUTEX(un));
	(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
	mutex_enter(SD_MUTEX(un));

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	/* Legacy behavior: do not check against the retry counts here. */
	sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}
/*
 * Default handler for pkt_reason values with no dedicated handler:
 * count a transport error, reset the target, and retry.  Called with
 * SD_MUTEX held.
 */
static void
sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_UPDATE_ERRSTATS(un, sd_transerrs);
	sd_reset_target(un, pktp);

	SD_UPDATE_RESERVATION_STATUS(un, pktp);

	sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
	    sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
}
/*
 * Handle a command that completed with CHECK CONDITION status.  With
 * auto-request-sense disabled, issue an explicit REQUEST SENSE; with
 * ARQ enabled the sense should have arrived with the pkt, so simply
 * retry the command.  Called with SD_MUTEX held.
 */
static void
sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
	    "entry: buf:0x%p xp:0x%p\n", bp, xp);

	if (un->un_f_arq_enabled == FALSE) {
		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
		    "no ARQ, sending request sense command\n");
		sd_send_request_sense_command(un, bp, pktp);
	} else {
		SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
		    "ARQ,retrying request sense command\n");
		/* Fibre devices get a 100ms delay before the retry. */
		sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
		    un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0,
		    NULL);
	}

	SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
}
/*
 * Handle a command that completed with BUSY status: retry after the
 * busy timeout, escalating to LUN/target/bus resets when the retry
 * count reaches the reset threshold, and failing the command once the
 * busy retry count is exhausted.  Called with SD_MUTEX held (dropped
 * and reacquired around the resets).
 */
static void
sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_busy: entry\n");

	/* Out of busy retries: fail the command. */
	if (xp->xb_retry_count >= un->un_busy_retry_count) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "device busy too long\n");
		sd_return_failed_command(un, bp, EIO);
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_pkt_status_busy: exit\n");
		return;
	}

	xp->xb_retry_count++;

	/*
	 * At the reset threshold (at least the 2nd retry), escalate:
	 * LUN reset, then target reset, then bus reset.  If all resets
	 * fail, give up on the command.  scsi_reset() is called with
	 * SD_MUTEX dropped.
	 */
	if (xp->xb_retry_count ==
	    ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
		int rval = 0;
		mutex_exit(SD_MUTEX(un));
		if (un->un_f_allow_bus_device_reset == TRUE) {
			/*
			 * First try a LUN reset (if enabled), then a
			 * target reset if that failed.
			 */
			if (un->un_f_lun_reset_enabled == TRUE) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_pkt_status_busy: RESET_LUN\n");
				rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
			}
			if (rval == 0) {
				SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
				    "sd_pkt_status_busy: RESET_TARGET\n");
				rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
			}
		}
		if (rval == 0) {
			/* Device/target resets failed: reset the bus. */
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_pkt_status_busy: RESET_ALL\n");
			rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
		}
		mutex_enter(SD_MUTEX(un));
		if (rval == 0) {
			/* No reset succeeded: give up on the command. */
			sd_return_failed_command(un, bp, EIO);
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_pkt_status_busy: exit (failed cmd)\n");
			return;
		}
	}

	/*
	 * Retry the command; the retry count was bumped above, so use
	 * SD_RETRIES_NOCHECK.
	 */
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL,
	    EIO, un->un_busy_timeout, NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_busy: exit\n");
}
/*
 * Handle a command that completed with RESERVATION CONFLICT status.
 * PRIN/PROUT commands under SCSI-3 reservations fail with EACCES; in
 * failfast mode the conflict may panic the host (per sd_failfast_enable)
 * or fail with EACCES; otherwise the command is failed or retried per
 * sd_retry_on_reservation_conflict.  Called with SD_MUTEX held.
 */
static void
sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp, struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	/*
	 * Under SCSI-3 persistent reservations, a conflict on the PR
	 * commands themselves is reported straight back to the caller.
	 */
	if (un->un_reservation_type == SD_SCSI3_RESERVATION) {
		int cmd = SD_GET_PKT_OPCODE(pktp);
		if ((cmd == SCMD_PERSISTENT_RESERVE_IN) ||
		    (cmd == SCMD_PERSISTENT_RESERVE_OUT)) {
			sd_return_failed_command(un, bp, EACCES);
			return;
		}
	}

	un->un_resvd_status |= SD_RESERVATION_CONFLICT;

	if ((un->un_resvd_status & SD_FAILFAST) != 0) {
		/* Failfast policy: panic when enabled, else fail EACCES. */
		if (sd_failfast_enable != 0) {
			sd_panic_for_res_conflict(un);
		}
		SD_ERROR(SD_LOG_IO, un,
		    "sd_handle_resv_conflict: Disk Reserved\n");
		sd_return_failed_command(un, bp, EACCES);
		return;
	}

	/*
	 * Reservation-conflict retries are tunable via
	 * sd_retry_on_reservation_conflict (see sd_resv_conflict_name).
	 */
	if (sd_retry_on_reservation_conflict == 0) {
		SD_ERROR(SD_LOG_IO, un,
		    "sd_handle_resv_conflict: Device Reserved\n");
		sd_return_failed_command(un, bp, EIO);
		return;
	}

	sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
	    (clock_t)2, NULL);
}
/*
 * Handle a command that completed with QFULL status: reduce the
 * throttle and retry the command after SD_RESTART_TIMEOUT without
 * counting it against the retry limits (errno 0 — QFULL is not an
 * error).  Called with SD_MUTEX held.
 */
static void
sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
    struct scsi_pkt *pktp)
{
	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_qfull: entry\n");

	/* Throttle back, then requeue the command for a later retry. */
	sd_reduce_throttle(un, SD_THROTTLE_QFULL);
	sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0,
	    SD_RESTART_TIMEOUT, NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
	    "sd_pkt_status_qfull: exit\n");
}
/*
 * Issue a reset on behalf of a failed command: LUN reset first (if
 * enabled), then target reset, then a full bus reset as last resort.
 * Skipped entirely if the pkt statistics show a reset/abort already
 * occurred for this command.  Called with SD_MUTEX held (dropped and
 * reacquired around the scsi_reset() calls).
 */
static void
sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp)
{
	int rval = 0;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(pktp != NULL);

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n");

	/* Already reset/aborted by the transport: nothing to do. */
	if ((pktp->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reset_target: no reset\n");
		return;
	}

	mutex_exit(SD_MUTEX(un));

	if (un->un_f_allow_bus_device_reset == TRUE) {
		if (un->un_f_lun_reset_enabled == TRUE) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_reset_target: RESET_LUN\n");
			rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
		}
		if (rval == 0) {
			SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
			    "sd_reset_target: RESET_TARGET\n");
			rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
		}
	}

	/* Device-level resets failed (or disallowed): reset the bus. */
	if (rval == 0) {
		SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
		    "sd_reset_target: RESET_ALL\n");
		(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
	}

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n");
}
/*
 * Taskq callback run after a unit attention that indicates the target's
 * capacity may have changed: re-read the capacity, and if it grew,
 * update the block info and — when the EFI label covers less than the
 * new capacity — log a LUN-expansion sysevent.  Runs without SD_MUTEX.
 */
static void
sd_target_change_task(void *arg)
{
	struct sd_lun *un = arg;
	uint64_t capacity;
	diskaddr_t label_cap;
	uint_t lbasize;
	sd_ssc_t *ssc;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	/* Nothing to compare against until geometry is known. */
	if ((un->un_f_blockcount_is_valid == FALSE) ||
	    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
		return;
	}

	ssc = sd_ssc_init(un);

	if (sd_send_scsi_READ_CAPACITY(ssc, &capacity,
	    &lbasize, SD_PATH_DIRECT) != 0) {
		SD_ERROR(SD_LOG_ERROR, un,
		    "sd_target_change_task: fail to read capacity\n");
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto task_exit;
	}

	mutex_enter(SD_MUTEX(un));
	/* Only capacity growth is acted upon. */
	if (capacity <= un->un_blockcount) {
		mutex_exit(SD_MUTEX(un));
		goto task_exit;
	}

	sd_update_block_info(un, lbasize, capacity);
	mutex_exit(SD_MUTEX(un));

	/*
	 * If the device now exceeds what the EFI label describes,
	 * notify userland of the expansion via a sysevent.
	 */
	if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
	    (void*)SD_PATH_DIRECT) == 0) {
		mutex_enter(SD_MUTEX(un));
		if (un->un_f_blockcount_is_valid &&
		    un->un_blockcount > label_cap) {
			mutex_exit(SD_MUTEX(un));
			sd_log_lun_expansion_event(un, KM_SLEEP);
		} else {
			mutex_exit(SD_MUTEX(un));
		}
	}

task_exit:
	sd_ssc_fini(ssc);
}
/*
 * Log an EC_DEV_STATUS sysevent of subclass "esc" for this device,
 * once for each of the ":a" and ":b" minor-node device paths.
 * "km_flag" (KM_SLEEP/KM_NOSLEEP) governs the allocations.
 */
static void
sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag)
{
	int err;
	char *path;
	nvlist_t *attr_list;
	size_t n;

	/* Allocate and build sysevent attribute list */
	err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, km_flag);
	if (err != 0) {
		SD_ERROR(SD_LOG_ERROR, un,
		    "sd_log_dev_status_event: fail to allocate space\n");
		return;
	}

	/* May return NULL under KM_NOSLEEP. */
	path = kmem_alloc(MAXPATHLEN, km_flag);
	if (path == NULL) {
		nvlist_free(attr_list);
		SD_ERROR(SD_LOG_ERROR, un,
		    "sd_log_dev_status_event: fail to allocate space\n");
		return;
	}

	/* Build "/devices<ddi path>:x"; the 'x' is patched per minor. */
	n = snprintf(path, MAXPATHLEN, "/devices");
	(void) ddi_pathname(SD_DEVINFO(un), path + n);
	n = strlen(path);
	n += snprintf(path + n, MAXPATHLEN - n, ":x");

	/* Loop emits the event for minor suffixes ':a' and ':b'. */
	for (char c = 'a'; c < 'c'; c++) {
		path[n - 1] = c;
		err = nvlist_add_string(attr_list, DEV_PHYS_PATH, path);
		if (err != 0) {
			SD_ERROR(SD_LOG_ERROR, un,
			    "sd_log_dev_status_event: fail to add attribute\n");
			break;
		}
		err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR,
		    EC_DEV_STATUS, esc, attr_list, NULL, km_flag);
		if (err != DDI_SUCCESS) {
			SD_ERROR(SD_LOG_ERROR, un,
			    "sd_log_dev_status_event: fail to log sysevent\n");
			break;
		}
	}

	nvlist_free(attr_list);
	kmem_free(path, MAXPATHLEN);
}
/* Log a dynamic-LUN-expansion (ESC_DEV_DLE) sysevent for this device. */
static void
sd_log_lun_expansion_event(struct sd_lun *un, int km_flag)
{
	sd_log_dev_status_event(un, ESC_DEV_DLE, km_flag);
}
/* Log an eject-request (ESC_DEV_EJECT_REQUEST) sysevent for this device. */
static void
sd_log_eject_request_event(struct sd_lun *un, int km_flag)
{
	sd_log_dev_status_event(un, ESC_DEV_EJECT_REQUEST, km_flag);
}
/*
 * Taskq callback run after a "medium may have changed" unit attention:
 * repeatedly call sd_handle_mchange() (with 500ms backoff) to pick up
 * the new media, then either retry or fail the original command that
 * saw the UA.  The pkt's private field carries that original bp.
 * Runs without SD_MUTEX; takes it to complete the command.
 */
static void
sd_media_change_task(void *arg)
{
	struct scsi_pkt *pktp = arg;
	struct sd_lun *un;
	struct buf *bp;
	struct sd_xbuf *xp;
	int err = 0;
	int retry_count = 0;
	int retry_limit = SD_UNIT_ATTENTION_RETRY/10;
	struct sd_sense_info si;

	ASSERT(pktp != NULL);
	bp = (struct buf *)pktp->pkt_private;
	ASSERT(bp != NULL);
	xp = SD_GET_XBUF(bp);
	ASSERT(xp != NULL);
	un = SD_GET_UN(bp);
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_f_monitor_media_state);

	si.ssi_severity = SCSI_ERR_INFO;
	si.ssi_pfa_flag = FALSE;

	/*
	 * Retry the media-change handling; EAGAIN raises the retry
	 * budget to the full SD_UNIT_ATTENTION_RETRY count.
	 */
	while (retry_count++ < retry_limit) {
		if ((err = sd_handle_mchange(un)) == 0) {
			break;
		}
		if (err == EAGAIN) {
			retry_limit = SD_UNIT_ATTENTION_RETRY;
		}
		/* Pause 500ms before the next attempt. */
		delay(drv_usectohz(500000));
	}

	mutex_enter(SD_MUTEX(un));
	if (err != SD_CMD_SUCCESS) {
		/* New media could not be handled: fail the command. */
		SD_UPDATE_ERRSTATS(un, sd_harderrs);
		SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
		si.ssi_severity = SCSI_ERR_FATAL;
		sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
		sd_return_failed_command(un, bp, EIO);
	} else {
		/* Media change handled: retry the original command. */
		sd_retry_command(un, bp, SD_RETRIES_UA, sd_print_sense_msg,
		    &si, EIO, (clock_t)0, NULL);
	}
	mutex_exit(SD_MUTEX(un));
}
/*
 * sd_handle_mchange: revalidate the device after a media change.
 * Reads the new capacity and blocksize, updates per-LUN block info and
 * the capacity kstat, rechecks CD writability, invalidates and
 * revalidates the cmlb label, and finally re-locks the media door.
 *
 * Returns 0 on success, EAGAIN if the device is still becoming ready,
 * EIO if the new label cannot be validated, or another errno from the
 * issued commands.  Must be called without SD_MUTEX held.
 */
static int
sd_handle_mchange(struct sd_lun *un)
{
uint64_t capacity;
uint32_t lbasize;
int rval;
sd_ssc_t *ssc;
ASSERT(!mutex_owned(SD_MUTEX(un)));
ASSERT(un->un_f_monitor_media_state);
ssc = sd_ssc_init(un);
rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
SD_PATH_DIRECT_PRIORITY);
if (rval != 0)
goto failed;
mutex_enter(SD_MUTEX(un));
sd_update_block_info(un, lbasize, capacity);
if (un->un_errstats != NULL) {
struct sd_errstats *stp =
(struct sd_errstats *)un->un_errstats->ks_data;
/* Refresh the capacity kstat to reflect the new media. */
stp->sd_capacity.value.ui64 = (uint64_t)
((uint64_t)un->un_blockcount *
(uint64_t)un->un_tgt_blocksize);
}
if (ISCD(un)) {
/* New CD media may have different write capabilities. */
sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY);
}
mutex_exit(SD_MUTEX(un));
/* Force the label to be re-read for the new media. */
cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
if (cmlb_validate(un->un_cmlbhandle, 0,
(void *)SD_PATH_DIRECT_PRIORITY) != 0) {
sd_ssc_fini(ssc);
return (EIO);
} else {
if (un->un_f_pkstats_enabled) {
sd_set_pstats(un);
SD_TRACE(SD_LOG_IO_PARTITION, un,
"sd_handle_mchange: un:0x%p pstats created and "
"set\n", un);
}
}
/* Re-lock the door now that the new media has been accepted. */
rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
SD_PATH_DIRECT_PRIORITY);
failed:
if (rval != 0)
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
sd_ssc_fini(ssc);
return (rval);
}
/*
 * sd_send_scsi_DOORLOCK: issue a PREVENT/ALLOW MEDIUM REMOVAL command.
 * flag is SD_REMOVAL_PREVENT or SD_REMOVAL_ALLOW; path_flag selects the
 * transport path (e.g. SD_PATH_DIRECT_PRIORITY during recovery).
 *
 * Returns 0 on success or if the device does not support door locking,
 * EAGAIN if a PREVENT is requested while an eject is in progress, or the
 * errno from sd_ssc_send().  Must be called without SD_MUTEX held.
 */
static int
sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag)
{
struct scsi_extended_sense sense_buf;
union scsi_cdb cdb;
struct uscsi_cmd ucmd_buf;
int status;
struct sd_lun *un;
ASSERT(ssc != NULL);
un = ssc->ssc_un;
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un);
if (un->un_f_doorlock_supported == FALSE) {
/* Nothing to do; treat as success. */
return (0);
}
if (flag == SD_REMOVAL_PREVENT) {
/* Do not re-lock the door while an eject is underway. */
mutex_enter(SD_MUTEX(un));
if (un->un_f_ejecting == TRUE) {
mutex_exit(SD_MUTEX(un));
return (EAGAIN);
}
mutex_exit(SD_MUTEX(un));
}
bzero(&cdb, sizeof (cdb));
bzero(&ucmd_buf, sizeof (ucmd_buf));
cdb.scc_cmd = SCMD_DOORLOCK;
cdb.cdb_opaque[4] = (uchar_t)flag;
ucmd_buf.uscsi_cdb = (char *)&cdb;
ucmd_buf.uscsi_cdblen = CDB_GROUP0;
ucmd_buf.uscsi_bufaddr = NULL;
ucmd_buf.uscsi_buflen = 0;
ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
ucmd_buf.uscsi_timeout = 15;
SD_TRACE(SD_LOG_IO, un,
"sd_send_scsi_DOORLOCK: returning sd_ssc_send\n");
status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
UIO_SYSSPACE, path_flag);
if (status == 0)
sd_ssc_assessment(ssc, SD_FMT_STANDARD);
/*
 * ILLEGAL REQUEST means the device does not implement door locking:
 * remember that, suppress the FMA assessment, and report success.
 */
if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
(ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
(scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) {
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
un->un_f_doorlock_supported = FALSE;
return (0);
}
return (status);
}
#define SD_CAPACITY_SIZE sizeof (struct scsi_capacity)
/*
 * sd_send_scsi_READ_CAPACITY: issue a 10-byte READ CAPACITY and return
 * the block count in *capp and the logical block size in *lbap.  If the
 * device reports 0xffffffff blocks (too large for the 10-byte form) it
 * falls back to READ CAPACITY (16).
 *
 * Returns 0 on success, EACCES on reservation conflict, EAGAIN if the
 * device is becoming ready (ASC 0x04 / ASCQ 0x01), or EIO on invalid
 * data.  Must be called without SD_MUTEX held.
 */
static int
sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
int path_flag)
{
struct scsi_extended_sense sense_buf;
struct uscsi_cmd ucmd_buf;
union scsi_cdb cdb;
uint32_t *capacity_buf;
uint64_t capacity;
uint32_t lbasize;
uint32_t pbsize;
int status;
struct sd_lun *un;
ASSERT(ssc != NULL);
un = ssc->ssc_un;
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
ASSERT(capp != NULL);
ASSERT(lbap != NULL);
SD_TRACE(SD_LOG_IO, un,
"sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
bzero(&cdb, sizeof (cdb));
bzero(&ucmd_buf, sizeof (ucmd_buf));
capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP);
cdb.scc_cmd = SCMD_READ_CAPACITY;
ucmd_buf.uscsi_cdb = (char *)&cdb;
ucmd_buf.uscsi_cdblen = CDB_GROUP1;
ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf;
ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE;
ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
ucmd_buf.uscsi_timeout = 60;
status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
UIO_SYSSPACE, path_flag);
switch (status) {
case 0:
if (ucmd_buf.uscsi_resid != 0) {
/* Short read: capacity data is unusable. */
sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
"sd_send_scsi_READ_CAPACITY received invalid "
"capacity data");
kmem_free(capacity_buf, SD_CAPACITY_SIZE);
return (EIO);
}
/* Response words are big-endian: [0] = last LBA, [1] = block size. */
capacity = BE_32(capacity_buf[0]);
lbasize = BE_32(capacity_buf[1]);
kmem_free(capacity_buf, SD_CAPACITY_SIZE);
if (capacity == 0xffffffff) {
/*
 * Device too large for the 10-byte form; retry with
 * READ CAPACITY (16).  rc16 already applies the
 * removable-media/ATAPI adjustments, so skip them.
 */
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity,
&lbasize, &pbsize, path_flag);
if (status != 0) {
return (status);
} else {
goto rc16_done;
}
}
break;
case EIO:
switch (ucmd_buf.uscsi_status) {
case STATUS_RESERVATION_CONFLICT:
status = EACCES;
break;
case STATUS_CHECK:
/* ASC 0x04 / ASCQ 0x01: device is becoming ready. */
if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
(scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
(scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
kmem_free(capacity_buf, SD_CAPACITY_SIZE);
return (EAGAIN);
}
break;
default:
break;
}
/* FALLTHROUGH: EIO that was not handled above is returned below. */
default:
kmem_free(capacity_buf, SD_CAPACITY_SIZE);
return (status);
}
/* Some ATAPI CD-ROM drives report the wrong block size; force 2K. */
if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
lbasize = 2048;
}
/* The device reports the last LBA; convert to a block count. */
capacity += 1;
if (un->un_f_has_removable_media)
capacity *= (lbasize / un->un_sys_blocksize);
rc16_done:
*capp = capacity;
*lbap = lbasize;
SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
"capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
if ((capacity == 0) || (lbasize == 0)) {
sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
"sd_send_scsi_READ_CAPACITY received invalid value "
"capacity %llu lbasize %d", capacity, lbasize);
return (EIO);
}
sd_ssc_assessment(ssc, SD_FMT_STANDARD);
return (0);
}
#define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
/*
 * sd_send_scsi_READ_CAPACITY_16: issue a SERVICE ACTION IN / READ
 * CAPACITY (16) command and return the block count in *capp, the logical
 * block size in *lbap, and the physical block size in *psp.  Also
 * refreshes un->un_thin_flags from the LBPME/LBPRZ bits (byte 14) of the
 * response.
 *
 * Returns 0 on success, EACCES on reservation conflict, EAGAIN if the
 * device is becoming ready (ASC 0x04 / ASCQ 0x01), or EIO on invalid
 * data.  Must be called without SD_MUTEX held.
 */
static int
sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
uint32_t *psp, int path_flag)
{
struct scsi_extended_sense sense_buf;
struct uscsi_cmd ucmd_buf;
union scsi_cdb cdb;
uint64_t *capacity16_buf;
uint64_t capacity;
uint32_t lbasize;
uint32_t pbsize;
uint32_t lbpb_exp;
int status;
struct sd_lun *un;
ASSERT(ssc != NULL);
un = ssc->ssc_un;
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
ASSERT(capp != NULL);
ASSERT(lbap != NULL);
SD_TRACE(SD_LOG_IO, un,
"sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
bzero(&cdb, sizeof (cdb));
bzero(&ucmd_buf, sizeof (ucmd_buf));
capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
ucmd_buf.uscsi_cdb = (char *)&cdb;
ucmd_buf.uscsi_cdblen = CDB_GROUP4;
ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
ucmd_buf.uscsi_timeout = 60;
cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
UIO_SYSSPACE, path_flag);
switch (status) {
case 0:
/* The first 12 bytes (LBA + block length) must be present. */
if (ucmd_buf.uscsi_resid > 20) {
sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
"sd_send_scsi_READ_CAPACITY_16 received invalid "
"capacity data");
kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
return (EIO);
}
/* Big-endian response: last LBA, block length, provisioning bits. */
capacity = BE_64(capacity16_buf[0]);
lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
lbpb_exp = (BE_64(capacity16_buf[1]) >> 16) & 0x0f;
un->un_thin_flags = 0;
/* Byte 14: bit 7 = LBPME (thin provisioned), bit 6 = LBPRZ. */
if (((uint8_t *)capacity16_buf)[14] & (1 << 7))
un->un_thin_flags |= SD_THIN_PROV_ENABLED;
if (((uint8_t *)capacity16_buf)[14] & (1 << 6))
un->un_thin_flags |= SD_THIN_PROV_READ_ZEROS;
pbsize = lbasize << lbpb_exp;
kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
if (capacity == 0xffffffffffffffff) {
sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
"disk is too large");
return (EIO);
}
break;
case EIO:
switch (ucmd_buf.uscsi_status) {
case STATUS_RESERVATION_CONFLICT:
status = EACCES;
break;
case STATUS_CHECK:
/* ASC 0x04 / ASCQ 0x01: device is becoming ready. */
if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
(scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
(scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
return (EAGAIN);
}
break;
default:
break;
}
/* FALLTHROUGH: EIO that was not handled above is returned below. */
default:
kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
return (status);
}
/* Some ATAPI CD-ROM drives report the wrong block size; force 2K. */
if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
lbasize = 2048;
}
/* The device reports the last LBA; convert to a block count. */
capacity += 1;
if (un->un_f_has_removable_media)
capacity *= (lbasize / un->un_sys_blocksize);
*capp = capacity;
*lbap = lbasize;
*psp = pbsize;
SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
"capacity:0x%llx lbasize:0x%x, pbsize: 0x%x\n",
capacity, lbasize, pbsize);
if ((capacity == 0) || (lbasize == 0) || (pbsize == 0)) {
/*
 * Fix: the format string has three conversion specifiers, so
 * pbsize must be passed as the third argument (it was missing,
 * which is undefined behavior and logged garbage).
 */
sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
"sd_send_scsi_READ_CAPACITY_16 received invalid value "
"capacity %llu lbasize %d pbsize %d",
capacity, lbasize, pbsize);
return (EIO);
}
sd_ssc_assessment(ssc, SD_FMT_STANDARD);
return (0);
}
/*
 * sd_send_scsi_START_STOP_UNIT: issue a START STOP UNIT command.
 * pc_flag selects plain start/stop (SD_START_STOP) or a power condition
 * (SD_POWER_CONDITION, in which case flag is shifted into bits 7:4 of
 * CDB byte 4); flag is the target state (start/stop/eject or a power
 * condition code).
 *
 * Returns 0 on success (or when start/stop is known-unsupported),
 * EAGAIN if a non-eject request arrives during an eject, EACCES on
 * reservation conflict, ENOTSUP on ILLEGAL REQUEST, ENXIO if no medium
 * is present (ASC 0x3A).  Must be called without SD_MUTEX held.
 */
static int
sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, int flag,
int path_flag)
{
struct scsi_extended_sense sense_buf;
union scsi_cdb cdb;
struct uscsi_cmd ucmd_buf;
int status;
struct sd_lun *un;
ASSERT(ssc != NULL);
un = ssc->ssc_un;
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
SD_TRACE(SD_LOG_IO, un,
"sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un);
/* Skip devices known not to support plain start/stop. */
if (un->un_f_check_start_stop &&
(pc_flag == SD_START_STOP) &&
((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) &&
(un->un_f_start_stop_supported != TRUE)) {
return (0);
}
if (flag != SD_TARGET_EJECT) {
/* Do not interfere with an eject already in progress. */
mutex_enter(SD_MUTEX(un));
if (un->un_f_ejecting == TRUE) {
mutex_exit(SD_MUTEX(un));
return (EAGAIN);
}
mutex_exit(SD_MUTEX(un));
}
bzero(&cdb, sizeof (cdb));
bzero(&ucmd_buf, sizeof (ucmd_buf));
bzero(&sense_buf, sizeof (struct scsi_extended_sense));
cdb.scc_cmd = SCMD_START_STOP;
cdb.cdb_opaque[4] = (pc_flag == SD_POWER_CONDITION) ?
(uchar_t)(flag << 4) : (uchar_t)flag;
ucmd_buf.uscsi_cdb = (char *)&cdb;
ucmd_buf.uscsi_cdblen = CDB_GROUP0;
ucmd_buf.uscsi_bufaddr = NULL;
ucmd_buf.uscsi_buflen = 0;
ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
ucmd_buf.uscsi_timeout = 200;
status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
UIO_SYSSPACE, path_flag);
switch (status) {
case 0:
sd_ssc_assessment(ssc, SD_FMT_STANDARD);
break;
case EIO:
switch (ucmd_buf.uscsi_status) {
case STATUS_RESERVATION_CONFLICT:
status = EACCES;
break;
case STATUS_CHECK:
if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) {
switch (scsi_sense_key(
(uint8_t *)&sense_buf)) {
case KEY_ILLEGAL_REQUEST:
status = ENOTSUP;
break;
case KEY_NOT_READY:
/* ASC 0x3A: medium not present. */
if (scsi_sense_asc(
(uint8_t *)&sense_buf)
== 0x3A) {
status = ENXIO;
}
break;
default:
break;
}
}
break;
default:
break;
}
break;
default:
break;
}
SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n");
return (status);
}
/*
 * sd_start_stop_unit_callback: timeout(9F) callback that defers the
 * actual START STOP UNIT work to sd_start_stop_unit_task() on the sd
 * taskq, since SCSI commands cannot be issued from timeout context.
 * Dispatch failure (KM_NOSLEEP) is ignored; the retry machinery will
 * try again.
 */
static void
sd_start_stop_unit_callback(void *arg)
{
struct sd_lun *un = arg;
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n");
(void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP);
}
/*
 * sd_start_stop_unit_task: taskq worker that spins up a stopped unit
 * (or restores its power condition) and then restarts the queued retry
 * command.  Skipped entirely while a format is in progress.
 * Must run without SD_MUTEX held; takes it only briefly around state
 * reads and the final sd_start_cmds().
 */
static void
sd_start_stop_unit_task(void *arg)
{
struct sd_lun *un = arg;
sd_ssc_t *ssc;
int power_level;
int rval;
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n");
mutex_enter(SD_MUTEX(un));
if (un->un_f_format_in_progress == TRUE) {
/* Never start/stop the unit mid-format. */
mutex_exit(SD_MUTEX(un));
return;
}
mutex_exit(SD_MUTEX(un));
ssc = sd_ssc_init(un);
if (un->un_f_power_condition_supported) {
mutex_enter(SD_MUTEX(un));
ASSERT(SD_PM_IS_LEVEL_VALID(un, un->un_power_level));
/*
 * Restore the current power level if it allows random
 * access; otherwise bring the device fully active.
 */
power_level = sd_pwr_pc.ran_perf[un->un_power_level]
> 0 ? un->un_power_level : SD_SPINDLE_ACTIVE;
mutex_exit(SD_MUTEX(un));
rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
sd_pl2pc[power_level], SD_PATH_DIRECT_PRIORITY);
} else {
rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
SD_TARGET_START, SD_PATH_DIRECT_PRIORITY);
}
if (rval != 0)
sd_ssc_assessment(ssc, SD_FMT_IGNORE);
sd_ssc_fini(ssc);
mutex_enter(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
"sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
un, un->un_retry_bp);
/* Unit is (presumably) started: release the retry command. */
un->un_startstop_timeid = NULL;
sd_start_cmds(un, un->un_retry_bp);
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n");
}
/*
 * sd_send_scsi_INQUIRY: issue an INQUIRY command into bufaddr/buflen.
 * evpd and page_code select a vital product data page (CDB bytes 1/2).
 * On success, *residp (if non-NULL) receives the unread residual so the
 * caller can tell how much data was actually returned.  No request
 * sense is collected for this command.
 *
 * Returns 0 on success or the errno from sd_ssc_send().  Must be called
 * without SD_MUTEX held.
 */
static int
sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen,
uchar_t evpd, uchar_t page_code, size_t *residp)
{
union scsi_cdb cdb;
struct uscsi_cmd ucmd_buf;
int status;
struct sd_lun *un;
ASSERT(ssc != NULL);
un = ssc->ssc_un;
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
ASSERT(bufaddr != NULL);
SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
bzero(&cdb, sizeof (cdb));
bzero(&ucmd_buf, sizeof (ucmd_buf));
bzero(bufaddr, buflen);
cdb.scc_cmd = SCMD_INQUIRY;
cdb.cdb_opaque[1] = evpd;
cdb.cdb_opaque[2] = page_code;
FORMG0COUNT(&cdb, buflen);
ucmd_buf.uscsi_cdb = (char *)&cdb;
ucmd_buf.uscsi_cdblen = CDB_GROUP0;
ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
ucmd_buf.uscsi_buflen = buflen;
ucmd_buf.uscsi_rqbuf = NULL;
ucmd_buf.uscsi_rqlen = 0;
ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
ucmd_buf.uscsi_timeout = 200;
status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
UIO_SYSSPACE, SD_PATH_DIRECT);
if (status == 0)
sd_ssc_assessment(ssc, SD_FMT_STANDARD);
if ((status == 0) && (residp != NULL)) {
*residp = ucmd_buf.uscsi_resid;
}
SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
return (status);
}
/*
 * sd_send_scsi_TEST_UNIT_READY: issue a TEST UNIT READY command.
 * flag bits: SD_CHECK_FOR_MEDIA (map "medium not present" to ENXIO),
 * SD_DONT_RETRY_TUR (add USCSI_DIAGNOSE so the command is not retried),
 * SD_BYPASS_PM (use SD_PATH_DIRECT so the TUR does not power-cycle the
 * device).
 *
 * Returns 0 on success, EACCES on reservation conflict, ENXIO when
 * media is absent (with SD_CHECK_FOR_MEDIA), or the errno from
 * sd_ssc_send().  Must be called without SD_MUTEX held.
 */
static int
sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag)
{
struct scsi_extended_sense sense_buf;
union scsi_cdb cdb;
struct uscsi_cmd ucmd_buf;
int status;
struct sd_lun *un;
ASSERT(ssc != NULL);
un = ssc->ssc_un;
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
SD_TRACE(SD_LOG_IO, un,
"sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
if (un->un_f_cfg_tur_check == TRUE) {
/*
 * Workaround for devices that mishandle TUR amid other I/O:
 * skip the TUR when commands are already in flight.
 */
mutex_enter(SD_MUTEX(un));
if (un->un_ncmds_in_transport != 0) {
mutex_exit(SD_MUTEX(un));
return (0);
}
mutex_exit(SD_MUTEX(un));
}
bzero(&cdb, sizeof (cdb));
bzero(&ucmd_buf, sizeof (ucmd_buf));
bzero(&sense_buf, sizeof (struct scsi_extended_sense));
cdb.scc_cmd = SCMD_TEST_UNIT_READY;
ucmd_buf.uscsi_cdb = (char *)&cdb;
ucmd_buf.uscsi_cdblen = CDB_GROUP0;
ucmd_buf.uscsi_bufaddr = NULL;
ucmd_buf.uscsi_buflen = 0;
ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
if ((flag & SD_DONT_RETRY_TUR) != 0) {
ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
}
ucmd_buf.uscsi_timeout = 60;
status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT :
SD_PATH_STANDARD));
switch (status) {
case 0:
sd_ssc_assessment(ssc, SD_FMT_STANDARD);
break;
case EIO:
switch (ucmd_buf.uscsi_status) {
case STATUS_RESERVATION_CONFLICT:
status = EACCES;
break;
case STATUS_CHECK:
if ((flag & SD_CHECK_FOR_MEDIA) == 0) {
break;
}
/* NOT READY + ASC 0x3A: medium not present. */
if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
(scsi_sense_key((uint8_t *)&sense_buf) ==
KEY_NOT_READY) &&
(scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) {
status = ENXIO;
}
break;
default:
break;
}
break;
default:
break;
}
SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n");
return (status);
}
/*
 * sd_send_scsi_PERSISTENT_RESERVE_IN: issue a PERSISTENT RESERVE IN
 * command.  usr_cmd is the service action (SD_READ_KEYS or
 * SD_READ_RESV); the result is placed in data_bufp/data_len.  If the
 * caller passes no buffer, a temporary MHIOC_RESV_KEY_SIZE buffer is
 * allocated and freed before return (the data is then discarded).
 *
 * Returns 0 on success, EACCES on reservation conflict, ENOTSUP when
 * the device rejects the command (ILLEGAL REQUEST), or the errno from
 * sd_ssc_send().  Must be called without SD_MUTEX held.
 */
static int
sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd,
uint16_t data_len, uchar_t *data_bufp)
{
struct scsi_extended_sense sense_buf;
union scsi_cdb cdb;
struct uscsi_cmd ucmd_buf;
int status;
int no_caller_buf = FALSE;
struct sd_lun *un;
ASSERT(ssc != NULL);
un = ssc->ssc_un;
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));
SD_TRACE(SD_LOG_IO, un,
"sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);
bzero(&cdb, sizeof (cdb));
bzero(&ucmd_buf, sizeof (ucmd_buf));
bzero(&sense_buf, sizeof (struct scsi_extended_sense));
if (data_bufp == NULL) {
/* Allocate a scratch buffer; remember to free it below. */
ASSERT(data_len == 0);
data_len = MHIOC_RESV_KEY_SIZE;
data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
no_caller_buf = TRUE;
}
cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
cdb.cdb_opaque[1] = usr_cmd;
FORMG1COUNT(&cdb, data_len);
ucmd_buf.uscsi_cdb = (char *)&cdb;
ucmd_buf.uscsi_cdblen = CDB_GROUP1;
ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp;
ucmd_buf.uscsi_buflen = data_len;
ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
ucmd_buf.uscsi_timeout = 60;
status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
UIO_SYSSPACE, SD_PATH_STANDARD);
switch (status) {
case 0:
sd_ssc_assessment(ssc, SD_FMT_STANDARD);
break;
case EIO:
switch (ucmd_buf.uscsi_status) {
case STATUS_RESERVATION_CONFLICT:
status = EACCES;
break;
case STATUS_CHECK:
if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
(scsi_sense_key((uint8_t *)&sense_buf) ==
KEY_ILLEGAL_REQUEST)) {
status = ENOTSUP;
}
break;
default:
break;
}
break;
default:
break;
}
SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");
if (no_caller_buf == TRUE) {
kmem_free(data_bufp, data_len);
}
return (status);
}
/*
 * sd_send_scsi_PERSISTENT_RESERVE_OUT: issue a PERSISTENT RESERVE OUT
 * command.  usr_cmd is the SD_SCSI3_* service action; usr_bufp points
 * at the corresponding mhioc_* structure, from which the 24-byte
 * parameter list (sd_prout_t) is built.
 *
 * Returns 0 on success, EINVAL for a NULL usr_bufp, EACCES on
 * reservation conflict, ENOTSUP when the device rejects the command
 * (ILLEGAL REQUEST), or the errno from sd_ssc_send().  Must be called
 * without SD_MUTEX held.
 */
static int
sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd,
uchar_t *usr_bufp)
{
struct scsi_extended_sense sense_buf;
union scsi_cdb cdb;
struct uscsi_cmd ucmd_buf;
int status;
uchar_t data_len = sizeof (sd_prout_t);
sd_prout_t *prp;
struct sd_lun *un;
ASSERT(ssc != NULL);
un = ssc->ssc_un;
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
/* The SPC parameter list for these service actions is 24 bytes. */
ASSERT(data_len == 24);
SD_TRACE(SD_LOG_IO, un,
"sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un);
if (usr_bufp == NULL) {
return (EINVAL);
}
bzero(&cdb, sizeof (cdb));
bzero(&ucmd_buf, sizeof (ucmd_buf));
bzero(&sense_buf, sizeof (struct scsi_extended_sense));
prp = kmem_zalloc(data_len, KM_SLEEP);
cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT;
cdb.cdb_opaque[1] = usr_cmd;
FORMG1COUNT(&cdb, data_len);
ucmd_buf.uscsi_cdb = (char *)&cdb;
ucmd_buf.uscsi_cdblen = CDB_GROUP1;
ucmd_buf.uscsi_bufaddr = (caddr_t)prp;
ucmd_buf.uscsi_buflen = data_len;
ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
ucmd_buf.uscsi_timeout = 60;
/* Fill the parameter list from the user's mhioc structure. */
switch (usr_cmd) {
case SD_SCSI3_REGISTER: {
mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp;
bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
bcopy(ptr->newkey.key, prp->service_key,
MHIOC_RESV_KEY_SIZE);
prp->aptpl = ptr->aptpl;
break;
}
case SD_SCSI3_CLEAR: {
mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;
bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
break;
}
case SD_SCSI3_RESERVE:
case SD_SCSI3_RELEASE: {
mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;
bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
prp->scope_address = BE_32(ptr->scope_specific_addr);
cdb.cdb_opaque[2] = ptr->type;
break;
}
case SD_SCSI3_PREEMPTANDABORT: {
mhioc_preemptandabort_t *ptr =
(mhioc_preemptandabort_t *)usr_bufp;
bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
bcopy(ptr->victim_key.key, prp->service_key,
MHIOC_RESV_KEY_SIZE);
prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr);
cdb.cdb_opaque[2] = ptr->resvdesc.type;
/* Preempt-and-abort jumps the queue. */
ucmd_buf.uscsi_flags |= USCSI_HEAD;
break;
}
case SD_SCSI3_REGISTERANDIGNOREKEY:
{
mhioc_registerandignorekey_t *ptr;
ptr = (mhioc_registerandignorekey_t *)usr_bufp;
bcopy(ptr->newkey.key,
prp->service_key, MHIOC_RESV_KEY_SIZE);
prp->aptpl = ptr->aptpl;
break;
}
default:
ASSERT(FALSE);
break;
}
status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
UIO_SYSSPACE, SD_PATH_STANDARD);
switch (status) {
case 0:
sd_ssc_assessment(ssc, SD_FMT_STANDARD);
break;
case EIO:
switch (ucmd_buf.uscsi_status) {
case STATUS_RESERVATION_CONFLICT:
status = EACCES;
break;
case STATUS_CHECK:
if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
(scsi_sense_key((uint8_t *)&sense_buf) ==
KEY_ILLEGAL_REQUEST)) {
status = ENOTSUP;
}
break;
default:
break;
}
break;
default:
break;
}
kmem_free(prp, data_len);
SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n");
return (status);
}
/*
 * sd_send_scsi_SYNCHRONIZE_CACHE: issue a SYNCHRONIZE CACHE command via
 * the uscsi strategy path.  If dkc supplies a callback the command runs
 * asynchronously and the callback fires from biodone; otherwise the
 * function waits for completion and returns the command's status.
 * All allocated resources (cdb, uscsi_cmd, rq buffer, uip, buf) are
 * released by sd_send_scsi_SYNCHRONIZE_CACHE_biodone().
 *
 * Returns 0 on success or when the flush is suppressed; for the sync
 * case, the status computed by the biodone handler.  Must be called
 * without SD_MUTEX held.
 */
static int
sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc)
{
struct sd_uscsi_info *uip;
struct uscsi_cmd *uscmd;
union scsi_cdb *cdb;
struct buf *bp;
int rval = 0;
int is_async;
SD_TRACE(SD_LOG_IO, un,
"sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un);
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
if (dkc == NULL || dkc->dkc_callback == NULL) {
is_async = FALSE;
} else {
is_async = TRUE;
}
mutex_enter(SD_MUTEX(un));
if (un->un_f_suppress_cache_flush == TRUE) {
/*
 * Flush suppressed by configuration: report success and still
 * honor the caller's callback contract in the async case.
 */
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: \
skip the cache flush since suppress_cache_flush is %d!\n",
un->un_f_suppress_cache_flush);
if (is_async == TRUE) {
(*dkc->dkc_callback)(dkc->dkc_cookie, 0);
}
return (rval);
}
mutex_exit(SD_MUTEX(un));
cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP);
cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE;
mutex_enter(SD_MUTEX(un));
if (dkc != NULL && un->un_f_sync_nv_supported &&
(dkc->dkc_flag & FLUSH_VOLATILE)) {
/* Only flush the volatile cache (SYNC_NV bit). */
cdb->cdb_un.tag |= SD_SYNC_NV_BIT;
}
mutex_exit(SD_MUTEX(un));
uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP);
uscmd->uscsi_cdblen = CDB_GROUP1;
uscmd->uscsi_cdb = (caddr_t)cdb;
uscmd->uscsi_bufaddr = NULL;
uscmd->uscsi_buflen = 0;
uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
uscmd->uscsi_rqlen = SENSE_LENGTH;
uscmd->uscsi_rqresid = SENSE_LENGTH;
uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
uscmd->uscsi_timeout = sd_io_time;
uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
uip->ui_flags = SD_PATH_DIRECT;
uip->ui_cmdp = uscmd;
bp = getrbuf(KM_SLEEP);
bp->b_private = uip;
bp->b_flags = B_BUSY;
bp->b_bcount = 0;
bp->b_blkno = 0;
if (is_async == TRUE) {
/* biodone handler invokes the caller's callback and cleans up. */
bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone;
uip->ui_dkc = *dkc;
}
bp->b_edev = SD_GET_DEV(un);
bp->b_dev = cmpdev(bp->b_edev);
mutex_enter(SD_MUTEX(un));
/* A flush is being issued now; clear the pending-flush flag. */
un->un_f_sync_cache_required = FALSE;
mutex_exit(SD_MUTEX(un));
(void) sd_uscsi_strategy(bp);
if (is_async == FALSE) {
(void) biowait(bp);
rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp);
}
return (rval);
}
/*
 * sd_send_scsi_SYNCHRONIZE_CACHE_biodone: completion handler for the
 * SYNCHRONIZE CACHE command built by sd_send_scsi_SYNCHRONIZE_CACHE().
 * Interprets the command status, updates the per-LUN sync-cache support
 * flags, invokes the caller's dk_callback if one was supplied, and
 * frees every resource attached to the buf.
 *
 * Returns the resolved status: 0 on success (reservation conflict and
 * unsupported SYNC_NV are also mapped to 0), ENOTSUP when the device
 * does not implement SYNCHRONIZE CACHE, otherwise the raw error.
 */
static int
sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp)
{
struct sd_uscsi_info *uip;
struct uscsi_cmd *uscmd;
uint8_t *sense_buf;
struct sd_lun *un;
int status;
union scsi_cdb *cdb;
uip = (struct sd_uscsi_info *)(bp->b_private);
ASSERT(uip != NULL);
uscmd = uip->ui_cmdp;
ASSERT(uscmd != NULL);
sense_buf = (uint8_t *)uscmd->uscsi_rqbuf;
ASSERT(sense_buf != NULL);
un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
ASSERT(un != NULL);
cdb = (union scsi_cdb *)uscmd->uscsi_cdb;
status = geterror(bp);
switch (status) {
case 0:
break;
case EIO:
switch (uscmd->uscsi_status) {
case STATUS_RESERVATION_CONFLICT:
/* Another initiator holds the device; not an error here. */
status = 0;
goto done;
case STATUS_CHECK:
if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
(scsi_sense_key(sense_buf) ==
KEY_ILLEGAL_REQUEST)) {
/*
 * If the SYNC_NV bit was set, only that option
 * is unsupported; retryable, so report success.
 * Otherwise the whole command is unsupported.
 */
if (cdb->cdb_un.tag&SD_SYNC_NV_BIT) {
mutex_enter(SD_MUTEX(un));
un->un_f_sync_nv_supported = FALSE;
mutex_exit(SD_MUTEX(un));
status = 0;
SD_TRACE(SD_LOG_IO, un,
"un_f_sync_nv_supported \
is set to false.\n");
goto done;
}
mutex_enter(SD_MUTEX(un));
un->un_f_sync_cache_supported = FALSE;
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_IO, un,
"sd_send_scsi_SYNCHRONIZE_CACHE_biodone: \
un_f_sync_cache_supported set to false \
with asc = %x, ascq = %x\n",
scsi_sense_asc(sense_buf),
scsi_sense_ascq(sense_buf));
status = ENOTSUP;
goto done;
}
break;
default:
break;
}
/* FALLTHROUGH: any other failure is handled as a real flush error. */
default:
/* Flush failed: remember that one is still owed to the device. */
mutex_enter(SD_MUTEX(un));
un->un_f_sync_cache_required = TRUE;
mutex_exit(SD_MUTEX(un));
if (!un->un_f_has_removable_media) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"SYNCHRONIZE CACHE command failed (%d)\n", status);
}
break;
}
done:
if (uip->ui_dkc.dkc_callback != NULL) {
(*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
}
ASSERT((bp->b_flags & B_REMAPPED) == 0);
freerbuf(bp);
kmem_free(uip, sizeof (struct sd_uscsi_info));
kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
kmem_free(uscmd, sizeof (struct uscsi_cmd));
return (status);
}
/*
 * sd_send_scsi_UNMAP_issue_one: issue a single UNMAP command whose
 * parameter list (header already allocated by the caller at uph)
 * contains num_descr block descriptors covering `bytes` bytes in total.
 * Fills in the header length fields, sends the command, and updates the
 * per-LUN unmap kstats.
 *
 * Returns 0 on success, EACCES on reservation conflict, or the errno
 * from sd_ssc_send().
 */
static int
sd_send_scsi_UNMAP_issue_one(sd_ssc_t *ssc, unmap_param_hdr_t *uph,
uint64_t num_descr, uint64_t bytes)
{
struct sd_lun *un = ssc->ssc_un;
struct scsi_extended_sense sense_buf;
union scsi_cdb cdb;
struct uscsi_cmd ucmd_buf;
int status;
const uint64_t param_size = sizeof (unmap_param_hdr_t) +
num_descr * sizeof (unmap_blk_descr_t);
/* The header length fields are 16 bits; the caller sized num_descr. */
ASSERT3U(param_size - 2, <=, UINT16_MAX);
/* Per SBC: DATA LENGTH excludes itself (2), descriptor length the header (8). */
uph->uph_data_len = BE_16(param_size - 2);
uph->uph_descr_data_len = BE_16(param_size - 8);
bzero(&cdb, sizeof (cdb));
bzero(&ucmd_buf, sizeof (ucmd_buf));
bzero(&sense_buf, sizeof (struct scsi_extended_sense));
cdb.scc_cmd = SCMD_UNMAP;
FORMG1COUNT(&cdb, param_size);
ucmd_buf.uscsi_cdb = (char *)&cdb;
ucmd_buf.uscsi_cdblen = (uchar_t)CDB_GROUP1;
ucmd_buf.uscsi_bufaddr = (caddr_t)uph;
ucmd_buf.uscsi_buflen = param_size;
ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
ucmd_buf.uscsi_flags = USCSI_WRITE | USCSI_RQENABLE | USCSI_SILENT;
ucmd_buf.uscsi_timeout = un->un_cmd_timeout;
status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL, UIO_SYSSPACE,
SD_PATH_STANDARD);
switch (status) {
case 0:
sd_ssc_assessment(ssc, SD_FMT_STANDARD);
if (un->un_unmapstats) {
atomic_inc_64(&un->un_unmapstats->us_cmds.value.ui64);
atomic_add_64(&un->un_unmapstats->us_extents.value.ui64,
num_descr);
atomic_add_64(&un->un_unmapstats->us_bytes.value.ui64,
bytes);
}
break;
case EIO:
if (un->un_unmapstats)
atomic_inc_64(&un->un_unmapstats->us_errs.value.ui64);
switch (ucmd_buf.uscsi_status) {
case STATUS_RESERVATION_CONFLICT:
status = EACCES;
break;
default:
break;
}
break;
default:
if (un->un_unmapstats)
atomic_inc_64(&un->un_unmapstats->us_errs.value.ui64);
break;
}
return (status);
}
/*
 * Return a pointer to the i'th UNMAP block descriptor within an UNMAP
 * parameter list buffer.  Descriptors are laid out contiguously
 * immediately after the fixed-size parameter list header.
 */
static inline unmap_blk_descr_t *
UNMAP_blk_descr_i(void *buf, size_t i)
{
	uintptr_t descr_base = (uintptr_t)buf + sizeof (unmap_param_hdr_t);

	return ((unmap_blk_descr_t *)(descr_base +
	    i * sizeof (unmap_blk_descr_t)));
}
/*
 * sd_send_scsi_UNMAP_issue: translate a dkioc_free_list_t of extents
 * (byte offsets relative to the partition identified by dev) into one
 * or more UNMAP commands, splitting as needed to honor the device's
 * maximum descriptor count and maximum LBA count limits, and the
 * 32-bit per-descriptor block count.
 *
 * Returns 0 on success, EINVAL when an extent lies outside the
 * partition (or arithmetic would overflow), or the errno from the
 * issued UNMAP commands.
 */
static int
sd_send_scsi_UNMAP_issue(dev_t dev, sd_ssc_t *ssc, const dkioc_free_list_t *dfl)
{
struct sd_lun *un = ssc->ssc_un;
unmap_param_hdr_t *uph;
sd_blk_limits_t *lim = &un->un_blk_lim;
int rval = 0;
int partition;
diskaddr_t part_off_sysblks = 0, part_len_sysblks = 0;
uint64_t part_off, part_len;
uint64_t descr_cnt_lim, byte_cnt_lim;
uint64_t descr_issued = 0, bytes_issued = 0;
uph = kmem_zalloc(SD_UNMAP_PARAM_LIST_MAXSZ, KM_SLEEP);
partition = SDPART(dev);
rval = cmlb_partinfo(un->un_cmlbhandle, partition, &part_len_sysblks,
&part_off_sysblks, NULL, NULL, (void *)SD_PATH_DIRECT);
if (rval != 0)
goto out;
part_off = SD_SYSBLOCKS2BYTES(part_off_sysblks);
part_len = SD_SYSBLOCKS2BYTES(part_len_sysblks);
ASSERT(un->un_blk_lim.lim_max_unmap_lba_cnt != 0);
ASSERT(un->un_blk_lim.lim_max_unmap_descr_cnt != 0);
/* Per-command byte budget from the device's max-unmap-LBA limit. */
byte_cnt_lim = lim->lim_max_unmap_lba_cnt < UINT32_MAX ?
(uint64_t)lim->lim_max_unmap_lba_cnt * un->un_tgt_blocksize :
UINT64_MAX;
descr_cnt_lim = MIN(lim->lim_max_unmap_descr_cnt, SD_UNMAP_MAX_DESCR);
if (dfl->dfl_offset >= part_len) {
rval = SET_ERROR(EINVAL);
goto out;
}
for (size_t i = 0; i < dfl->dfl_num_exts; i++) {
const dkioc_free_list_ext_t *ext = &dfl->dfl_exts[i];
uint64_t ext_start = ext->dfle_start;
uint64_t ext_length = ext->dfle_length;
while (ext_length > 0) {
unmap_blk_descr_t *ubd;
/*
 * Chunk length: what remains of this extent, capped by
 * the per-command byte budget and the 32-bit block
 * count a single descriptor can express.
 */
uint64_t len = MIN(MIN(ext_length, byte_cnt_lim -
bytes_issued), SD_TGTBLOCKS2BYTES(un, UINT32_MAX));
/* Reject extents outside the partition or overflowing. */
if (ext_start >= part_len ||
ext_start + len < ext_start ||
dfl->dfl_offset + ext_start + len <
dfl->dfl_offset ||
dfl->dfl_offset + ext_start + len > part_len) {
rval = SET_ERROR(EINVAL);
goto out;
}
ASSERT3U(descr_issued, <, descr_cnt_lim);
ASSERT3U(bytes_issued, <, byte_cnt_lim);
ubd = UNMAP_blk_descr_i(uph, descr_issued);
ubd->ubd_lba = BE_64(SD_BYTES2TGTBLOCKS(un,
dfl->dfl_offset + ext_start + part_off));
ubd->ubd_lba_cnt = BE_32(SD_BYTES2TGTBLOCKS(un, len));
descr_issued++;
bytes_issued += len;
/* Flush a full parameter list as soon as a limit is hit. */
if (descr_issued == descr_cnt_lim ||
bytes_issued == byte_cnt_lim) {
rval = sd_send_scsi_UNMAP_issue_one(ssc, uph,
descr_issued, bytes_issued);
if (rval != 0)
goto out;
descr_issued = 0;
bytes_issued = 0;
}
ext_start += len;
ext_length -= len;
}
}
if (descr_issued > 0) {
/* Send the final, partially filled parameter list. */
rval = sd_send_scsi_UNMAP_issue_one(ssc, uph, descr_issued,
bytes_issued);
}
out:
kmem_free(uph, SD_UNMAP_PARAM_LIST_MAXSZ);
return (rval);
}
/*
 * sd_send_scsi_UNMAP: entry point for DKIOCFREE-style requests.
 * Verifies the device is thin-provisioned with usable UNMAP limits,
 * copies the free list in from userland when needed (dfl_copyin), and
 * hands the list to sd_send_scsi_UNMAP_issue().
 *
 * Returns ENOTSUP when UNMAP cannot be used, EINVAL for an oversized
 * in-kernel list, or the result of sd_send_scsi_UNMAP_issue().
 * Must be called without SD_MUTEX held.
 */
static int
sd_send_scsi_UNMAP(dev_t dev, sd_ssc_t *ssc, dkioc_free_list_t *dfl, int flag)
{
struct sd_lun *un = ssc->ssc_un;
int rval = 0;
ASSERT(!mutex_owned(SD_MUTEX(un)));
ASSERT(dfl != NULL);
if (!(un->un_thin_flags & SD_THIN_PROV_ENABLED) ||
un->un_blk_lim.lim_max_unmap_descr_cnt == 0 ||
un->un_blk_lim.lim_max_unmap_lba_cnt == 0) {
return (SET_ERROR(ENOTSUP));
}
if (!(flag & FKIOCTL)) {
/* Userland caller: copy the list in; dfl now points at a copy. */
int err = dfl_copyin(dfl, &dfl, flag, KM_SLEEP);
if (err != 0)
return (err);
} else if (dfl->dfl_num_exts > DFL_COPYIN_MAX_EXTS) {
/* Kernel callers must respect the limit; assert on debug builds. */
ASSERT3U(dfl->dfl_num_exts, <=, DFL_COPYIN_MAX_EXTS);
return (SET_ERROR(EINVAL));
}
rval = sd_send_scsi_UNMAP_issue(dev, ssc, dfl);
if (!(flag & FKIOCTL)) {
/* Free the kernel copy made by dfl_copyin above. */
dfl_free(dfl);
dfl = NULL;
}
return (rval);
}
/*
 * sd_send_scsi_GET_CONFIGURATION: issue an MMC GET CONFIGURATION
 * command (RT=0x02) requesting the feature header
 * (SD_PROFILE_HEADER_LEN bytes) into bufaddr.  The caller supplies the
 * uscsi_cmd and request-sense buffers so sense data survives the call.
 *
 * Returns 0 on success, EACCES on reservation conflict, or the errno
 * from sd_ssc_send().  Must be called without SD_MUTEX held.
 */
static int
sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf,
uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
int path_flag)
{
char cdb[CDB_GROUP1];
int status;
struct sd_lun *un;
ASSERT(ssc != NULL);
un = ssc->ssc_un;
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
ASSERT(bufaddr != NULL);
ASSERT(ucmdbuf != NULL);
ASSERT(rqbuf != NULL);
SD_TRACE(SD_LOG_IO, un,
"sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);
bzero(cdb, sizeof (cdb));
bzero(ucmdbuf, sizeof (struct uscsi_cmd));
bzero(rqbuf, rqbuflen);
bzero(bufaddr, buflen);
cdb[0] = SCMD_GET_CONFIGURATION;
/* RT=0x02: return only the current feature descriptors. */
cdb[1] = 0x02;
cdb[8] = SD_PROFILE_HEADER_LEN;
ucmdbuf->uscsi_cdb = cdb;
ucmdbuf->uscsi_cdblen = CDB_GROUP1;
ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
ucmdbuf->uscsi_buflen = buflen;
ucmdbuf->uscsi_timeout = sd_io_time;
ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
ucmdbuf->uscsi_rqlen = rqbuflen;
ucmdbuf->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT | USCSI_READ;
status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
UIO_SYSSPACE, path_flag);
switch (status) {
case 0:
sd_ssc_assessment(ssc, SD_FMT_STANDARD);
break;
case EIO:
switch (ucmdbuf->uscsi_status) {
case STATUS_RESERVATION_CONFLICT:
status = EACCES;
break;
default:
break;
}
break;
default:
break;
}
if (status == 0) {
SD_DUMP_MEMORY(un, SD_LOG_IO,
"sd_send_scsi_GET_CONFIGURATION: data",
(uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
}
SD_TRACE(SD_LOG_IO, un,
"sd_send_scsi_GET_CONFIGURATION: exit\n");
return (status);
}
/*
 * sd_send_scsi_feature_GET_CONFIGURATION: issue an MMC GET CONFIGURATION
 * command (RT=0x02) for a single feature code, copying up to buflen
 * bytes of the response into bufaddr.  The caller supplies the
 * uscsi_cmd and request-sense buffers so sense data survives the call.
 *
 * Returns 0 on success, EACCES on reservation conflict, or the errno
 * from sd_ssc_send().  Must be called without SD_MUTEX held.
 */
static int
sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf,
uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
char feature, int path_flag)
{
char cdb[CDB_GROUP1];
int status;
struct sd_lun *un;
ASSERT(ssc != NULL);
un = ssc->ssc_un;
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
ASSERT(bufaddr != NULL);
ASSERT(ucmdbuf != NULL);
ASSERT(rqbuf != NULL);
SD_TRACE(SD_LOG_IO, un,
"sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un);
bzero(cdb, sizeof (cdb));
bzero(ucmdbuf, sizeof (struct uscsi_cmd));
bzero(rqbuf, rqbuflen);
bzero(bufaddr, buflen);
cdb[0] = SCMD_GET_CONFIGURATION;
/* RT=0x02: return only the current feature descriptors. */
cdb[1] = 0x02;
cdb[3] = feature;
cdb[8] = buflen;
ucmdbuf->uscsi_cdb = cdb;
ucmdbuf->uscsi_cdblen = CDB_GROUP1;
ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
ucmdbuf->uscsi_buflen = buflen;
ucmdbuf->uscsi_timeout = sd_io_time;
ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
ucmdbuf->uscsi_rqlen = rqbuflen;
ucmdbuf->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT | USCSI_READ;
status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
UIO_SYSSPACE, path_flag);
switch (status) {
case 0:
/*
 * Record a standard FMA assessment on success, matching
 * sd_send_scsi_GET_CONFIGURATION() and every other
 * sd_ssc_send() caller: each issued command must be
 * assessed before the ssc is reused.
 */
sd_ssc_assessment(ssc, SD_FMT_STANDARD);
break;
case EIO:
switch (ucmdbuf->uscsi_status) {
case STATUS_RESERVATION_CONFLICT:
status = EACCES;
break;
default:
break;
}
break;
default:
break;
}
if (status == 0) {
SD_DUMP_MEMORY(un, SD_LOG_IO,
"sd_send_scsi_feature_GET_CONFIGURATION: data",
(uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
}
SD_TRACE(SD_LOG_IO, un,
"sd_send_scsi_feature_GET_CONFIGURATION: exit\n");
return (status);
}
/*
 * sd_send_scsi_MODE_SENSE: issue a MODE SENSE (6) or MODE SENSE (10)
 * command for page_code into bufaddr.  cdbsize selects the CDB group
 * (CDB_GROUP0 for the 6-byte form, otherwise the 10-byte form), which
 * also determines the expected mode page header length.
 *
 * Returns 0 on success, EACCES on reservation conflict, or EIO if the
 * device returned less than a full mode page header (some USB devices
 * return 0 bytes for page 0x3f).  Must be called without SD_MUTEX held.
 */
static int
sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
size_t buflen, uchar_t page_code, int path_flag)
{
struct scsi_extended_sense sense_buf;
union scsi_cdb cdb;
struct uscsi_cmd ucmd_buf;
int status;
int headlen;
struct sd_lun *un;
ASSERT(ssc != NULL);
un = ssc->ssc_un;
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
ASSERT(bufaddr != NULL);
ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
(cdbsize == CDB_GROUP2));
SD_TRACE(SD_LOG_IO, un,
"sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);
bzero(&cdb, sizeof (cdb));
bzero(&ucmd_buf, sizeof (ucmd_buf));
bzero(&sense_buf, sizeof (struct scsi_extended_sense));
bzero(bufaddr, buflen);
if (cdbsize == CDB_GROUP0) {
cdb.scc_cmd = SCMD_MODE_SENSE;
cdb.cdb_opaque[2] = page_code;
FORMG0COUNT(&cdb, buflen);
headlen = MODE_HEADER_LENGTH;
} else {
cdb.scc_cmd = SCMD_MODE_SENSE_G1;
cdb.cdb_opaque[2] = page_code;
FORMG1COUNT(&cdb, buflen);
headlen = MODE_HEADER_LENGTH_GRP2;
}
ASSERT(headlen <= buflen);
SD_FILL_SCSI1_LUN_CDB(un, &cdb);
ucmd_buf.uscsi_cdb = (char *)&cdb;
ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
ucmd_buf.uscsi_buflen = buflen;
ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
ucmd_buf.uscsi_timeout = 60;
status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
UIO_SYSSPACE, path_flag);
switch (status) {
case 0:
/* At minimum the mode page header must have been returned. */
if (buflen - ucmd_buf.uscsi_resid < headlen) {
status = EIO;
sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
"mode page header is not returned");
}
break;
case EIO:
switch (ucmd_buf.uscsi_status) {
case STATUS_RESERVATION_CONFLICT:
status = EACCES;
break;
default:
break;
}
break;
default:
break;
}
if (status == 0) {
SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
(uchar_t *)bufaddr, buflen, SD_LOG_HEX);
}
SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");
return (status);
}
/*
 * sd_send_scsi_MODE_SELECT
 *
 * Issue a MODE SELECT command (6- or 10-byte CDB, selected by cdbsize)
 * writing the mode parameter list in bufaddr to the device.
 *
 *   Arguments:
 *	ssc		- ssc contains a pointer to the per-target soft
 *			  state struct (un)
 *	cdbsize		- CDB_GROUP0 for MODE SELECT(6), CDB_GROUP1 or
 *			  CDB_GROUP2 for MODE SELECT(10)
 *	bufaddr		- mode parameter list to send
 *	buflen		- size of bufaddr in bytes
 *	save_page	- SD_SAVE_PAGE to request that the device save the
 *			  page to non-volatile storage (sets the SP bit)
 *	path_flag	- SD_PATH_* flag routing the command through
 *			  sd_ssc_send()
 *
 *   Return Code: 0 on success, EACCES on reservation conflict, otherwise
 *		  the error from sd_ssc_send().
 */
static int
sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
    size_t buflen, uchar_t save_page, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
	    (cdbsize == CDB_GROUP2));

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/* Set the PF (page format) bit; optionally SP (save pages). */
	cdb.cdb_opaque[1] = 0x10;
	if (save_page == SD_SAVE_PAGE) {
		cdb.cdb_opaque[1] |= 0x01;
	}

	if (cdbsize == CDB_GROUP0) {
		cdb.scc_cmd = SCMD_MODE_SELECT;
		FORMG0COUNT(&cdb, buflen);
	} else {
		cdb.scc_cmd = SCMD_MODE_SELECT_G1;
		FORMG1COUNT(&cdb, buflen);
	}

	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;
	case EIO:
		/* A reservation conflict is reported to the caller as EACCES */
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}
	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");

	return (status);
}
/*
 * sd_send_scsi_RDWR
 *
 * Issue a READ or WRITE to the target via the uscsi path, choosing the
 * smallest CDB group that can address the starting block.
 *
 *   Arguments:
 *	ssc		- ssc contains a pointer to the per-target soft
 *			  state struct (un)
 *	cmd		- SCMD_READ or SCMD_WRITE
 *	bufaddr		- data buffer for the transfer
 *	buflen		- transfer length in bytes; converted to target
 *			  blocks below
 *	start_block	- starting target block address
 *	path_flag	- SD_PATH_* flag routing the command through
 *			  sd_ssc_send()
 *
 *   Return Code: 0 on success; EINVAL if the target block size is not
 *		  yet known or the CDB group cannot be chosen; EACCES on
 *		  reservation conflict; otherwise the sd_ssc_send() error.
 */
static int
sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
    size_t buflen, daddr_t start_block, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	uint32_t		block_count;
	int			status;
	int			cdbsize;
	uchar_t			flag;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);
	ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un);

	/* Cannot convert bytes to blocks without a valid block size. */
	if (un->un_f_tgt_blocksize_is_valid != TRUE) {
		return (EINVAL);
	}

	mutex_enter(SD_MUTEX(un));
	block_count = SD_BYTES2TGTBLOCKS(un, buflen);
	mutex_exit(SD_MUTEX(un));

	flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE;

	SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: "
	    "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n",
	    bufaddr, buflen, start_block, block_count);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	/*
	 * Select the CDB group: GROUP4 (16-byte) when the LBA exceeds
	 * 32 bits; GROUP1 (10-byte) when the LBA exceeds GROUP0's 21-bit
	 * address field or the device is ATAPI (which does not support
	 * 6-byte CDBs); otherwise GROUP0 (6-byte).
	 */
	if (start_block > 0xffffffff)
		cdbsize = CDB_GROUP4;
	else if ((start_block & 0xFFE00000) ||
	    (un->un_f_cfg_is_atapi == TRUE))
		cdbsize = CDB_GROUP1;
	else
		cdbsize = CDB_GROUP0;

	switch (cdbsize) {
	case CDB_GROUP0:
		cdb.scc_cmd = cmd;
		FORMG0ADDR(&cdb, start_block);
		FORMG0COUNT(&cdb, block_count);
		break;
	case CDB_GROUP1:
		cdb.scc_cmd = cmd | SCMD_GROUP1;
		FORMG1ADDR(&cdb, start_block);
		FORMG1COUNT(&cdb, block_count);
		break;
	case CDB_GROUP4:
		cdb.scc_cmd = cmd | SCMD_GROUP4;
		FORMG4LONGADDR(&cdb, (uint64_t)start_block);
		FORMG4COUNT(&cdb, block_count);
		break;
	case CDB_GROUP5:
	default:
		/* Unsupported CDB group */
		return (EINVAL);
	}

	SD_FILL_SCSI1_LUN_CDB(un, &cdb);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
	ucmd_buf.uscsi_bufaddr = bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		break;
	case EIO:
		/* A reservation conflict is reported to the caller as EACCES */
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n");

	return (status);
}
/*
 * sd_send_scsi_LOG_SENSE
 *
 * Issue a LOG SENSE (10-byte) command for the given log page.
 *
 *   Arguments:
 *	ssc		- ssc contains a pointer to the per-target soft
 *			  state struct (un)
 *	bufaddr		- buffer receiving the log page data
 *	buflen		- size of bufaddr in bytes
 *	page_code	- log page to retrieve
 *	page_control	- page control field (placed in the top two bits
 *			  of CDB byte 2)
 *	param_ptr	- 16-bit parameter pointer (CDB bytes 5-6)
 *	path_flag	- SD_PATH_* flag routing the command through
 *			  sd_ssc_send()
 *
 *   Return Code: 0 on success; EACCES on reservation conflict; ENOTTY
 *		  when the device rejects the TEMPERATURE page; otherwise
 *		  the error from sd_ssc_send().
 */
static int
sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen,
    uchar_t page_code, uchar_t page_control, uint16_t param_ptr, int path_flag)
{
	struct scsi_extended_sense	sense_buf;
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(&sense_buf, sizeof (struct scsi_extended_sense));

	cdb.scc_cmd = SCMD_LOG_SENSE_G1;
	cdb.cdb_opaque[2] = (page_control << 6) | page_code;
	cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
	cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
	FORMG1COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
	ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
	ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, path_flag);

	switch (status) {
	case 0:
		break;
	case EIO:
		switch (ucmd_buf.uscsi_status) {
		case STATUS_RESERVATION_CONFLICT:
			status = EACCES;
			break;
		case STATUS_CHECK:
			/*
			 * Illegal-request / invalid-field-in-CDB (ASC 0x24):
			 * some drives do not implement the standard
			 * start/stop cycle counter page and instead use a
			 * vendor-unique page number; retry once with that
			 * page.  The TEMPERATURE page is simply reported
			 * as unsupported.
			 */
			if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
			    (scsi_sense_key((uint8_t *)&sense_buf) ==
			    KEY_ILLEGAL_REQUEST) &&
			    (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) {
				switch (page_code) {
				case START_STOP_CYCLE_PAGE:
					/*
					 * Remember the VU page so later
					 * requests go straight to it, then
					 * reissue the command with the
					 * updated CDB byte 2.
					 */
					mutex_enter(SD_MUTEX(un));
					un->un_start_stop_cycle_page =
					    START_STOP_CYCLE_VU_PAGE;
					cdb.cdb_opaque[2] =
					    (char)(page_control << 6) |
					    un->un_start_stop_cycle_page;
					mutex_exit(SD_MUTEX(un));
					sd_ssc_assessment(ssc, SD_FMT_IGNORE);
					status = sd_ssc_send(
					    ssc, &ucmd_buf, FKIOCTL,
					    UIO_SYSSPACE, path_flag);
					break;
				case TEMPERATURE_PAGE:
					status = ENOTTY;
					break;
				default:
					break;
				}
			}
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (status == 0) {
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
		    (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
	}

	SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");

	return (status);
}
/*
 * sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
 *
 * Issue a GET EVENT STATUS NOTIFICATION (0x4A) command in polled mode
 * for the requested notification class, always via SD_PATH_DIRECT.
 *
 *   Arguments:
 *	ssc		- ssc contains a pointer to the per-target soft
 *			  state struct (un)
 *	bufaddr		- buffer receiving the event header/descriptor
 *	buflen		- size of bufaddr in bytes
 *	class_req	- notification class request bitmask (CDB byte 4)
 *
 *   Return Code: 0 on success with a full transfer; EIO when the device
 *		  returned fewer than buflen bytes; otherwise the error
 *		  from sd_ssc_send().
 */
static int
sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc, uchar_t *bufaddr,
    size_t buflen, uchar_t class_req)
{
	union scsi_cdb		cdb;
	struct uscsi_cmd	ucmd_buf;
	int			status;
	struct sd_lun		*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));
	ASSERT(bufaddr != NULL);

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: entry: un:0x%p\n", un);

	bzero(&cdb, sizeof (cdb));
	bzero(&ucmd_buf, sizeof (ucmd_buf));
	bzero(bufaddr, buflen);

	cdb.scc_cmd = SCMD_GET_EVENT_STATUS_NOTIFICATION;
	cdb.cdb_opaque[1] = 1;		/* polled (immediate) operation */
	cdb.cdb_opaque[4] = class_req;	/* notification class request */
	FORMG1COUNT(&cdb, buflen);

	ucmd_buf.uscsi_cdb = (char *)&cdb;
	ucmd_buf.uscsi_cdblen = CDB_GROUP1;
	ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
	ucmd_buf.uscsi_buflen = buflen;
	ucmd_buf.uscsi_rqbuf = NULL;
	ucmd_buf.uscsi_rqlen = 0;
	ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
	ucmd_buf.uscsi_timeout = 60;

	status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_DIRECT);

	if (status == 0) {
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		/* A short transfer means the event data is incomplete. */
		if (ucmd_buf.uscsi_resid != 0) {
			status = EIO;
		}
	}

	SD_TRACE(SD_LOG_IO, un,
	    "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: exit\n");

	return (status);
}
/*
 * sd_gesn_media_data_valid
 *
 * Validate a GET EVENT STATUS NOTIFICATION response as a usable media
 * event: the descriptor length must cover at least one descriptor, the
 * NEA (No Event Available) bit must be clear, the notification class
 * must be the media class, and the media class must be supported.
 *
 *   Arguments:
 *	data	- raw GESN response (event header + first descriptor)
 *
 *   Return Code: B_TRUE when the buffer holds a valid media event.
 */
static boolean_t
sd_gesn_media_data_valid(uchar_t *data)
{
	uint16_t	len;

	/*
	 * The Event Descriptor Length field in the event header is a
	 * big-endian 16-bit value (byte 0 is the MSB per the MMC GESN
	 * response format).  The previous little-endian assembly
	 * ((data[1] << 8) | data[0]) happened to pass the >= 6 check for
	 * typical small lengths but misread any length with a non-zero
	 * high byte.
	 */
	len = (data[0] << 8) | data[1];

	return ((len >= 6) &&
	    ((data[2] & SD_GESN_HEADER_NEA) == 0) &&
	    ((data[2] & SD_GESN_HEADER_CLASS) == SD_GESN_MEDIA_CLASS) &&
	    ((data[3] & (1 << SD_GESN_MEDIA_CLASS)) != 0));
}
/*
 * sdioctl
 *
 * Driver ioctl(9E) entry point: dispatches DKIO, MHIOC, CDROM, USCSI and
 * debug/fault-injection ioctls for the sd target.
 *
 * Commands are blocked while the unit is suspended or changing power
 * state.  un_ncmds_in_driver is incremented for the duration of the call
 * and must be decremented on every exit path (the done_* labels below).
 * When the label is invalid and the open is non-blocking, a pre-check
 * decides per-command whether the media must first be made ready/valid.
 *
 *   Arguments:
 *	dev	- device number
 *	cmd	- ioctl operation to be performed
 *	arg	- user argument; its interpretation depends on cmd
 *	flag	- file status flag / FKIOCTL
 *	cred_p	- user credential pointer
 *	rval_p	- set by some commands (e.g. MHIOCSTATUS)
 *
 *   Return Code: 0, or an errno describing the failure.
 */
static int
sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
{
	struct sd_lun	*un = NULL;
	int		err = 0;
	int		i = 0;
	cred_t		*cr;
	int		tmprval = EINVAL;
	boolean_t	is_valid;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	ssc = sd_ssc_init(un);
	is_valid = SD_IS_VALID_LABEL(un);

	/* Wait until the unit is neither suspended nor changing power. */
	mutex_enter(SD_MUTEX(un));
	while ((un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_PM_CHANGING)) {
		cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
	}

	un->un_ncmds_in_driver++;

	/*
	 * Pre-check for invalid-label + non-blocking opens: decide which
	 * commands may proceed without a ready/valid check, which are
	 * rejected outright, and which require the media to become ready
	 * and the label valid first.
	 */
	if (!is_valid &&
	    (flag & (FNDELAY | FNONBLOCK))) {
		switch (cmd) {
		/* Label/geometry commands go straight to cmlb. */
		case DKIOCGGEOM:
		case DKIOCGVTOC:
		case DKIOCGEXTVTOC:
		case DKIOCGAPART:
		case DKIOCPARTINFO:
		case DKIOCEXTPARTINFO:
		case DKIOCSGEOM:
		case DKIOCSAPART:
		case DKIOCGETEFI:
		case DKIOCPARTITION:
		case DKIOCSVTOC:
		case DKIOCSEXTVTOC:
		case DKIOCSETEFI:
		case DKIOCGMBOOT:
		case DKIOCSMBOOT:
		case DKIOCG_PHYGEOM:
		case DKIOCG_VIRTGEOM:
#if defined(__x86)
		case DKIOCSETEXTPART:
#endif
			goto skip_ready_valid;

		/* CD-ROM commands are rejected on non-CD devices. */
		case CDROMPAUSE:
		case CDROMRESUME:
		case CDROMPLAYMSF:
		case CDROMPLAYTRKIND:
		case CDROMREADTOCHDR:
		case CDROMREADTOCENTRY:
		case CDROMSTOP:
		case CDROMSTART:
		case CDROMVOLCTRL:
		case CDROMSUBCHNL:
		case CDROMREADMODE2:
		case CDROMREADMODE1:
		case CDROMREADOFFSET:
		case CDROMSBLKMODE:
		case CDROMGBLKMODE:
		case CDROMGDRVSPEED:
		case CDROMSDRVSPEED:
		case CDROMCDDA:
		case CDROMCDXA:
		case CDROMSUBCODE:
			if (!ISCD(un)) {
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				err = ENOTTY;
				goto done_without_assess;
			}
			break;
		case FDEJECT:
		case DKIOCEJECT:
		case CDROMEJECT:
			if (!un->un_f_eject_media_supported) {
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				err = ENOTTY;
				goto done_without_assess;
			}
			break;
		case DKIOCFLUSHWRITECACHE:
			/*
			 * Only flush if the device is actually ready; on
			 * success fall through to the command group below
			 * and skip the ready/valid check.
			 */
			mutex_exit(SD_MUTEX(un));
			err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
			if (err != 0) {
				mutex_enter(SD_MUTEX(un));
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));
				err = EIO;
				goto done_quick_assess;
			}
			mutex_enter(SD_MUTEX(un));
			/* FALLTHROUGH */
		case DKIOCREMOVABLE:
		case DKIOCHOTPLUGGABLE:
		case DKIOCINFO:
		case DKIOCGMEDIAINFO:
		case DKIOCGMEDIAINFOEXT:
		case DKIOCSOLIDSTATE:
		case DKIOC_CANFREE:
		case MHIOCENFAILFAST:
		case MHIOCSTATUS:
		case MHIOCTKOWN:
		case MHIOCRELEASE:
		case MHIOCGRP_INKEYS:
		case MHIOCGRP_INRESV:
		case MHIOCGRP_REGISTER:
		case MHIOCGRP_CLEAR:
		case MHIOCGRP_RESERVE:
		case MHIOCGRP_PREEMPTANDABORT:
		case MHIOCGRP_REGISTERANDIGNOREKEY:
		case CDROMCLOSETRAY:
		case USCSICMD:
		case USCSIMAXXFER:
			goto skip_ready_valid;
		default:
			break;
		}

		/* All remaining commands require ready media/valid label. */
		mutex_exit(SD_MUTEX(un));
		err = sd_ready_and_valid(ssc, SDPART(dev));
		mutex_enter(SD_MUTEX(un));

		if (err != SD_READY_VALID) {
			switch (cmd) {
			/* These may still proceed despite the failure. */
			case DKIOCSTATE:
			case CDROMGDRVSPEED:
			case CDROMSDRVSPEED:
			case FDEJECT:
			case DKIOCEJECT:
			case CDROMEJECT:
			case DKIOCREMOVABLE:
			case DKIOCHOTPLUGGABLE:
				break;
			default:
				if (un->un_f_has_removable_media) {
					err = ENXIO;
				} else {
					/* Do not map EACCES to EIO */
					if (err == SD_RESERVED_BY_OTHERS) {
						err = EACCES;
					} else {
						err = EIO;
					}
				}
				un->un_ncmds_in_driver--;
				ASSERT(un->un_ncmds_in_driver >= 0);
				mutex_exit(SD_MUTEX(un));

				goto done_without_assess;
			}
		}
	}

skip_ready_valid:
	mutex_exit(SD_MUTEX(un));

	switch (cmd) {
	case DKIOCINFO:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n");
		err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag);
		break;

	case DKIOCGMEDIAINFO:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n");
		err = sd_get_media_info(dev, (caddr_t)arg, flag);
		break;

	case DKIOCGMEDIAINFOEXT:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFOEXT\n");
		err = sd_get_media_info_ext(dev, (caddr_t)arg, flag);
		break;

	/*
	 * Label/geometry group: verify the unit is ready, hand off to
	 * cmlb, then revalidate the label and (re)register the device id
	 * when the label or EFI data was rewritten.
	 */
	case DKIOCGGEOM:
	case DKIOCGVTOC:
	case DKIOCGEXTVTOC:
	case DKIOCGAPART:
	case DKIOCPARTINFO:
	case DKIOCEXTPARTINFO:
	case DKIOCSGEOM:
	case DKIOCSAPART:
	case DKIOCGETEFI:
	case DKIOCPARTITION:
	case DKIOCSVTOC:
	case DKIOCSEXTVTOC:
	case DKIOCSETEFI:
	case DKIOCGMBOOT:
	case DKIOCSMBOOT:
	case DKIOCG_PHYGEOM:
	case DKIOCG_VIRTGEOM:
#if defined(__x86)
	case DKIOCSETEXTPART:
#endif
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd);

		if (un->un_f_has_removable_media)
			err = sd_send_scsi_TEST_UNIT_READY(ssc,
			    SD_CHECK_FOR_MEDIA);
		else
			err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);

		if (err != 0)
			goto done_with_assess;

		err = cmlb_ioctl(un->un_cmlbhandle, dev,
		    cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT);

		if ((err == 0) &&
		    ((cmd == DKIOCSETEFI) ||
		    ((un->un_f_pkstats_enabled) &&
		    (cmd == DKIOCSAPART || cmd == DKIOCSVTOC ||
		    cmd == DKIOCSEXTVTOC)))) {

			tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT,
			    (void *)SD_PATH_DIRECT);
			if ((tmprval == 0) && un->un_f_pkstats_enabled) {
				sd_set_pstats(un);
				SD_TRACE(SD_LOG_IO_PARTITION, un,
				    "sd_ioctl: un:0x%p pstats created and "
				    "set\n", un);
			}
		}

		if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) ||
		    ((cmd == DKIOCSETEFI) && (tmprval == 0))) {
			/* Refresh the fabricated device id if supported. */
			mutex_enter(SD_MUTEX(un));
			if (un->un_f_devid_supported &&
			    (un->un_f_opt_fab_devid == TRUE)) {
				if (un->un_devid == NULL) {
					sd_register_devid(ssc, SD_DEVINFO(un),
					    SD_TARGET_IS_UNRESERVED);
				} else {
					/*
					 * The device id for this disk
					 * has been fabricated. The
					 * device id must be preserved
					 * by writing it back out to
					 * disk.
					 */
					if (sd_write_deviceid(ssc) != 0) {
						ddi_devid_free(un->un_devid);
						un->un_devid = NULL;
					}
				}
			}
			mutex_exit(SD_MUTEX(un));
		}

		break;

	case DKIOCLOCK:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n");
		err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
		    SD_PATH_STANDARD);
		goto done_with_assess;

	case DKIOCUNLOCK:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n");
		err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
		    SD_PATH_STANDARD);
		goto done_with_assess;

	case DKIOCSTATE: {
		enum dkio_state		state;
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n");

		if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = sd_check_media(dev, state);
			if (err == 0) {
				if (ddi_copyout(&un->un_mediastate, (void *)arg,
				    sizeof (int), flag) != 0)
					err = EFAULT;
			}
		}
		break;
	}

	case DKIOCREMOVABLE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n");
		i = un->un_f_has_removable_media ? 1 : 0;
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;

	case DKIOCSOLIDSTATE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSOLIDSTATE\n");
		i = un->un_f_is_solid_state ? 1 : 0;
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;

	case DKIOCHOTPLUGGABLE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n");
		i = un->un_f_is_hotpluggable ? 1 : 0;
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;

	case DKIOCREADONLY:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREADONLY\n");
		i = 0;
		/* Read-only: non-writable CD media, or write-protected. */
		if ((ISCD(un) && !un->un_f_mmc_writable_media) ||
		    (sr_check_wp(dev) != 0)) {
			i = 1;
		}
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;

	case DKIOCGTEMPERATURE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n");
		err = sd_dkio_get_temp(dev, (caddr_t)arg, flag);
		break;

	case MHIOCENFAILFAST:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag);
		}
		break;

	case MHIOCTKOWN:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag);
		}
		break;

	case MHIOCRELEASE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_mhdioc_release(dev);
		}
		break;

	case MHIOCSTATUS:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n");
		if ((err = drv_priv(cred_p)) == 0) {
			/* *rval_p = 1 reports "reserved by another host". */
			switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) {
			case 0:
				err = 0;
				break;
			case EACCES:
				*rval_p = 1;
				err = 0;
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				break;
			default:
				err = EIO;
				goto done_with_assess;
			}
		}
		break;

	case MHIOCQRESERVE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n");
		if ((err = drv_priv(cred_p)) == 0) {
			err = sd_reserve_release(dev, SD_RESERVE);
		}
		break;

	case MHIOCREREGISTERDEVID:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n");
		if (drv_priv(cred_p) == EPERM) {
			err = EPERM;
		} else if (!un->un_f_devid_supported) {
			err = ENOTTY;
		} else {
			err = sd_mhdioc_register_devid(dev);
		}
		break;

	case MHIOCGRP_INKEYS:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n");
		if (((err = drv_priv(cred_p)) != EPERM) &&
		    arg != (intptr_t)NULL) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else {
				err = sd_mhdioc_inkeys(dev, (caddr_t)arg,
				    flag);
			}
		}
		break;

	case MHIOCGRP_INRESV:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n");
		if (((err = drv_priv(cred_p)) != EPERM) &&
		    arg != (intptr_t)NULL) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else {
				err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag);
			}
		}
		break;

	case MHIOCGRP_REGISTER:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != (intptr_t)NULL) {
				mhioc_register_t	reg;
				if (ddi_copyin((void *)arg, &reg,
				    sizeof (mhioc_register_t), flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_REGISTER,
					    (uchar_t *)&reg);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case MHIOCGRP_CLEAR:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_CLEAR\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != (intptr_t)NULL) {
				mhioc_register_t	reg;
				if (ddi_copyin((void *)arg, &reg,
				    sizeof (mhioc_register_t), flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_CLEAR,
					    (uchar_t *)&reg);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case MHIOCGRP_RESERVE:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != (intptr_t)NULL) {
				mhioc_resv_desc_t	resv_desc;
				if (ddi_copyin((void *)arg, &resv_desc,
				    sizeof (mhioc_resv_desc_t), flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_RESERVE,
					    (uchar_t *)&resv_desc);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case MHIOCGRP_PREEMPTANDABORT:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != (intptr_t)NULL) {
				mhioc_preemptandabort_t	preempt_abort;
				if (ddi_copyin((void *)arg, &preempt_abort,
				    sizeof (mhioc_preemptandabort_t),
				    flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_PREEMPTANDABORT,
					    (uchar_t *)&preempt_abort);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case MHIOCGRP_REGISTERANDIGNOREKEY:
		SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n");
		if ((err = drv_priv(cred_p)) != EPERM) {
			if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
				err = ENOTSUP;
			} else if (arg != (intptr_t)NULL) {
				mhioc_registerandignorekey_t	r_and_i;
				if (ddi_copyin((void *)arg, (void *)&r_and_i,
				    sizeof (mhioc_registerandignorekey_t),
				    flag) != 0) {
					err = EFAULT;
				} else {
					err =
					    sd_send_scsi_PERSISTENT_RESERVE_OUT(
					    ssc, SD_SCSI3_REGISTERANDIGNOREKEY,
					    (uchar_t *)&r_and_i);
					if (err != 0)
						goto done_with_assess;
				}
			}
		}
		break;

	case USCSICMD:
		SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n");
		/* Privilege may come from either the caller or our cred. */
		cr = ddi_get_cred();
		if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) {
			err = EPERM;
		} else {
			enum uio_seg	uioseg;

			uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE :
			    UIO_USERSPACE;
			if (un->un_f_format_in_progress == TRUE) {
				err = EAGAIN;
				break;
			}

			err = sd_ssc_send(ssc,
			    (struct uscsi_cmd *)arg,
			    flag, uioseg, SD_PATH_STANDARD);
			if (err != 0)
				goto done_with_assess;
			else
				sd_ssc_assessment(ssc, SD_FMT_STANDARD);
		}
		break;

	case USCSIMAXXFER:
		SD_TRACE(SD_LOG_IOCTL, un, "USCSIMAXXFER\n");
		cr = ddi_get_cred();
		if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) {
			err = EPERM;
		} else {
			const uscsi_xfer_t xfer = un->un_max_xfer_size;

			if (ddi_copyout(&xfer, (void *)arg, sizeof (xfer),
			    flag) != 0) {
				err = EFAULT;
			} else {
				err = 0;
			}
		}
		break;

	case CDROMPAUSE:
	case CDROMRESUME:
		SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_pause_resume(dev, cmd);
		}
		break;

	case CDROMPLAYMSF:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_play_msf(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMPLAYTRKIND:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n");
#if defined(__x86)
		/*
		 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
#else
		if (!ISCD(un)) {
#endif
			err = ENOTTY;
		} else {
			err = sr_play_trkind(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADTOCHDR:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_tochdr(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADTOCENTRY:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_tocentry(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSTOP:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
			    SD_TARGET_STOP, SD_PATH_STANDARD);
			goto done_with_assess;
		}
		break;

	case CDROMSTART:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
			    SD_TARGET_START, SD_PATH_STANDARD);
			goto done_with_assess;
		}
		break;

	case CDROMCLOSETRAY:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
			    SD_TARGET_CLOSE, SD_PATH_STANDARD);
			goto done_with_assess;
		}
		break;

	case FDEJECT:	/* for eject command */
	case DKIOCEJECT:
	case CDROMEJECT:
		SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
		if (!un->un_f_eject_media_supported) {
			err = ENOTTY;
		} else {
			err = sr_eject(dev);
		}
		break;

	case CDROMVOLCTRL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSUBCHNL:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_subchannel(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE2:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_cfg_is_atapi == TRUE) {
			/*
			 * If the drive supports READ CD, use that instead of
			 * switching the LBA size via a MODE SELECT
			 * Block Descriptor
			 */
			err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
		} else {
			err = sr_read_mode2(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADMODE1:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_mode1(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMREADOFFSET:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_sony_session_offset(dev, (caddr_t)arg,
			    flag);
		}
		break;

	case CDROMSBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
		/*
		 * Block mode changes are only allowed with an exclusive
		 * open of the partition and no commands in flight.
		 */
		if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {
			err = EINVAL;
		} else {
			mutex_enter(SD_MUTEX(un));
			if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
			    (un->un_ncmds_in_transport > 0)) {
				mutex_exit(SD_MUTEX(un));
				err = EINVAL;
			} else {
				mutex_exit(SD_MUTEX(un));
				err = sr_change_blkmode(dev, cmd, arg, flag);
			}
		}
		break;

	case CDROMGBLKMODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if ((un->un_f_cfg_is_atapi != FALSE) &&
		    (un->un_f_blockcount_is_valid != FALSE)) {
			/*
			 * Return the current target block size rather than
			 * querying the device.
			 */
			if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
			    sizeof (int), flag) != 0) {
				err = EFAULT;
			} else {
				err = 0;
			}
		} else {
			err = sr_change_blkmode(dev, cmd, arg, flag);
		}
		break;

	case CDROMGDRVSPEED:
	case CDROMSDRVSPEED:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else if (un->un_f_mmc_cap == TRUE) {
			err = EINVAL;
		} else if (un->un_f_cfg_is_atapi == TRUE) {
			err = sr_atapi_change_speed(dev, cmd, arg, flag);
		} else {
			err = sr_change_speed(dev, cmd, arg, flag);
		}
		break;

	case CDROMCDDA:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_cdda(dev, (void *)arg, flag);
		}
		break;

	case CDROMCDXA:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_cdxa(dev, (caddr_t)arg, flag);
		}
		break;

	case CDROMSUBCODE:
		SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
		if (!ISCD(un)) {
			err = ENOTTY;
		} else {
			err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
		}
		break;


#ifdef SDDEBUG
/* RESET/ABORTS testing ioctls */
	case DKIOCRESET: {
		int	reset_level;

		if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) {
			err = EFAULT;
		} else {
			SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: "
			    "reset_level = 0x%lx\n", reset_level);
			/*
			 * NOTE(review): "%lx" with an int argument is a
			 * format/type mismatch (debug-only code).
			 */
			if (scsi_reset(SD_ADDRESS(un), reset_level)) {
				err = 0;
			} else {
				err = EIO;
			}
		}
		break;
	}

	case DKIOCABORT:
		SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n");
		if (scsi_abort(SD_ADDRESS(un), NULL)) {
			err = 0;
		} else {
			err = EIO;
		}
		break;
#endif

#ifdef SD_FAULT_INJECTION
/* SDIOC FaultInjection testing ioctls */
	case SDIOCSTART:
	case SDIOCSTOP:
	case SDIOCINSERTPKT:
	case SDIOCINSERTXB:
	case SDIOCINSERTUN:
	case SDIOCINSERTARQ:
	case SDIOCPUSH:
	case SDIOCRETRIEVE:
	case SDIOCRUN:
		SD_INFO(SD_LOG_SDTEST, un, "sdioctl:"
		    "SDIOC detected cmd:0x%X:\n", cmd);
		/* call error generator */
		sd_faultinjection_ioctl(cmd, arg, un);
		err = 0;
		break;
#endif /* SD_FAULT_INJECTION */

	case DKIOCFLUSHWRITECACHE:
		{
			struct dk_callback *dkc = (struct dk_callback *)arg;

			mutex_enter(SD_MUTEX(un));
			if (!un->un_f_sync_cache_supported ||
			    !un->un_f_write_cache_enabled) {
				/*
				 * No flush needed (or possible): report
				 * success unless SYNCHRONIZE CACHE itself is
				 * unsupported, and still invoke any in-kernel
				 * callback.
				 */
				err = un->un_f_sync_cache_supported ?
				    0 : ENOTSUP;
				mutex_exit(SD_MUTEX(un));
				if ((flag & FKIOCTL) && dkc != NULL &&
				    dkc->dkc_callback != NULL) {
					(*dkc->dkc_callback)(dkc->dkc_cookie,
					    err);
					/*
					 * Did callback and reported error.
					 * Since we did a callback, ioctl
					 * should return 0.
					 */
					err = 0;
				}
				break;
			}
			mutex_exit(SD_MUTEX(un));

			if ((flag & FKIOCTL) && dkc != NULL &&
			    dkc->dkc_callback != NULL) {
				/* async SYNC CACHE request */
				err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
			} else {
				/* synchronous SYNC CACHE request */
				err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
			}
		}
		break;

	case DKIOCFREE:
		{
			dkioc_free_list_t *dfl = (dkioc_free_list_t *)arg;

			/* bad ioctls shouldn't panic */
			if (dfl == NULL) {
				/* check kernel callers strictly in debug */
				ASSERT0(flag & FKIOCTL);
				err = SET_ERROR(EINVAL);
				break;
			}
			/* issue the free request */
			err = sd_send_scsi_UNMAP(dev, ssc, dfl, flag);
		}
		break;

	case DKIOC_CANFREE:
		SD_TRACE(SD_LOG_IOCTL, un, "DKIOC_CANFREE\n");
		i = (un->un_thin_flags & SD_THIN_PROV_ENABLED) ? 1 : 0;
		if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
			err = EFAULT;
		} else {
			err = 0;
		}
		break;

	case DKIOCGETWCE: {

		int wce;

		if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) {
			break;
		}

		if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) {
			err = EFAULT;
		}
		break;
	}

	case DKIOCSETWCE: {

		int wce, sync_supported;
		int cur_wce = 0;

		if (!un->un_f_cache_mode_changeable) {
			err = EINVAL;
			break;
		}

		if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) {
			err = EFAULT;
			break;
		}

		/*
		 * Serialize write-cache-control operations: only one
		 * DKIOCSETWCE may manipulate the cache state at a time.
		 */
		mutex_enter(SD_MUTEX(un));
		if (un->un_f_opt_disable_cache && wce) {
			mutex_exit(SD_MUTEX(un));
			err = EINVAL;
			break;
		}

		while (un->un_f_wcc_inprog)
			cv_wait(&un->un_wcc_cv, SD_MUTEX(un));

		un->un_f_wcc_inprog = 1;

		mutex_exit(SD_MUTEX(un));

		/*
		 * Get the current write cache state
		 */
		if ((err = sd_get_write_cache_enabled(ssc, &cur_wce)) != 0) {
			mutex_enter(SD_MUTEX(un));
			un->un_f_wcc_inprog = 0;
			cv_broadcast(&un->un_wcc_cv);
			mutex_exit(SD_MUTEX(un));
			break;
		}

		mutex_enter(SD_MUTEX(un));
		un->un_f_write_cache_enabled = (cur_wce != 0);

		if (un->un_f_write_cache_enabled && wce == 0) {
			/*
			 * Disable the write cache.  Flush it first, unless
			 * cache flushing is suppressed for this device.
			 */
			sync_supported = un->un_f_sync_cache_supported;

			if (!un->un_f_suppress_cache_flush) {
				mutex_exit(SD_MUTEX(un));
				if ((err = sd_cache_control(ssc,
				    SD_CACHE_NOCHANGE,
				    SD_CACHE_DISABLE)) == 0 &&
				    sync_supported) {
					err = sd_send_scsi_SYNCHRONIZE_CACHE(un,
					    NULL);
				}
			} else {
				mutex_exit(SD_MUTEX(un));
			}

			mutex_enter(SD_MUTEX(un));
			if (err == 0) {
				un->un_f_write_cache_enabled = 0;
			}

		} else if (!un->un_f_write_cache_enabled && wce != 0) {
			/*
			 * Enable the write cache; mark it enabled first so
			 * concurrent I/O sees the pending state, and roll
			 * back on failure.
			 */
			un->un_f_write_cache_enabled = 1;

			if (!un->un_f_suppress_cache_flush) {
				mutex_exit(SD_MUTEX(un));
				err = sd_cache_control(ssc, SD_CACHE_NOCHANGE,
				    SD_CACHE_ENABLE);
			} else {
				mutex_exit(SD_MUTEX(un));
			}

			mutex_enter(SD_MUTEX(un));

			if (err) {
				un->un_f_write_cache_enabled = 0;
			}
		}

		un->un_f_wcc_inprog = 0;
		cv_broadcast(&un->un_wcc_cv);
		mutex_exit(SD_MUTEX(un));
		break;
	}

	default:
		err = ENOTTY;
		break;
	}
	mutex_enter(SD_MUTEX(un));
	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	mutex_exit(SD_MUTEX(un));


done_without_assess:
	sd_ssc_fini(ssc);

	SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
	return (err);

done_with_assess:
	mutex_enter(SD_MUTEX(un));
	un->un_ncmds_in_driver--;
	ASSERT(un->un_ncmds_in_driver >= 0);
	mutex_exit(SD_MUTEX(un));

done_quick_assess:
	if (err != 0)
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	/* Uninitialize sd_ssc_t pointer */
	sd_ssc_fini(ssc);

	SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
	return (err);
}
/*
 * sd_dkio_ctrl_info
 *
 * Service the DKIOCINFO ioctl: fill in a dk_cinfo describing the
 * controller and drive for this device and copy it out to the caller.
 *
 *   Arguments:
 *	dev	- device number
 *	arg	- user buffer receiving the struct dk_cinfo
 *	flag	- ioctl mode, passed to ddi_copyout()
 *
 *   Return Code: 0 on success, ENXIO if no soft state exists for dev,
 *		  EFAULT if the copyout fails.
 */
static int
sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un;
	struct dk_cinfo	*cinfo;
	dev_info_t	*pdip;
	int		lun;
	int		tgt;
	int		rval;

	un = ddi_get_soft_state(sd_state, SDUNIT(dev));
	if (un == NULL)
		return (ENXIO);

	cinfo = (struct dk_cinfo *)
	    kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP);

	/* Controller type: only CD-ROM units are reported specially. */
	cinfo->dki_ctype = (un->un_ctype == CTYPE_CDROM) ?
	    DKC_CDROM : DKC_SCSI_CCS;

	/* Controller (parent HBA) instance and name. */
	pdip = ddi_get_parent(SD_DEVINFO(un));
	cinfo->dki_cnum = ddi_get_instance(pdip);
	if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) {
		(void) strcpy(cinfo->dki_cname, ddi_get_name(pdip));
	} else {
		(void) strncpy(cinfo->dki_cname, ddi_node_name(pdip),
		    DK_DEVLEN - 1);
	}

	/* Unit identity: instance, target/lun packed into dki_slave. */
	lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0);
	tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0);
	cinfo->dki_unit = ddi_get_instance(SD_DEVINFO(un));
	cinfo->dki_slave = ((tgt << 3) | lun);
	(void) strncpy(cinfo->dki_dname, ddi_driver_name(SD_DEVINFO(un)),
	    DK_DEVLEN - 1);

	cinfo->dki_flags = DKI_FMTVOL;
	cinfo->dki_partition = SDPART(dev);

	/* Max transfer size in system-blocksize units. */
	cinfo->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize;

	/* Fields with no meaning for SCSI disks. */
	cinfo->dki_addr = 0;
	cinfo->dki_space = 0;
	cinfo->dki_prio = 0;
	cinfo->dki_vec = 0;

	rval = (ddi_copyout(cinfo, arg, sizeof (struct dk_cinfo), flag) != 0) ?
	    EFAULT : 0;
	kmem_free(cinfo, sizeof (struct dk_cinfo));

	return (rval);
}
/*
 * sd_get_media_info_com: common worker for the DKIOCGMEDIAINFO and
 * DKIOCGMEDIAINFOEXT ioctls.  Determines the media type, logical block
 * size, capacity and (optionally) physical block size of the media
 * currently in the drive.
 *
 * Arguments:
 *	dev		- device number
 *	dki_media_type	- out: DK_* media type code
 *	dki_lbsize	- out: logical block size in bytes
 *	dki_capacity	- out: media capacity in dki_lbsize units
 *	dki_pbsize	- out: physical block size; NULL when the caller
 *			  (plain DKIOCGMEDIAINFO) does not want it
 *
 * Returns: 0 on success; ENXIO if the unit is unknown or offline;
 * EACCES on a reservation conflict; EIO on other command failures.
 */
static int
sd_get_media_info_com(dev_t dev, uint_t *dki_media_type, uint_t *dki_lbsize,
    diskaddr_t *dki_capacity, uint_t *dki_pbsize)
{
	struct sd_lun *un = NULL;
	struct uscsi_cmd com;
	struct scsi_inquiry *sinq;
	u_longlong_t media_capacity;
	uint64_t capacity;
	uint_t lbasize;
	uint_t pbsize;
	uchar_t *out_data;
	uchar_t *rqbuf;
	int rval = 0;
	int rtn;
	sd_ssc_t *ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}
	SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info_com: entry\n");

	/* Scratch buffers for GET CONFIGURATION data and request sense. */
	out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
	rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
	ssc = sd_ssc_init(un);

	/* Probe for media; ENXIO means the device is gone entirely. */
	rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA);
	if (rval == ENXIO) {
		goto done;
	} else if (rval != 0) {
		/* Not-ready is tolerated here; capacity probing decides. */
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}

	if (ISCD(un)) {
		*dki_media_type = DK_CDROM;

		/*
		 * MMC drives can report the exact media profile via
		 * GET CONFIGURATION (profile code in bytes 6-7).
		 */
		if (un->un_f_mmc_cap == TRUE) {
			rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf,
			    SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
			    SD_PATH_STANDARD);
			if (rtn) {
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				/*
				 * Legacy drives reject the command with
				 * ILLEGAL REQUEST / invalid opcode (0x20);
				 * anything else is a real error.
				 */
				if ((com.uscsi_status == STATUS_CHECK) &&
				    (com.uscsi_rqstatus == STATUS_GOOD)) {
					if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) ||
					    (rqbuf[12] != 0x20)) {
						rval = EIO;
						goto no_assessment;
					}
				}
			} else {
				/* Current profile: 16-bit big-endian code. */
				*dki_media_type = out_data[6];
				*dki_media_type <<= 8;
				*dki_media_type |= out_data[7];
			}
		}
	} else {
		/*
		 * Non-CD device: classify from inquiry data.  Iomega ZIP
		 * and JAZ drives get their own media type codes.
		 */
		sinq = un->un_sd->sd_inq;
		if ((sinq->inq_dtype == DTYPE_DIRECT) ||
		    (sinq->inq_dtype == DTYPE_OPTICAL)) {
			*dki_media_type = DK_FIXED_DISK;
			if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) ||
			    (bcmp(sinq->inq_vid, "iomega", 6) == 0)) {
				if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) {
					*dki_media_type = DK_ZIP;
				} else if (
				    (bcmp(sinq->inq_pid, "jaz", 3) == 0)) {
					*dki_media_type = DK_JAZ;
				}
			}
		} else {
			*dki_media_type = DK_UNKNOWN;
		}
	}

	/*
	 * Preferred path: READ CAPACITY(16) yields the physical block
	 * size when the caller wants it and the device supports the
	 * descriptor format.
	 */
	if (dki_pbsize && un->un_f_descr_format_supported) {
		rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
		    &pbsize, SD_PATH_DIRECT);
		pbsize = MAX(pbsize, un->un_phy_blocksize);
	}
	if (dki_pbsize == NULL || rval != 0 ||
	    !un->un_f_descr_format_supported) {
		/* Fallback: READ CAPACITY(10), no physical block size. */
		rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
		    SD_PATH_DIRECT);
		switch (rval) {
		case 0:
			if (un->un_f_enable_rmw &&
			    un->un_phy_blocksize != 0) {
				pbsize = un->un_phy_blocksize;
			} else {
				pbsize = lbasize;
			}
			media_capacity = capacity;
			/*
			 * For removable media the capacity is reported in
			 * system-blocksize units; rescale from lbasize.
			 */
			if (un->un_f_has_removable_media) {
				media_capacity *= un->un_sys_blocksize;
				media_capacity /= lbasize;
			}
			break;
		case EACCES:
			/* Reservation conflict. */
			rval = EACCES;
			goto done;
		default:
			rval = EIO;
			goto done;
		}
	} else {
		/*
		 * READ CAPACITY(16) succeeded.  Sanity-check the reported
		 * sizes against DEV_BSIZE alignment; fall back to emulated
		 * or default sizes when they look bogus.
		 * NOTE(review): the ISP2(x % DEV_BSIZE) tests appear to
		 * flag sizes that are not clean DEV_BSIZE multiples —
		 * confirm intent against upstream history.
		 */
		if (un->un_f_enable_rmw &&
		    !ISP2(pbsize % DEV_BSIZE)) {
			pbsize = SSD_SECSIZE;
		} else if (!ISP2(lbasize % DEV_BSIZE) ||
		    !ISP2(pbsize % DEV_BSIZE)) {
			pbsize = lbasize = DEV_BSIZE;
		}
		media_capacity = capacity;
	}

	/*
	 * If the media appears to have grown (e.g. LUN expansion), refresh
	 * the cached geometry and re-arm the expansion sysevent.
	 */
	mutex_enter(SD_MUTEX(un));
	if ((un->un_f_blockcount_is_valid == TRUE) &&
	    (un->un_f_tgt_blocksize_is_valid == TRUE) &&
	    (capacity > un->un_blockcount)) {
		un->un_f_expnevent = B_FALSE;
		sd_update_block_info(un, lbasize, capacity);
	}
	mutex_exit(SD_MUTEX(un));

	*dki_lbsize = lbasize;
	*dki_capacity = media_capacity;
	if (dki_pbsize)
		*dki_pbsize = pbsize;

done:
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
no_assessment:
	sd_ssc_fini(ssc);
	kmem_free(out_data, SD_PROFILE_HEADER_LEN);
	kmem_free(rqbuf, SENSE_LENGTH);
	return (rval);
}
/*
 * sd_get_media_info: implementation of the DKIOCGMEDIAINFO ioctl.
 * Thin wrapper over sd_get_media_info_com() that copies the resulting
 * dk_minfo out to the caller.  Returns 0, or an errno from the common
 * worker, or EFAULT on copyout failure.
 */
static int
sd_get_media_info(dev_t dev, caddr_t arg, int flag)
{
	struct dk_minfo	media_info;
	int		err;

	/* NULL pbsize pointer: the classic ioctl does not report it. */
	err = sd_get_media_info_com(dev, &media_info.dki_media_type,
	    &media_info.dki_lbsize, &media_info.dki_capacity, NULL);
	if (err != 0)
		return (err);

	if (ddi_copyout(&media_info, arg, sizeof (struct dk_minfo), flag))
		err = EFAULT;
	return (err);
}
/*
 * sd_get_media_info_ext: implementation of the DKIOCGMEDIAINFOEXT ioctl.
 * Like sd_get_media_info() but also reports the physical block size and
 * honors the caller's data model when sizing the copyout.
 */
static int
sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag)
{
	struct dk_minfo_ext	minfo_ext;
	size_t			out_len;
	int			err;

	err = sd_get_media_info_com(dev, &minfo_ext.dki_media_type,
	    &minfo_ext.dki_lbsize, &minfo_ext.dki_capacity,
	    &minfo_ext.dki_pbsize);
	if (err != 0)
		return (err);

	/* ILP32 callers receive the 32-bit structure layout. */
	if (ddi_model_convert_from(flag & FMODELS) == DDI_MODEL_ILP32)
		out_len = sizeof (struct dk_minfo_ext32);
	else
		out_len = sizeof (struct dk_minfo_ext);

	if (ddi_copyout(&minfo_ext, arg, out_len, flag))
		err = EFAULT;
	return (err);
}
/*
 * sd_watch_request_submit: start a periodic media-state poll for this
 * unit.  MMC drives that support GESN polling use the MMC-specific watch
 * mechanism; all other devices use the generic TUR-based watch.  Returns
 * the opaque watch token, or NULL on failure.
 */
static opaque_t
sd_watch_request_submit(struct sd_lun *un)
{
	dev_t dev = sd_make_device(SD_DEVINFO(un));

	if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) {
		/* GET EVENT STATUS NOTIFICATION based polling. */
		return (scsi_mmc_watch_request_submit(SD_SCSI_DEVP(un),
		    sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
		    (caddr_t)dev));
	}

	return (scsi_watch_request_submit(SD_SCSI_DEVP(un),
	    sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
	    (caddr_t)dev));
}
/*
 * sd_check_media: implementation of the DKIOCSTATE ioctl.  Blocks the
 * caller until the media state differs from the state passed in, using
 * the scsi_watch polling machinery and un_state_cv for wakeups.  On a
 * transition to DKIO_INSERTED the geometry is re-read, the label is
 * revalidated and the media is locked in.
 *
 * Arguments:
 *	dev	- device number
 *	state	- the media state the caller last observed
 *
 * Returns: 0 on success, ENXIO for a bad unit, EAGAIN if the watch
 * could not be started, EINTR if the wait was signalled, EIO on
 * capacity/PM failures.
 */
static int
sd_check_media(dev_t dev, enum dkio_state state)
{
	struct sd_lun *un = NULL;
	enum dkio_state prev_state;
	opaque_t token = NULL;
	int rval = 0;
	sd_ssc_t *ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n");

	ssc = sd_ssc_init(un);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: "
	    "state=%x, mediastate=%x\n", state, un->un_mediastate);

	prev_state = un->un_mediastate;

	/*
	 * Only wait when the caller is up to date with the current state
	 * (or the state is not yet known); otherwise report immediately.
	 */
	if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) {
		/*
		 * Drop the mutex across power management and watch
		 * submission, both of which may block.
		 */
		mutex_exit(SD_MUTEX(un));
		if (sd_pm_entry(un) != DDI_SUCCESS) {
			mutex_enter(SD_MUTEX(un));
			goto done;
		}
		token = sd_watch_request_submit(un);

		sd_pm_exit(un);

		mutex_enter(SD_MUTEX(un));
		if (token == NULL) {
			rval = EAGAIN;
			goto done;
		}

		/*
		 * This ioctl may block indefinitely; take it out of the
		 * in-driver command count so the watch thread's commands
		 * are not throttled behind it.
		 */
		un->un_ncmds_in_driver--;
		ASSERT(un->un_ncmds_in_driver >= 0);
		un->un_swr_token = token;
		un->un_specified_mediastate = state;

		SD_TRACE(SD_LOG_COMMON, un,
		    "sd_check_media: waiting for media state change\n");
		while (un->un_mediastate == state) {
			if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
				SD_TRACE(SD_LOG_COMMON, un,
				    "sd_check_media: waiting for media state "
				    "was interrupted\n");
				/* Restore the command count on EINTR. */
				un->un_ncmds_in_driver++;
				rval = EINTR;
				goto done;
			}
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_check_media: received signal, state=%x\n",
			    un->un_mediastate);
		}
		/* The state changed; count this command again. */
		un->un_ncmds_in_driver++;
	}

	/* Invalidate CD geometry on ejection. */
	if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
		sr_ejected(un);
	}

	if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
		uint64_t	capacity;
		uint_t		lbasize;

		SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
		mutex_exit(SD_MUTEX(un));
		/*
		 * New media: power up, read the new capacity, revalidate
		 * the label and lock the media in place.
		 */
		if (sd_pm_entry(un) == DDI_SUCCESS) {
			rval = sd_send_scsi_READ_CAPACITY(ssc,
			    &capacity, &lbasize, SD_PATH_DIRECT);
			if (rval != 0) {
				sd_pm_exit(un);
				if (rval == EIO)
					sd_ssc_assessment(ssc,
					    SD_FMT_STATUS_CHECK);
				else
					sd_ssc_assessment(ssc, SD_FMT_IGNORE);
				mutex_enter(SD_MUTEX(un));
				goto done;
			}
		} else {
			rval = EIO;
			mutex_enter(SD_MUTEX(un));
			goto done;
		}
		mutex_enter(SD_MUTEX(un));

		sd_update_block_info(un, lbasize, capacity);

		/* CDs additionally check for writability. */
		if (ISCD(un)) {
			sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
		}

		mutex_exit(SD_MUTEX(un));
		cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
		if ((cmlb_validate(un->un_cmlbhandle, 0,
		    (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) {
			sd_set_pstats(un);
			SD_TRACE(SD_LOG_IO_PARTITION, un,
			    "sd_check_media: un:0x%p pstats created and "
			    "set\n", un);
		}

		/* Prevent manual ejection while the media is in use. */
		rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
		    SD_PATH_DIRECT);

		sd_pm_exit(un);

		if (rval != 0) {
			if (rval == EIO)
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			else
				sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		mutex_enter(SD_MUTEX(un));
	}
done:
	sd_ssc_fini(ssc);
	un->un_f_watcht_stopped = FALSE;
	/*
	 * Tear down the watch request we submitted, unless another
	 * thread already replaced/cleared the token.
	 */
	if (token != NULL && un->un_swr_token != NULL) {
		token = un->un_swr_token;
		mutex_exit(SD_MUTEX(un));
		(void) scsi_watch_request_terminate(token,
		    SCSI_WATCH_TERMINATE_WAIT);
		if (scsi_watch_get_ref_count(token) == 0) {
			mutex_enter(SD_MUTEX(un));
			un->un_swr_token = (opaque_t)NULL;
		} else {
			mutex_enter(SD_MUTEX(un));
		}
	}

	/*
	 * Lazily fill in the capacity kstat once the block count is
	 * known (it may not have been at attach time for removables).
	 */
	if (un->un_errstats) {
		struct sd_errstats *stp = NULL;

		stp = (struct sd_errstats *)un->un_errstats->ks_data;
		if ((stp->sd_capacity.value.ui64 == 0) &&
		    (un->un_f_blockcount_is_valid == TRUE)) {
			stp->sd_capacity.value.ui64 =
			    (uint64_t)((uint64_t)un->un_blockcount *
			    un->un_sys_blocksize);
		}
	}
	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n");
	return (rval);
}
/*
 * sd_delayed_cv_broadcast: timeout(9F) handler that wakes any thread
 * blocked in sd_check_media() once the media-settle delay has elapsed.
 */
static void
sd_delayed_cv_broadcast(void *arg)
{
	struct sd_lun *lunp = arg;

	SD_TRACE(SD_LOG_COMMON, lunp, "sd_delayed_cv_broadcast\n");

	mutex_enter(SD_MUTEX(lunp));
	/* The timeout has fired; clear its id before waking waiters. */
	lunp->un_dcvb_timeid = NULL;
	cv_broadcast(&lunp->un_state_cv);
	mutex_exit(SD_MUTEX(lunp));
}
/*
 * sd_media_watch_cb: scsi_watch callback invoked on each media poll
 * completion.  Decodes the result (GESN data for MMC drives, sense data
 * otherwise) into a dkio_state and, on a state change, wakes threads
 * blocked in sd_check_media() — immediately for ejection, after a
 * settle delay for insertion.
 *
 * Arguments:
 *	arg	- the dev_t registered with the watch request
 *	resultp	- poll result (status, sense, packet, GESN data)
 *
 * Returns: 0 normally; -1 if the unit has disappeared (terminates
 * the watch).
 */
static int
sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
	struct sd_lun *un;
	struct scsi_status *statusp = resultp->statusp;
	uint8_t *sensep = (uint8_t *)resultp->sensep;
	enum dkio_state state = DKIO_NONE;
	dev_t dev = (dev_t)arg;
	uchar_t actual_sense_length;
	uint8_t skey, asc, ascq;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (-1);
	}
	actual_sense_length = resultp->actual_sense_length;

	mutex_enter(SD_MUTEX(un));
	SD_TRACE(SD_LOG_COMMON, un,
	    "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
	    *((char *)statusp), (void *)sensep, actual_sense_length);

	/* Device is gone entirely: wake waiters and report DEV_GONE. */
	if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
		un->un_mediastate = DKIO_DEV_GONE;
		cv_broadcast(&un->un_state_cv);
		mutex_exit(SD_MUTEX(un));

		return (0);
	}

	if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) {
		/*
		 * MMC GESN polling: media presence is reported directly
		 * in the event data rather than via sense codes.
		 */
		if (sd_gesn_media_data_valid(resultp->mmc_data)) {
			if ((resultp->mmc_data[5] &
			    SD_GESN_MEDIA_EVENT_STATUS_PRESENT) != 0) {
				state = DKIO_INSERTED;
			} else {
				state = DKIO_EJECTED;
			}
			if ((resultp->mmc_data[4] & SD_GESN_MEDIA_EVENT_CODE) ==
			    SD_GESN_MEDIA_EVENT_EJECTREQUEST) {
				/* User pressed eject: log a sysevent. */
				sd_log_eject_request_event(un, KM_NOSLEEP);
			}
		}
	} else if (sensep != NULL) {
		/* Classic polling: infer state from sense key/ASC/ASCQ. */
		skey = scsi_sense_key(sensep);
		asc = scsi_sense_asc(sensep);
		ascq = scsi_sense_ascq(sensep);

		SD_INFO(SD_LOG_COMMON, un,
		    "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
		    skey, asc, ascq);
		/* Sense must include the ASCQ byte (offset 13) to decode. */
		if (actual_sense_length >= 13) {
			if (skey == KEY_UNIT_ATTENTION) {
				/* 28/xx: not-ready-to-ready transition. */
				if (asc == 0x28) {
					state = DKIO_INSERTED;
				}
			} else if (skey == KEY_NOT_READY) {
				/* 06/00: no reference position found. */
				if (asc == 0x06 && ascq == 0x00)
					state = DKIO_INSERTED;
				/*
				 * 3a/xx: medium not present.
				 * Note: there is no else joining the test
				 * above to this one, so for asc 0x06 the
				 * else-arm below also runs; it leaves
				 * state untouched since asc != 0x04.
				 */
				if (asc == 0x3a) {
					state = DKIO_EJECTED;
				} else {
					/*
					 * 04/02, 04/07, 04/08: becoming
					 * ready (init required / format in
					 * progress / long write).
					 */
					if ((asc == 0x04) &&
					    ((ascq == 0x02) ||
					    (ascq == 0x07) ||
					    (ascq == 0x08))) {
						state = DKIO_INSERTED;
					}
				}
			} else if (skey == KEY_NO_SENSE) {
				/* No sense data: nothing to report. */
				if ((asc == 0x00) && (ascq == 0x00)) {
					mutex_exit(SD_MUTEX(un));
					return (0);
				}
			}
		}
	} else if ((*((char *)statusp) == STATUS_GOOD) &&
	    (resultp->pkt->pkt_reason == CMD_CMPLT)) {
		/* Clean TUR completion: media is present. */
		state = DKIO_INSERTED;
	}

	SD_TRACE(SD_LOG_COMMON, un,
	    "sd_media_watch_cb: state=%x, specified=%x\n",
	    state, un->un_specified_mediastate);

	/*
	 * Wake waiters only on an actual change away from the state the
	 * waiter registered.  Insertion is debounced via a delayed
	 * broadcast so the media can spin up/settle first.
	 */
	if (state != un->un_specified_mediastate) {
		un->un_mediastate = state;
		if (state == DKIO_INSERTED) {
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_media_watch_cb: delayed cv_broadcast\n");
			if (un->un_dcvb_timeid == NULL) {
				un->un_dcvb_timeid =
				    timeout(sd_delayed_cv_broadcast, un,
				    drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
			}
		} else {
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_media_watch_cb: immediate cv_broadcast\n");
			cv_broadcast(&un->un_state_cv);
		}
	}
	mutex_exit(SD_MUTEX(un));
	return (0);
}
/*
 * sd_dkio_get_temp: implementation of the DKIOCGTEMPERATURE ioctl.
 * Reads the SCSI temperature log page and returns the current and
 * reference temperatures in a struct dk_temperature.  When the caller
 * sets DKT_BYPASS_PM the command is sent without waking a powered-down
 * device (EAGAIN if it is in low power).
 *
 * Returns: 0 on success; ENXIO for a bad unit; EFAULT on copy errors;
 * EAGAIN if PM state forbids the command; errno from LOG SENSE.
 */
static int
sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun *un = NULL;
	struct dk_temperature *dktemp = NULL;
	uchar_t *temperature_page;
	int rval = 0;
	int path_flag = SD_PATH_STANDARD;
	sd_ssc_t *ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ssc = sd_ssc_init(un);
	dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP);

	/* copyin the kcq structure */
	if (ddi_copyin((void *)arg, dktemp,
	    sizeof (struct dk_temperature), flag) != 0) {
		rval = EFAULT;
		goto done;
	}

	/* Initialize the temperature to invalid. */
	dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
	dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;

	/*
	 * DKT_BYPASS_PM: do not power the device up.  Fail with EAGAIN
	 * if it is already in low power; otherwise take a PM hold (to be
	 * released at done1) and use the direct path.
	 */
	if (dktemp->dkt_flags & DKT_BYPASS_PM) {
		path_flag = SD_PATH_DIRECT;
		ASSERT(!mutex_owned(&un->un_pm_mutex));
		mutex_enter(&un->un_pm_mutex);
		if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
			/* Device is in low power: do not wake it. */
			mutex_exit(&un->un_pm_mutex);
			rval = EAGAIN;
			goto done;
		} else {
			mutex_exit(&un->un_pm_mutex);
			if (sd_pm_entry(un) != DDI_SUCCESS) {
				rval = EAGAIN;
				goto done;
			}
		}
	}

	temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);

	rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page,
	    TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag);
	if (rval != 0)
		goto done2;

	/*
	 * Current temperature: parameter code 0x0000, length 0x02.
	 * 0xFF in the value byte means "not available".
	 */
	if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
	    (temperature_page[5] == 0x00)) {
		if (temperature_page[9] == 0xFF) {
			dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
		} else {
			dktemp->dkt_cur_temp = (short)(temperature_page[9]);
		}
	}

	/* Reference temperature: parameter code 0x0001, length 0x02. */
	if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
	    (temperature_page[11] == 0x01)) {
		if (temperature_page[15] == 0xFF) {
			dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
		} else {
			dktemp->dkt_ref_temp = (short)(temperature_page[15]);
		}
	}

	/* Do the copyout regardless of the temperature commands status. */
	if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
	    flag) != 0) {
		rval = EFAULT;
		goto done1;
	}

done2:
	/* FMA assessment only for the LOG SENSE failure path. */
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
done1:
	/* Release the PM hold taken for the bypass-PM case. */
	if (path_flag == SD_PATH_DIRECT) {
		sd_pm_exit(un);
	}

	kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
done:
	sd_ssc_fini(ssc);
	if (dktemp != NULL) {
		kmem_free(dktemp, sizeof (struct dk_temperature));
	}

	return (rval);
}
/*
 * sd_log_page_supported: query the device's list of supported log pages
 * (LOG SENSE page 0) and search it for the given page code.
 *
 * Arguments:
 *	ssc		- sd_ssc state for the unit
 *	log_page	- page code to look for
 *
 * Returns: 1 if the page is supported, 0 if not, -1 if the supported
 * pages list could not be retrieved.
 */
static int
sd_log_page_supported(sd_ssc_t *ssc, int log_page)
{
	uchar_t *log_page_data;
	int i;
	int match = 0;
	int log_size;
	int status = 0;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);

	log_page_data = kmem_zalloc(0xFF, KM_SLEEP);

	/* Page 0x00 returns the list of supported page codes. */
	status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0,
	    SD_PATH_DIRECT);

	if (status != 0) {
		if (status == EIO) {
			/*
			 * ILLEGAL REQUEST just means the device has no
			 * log page support: downgrade the FMA severity.
			 */
			uint8_t *sensep;
			int senlen;

			sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
			senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
			    ssc->ssc_uscsi_cmd->uscsi_rqresid);

			if (senlen > 0 &&
			    scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
				sd_ssc_assessment(ssc,
				    SD_FMT_IGNORE_COMPROMISE);
			} else {
				sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
			}
		} else {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		SD_ERROR(SD_LOG_COMMON, un,
		    "sd_log_page_supported: failed log page retrieval\n");
		kmem_free(log_page_data, 0xFF);
		return (-1);
	}

	/* Byte 3 is the page length; the page codes follow from byte 4. */
	log_size = log_page_data[3];

	/*
	 * The list of supported log pages start from the fourth byte. Check
	 * until we run out of log pages or a match is found.
	 */
	for (i = 4; (i < (log_size + 4)) && !match; i++) {
		if (log_page_data[i] == log_page) {
			match++;
		}
	}
	kmem_free(log_page_data, 0xFF);
	return (match);
}
/*
 * sd_mhdioc_failfast: implementation of the MHIOCENFAILFAST ioctl.
 * A nonzero mh_time enables failfast behavior and (unless INT_MAX,
 * meaning "no periodic probe") starts the multihost watch at that
 * interval; zero disables both.
 *
 * Returns: 0, ENXIO for a bad unit, EFAULT on copyin failure, or the
 * result of sd_check_mhd().
 */
static int
sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun	*un;
	int		mh_time;
	int		rval = 0;

	un = ddi_get_soft_state(sd_state, SDUNIT(dev));
	if (un == NULL)
		return (ENXIO);

	if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag))
		return (EFAULT);

	if (mh_time == 0) {
		/* Disable: stop the watch, then clear the failfast flag. */
		(void) sd_check_mhd(dev, 0);
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status &= ~SD_FAILFAST;
		mutex_exit(SD_MUTEX(un));
		return (rval);
	}

	/* Enable failfast; INT_MAX requests no periodic probing. */
	mutex_enter(SD_MUTEX(un));
	un->un_resvd_status |= SD_FAILFAST;
	mutex_exit(SD_MUTEX(un));
	if (mh_time != INT_MAX)
		rval = sd_check_mhd(dev, mh_time);

	return (rval);
}
/*
 * sd_mhdioc_takeown: implementation of the MHIOCTKOWN ioctl.  Forcibly
 * takes ownership (reservation) of the device via sd_take_ownership(),
 * then arms the reinstatement machinery: the reinstate delay, the MHD
 * watch (unless failfast already runs it) and a reset notification
 * callback so a lost reservation is re-acquired.
 *
 * Returns: 0 on success, ENXIO for a bad unit, EFAULT on copyin
 * failure, or the error from sd_take_ownership().
 */
static int
sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun *un = NULL;
	struct mhioctkown *tkown = NULL;
	int rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* The takeown parameters are optional (arg may be NULL). */
	if (arg != NULL) {
		tkown = (struct mhioctkown *)
		    kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP);
		rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag);
		if (rval != 0) {
			rval = EFAULT;
			goto error;
		}
	}

	rval = sd_take_ownership(dev, tkown);
	mutex_enter(SD_MUTEX(un));
	if (rval == 0) {
		un->un_resvd_status |= SD_RESERVE;
		/*
		 * Caller-supplied reinstate delay is in ms; the global is
		 * kept in microseconds.
		 */
		if (tkown != NULL && tkown->reinstate_resv_delay != 0) {
			sd_reinstate_resv_delay =
			    tkown->reinstate_resv_delay * 1000;
		} else {
			sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
		}
		/*
		 * Give the scsi_watch routine interval set by
		 * the MHIOCENFAILFAST ioctl precedence here.
		 */
		if ((un->un_resvd_status & SD_FAILFAST) == 0) {
			mutex_exit(SD_MUTEX(un));
			(void) sd_check_mhd(dev,
			    sd_reinstate_resv_delay / 1000);
			SD_TRACE(SD_LOG_IOCTL_MHD, un,
			    "sd_mhdioc_takeown : %d\n",
			    sd_reinstate_resv_delay);
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		/* Get notified so a bus/target reset re-triggers reclaim. */
		(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
		    sd_mhd_reset_notify_cb, (caddr_t)un);
	} else {
		un->un_resvd_status &= ~SD_RESERVE;
		mutex_exit(SD_MUTEX(un));
	}

error:
	if (tkown != NULL) {
		kmem_free(tkown, sizeof (struct mhioctkown));
	}
	return (rval);
}
/*
 * sd_mhdioc_release: implementation of the MHIOCRELEASE ioctl.  Drops
 * our reservation on the device, cancels any pending reinstatement
 * timeout/request, stops the MHD watch (unless failfast keeps it) and
 * cancels the reset notification.  On failure the previous reservation
 * status flags are restored.
 *
 * Returns: 0 on success, ENXIO for a bad unit, or the error from
 * sd_reserve_release().
 */
static int
sd_mhdioc_release(dev_t dev)
{
	struct sd_lun *un = NULL;
	timeout_id_t resvd_timeid_save;
	int resvd_status_save;
	int rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	mutex_enter(SD_MUTEX(un));
	resvd_status_save = un->un_resvd_status;
	un->un_resvd_status &=
	    ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE);
	/* Cancel a pending reinstatement timeout outside the mutex. */
	if (un->un_resvd_timeid) {
		resvd_timeid_save = un->un_resvd_timeid;
		un->un_resvd_timeid = NULL;
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(resvd_timeid_save);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	/* Remove any queued reclaim request for this device. */
	sd_rmv_resv_reclaim_req(dev);

	if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) {
		mutex_enter(SD_MUTEX(un));
		/* Stop the MHD watch unless failfast still needs it. */
		if ((un->un_mhd_token) &&
		    ((un->un_resvd_status & SD_FAILFAST) == 0)) {
			mutex_exit(SD_MUTEX(un));
			(void) sd_check_mhd(dev, 0);
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		(void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
		    sd_mhd_reset_notify_cb, (caddr_t)un);
	} else {
		/* Release failed: restore the previous reservation flags. */
		mutex_enter(SD_MUTEX(un));
		un->un_resvd_status = resvd_status_save;
		mutex_exit(SD_MUTEX(un));
	}
	return (rval);
}
/*
 * sd_mhdioc_register_devid: implementation of the MHIOCREREGISTERDEVID
 * ioctl.  Discards any currently registered device id and, if the unit
 * responds to TEST UNIT READY, re-reads and re-registers it.
 *
 * Returns: 0 on success, ENXIO for a bad unit, EACCES on reservation
 * conflict, EIO on other failures.
 */
static int
sd_mhdioc_register_devid(dev_t dev)
{
	struct sd_lun *un = NULL;
	int rval = 0;
	sd_ssc_t *ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	/* If a devid already exists, unregister and free it first. */
	if (un->un_devid != NULL) {
		ddi_devid_unregister(SD_DEVINFO(un));
		ddi_devid_free(un->un_devid);
		un->un_devid = NULL;
	}

	/* Check for reservation conflict before touching the device. */
	mutex_exit(SD_MUTEX(un));
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
	mutex_enter(SD_MUTEX(un));

	switch (rval) {
	case 0:
		sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED);
		break;
	case EACCES:
		/* Reserved by another host: leave the devid unregistered. */
		break;
	default:
		rval = EIO;
	}

	mutex_exit(SD_MUTEX(un));
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
	sd_ssc_fini(ssc);
	return (rval);
}
/*
 * sd_mhdioc_inkeys: implementation of the MHIOCGRP_INKEYS ioctl.
 * Retrieves the persistent-reservation registration keys via
 * PERSISTENT RESERVE IN (READ KEYS), handling the 32-bit data model
 * when the kernel is built with _MULTI_DATAMODEL.
 *
 * Returns: 0 on success, ENXIO for a bad unit, EFAULT on copy errors,
 * or the error from sd_persistent_reservation_in_read_keys().
 */
static int
sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun *un;
	mhioc_inkeys_t inkeys;
	int rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		/*
		 * 32-bit caller: convert the embedded key-list pointer
		 * to the native width, do the command, then copy back
		 * only the generation number.
		 */
		struct mhioc_inkeys32 inkeys32;

		if (ddi_copyin(arg, &inkeys32,
		    sizeof (struct mhioc_inkeys32), flag) != 0) {
			return (EFAULT);
		}
		inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		inkeys32.generation = inkeys.generation;
		if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		if ((rval = sd_persistent_reservation_in_read_keys(un,
		    &inkeys, flag)) != 0) {
			return (rval);
		}
		if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
		    flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}
	rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
	if (rval != 0) {
		return (rval);
	}
	if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
		return (EFAULT);
	}

#endif /* _MULTI_DATAMODEL */

	return (rval);
}
/*
 * sd_mhdioc_inresv: implementation of the MHIOCGRP_INRESV ioctl.
 * Retrieves the persistent reservations via PERSISTENT RESERVE IN
 * (READ RESERVATION), handling the 32-bit data model when the kernel
 * is built with _MULTI_DATAMODEL.  Mirrors sd_mhdioc_inkeys().
 *
 * Returns: 0 on success, ENXIO for a bad unit, EFAULT on copy errors,
 * or the error from sd_persistent_reservation_in_read_resv().
 */
static int
sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag)
{
	struct sd_lun *un;
	mhioc_inresvs_t inresvs;
	int rval = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

#ifdef _MULTI_DATAMODEL

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		/*
		 * 32-bit caller: widen the embedded list pointer, run
		 * the command, copy back only the generation number.
		 */
		struct mhioc_inresvs32 inresvs32;

		if (ddi_copyin(arg, &inresvs32,
		    sizeof (struct mhioc_inresvs32), flag) != 0) {
			return (EFAULT);
		}
		inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li;
		if ((rval = sd_persistent_reservation_in_read_resv(un,
		    &inresvs, flag)) != 0) {
			return (rval);
		}
		inresvs32.generation = inresvs.generation;
		if (ddi_copyout(&inresvs32, arg,
		    sizeof (struct mhioc_inresvs32), flag) != 0) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin(arg, &inresvs,
		    sizeof (mhioc_inresvs_t), flag) != 0) {
			return (EFAULT);
		}
		if ((rval = sd_persistent_reservation_in_read_resv(un,
		    &inresvs, flag)) != 0) {
			return (rval);
		}
		if (ddi_copyout(&inresvs, arg,
		    sizeof (mhioc_inresvs_t), flag) != 0) {
			return (EFAULT);
		}
		break;
	}

#else /* ! _MULTI_DATAMODEL */

	if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) {
		return (EFAULT);
	}
	rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag);
	if (rval != 0) {
		return (rval);
	}
	if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) {
		return (EFAULT);
	}

#endif /* ! _MULTI_DATAMODEL */

	return (rval);
}
/*
 * sd_check_mhd: start, restart or stop the multihost-disk watch
 * (periodic TUR probing used to detect reservation loss).
 *
 * Arguments:
 *	dev		- device number
 *	interval	- probe interval in milliseconds; 0 means stop
 *			  the current watch (and restart it only if a
 *			  reservation is still held)
 *
 * Returns: 0 on success, ENXIO for a bad unit, EAGAIN if the watch
 * request could not be submitted.
 */
static int
sd_check_mhd(dev_t dev, int interval)
{
	struct sd_lun *un;
	opaque_t token;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* is this a watch termination request? */
	if (interval == 0) {
		mutex_enter(SD_MUTEX(un));
		/* if there is an existing watch task then terminate it */
		if (un->un_mhd_token) {
			token = un->un_mhd_token;
			un->un_mhd_token = NULL;
			mutex_exit(SD_MUTEX(un));
			(void) scsi_watch_request_terminate(token,
			    SCSI_WATCH_TERMINATE_ALL_WAIT);
			mutex_enter(SD_MUTEX(un));
		} else {
			mutex_exit(SD_MUTEX(un));
			/*
			 * Note: We return here instead of going to "done"
			 * because we do not want to mark the mhd watch
			 * request as requested, since the stop request
			 * means we no longer want a watch.
			 */
			return (0);
		}

		/*
		 * If we still hold a reservation, fall through and
		 * restart the watch at the reinstate interval.
		 */
		if (un->un_resvd_status & SD_RESERVE) {
			interval = sd_reinstate_resv_delay / 1000;
		} else {
			/* no failfast so bail */
			mutex_exit(SD_MUTEX(un));
			return (0);
		}
		mutex_exit(SD_MUTEX(un));
	}

	/*
	 * adjust minimum time interval to 1 second,
	 * and convert from msecs to usecs
	 */
	if (interval > 0 && interval < 1000) {
		interval = 1000;
	}
	interval *= 1000;

	/*
	 * submit the request to the scsi_watch service
	 */
	token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval,
	    SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev);
	if (token == NULL) {
		return (EAGAIN);
	}

	/*
	 * save token for termination later on
	 */
	mutex_enter(SD_MUTEX(un));
	un->un_mhd_token = token;
	mutex_exit(SD_MUTEX(un));
	return (0);
}
/*
 * sd_mhd_watch_cb: scsi_watch callback for the multihost-disk watch.
 * Examines each probe completion for reservation conflicts (possibly
 * panicking under failfast) and for unit-attention sense indicating a
 * device reset; a reset while reserved marks the reservation lost and
 * schedules sd_mhd_resvd_recover() to reclaim it.
 *
 * Arguments:
 *	arg	- the dev_t registered with the watch request
 *	resultp	- probe result (status, sense, packet)
 *
 * Returns: 0 normally, ENXIO if the unit has disappeared.
 */
static int
sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
{
	struct sd_lun *un;
	struct scsi_status *statusp;
	uint8_t *sensep;
	struct scsi_pkt *pkt;
	uchar_t actual_sense_length;
	dev_t dev = (dev_t)arg;

	ASSERT(resultp != NULL);
	statusp			= resultp->statusp;
	sensep			= (uint8_t *)resultp->sensep;
	pkt			= resultp->pkt;
	actual_sense_length	= resultp->actual_sense_length;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	SD_TRACE(SD_LOG_IOCTL_MHD, un,
	    "sd_mhd_watch_cb: reason '%s', status '%s'\n",
	    scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp)));

	/* Begin processing of the status and/or sense data */
	if (pkt->pkt_reason != CMD_CMPLT) {
		/* Handle the incomplete packet */
		sd_mhd_watch_incomplete(un, pkt);
		return (0);
	} else if (*((unsigned char *)statusp) != STATUS_GOOD) {
		if (*((unsigned char *)statusp)
		    == STATUS_RESERVATION_CONFLICT) {
			/*
			 * Another host has reserved the device: panic if
			 * failfast is armed, otherwise just record it.
			 */
			mutex_enter(SD_MUTEX(un));
			if ((un->un_resvd_status & SD_FAILFAST) &&
			    (sd_failfast_enable)) {
				sd_panic_for_res_conflict(un);
				/*NOTREACHED*/
			}
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_cb: Reservation Conflict\n");
			un->un_resvd_status |= SD_RESERVATION_CONFLICT;
			mutex_exit(SD_MUTEX(un));
		}
	}

	/*
	 * From here on the SD mutex must be held: the sensep branch takes
	 * it (or returns early on short sense), the NULL branch takes it.
	 */
	if (sensep != NULL) {
		if (actual_sense_length >= (SENSE_LENGTH - 2)) {
			mutex_enter(SD_MUTEX(un));
			/*
			 * A reset ASC while we hold a reservation means
			 * the reservation has been lost.
			 */
			if ((scsi_sense_asc(sensep) ==
			    SD_SCSI_RESET_SENSE_CODE) &&
			    (un->un_resvd_status & SD_RESERVE)) {
				/*
				 * The additional sense code indicates a power
				 * or bus device reset has occurred; update the
				 * reservation status.
				 */
				un->un_resvd_status |=
				    (SD_LOST_RESERVE | SD_WANT_RESERVE);
				SD_INFO(SD_LOG_IOCTL_MHD, un,
				    "sd_mhd_watch_cb: Lost Reservation\n");
			}
		} else {
			/* Sense too short to decode: nothing to do. */
			return (0);
		}
	} else {
		mutex_enter(SD_MUTEX(un));
	}

	if ((un->un_resvd_status & SD_RESERVE) &&
	    (un->un_resvd_status & SD_LOST_RESERVE)) {
		if (un->un_resvd_status & SD_WANT_RESERVE) {
			/*
			 * A reset occurred in between the last probe and this
			 * one so if a timeout is pending cancel it.
			 */
			if (un->un_resvd_timeid) {
				timeout_id_t temp_id = un->un_resvd_timeid;
				un->un_resvd_timeid = NULL;
				mutex_exit(SD_MUTEX(un));
				(void) untimeout(temp_id);
				mutex_enter(SD_MUTEX(un));
			}
			un->un_resvd_status &= ~SD_WANT_RESERVE;
		}
		if (un->un_resvd_timeid == 0) {
			/* Schedule a timeout to handle the lost reservation */
			un->un_resvd_timeid = timeout(sd_mhd_resvd_recover,
			    (void *)dev,
			    drv_usectohz(sd_reinstate_resv_delay));
		}
	}
	mutex_exit(SD_MUTEX(un));
	return (0);
}
/*
 * sd_mhd_watch_incomplete: handle an MHD watch probe that did not
 * complete (pkt_reason != CMD_CMPLT).  Depending on the transport
 * error this may disable tagged queueing, escalate LUN/target/bus
 * resets, mark the reservation lost after a reset, take the unit
 * offline when it stops responding to selection, or just log.
 *
 * Arguments:
 *	un	- softstate for the unit
 *	pkt	- the incomplete scsi_pkt from the watch probe
 */
static void
sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt)
{
	int be_chatty;
	int perr;

	ASSERT(pkt != NULL);
	ASSERT(un != NULL);
	/* Log unless the command asked for silence. */
	be_chatty	= (!(pkt->pkt_flags & FLAG_SILENT));
	perr		= (pkt->pkt_statistics & STAT_PERR);

	mutex_enter(SD_MUTEX(un));
	if (un->un_state == SD_STATE_DUMPING) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	switch (pkt->pkt_reason) {
	case CMD_UNX_BUS_FREE:
		/*
		 * If we had a parity error that caused the target to drop BSY*,
		 * don't be chatty about it.
		 */
		if (perr && be_chatty) {
			be_chatty = 0;
		}
		break;
	case CMD_TAG_REJECT:
		/*
		 * The SCSI-2 spec states that a tag reject will be sent by the
		 * target if tagged queuing is not supported. A tag reject may
		 * also be sent during certain initialization periods or to
		 * control internal resources. For the latter case the target
		 * may also return Queue Full.
		 *
		 * If this driver receives a tag reject from a target that is
		 * going through an init period or controlling internal
		 * resources tagged queuing will be disabled. This is a less
		 * than optimal behavior but the driver is unable to determine
		 * the target state and assumes tagged queueing is not supported
		 */
		pkt->pkt_flags = 0;
		un->un_tagflags = 0;

		if (un->un_f_opt_queueing == TRUE) {
			un->un_throttle = min(un->un_throttle, 3);
		} else {
			un->un_throttle = 1;
		}
		mutex_exit(SD_MUTEX(un));
		(void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
		mutex_enter(SD_MUTEX(un));
		break;
	case CMD_INCOMPLETE:
		/*
		 * The transport stopped with an abnormal state, fallthrough and
		 * reset the target and/or bus unless selection did not complete
		 * (indicated by STATE_GOT_BUS) in which case we don't want to
		 * go through a target/bus reset
		 */
		if (pkt->pkt_state == STATE_GOT_BUS) {
			break;
		}
		/* FALLTHROUGH */

	case CMD_TIMEOUT:
	default:
		/*
		 * The lun may still be running the command, so a lun reset
		 * should be attempted. If the lun reset fails or cannot be
		 * issued, than try a target reset. Lastly try a bus reset.
		 */
		if ((pkt->pkt_statistics &
		    (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) == 0) {
			int reset_retval = 0;
			mutex_exit(SD_MUTEX(un));
			if (un->un_f_allow_bus_device_reset == TRUE) {
				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			}
			mutex_enter(SD_MUTEX(un));
		}
		break;
	}

	/* A device/bus reset has occurred; update the reservation status. */
	if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
	    (STAT_BUS_RESET | STAT_DEV_RESET))) {
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			un->un_resvd_status |=
			    (SD_LOST_RESERVE | SD_WANT_RESERVE);
			SD_INFO(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: Lost Reservation\n");
		}
	}

	/*
	 * The disk has been turned off; Update the device state.
	 *
	 * Note: Should we be offlining the disk here?
	 */
	if (pkt->pkt_state == STATE_GOT_BUS) {
		SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
		    "Disk not responding to selection\n");
		if (un->un_state != SD_STATE_OFFLINE) {
			New_state(un, SD_STATE_OFFLINE);
		}
	} else if (be_chatty) {
		/*
		 * suppress messages if they are all the same pkt reason;
		 * with TQ, many (up to 256) are returned with the same
		 * pkt_reason
		 */
		if (pkt->pkt_reason != un->un_last_pkt_reason) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_mhd_watch_incomplete: "
			    "SCSI transport failed: reason '%s'\n",
			    scsi_rname(pkt->pkt_reason));
		}
	}
	un->un_last_pkt_reason = pkt->pkt_reason;
	mutex_exit(SD_MUTEX(un));
}
/*
 * sd_sname: map a SCSI status byte (masked with STATUS_MASK) to a
 * human-readable name for log messages.
 */
static char *
sd_sname(uchar_t status)
{
	static const struct {
		uchar_t	code;
		char	*name;
	} status_names[] = {
		{ STATUS_GOOD,			"good status" },
		{ STATUS_CHECK,			"check condition" },
		{ STATUS_MET,			"condition met" },
		{ STATUS_BUSY,			"busy" },
		{ STATUS_INTERMEDIATE,		"intermediate" },
		{ STATUS_INTERMEDIATE_MET,	"intermediate - condition met" },
		{ STATUS_RESERVATION_CONFLICT,	"reservation_conflict" },
		{ STATUS_TERMINATED,		"command terminated" },
		{ STATUS_QFULL,			"queue full" },
	};
	uchar_t	masked = status & STATUS_MASK;
	size_t	i;

	for (i = 0; i < sizeof (status_names) / sizeof (status_names[0]); i++) {
		if (status_names[i].code == masked) {
			return (status_names[i].name);
		}
	}
	return ("<unknown status>");
}
/*
 * sd_mhd_resvd_recover: timeout(9F) handler scheduled when a
 * reservation was lost.  Queues a reclaim request for this device on
 * the global sd_tr list and makes sure the reclaim thread exists to
 * process it.  Runs in timeout context, so allocation is KM_NOSLEEP
 * and a failed allocation simply drops the request (the watch will
 * retry).
 */
static void
sd_mhd_resvd_recover(void *arg)
{
	dev_t dev = (dev_t)arg;
	struct sd_lun *un;
	struct sd_thr_request *sd_treq = NULL;
	struct sd_thr_request *sd_cur = NULL;
	struct sd_thr_request *sd_prev = NULL;
	int already_there = 0;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return;
	}

	mutex_enter(SD_MUTEX(un));
	un->un_resvd_timeid = NULL;
	if (un->un_resvd_status & SD_WANT_RESERVE) {
		/*
		 * There was a reset so don't issue the reserve, allow the
		 * sd_mhd_watch_cb callback function to notice this and
		 * reschedule the timeout for reservation.
		 */
		mutex_exit(SD_MUTEX(un));
		return;
	}
	mutex_exit(SD_MUTEX(un));

	/*
	 * Add this device to the sd_resv_reclaim_request list and the
	 * sd_resv_reclaim_thread should take care of the rest.
	 *
	 * Note: We can't sleep in this context so if the memory allocation
	 * fails allow the sd_resv_reclaim_thread to handle this request on
	 * the next iteration of the watch cycle.
	 */
	sd_treq = (struct sd_thr_request *)
	    kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
	if (sd_treq == NULL) {
		return;
	}

	sd_treq->sd_thr_req_next = NULL;
	sd_treq->dev = dev;
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		sd_tr.srq_thr_req_head = sd_treq;
	} else {
		/* Append unless the device is already queued. */
		sd_cur = sd_prev = sd_tr.srq_thr_req_head;
		for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
			if (sd_cur->dev == dev) {
				already_there = 1;
				break;
			}
			sd_prev = sd_cur;
		}
		if (!already_there) {
			SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
			    "logging request for %lx\n", dev);
			sd_prev->sd_thr_req_next = sd_treq;
		} else {
			kmem_free(sd_treq, sizeof (struct sd_thr_request));
		}
	}

	/*
	 * Create a kernel thread to do the reservation reclaim and free up this
	 * thread. We cannot block this thread while we go away to do the
	 * reservation reclaim
	 */
	if (sd_tr.srq_resv_reclaim_thread == NULL)
		sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
		    sd_resv_reclaim_thread, NULL,
		    0, &p0, TS_RUN, v.v_maxsyspri - 2);

	/* Tell the reservation reclaim thread that it has work to do */
	cv_signal(&sd_tr.srq_resv_reclaim_cv);
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
}
/*
 * sd_resv_reclaim_thread: kernel thread that drains the global
 * reclaim-request queue (sd_tr), attempting to re-reserve each device
 * whose reservation was lost.  Exits when the queue is empty.
 *
 * NOTE(review): the initial cv_wait is in an `if`, not a `while`;
 * the subsequent head check makes a spurious wakeup harmless, but
 * confirm against upstream before changing.
 */
static void
sd_resv_reclaim_thread()
{
	struct sd_lun *un;
	struct sd_thr_request *sd_mhreq;

	/* Wait for work */
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_req_head == NULL) {
		cv_wait(&sd_tr.srq_resv_reclaim_cv,
		    &sd_tr.srq_resv_reclaim_mutex);
	}

	/* Loop while we have work */
	while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
		un = ddi_get_soft_state(sd_state,
		    SDUNIT(sd_tr.srq_thr_cur_req->dev));
		if (un == NULL) {
			/*
			 * softstate structure is NULL so just
			 * dequeue the request and continue
			 */
			sd_tr.srq_thr_req_head =
			    sd_tr.srq_thr_cur_req->sd_thr_req_next;
			kmem_free(sd_tr.srq_thr_cur_req,
			    sizeof (struct sd_thr_request));
			continue;
		}

		/* dequeue the request */
		sd_mhreq = sd_tr.srq_thr_cur_req;
		sd_tr.srq_thr_req_head =
		    sd_tr.srq_thr_cur_req->sd_thr_req_next;
		mutex_exit(&sd_tr.srq_resv_reclaim_mutex);

		/*
		 * Reclaim the reservation only if SD_RESERVE is still set
		 * and a reset has not intervened (SD_LOST_RESERVE cleared
		 * optimistically before the attempt, restored on failure).
		 */
		mutex_enter(SD_MUTEX(un));
		if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
			/*
			 * Note: The SD_LOST_RESERVE flag is cleared before
			 * reclaiming the reservation. If this is done after the
			 * call to sd_reserve_release a reservation loss in the
			 * window between pkt completion of reserve cmd and
			 * mutex_enter below may not be recognized
			 */
			un->un_resvd_status &= ~SD_LOST_RESERVE;
			mutex_exit(SD_MUTEX(un));

			if (sd_reserve_release(sd_mhreq->dev,
			    SD_RESERVE) == 0) {
				mutex_enter(SD_MUTEX(un));
				un->un_resvd_status |= SD_RESERVE;
				mutex_exit(SD_MUTEX(un));
				SD_INFO(SD_LOG_IOCTL_MHD, un,
				    "sd_resv_reclaim_thread: "
				    "Reservation Recovered\n");
			} else {
				mutex_enter(SD_MUTEX(un));
				un->un_resvd_status |= SD_LOST_RESERVE;
				mutex_exit(SD_MUTEX(un));
				SD_INFO(SD_LOG_IOCTL_MHD, un,
				    "sd_resv_reclaim_thread: Failed "
				    "Reservation Recovery\n");
			}
		} else {
			mutex_exit(SD_MUTEX(un));
		}
		mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
		ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req);
		kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
		sd_mhreq = sd_tr.srq_thr_cur_req = NULL;
		/*
		 * wakeup the destroy thread if anyone is waiting on
		 * us to complete.
		 */
		cv_signal(&sd_tr.srq_inprocess_cv);
		SD_TRACE(SD_LOG_IOCTL_MHD, un,
		    "sd_resv_reclaim_thread: cv_signalling current request \n");
	}

	/*
	 * cleanup the sd_tr structure now that this thread will not exist
	 */
	ASSERT(sd_tr.srq_thr_req_head == NULL);
	ASSERT(sd_tr.srq_thr_cur_req == NULL);
	sd_tr.srq_resv_reclaim_thread = NULL;
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
	thread_exit();
}
/*
 * sd_rmv_resv_reclaim_req: remove any queued reservation-reclaim
 * request for this device from the global sd_tr list.  If the reclaim
 * thread is currently processing this device, wait for it to finish
 * instead of removing anything.
 *
 * NOTE(review): the cv_wait is in an `if`, not a `while` — a spurious
 * wakeup would fall through without re-checking; matches upstream.
 */
static void
sd_rmv_resv_reclaim_req(dev_t dev)
{
	struct sd_thr_request *sd_mhreq;
	struct sd_thr_request *sd_prev;

	/* Remove a reservation reclaim request from the list */
	mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
	if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) {
		/*
		 * We are attempting to reinstate reservation for
		 * this device. We wait for sd_reserve_release()
		 * to return before we return.
		 */
		cv_wait(&sd_tr.srq_inprocess_cv,
		    &sd_tr.srq_resv_reclaim_mutex);
	} else {
		/* Special case: request is at the head of the list. */
		sd_prev = sd_mhreq = sd_tr.srq_thr_req_head;
		if (sd_mhreq && sd_mhreq->dev == dev) {
			sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next;
			kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
			mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
			return;
		}
		/* Walk the rest of the list looking for a match. */
		for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) {
			if (sd_mhreq && sd_mhreq->dev == dev) {
				break;
			}
			sd_prev = sd_mhreq;
		}
		/* Unlink and free the matched request, if any. */
		if (sd_mhreq != NULL) {
			sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next;
			kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
		}
	}
	mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
}
/*
 * sd_mhd_reset_notify_cb: scsi_reset_notify(9F) callback.  A bus or
 * target reset has occurred; if we hold a reservation, mark it lost
 * and flag it for reinstatement by the watch/reclaim machinery.
 */
static void
sd_mhd_reset_notify_cb(caddr_t arg)
{
	struct sd_lun *un = (struct sd_lun *)arg;

	mutex_enter(SD_MUTEX(un));
	if ((un->un_resvd_status & SD_RESERVE) != SD_RESERVE) {
		/* No reservation held: nothing to record. */
		mutex_exit(SD_MUTEX(un));
		return;
	}

	un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
	SD_INFO(SD_LOG_IOCTL_MHD, un,
	    "sd_mhd_reset_notify_cb: Lost Reservation\n");
	mutex_exit(SD_MUTEX(un));
}
/*
 * sd_take_ownership: forcibly take ownership of the device with a
 * priority reserve (which resets other initiators), then re-issue
 * RESERVE every 500ms until the reservation has been held stably for
 * min_ownership_delay (4 consecutive successes past that point), or
 * give up after max_ownership_delay.
 *
 * Arguments:
 *	dev	- device number
 *	p	- optional mhioctkown with min/max delays in ms
 *		  (NULL means use the 6s/30s defaults)
 *
 * Returns: 0 on success; ENXIO for a bad unit; EACCES if another host
 * kept winning (or on timeout); EIO on other reserve failures.
 */
static int
sd_take_ownership(dev_t dev, struct mhioctkown *p)
{
	struct sd_lun	*un;
	int		rval;
	int		err;
	int		reservation_count	= 0;
	int		min_ownership_delay =  6000000; /* in usec */
	int		max_ownership_delay = 30000000; /* in usec */
	clock_t		start_time;	/* starting time of this algorithm */
	clock_t		end_time;	/* time limit for giving up */
	clock_t		ownership_time;	/* time limit for stable ownership */
	clock_t		current_time;
	clock_t		previous_current_time;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * Attempt a device reservation. A priority reservation is requested.
	 */
	if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE))
	    != SD_SUCCESS) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_take_ownership: return(1)=%d\n", rval);
		return (rval);
	}

	/* We now hold the reservation; clear the loss/conflict flags. */
	mutex_enter(SD_MUTEX(un));
	un->un_resvd_status |= SD_RESERVE;
	un->un_resvd_status &=
	    ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT);
	mutex_exit(SD_MUTEX(un));

	/* Caller-supplied delays are in ms; convert to usec. */
	if (p != NULL) {
		if (p->min_ownership_delay != 0) {
			min_ownership_delay = p->min_ownership_delay * 1000;
		}
		if (p->max_ownership_delay != 0) {
			max_ownership_delay = p->max_ownership_delay * 1000;
		}
	}
	SD_INFO(SD_LOG_IOCTL_MHD, un,
	    "sd_take_ownership: min, max delays: %d, %d\n",
	    min_ownership_delay, max_ownership_delay);

	start_time = ddi_get_lbolt();
	current_time	= start_time;
	ownership_time	= current_time + drv_usectohz(min_ownership_delay);
	end_time	= start_time + drv_usectohz(max_ownership_delay);

	while (current_time - end_time < 0) {
		delay(drv_usectohz(500000));

		/* Re-reserve; one immediate retry before declaring failure. */
		if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) {
			if ((sd_reserve_release(dev, SD_RESERVE)) != 0) {
				mutex_enter(SD_MUTEX(un));
				rval = (un->un_resvd_status &
				    SD_RESERVATION_CONFLICT) ? EACCES : EIO;
				mutex_exit(SD_MUTEX(un));
				break;
			}
		}
		previous_current_time = current_time;
		current_time = ddi_get_lbolt();
		mutex_enter(SD_MUTEX(un));
		/*
		 * If the reservation was lost (reset or reserve failure),
		 * restart the stable-ownership clock.
		 */
		if (err || (un->un_resvd_status & SD_LOST_RESERVE)) {
			ownership_time = ddi_get_lbolt() +
			    drv_usectohz(min_ownership_delay);
			reservation_count = 0;
		} else {
			reservation_count++;
		}
		un->un_resvd_status |= SD_RESERVE;
		un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE);
		mutex_exit(SD_MUTEX(un));

		SD_INFO(SD_LOG_IOCTL_MHD, un,
		    "sd_take_ownership: ticks for loop iteration=%ld, "
		    "reservation=%s\n", (current_time - previous_current_time),
		    reservation_count ? "ok" : "reclaimed");

		/* Stable past the minimum window with 4+ good probes: done. */
		if (current_time - ownership_time >= 0 &&
		    reservation_count >= 4) {
			rval = 0; /* Achieved a stable ownership */
			break;
		}
		/* Out of time: ownership could not be stabilized. */
		if (current_time - end_time >= 0) {
			rval = EACCES; /* No ownership */
			break;
		}
	}
	SD_TRACE(SD_LOG_IOCTL_MHD, un,
	    "sd_take_ownership: return(2)=%d\n", rval);
	return (rval);
}
/*
 * Issue a SCSI RESERVE or RELEASE command to the unit named by 'dev'.
 *
 *   dev - device number identifying the target sd instance
 *   cmd - SD_RELEASE, SD_RESERVE, or SD_PRIORITY_RESERVE.  A priority
 *	   reserve escalates through LUN/target/bus resets if the first
 *	   reserve fails with a reservation conflict, then retries once.
 *
 * Returns 0 on success, ENXIO if no soft state, EACCES on a
 * reservation conflict, EIO if all resets fail, or another errno from
 * the command transport.
 */
static int
sd_reserve_release(dev_t dev, int cmd)
{
	struct uscsi_cmd	*com = NULL;
	struct sd_lun		*un = NULL;
	char			cdb[CDB_GROUP0];
	int			rval;

	ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) ||
	    (cmd == SD_PRIORITY_RESERVE));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* Build a Group 0 RESERVE/RELEASE CDB; neither transfers data. */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	bzero(cdb, CDB_GROUP0);
	com->uscsi_flags = USCSI_SILENT;
	com->uscsi_timeout = un->un_reserve_release_time;
	com->uscsi_cdblen = CDB_GROUP0;
	com->uscsi_cdb = cdb;
	if (cmd == SD_RELEASE) {
		cdb[0] = SCMD_RELEASE;
	} else {
		/* SD_RESERVE and SD_PRIORITY_RESERVE both send RESERVE. */
		cdb[0] = SCMD_RESERVE;
	}

	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);

	/*
	 * A priority reserve that hit a reservation conflict escalates:
	 * reset the LUN (if enabled), else the target, else the whole
	 * bus; if any reset succeeds, retry the reserve once with a
	 * short timeout.
	 */
	if ((cmd == SD_PRIORITY_RESERVE) &&
	    (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
		int reset_retval = 0;
		if (un->un_f_lun_reset_enabled == TRUE) {
			reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
		}
		if (reset_retval == 0) {
			reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
		}
		if ((reset_retval == 0) &&
		    (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) {
			/* Every reset attempt failed; give up. */
			rval = EIO;
			kmem_free(com, sizeof (*com));
			return (rval);
		}
		/* A reset worked: reissue the reserve (5s timeout). */
		bzero(com, sizeof (struct uscsi_cmd));
		com->uscsi_flags = USCSI_SILENT;
		com->uscsi_cdb = cdb;
		com->uscsi_cdblen = CDB_GROUP0;
		com->uscsi_timeout = 5;

		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
	}

	/* Map a reservation conflict to EACCES for the caller. */
	if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
		rval = EACCES;
	}

	kmem_free(com, sizeof (*com));
	return (rval);
}
#define SD_NDUMP_RETRIES 12
/*
 * Crash-dump entry point: write 'nblk' system (DEV_BSIZE) blocks of
 * kernel memory at 'addr' to the device, starting at system block
 * 'blkno' of partition SDPART(dev).  All I/O is issued in polled
 * (FLAG_NOINTR) mode since interrupts may be unavailable at dump time.
 *
 * When the target block size differs from DEV_BSIZE (NOT_DEVBSIZE),
 * requests that are not target-block aligned are handled with a
 * read-modify-write via sddump_do_read_of_rmw().
 *
 * Returns 0 on success; ENXIO, EINVAL, or EIO on failure.
 */
static int
sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
{
	int		instance;
	int		partition;
	int		i;
	int		err;
	struct sd_lun	*un;
	struct scsi_pkt	*wr_pktp;
	struct buf	*wr_bp;
	struct buf	wr_buf;
	daddr_t		tgt_byte_offset;	/* rmw: byte offset on target */
	daddr_t		tgt_blkno;		/* rmw: target block number */
	size_t		tgt_byte_count;		/* rmw: byte count */
	size_t		tgt_nblk;		/* rmw: target block count */
	size_t		io_start_offset;
	int		doing_rmw = FALSE;
	int		rval;
	ssize_t		dma_resid;
	daddr_t		oblkno;
	diskaddr_t	nblks = 0;
	diskaddr_t	start_block;

	instance = SDUNIT(dev);
	/* Require an attached unit with a valid label that is not a CD. */
	if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
	    !SD_IS_VALID_LABEL(un) || ISCD(un)) {
		return (ENXIO);
	}

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))

	SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");

	partition = SDPART(dev);
	SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);

	/*
	 * If the target block size is a multiple of DEV_BSIZE, both the
	 * start block and the total length must be aligned to the target
	 * block size.
	 */
	if (!(NOT_DEVBSIZE(un))) {
		int secmask = 0;
		int blknomask = 0;

		blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
		secmask = un->un_tgt_blocksize - 1;

		if (blkno & blknomask) {
			SD_TRACE(SD_LOG_DUMP, un,
			    "sddump: dump start block not modulo %d\n",
			    un->un_tgt_blocksize);
			return (EINVAL);
		}

		if ((nblk * DEV_BSIZE) & secmask) {
			SD_TRACE(SD_LOG_DUMP, un,
			    "sddump: dump length not modulo %d\n",
			    un->un_tgt_blocksize);
			return (EINVAL);
		}
	}

	/* Look up partition size and start block for range checking. */
	(void) cmlb_partinfo(un->un_cmlbhandle, partition,
	    &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);

	/* Reject a dump that extends past the end of the partition. */
	if (NOT_DEVBSIZE(un)) {
		if ((blkno + nblk) > nblks) {
			SD_TRACE(SD_LOG_DUMP, un,
			    "sddump: dump range larger than partition: "
			    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
			    blkno, nblk, nblks);
			return (EINVAL);
		}
	} else {
		if (((blkno / (un->un_tgt_blocksize / DEV_BSIZE)) +
		    (nblk / (un->un_tgt_blocksize / DEV_BSIZE))) > nblks) {
			SD_TRACE(SD_LOG_DUMP, un,
			    "sddump: dump range larger than partition: "
			    "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
			    blkno, nblk, nblks);
			return (EINVAL);
		}
	}

	mutex_enter(&un->un_pm_mutex);
	if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
		struct scsi_pkt *start_pktp;

		mutex_exit(&un->un_pm_mutex);

		/*
		 * Device is powered down: raise power and spin it up with
		 * a polled START STOP UNIT before issuing the dump I/O.
		 */
		(void) pm_raise_power(SD_DEVINFO(un), 0,
		    SD_PM_STATE_ACTIVE(un));
		SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");

		start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
		    CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);

		if (start_pktp == NULL) {
			return (EIO);
		}
		bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
		start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
		start_pktp->pkt_cdbp[4] = SD_TARGET_START;
		start_pktp->pkt_flags = FLAG_NOINTR;

		mutex_enter(SD_MUTEX(un));
		SD_FILL_SCSI1_LUN(un, start_pktp);
		mutex_exit(SD_MUTEX(un));

		if (sd_scsi_poll(un, start_pktp) != 0) {
			scsi_destroy_pkt(start_pktp);
			return (EIO);
		}
		scsi_destroy_pkt(start_pktp);
		(void) sd_pm_state_change(un, SD_PM_STATE_ACTIVE(un),
		    SD_PM_STATE_CHANGE);
	} else {
		mutex_exit(&un->un_pm_mutex);
	}

	mutex_enter(SD_MUTEX(un));
	/* Dump I/O is strictly one command at a time. */
	un->un_throttle = 0;

	/*
	 * Move to the DUMPING state; for parallel SCSI (non-fibre),
	 * reset the bus first to clear any outstanding commands.
	 */
	if ((un->un_state != SD_STATE_SUSPENDED) &&
	    (un->un_state != SD_STATE_DUMPING)) {
		New_state(un, SD_STATE_DUMPING);

		if (un->un_f_is_fibre == FALSE) {
			mutex_exit(SD_MUTEX(un));
			if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
				mutex_enter(SD_MUTEX(un));
				Restore_state(un);
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			/* Give the device time to recover from the reset. */
			drv_usecwait(10000);
			if (sd_send_polled_RQS(un) == SD_FAILURE) {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: sd_send_polled_RQS failed\n");
			}
			mutex_enter(SD_MUTEX(un));
		}
	}

	/* Translate the partition-relative block to an absolute block. */
	if (NOT_DEVBSIZE(un)) {
		blkno += start_block;
	} else {
		blkno = blkno / (un->un_tgt_blocksize / DEV_BSIZE);
		blkno += start_block;
	}

	SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);

	wr_bp = NULL;
	if (NOT_DEVBSIZE(un)) {
		tgt_byte_offset = blkno * un->un_sys_blocksize;
		tgt_byte_count = nblk * un->un_sys_blocksize;
		if ((tgt_byte_offset % un->un_tgt_blocksize) ||
		    (tgt_byte_count % un->un_tgt_blocksize)) {
			/*
			 * Not target-block aligned: read the covering
			 * target blocks, then merge the dump data into
			 * the read buffer (read-modify-write).
			 */
			doing_rmw = TRUE;
			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
			tgt_nblk =
			    ((tgt_byte_offset + tgt_byte_count +
			    (un->un_tgt_blocksize - 1)) /
			    un->un_tgt_blocksize) - tgt_blkno;

			err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk,
			    &wr_bp);
			if (err) {
				mutex_exit(SD_MUTEX(un));
				return (err);
			}
			/* Byte offset of dump data within the read buffer. */
			io_start_offset =
			    ((uint64_t)(blkno * un->un_sys_blocksize)) -
			    ((uint64_t)(tgt_blkno * un->un_tgt_blocksize));
			ASSERT(io_start_offset < un->un_tgt_blocksize);

			bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset],
			    (size_t)nblk * un->un_sys_blocksize);
		} else {
			doing_rmw = FALSE;
			tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
			tgt_nblk = tgt_byte_count / un->un_tgt_blocksize;
		}
		/* From here on, blkno/nblk are in target-block units. */
		blkno = tgt_blkno;
		nblk = tgt_nblk;
	} else {
		/* Aligned case: write straight out of the caller's buffer. */
		wr_bp = &wr_buf;
		bzero(wr_bp, sizeof (struct buf));
		wr_bp->b_flags = B_BUSY;
		wr_bp->b_un.b_addr = addr;
		wr_bp->b_bcount = nblk << DEV_BSHIFT;
		wr_bp->b_resid = 0;
	}

	mutex_exit(SD_MUTEX(un));

	wr_pktp = NULL;
	dma_resid = wr_bp->b_bcount;
	oblkno = blkno;

	if (!(NOT_DEVBSIZE(un))) {
		nblk = nblk / (un->un_tgt_blocksize / DEV_BSIZE);
	}

	/* Outer loop: one pass per partial-DMA window until all moved. */
	while (dma_resid != 0) {

		/* Set up (or advance) the write packet, with retries. */
		for (i = 0; i < SD_NDUMP_RETRIES; i++) {
			wr_bp->b_flags &= ~B_ERROR;

			if (un->un_partial_dma_supported == 1) {
				/* Recompute window from bytes moved so far. */
				blkno = oblkno +
				    ((wr_bp->b_bcount - dma_resid) /
				    un->un_tgt_blocksize);
				nblk = dma_resid / un->un_tgt_blocksize;

				if (wr_pktp) {
					/* Continue an existing packet. */
					rval = sd_setup_next_rw_pkt(un,
					    wr_pktp, wr_bp, blkno, nblk);
				} else {
					/* First window: allocate packet. */
					rval = sd_setup_rw_pkt(un, &wr_pktp,
					    wr_bp, un->un_pkt_flags,
					    NULL_FUNC, NULL, blkno, nblk);
				}
			} else {
				rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp,
				    0, NULL_FUNC, NULL, blkno, nblk);
			}

			if (rval == 0) {
				/* Packet ready to send. */
				break;
			}

			/* Allocation failed: log per-attempt, then retry. */
			if (i == 0) {
				if (wr_bp->b_flags & B_ERROR) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN,
					    "no resources for dumping; "
					    "error code: 0x%x, retrying",
					    geterror(wr_bp));
				} else {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_WARN,
					    "no resources for dumping; retrying");
				}
			} else if (i != (SD_NDUMP_RETRIES - 1)) {
				if (wr_bp->b_flags & B_ERROR) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_CONT,
					    "no resources for dumping; error code: "
					    "0x%x, retrying\n", geterror(wr_bp));
				}
			} else {
				/* Final attempt failed: clean up and bail. */
				if (wr_bp->b_flags & B_ERROR) {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_CONT,
					    "no resources for dumping; "
					    "error code: 0x%x, retries failed, "
					    "giving up.\n", geterror(wr_bp));
				} else {
					scsi_log(SD_DEVINFO(un), sd_label,
					    CE_CONT,
					    "no resources for dumping; "
					    "retries failed, giving up.\n");
				}
				mutex_enter(SD_MUTEX(un));
				Restore_state(un);
				if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) {
					mutex_exit(SD_MUTEX(un));
					/* Free the RMW staging buffer. */
					scsi_free_consistent_buf(wr_bp);
				} else {
					mutex_exit(SD_MUTEX(un));
				}
				return (EIO);
			}
			drv_usecwait(10000);
		}

		if (un->un_partial_dma_supported == 1) {
			/*
			 * pkt_resid holds the bytes NOT covered by this
			 * window; trim nblk to what this packet moves.
			 */
			dma_resid = wr_pktp->pkt_resid;
			if (dma_resid != 0)
				nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid);
			wr_pktp->pkt_resid = 0;
		} else {
			dma_resid = 0;
		}

		wr_pktp->pkt_flags = FLAG_NOINTR;

		err = EIO;

		/* Inner loop: issue the write, retrying on failure. */
		for (i = 0; i < SD_NDUMP_RETRIES; i++) {

			SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n");

			if ((sd_scsi_poll(un, wr_pktp) == 0) &&
			    (wr_pktp->pkt_resid == 0)) {
				err = SD_SUCCESS;
				break;
			}

			if (wr_pktp->pkt_reason == CMD_DEV_GONE) {
				/* Device vanished: no point retrying. */
				scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
				    "Error while dumping state...Device is gone\n");
				break;
			}

			if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: write failed with CHECK, try # %d\n", i);
				/* Fetch sense manually if ARQ didn't run. */
				if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) {
					(void) sd_send_polled_RQS(un);
				}
				continue;
			}

			if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) {
				int reset_retval = 0;

				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: write failed with BUSY, try # %d\n", i);

				/* Reset LUN if enabled, else the target. */
				if (un->un_f_lun_reset_enabled == TRUE) {
					reset_retval =
					    scsi_reset(SD_ADDRESS(un),
					    RESET_LUN);
				}
				if (reset_retval == 0) {
					(void) scsi_reset(SD_ADDRESS(un),
					    RESET_TARGET);
				}
				(void) sd_send_polled_RQS(un);

			} else {
				SD_INFO(SD_LOG_DUMP, un,
				    "sddump: write failed with 0x%x, try # %d\n",
				    SD_GET_PKT_STATUS(wr_pktp), i);
				mutex_enter(SD_MUTEX(un));
				sd_reset_target(un, wr_pktp);
				mutex_exit(SD_MUTEX(un));
			}

			/* Halfway through the retries: escalate to bus reset. */
			if (i == SD_NDUMP_RETRIES / 2) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
				(void) sd_send_polled_RQS(un);
			}
		}
	}

	scsi_destroy_pkt(wr_pktp);
	mutex_enter(SD_MUTEX(un));
	if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) {
		mutex_exit(SD_MUTEX(un));
		/* Release the RMW staging buffer. */
		scsi_free_consistent_buf(wr_bp);
	} else {
		mutex_exit(SD_MUTEX(un));
	}

	SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err);
	return (err);
}
/*
 * Send a packet in polled mode via sd_ddi_scsi_poll().  If tagged
 * queuing is enabled for this address, the unit's tag flags are set on
 * the packet (and disconnects allowed) first.  On failure with a CHECK
 * condition and no auto-request-sense data, a polled REQUEST SENSE is
 * issued so the sense state is consumed.
 *
 * Returns the status from sd_ddi_scsi_poll() (SD_SUCCESS on success).
 */
static int
sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp)
{
	int rval;

	ASSERT(un != NULL);
	ASSERT(pktp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) {
		pktp->pkt_flags |= un->un_tagflags;
		pktp->pkt_flags &= ~FLAG_NODISCON;
	}

	rval = sd_ddi_scsi_poll(pktp);

	/*
	 * Pick up sense data manually when the command failed with a
	 * CHECK condition, ARQ did not capture the sense, and the device
	 * is still present.
	 */
	if (rval != SD_SUCCESS) {
		if ((SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) &&
		    ((pktp->pkt_state & STATE_ARQ_DONE) == 0) &&
		    (pktp->pkt_reason != CMD_DEV_GONE)) {
			(void) sd_send_polled_RQS(un);
		}
	}

	return (rval);
}
/*
 * Issue the unit's pre-allocated REQUEST SENSE packet (un->un_rqs_pktp)
 * in polled mode, used to retrieve sense data when ARQ was not done for
 * a failed command.
 *
 * Returns SD_FAILURE if the sense resources are already in use;
 * otherwise the status from sd_ddi_scsi_poll() (0 on success).
 */
static int
sd_send_polled_RQS(struct sd_lun *un)
{
	int		ret_val;
	struct scsi_pkt	*rqs_pktp;
	struct buf	*rqs_bp;

	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	ret_val = SD_SUCCESS;

	/* Use the statically allocated request-sense packet and buffer. */
	rqs_pktp = un->un_rqs_pktp;
	rqs_bp = un->un_rqs_bp;

	mutex_enter(SD_MUTEX(un));

	if (un->un_sense_isbusy) {
		/* A request sense is already in flight; cannot reuse it. */
		ret_val = SD_FAILURE;
		mutex_exit(SD_MUTEX(un));
		return (ret_val);
	}

	/*
	 * Mark the resources busy and reset the packet for a fresh,
	 * polled (non-interrupt) submission with a clean sense buffer.
	 */
	un->un_sense_isbusy = 1;
	rqs_pktp->pkt_resid = 0;
	rqs_pktp->pkt_reason = 0;
	rqs_pktp->pkt_flags |= FLAG_NOINTR;
	bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH);

	mutex_exit(SD_MUTEX(un));

	SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at"
	    " 0x%p\n", rqs_bp->b_un.b_addr);

	if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) {
		SD_INFO(SD_LOG_COMMON, un,
		    "sd_send_polled_RQS: RQS failed\n");
	}

	SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:",
	    (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX);

	mutex_enter(SD_MUTEX(un));
	un->un_sense_isbusy = 0;
	mutex_exit(SD_MUTEX(un));

	return (ret_val);
}
#define CSEC 10000
#define SEC_TO_CSEC (1000000 / CSEC)
/*
 * Transport a packet in polled mode and wait for its completion without
 * interrupts.  Unlike plain scsi_poll(), this also retries on BUSY,
 * QFULL, UNIT ATTENTION, and "not ready, becoming ready" conditions,
 * since callers (e.g. the dump path) have no other way to recover.
 *
 * The packet's flags, completion routine, and timeout are saved on
 * entry and restored before returning.  The retry budget is
 * pkt_time seconds, counted in centisecond (CSEC) units; retries that
 * wait a full second charge SEC_TO_CSEC units of budget.
 *
 * Returns 0 on successful completion; otherwise -1 or the failing
 * scsi_transport() return code.
 */
static int
sd_ddi_scsi_poll(struct scsi_pkt *pkt)
{
	int	rval = -1;
	int	savef;
	long	savet;
	void	(*savec)();
	int	timeout;
	int	busy_count;
	int	poll_delay;
	int	rc;
	uint8_t	*sensep;
	struct scsi_arq_status *arqstat;
	extern int do_polled_io;

	ASSERT(pkt->pkt_scbp);

	/* Save caller's packet state; run without completion callbacks. */
	savef = pkt->pkt_flags;
	savec = pkt->pkt_comp;
	savet = pkt->pkt_time;

	pkt->pkt_flags |= FLAG_NOINTR;
	pkt->pkt_comp = NULL;

	if (pkt->pkt_time == 0)
		pkt->pkt_time = SCSI_POLL_TIMEOUT;

	/* Total budget in centiseconds. */
	timeout = pkt->pkt_time * SEC_TO_CSEC;

	for (busy_count = 0; busy_count < timeout; busy_count++) {
		/* Clear status/reason/state before each (re)submission. */
		*pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0;

		if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) {
			if (rc != TRAN_BUSY) {
				/* Transport rejected the packet outright. */
				break;
			} else {
				/* Transport busy: retry after 10 msec. */
				poll_delay = 1 * CSEC;
			}
		} else {
			rc = (*pkt->pkt_scbp) & STATUS_MASK;

			/* Locate auto-request-sense data, if captured. */
			if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_CHECK) &&
			    (pkt->pkt_state & STATE_ARQ_DONE)) {
				arqstat =
				    (struct scsi_arq_status *)(pkt->pkt_scbp);
				sensep = (uint8_t *)&arqstat->sts_sensedata;
			} else {
				sensep = NULL;
			}

			if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_GOOD)) {
				/* Command completed successfully. */
				rval = 0;
				break;
			} else if (pkt->pkt_reason == CMD_DEV_GONE) {
				/* Device disappeared: no point retrying. */
				break;
			} else if ((pkt->pkt_reason == CMD_INCOMPLETE) &&
			    (pkt->pkt_state == 0)) {
				/* Selection failed: retry after 10 msec. */
				poll_delay = 1 * CSEC;
			} else if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_QFULL)) {
				/* Queue full: retry after 10 msec. */
				poll_delay = 1 * CSEC;
			} else if ((pkt->pkt_reason == CMD_CMPLT) &&
			    (rc == STATUS_BUSY)) {
				/* Busy: wait 1 sec; charge 1 sec of budget. */
				poll_delay = 100 * CSEC;
				busy_count += (SEC_TO_CSEC - 1);
			} else if ((sensep != NULL) &&
			    (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) {
				/*
				 * Unit attention: retry immediately (no
				 * delay) but still charge 1 sec of budget.
				 */
				busy_count += (SEC_TO_CSEC - 1);
				continue;
			} else if ((sensep != NULL) &&
			    (scsi_sense_key(sensep) == KEY_NOT_READY) &&
			    (scsi_sense_asc(sensep) == 0x04) &&
			    (scsi_sense_ascq(sensep) == 0x01)) {
				/* Not ready, becoming ready: wait 1 sec. */
				poll_delay = 100 * CSEC;
				busy_count += (SEC_TO_CSEC - 1);
			} else {
				/* Any other failure: give up. */
				break;
			}
		}

		/*
		 * Sleep when permitted (not an interrupt thread and not
		 * strictly-polled I/O); otherwise busy-wait.
		 */
		if (((curthread->t_flag & T_INTR_THREAD) == 0) &&
		    !do_polled_io) {
			delay(drv_usectohz(poll_delay));
		} else {
			drv_usecwait(poll_delay);
		}
	}

	/* Restore the caller's packet state. */
	pkt->pkt_flags = savef;
	pkt->pkt_comp = savec;
	pkt->pkt_time = savet;

	if (rval)
		return (rval);

	/* On success, sync the DMA data for the caller. */
	scsi_sync_pkt(pkt);

	return (0);
}
/*
 * PERSISTENT RESERVE IN / READ KEYS support for the multihost-disk
 * ioctl path: fetch the registered reservation keys from the device and
 * copy them out into the caller's mhioc_key_list.
 *
 *   un   - soft state for the target
 *   usrp - user's mhioc_inkeys_t (holds the key-list pointer); updated
 *          in place with the device's generation counter
 *   flag - ioctl flags (carry the caller's data model)
 *
 * Returns 0 on success, EINVAL/EFAULT, or an errno from the command.
 */
static int
sd_persistent_reservation_in_read_keys(struct sd_lun *un,
    mhioc_inkeys_t *usrp, int flag)
{
#ifdef _MULTI_DATAMODEL
	struct mhioc_key_list32	li32;
#endif
	sd_prin_readkeys_t	*in;
	mhioc_inkeys_t		*ptr;
	mhioc_key_list_t	li;
	uchar_t			*data_bufp = NULL;
	int			data_len = 0;
	int			rval = 0;
	size_t			copysz = 0;
	sd_ssc_t		*ssc;

	if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) {
		return (EINVAL);
	}
	bzero(&li, sizeof (mhioc_key_list_t));

	ssc = sd_ssc_init(un);

	/*
	 * Copy in the caller's key-list header (listsize and the list
	 * buffer address), converting from the ILP32 layout if needed.
	 * copysz is remembered and reused for the copyout below.
	 */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		copysz = sizeof (struct mhioc_key_list32);
		if (ddi_copyin(ptr->li, &li32, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyin: mhioc_key_list32_t\n");
			rval = EFAULT;
			goto done;
		}
		li.listsize = li32.listsize;
		li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list;
		break;

	case DDI_MODEL_NONE:
		copysz = sizeof (mhioc_key_list_t);
		if (ddi_copyin(ptr->li, &li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyin: mhioc_key_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	copysz = sizeof (mhioc_key_list_t);
	if (ddi_copyin(ptr->li, &li, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_keys: "
		    "failed ddi_copyin: mhioc_key_list_t\n");
		rval = EFAULT;
		goto done;
	}
#endif

	/* Buffer large enough for the requested keys plus the header. */
	data_len = li.listsize * MHIOC_RESV_KEY_SIZE;
	data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t));
	data_bufp = kmem_zalloc(data_len, KM_SLEEP);

	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS,
	    data_len, data_bufp);
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto done;
	}
	in = (sd_prin_readkeys_t *)data_bufp;
	/* Device returns big-endian fields; convert for the caller. */
	ptr->generation = BE_32(in->generation);
	li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE;

	/*
	 * Return the updated list header (now carrying listlen) to the
	 * caller, in the same data-model layout copied in above.
	 */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		li32.listlen = li.listlen;
		if (ddi_copyout(&li32, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyout: mhioc_key_list32_t\n");
			rval = EFAULT;
			goto done;
		}
		break;

	case DDI_MODEL_NONE:
		if (ddi_copyout(&li, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_keys: "
			    "failed ddi_copyout: mhioc_key_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyout(&li, ptr->li, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_keys: "
		    "failed ddi_copyout: mhioc_key_list_t\n");
		rval = EFAULT;
		goto done;
	}
#endif

	/* Copy out at most listsize keys (device may report more). */
	copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE,
	    li.listsize * MHIOC_RESV_KEY_SIZE);
	if (ddi_copyout(&in->keylist, li.list, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_keys: "
		    "failed ddi_copyout: keylist\n");
		rval = EFAULT;
	}

done:
	sd_ssc_fini(ssc);
	/*
	 * NOTE(review): early-error paths reach here with data_bufp ==
	 * NULL and data_len == 0; the sibling read_resv routine guards
	 * this free with a NULL check -- confirm kmem_free(NULL, 0) is
	 * acceptable here.
	 */
	kmem_free(data_bufp, data_len);
	return (rval);
}
/*
 * PERSISTENT RESERVE IN / READ RESERVATIONS support for the
 * multihost-disk ioctl path: fetch the active reservation descriptors
 * from the device and copy them out into the caller's list.
 *
 *   un   - soft state for the target
 *   usrp - user's mhioc_inresvs_t (holds the descriptor-list pointer);
 *          updated in place with the device's generation counter
 *   flag - ioctl flags (carry the caller's data model)
 *
 * Returns 0 on success, EINVAL/EFAULT, or an errno from the command.
 */
static int
sd_persistent_reservation_in_read_resv(struct sd_lun *un,
    mhioc_inresvs_t *usrp, int flag)
{
#ifdef _MULTI_DATAMODEL
	struct mhioc_resv_desc_list32 resvlist32;
#endif
	sd_prin_readresv_t	*in;
	mhioc_inresvs_t		*ptr;
	sd_readresv_desc_t	*readresv_ptr;
	mhioc_resv_desc_list_t	resvlist;
	mhioc_resv_desc_t	resvdesc;
	uchar_t			*data_bufp = NULL;
	int			data_len;
	int			rval = 0;
	int			i;
	size_t			copysz = 0;
	mhioc_resv_desc_t	*bufp;
	sd_ssc_t		*ssc;

	if ((ptr = usrp) == NULL) {
		return (EINVAL);
	}

	ssc = sd_ssc_init(un);

	/*
	 * Copy in the caller's descriptor-list header (listsize and the
	 * list buffer address), converting from ILP32 layout if needed.
	 * copysz is remembered and reused for the header copyout below.
	 */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		copysz = sizeof (struct mhioc_resv_desc_list32);
		if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		resvlist.listsize = resvlist32.listsize;
		resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list;
		break;

	case DDI_MODEL_NONE:
		copysz = sizeof (mhioc_resv_desc_list_t);
		if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	copysz = sizeof (mhioc_resv_desc_list_t);
	if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_resv: "
		    "failed ddi_copyin: mhioc_resv_desc_list_t\n");
		rval = EFAULT;
		goto done;
	}
#endif

	/* Buffer for the requested descriptors plus the header. */
	data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN;
	data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t));
	data_bufp = kmem_zalloc(data_len, KM_SLEEP);

	rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV,
	    data_len, data_bufp);
	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		goto done;
	}
	in = (sd_prin_readresv_t *)data_bufp;
	/* Device returns big-endian fields; convert for the caller. */
	ptr->generation = BE_32(in->generation);
	resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN;

	/*
	 * Return the updated list header (now carrying listlen) to the
	 * caller, in the same data-model layout copied in above.
	 */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		resvlist32.listlen = resvlist.listlen;
		if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;

	case DDI_MODEL_NONE:
		if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
			rval = EFAULT;
			goto done;
		}
		break;
	}
#else /* ! _MULTI_DATAMODEL */
	if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
		SD_ERROR(SD_LOG_IOCTL_MHD, un,
		    "sd_persistent_reservation_in_read_resv: "
		    "failed ddi_copyout: mhioc_resv_desc_list_t\n");
		rval = EFAULT;
		goto done;
	}
#endif

	/*
	 * Copy out up to min(listlen, listsize) descriptors, converting
	 * each one from the wire format (big-endian address field) into
	 * an mhioc_resv_desc_t.
	 */
	readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc;
	bufp = resvlist.list;
	copysz = sizeof (mhioc_resv_desc_t);
	for (i = 0; i < min(resvlist.listlen, resvlist.listsize);
	    i++, readresv_ptr++, bufp++) {
		bcopy(&readresv_ptr->resvkey, &resvdesc.key,
		    MHIOC_RESV_KEY_SIZE);
		resvdesc.type = readresv_ptr->type;
		resvdesc.scope = readresv_ptr->scope;
		resvdesc.scope_specific_addr =
		    BE_32(readresv_ptr->scope_specific_addr);
		if (ddi_copyout(&resvdesc, bufp, copysz, flag)) {
			SD_ERROR(SD_LOG_IOCTL_MHD, un,
			    "sd_persistent_reservation_in_read_resv: "
			    "failed ddi_copyout: resvlist\n");
			rval = EFAULT;
			goto done;
		}
	}

done:
	sd_ssc_fini(ssc);
	if (data_bufp) {
		kmem_free(data_bufp, data_len);
	}
	return (rval);
}
/*
 * Handler for the CDROMGBLKMODE and CDROMSBLKMODE ioctls: get or set
 * the CD-ROM logical block size via the block descriptor of the error
 * recovery mode page (MODE SENSE / MODE SELECT, Group 0).
 *
 *   dev  - device number
 *   cmd  - CDROMGBLKMODE (get) or CDROMSBLKMODE (set)
 *   data - for get: user address receiving the current block size (int);
 *          for set: the requested block size value
 *   flag - ioctl transfer flags
 *
 * Returns 0 on success; ENXIO, EINVAL, EFAULT, EIO, or an errno from
 * the mode commands on failure.
 *
 * Fix: the ddi_copyout() argument had been corrupted by a bad HTML
 * entity decode ("&curren" -> '¤'), mangling "&current_bsize" into
 * "¤t_bsize"; restored the intended address-of expression.
 */
static int
sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct mode_header	*sense_mhp, *select_mhp;
	struct block_descriptor	*sense_desc, *select_desc;
	int			current_bsize;
	int			rval = EINVAL;
	uchar_t			*sense = NULL;
	uchar_t			*select = NULL;
	sd_ssc_t		*ssc;

	ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/*
	 * Read the current mode parameters (header + block descriptor)
	 * via MODE SENSE of the error recovery page.
	 */
	sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
	    BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (rval);
	}

	/* A block descriptor is required to read/modify the block size. */
	sense_mhp = (struct mode_header *)sense;
	if ((sense_mhp->bdesc_length == 0) ||
	    (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Mode Sense returned invalid block"
		    " descriptor length\n");
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
		return (EIO);
	}

	/* Extract the 24-bit block size from the block descriptor. */
	sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
	current_bsize = ((sense_desc->blksize_hi << 16) |
	    (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);

	switch (cmd) {
	case CDROMGBLKMODE:
		/* Return the current block size to the caller. */
		if (ddi_copyout(&current_bsize, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;

	case CDROMSBLKMODE:
		/* Only specific CD-ROM block sizes are supported. */
		switch (data) {
		case CDROM_BLK_512:
		case CDROM_BLK_1024:
		case CDROM_BLK_2048:
		case CDROM_BLK_2056:
		case CDROM_BLK_2336:
		case CDROM_BLK_2340:
		case CDROM_BLK_2352:
		case CDROM_BLK_2368:
		case CDROM_BLK_2448:
		case CDROM_BLK_2646:
		case CDROM_BLK_2647:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: "
			    "Block Size '%ld' Not Supported\n", data);
			kmem_free(sense, BUFLEN_CHG_BLK_MODE);
			return (EINVAL);
		}

		/* Nothing to do if the device is already at that size. */
		if (current_bsize == data) {
			break;
		}

		/* Build a MODE SELECT with the new 24-bit block size. */
		select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_desc =
		    (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
		select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
		select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16);
		select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
		select_desc->blksize_lo = (char)((data) & 0x000000ff);

		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
		    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
		    SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (rval != 0) {
			/*
			 * Mode select failed: best-effort attempt to
			 * restore the previous block size.
			 */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_blkmode: Mode Select Failed\n");
			select_desc->blksize_hi = sense_desc->blksize_hi;
			select_desc->blksize_mid = sense_desc->blksize_mid;
			select_desc->blksize_lo = sense_desc->blksize_lo;
			ssc = sd_ssc_init(un);
			(void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
			    select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
			    SD_PATH_STANDARD);
			sd_ssc_fini(ssc);
		} else {
			/* Record the new block size in the soft state. */
			ASSERT(!mutex_owned(SD_MUTEX(un)));
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, (uint32_t)data, 0);
			mutex_exit(SD_MUTEX(un));
		}
		break;

	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_blkmode: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		break;
	}

	if (select) {
		kmem_free(select, BUFLEN_CHG_BLK_MODE);
	}
	if (sense) {
		kmem_free(sense, BUFLEN_CHG_BLK_MODE);
	}
	return (rval);
}
/*
 * Handler for the CDROMGDRVSPEED and CDROMSDRVSPEED ioctls on drives
 * that use the CD-ROM speed mode page (CDROM_MODE_SPEED): get or set
 * the drive speed via MODE SENSE / MODE SELECT.
 *
 *   dev  - device number
 *   cmd  - CDROMGDRVSPEED (get) or CDROMSDRVSPEED (set)
 *   data - for get: user address receiving the speed (int);
 *          for set: the requested CDROM_*_SPEED value
 *   flag - ioctl transfer flags
 *
 * Returns 0 on success; ENXIO, EINVAL, EFAULT, EIO, or an errno from
 * the mode commands on failure.
 *
 * Fixes: (1) the ddi_copyout() argument had been corrupted by a bad
 * HTML entity decode ("&curren" -> '¤'), mangling "&current_speed";
 * restored the address-of expression.  (2) removed a duplicated,
 * identical assignment of select_page.  (3) marked the intentional
 * switch fallthrough after the 12x translation.
 */
static int
sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun		*un = NULL;
	struct mode_header	*sense_mhp, *select_mhp;
	struct mode_speed	*sense_page, *select_page;
	int			current_speed;
	int			rval = EINVAL;
	int			bd_len;
	uchar_t			*sense = NULL;
	uchar_t			*select = NULL;
	sd_ssc_t		*ssc;

	ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* Fetch the current speed mode page. */
	sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
	    BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
		return (rval);
	}

	/* Validate the block descriptor length before indexing past it. */
	sense_mhp = (struct mode_header *)sense;
	bd_len = sense_mhp->bdesc_length;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Mode Sense returned invalid block "
		    "descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
		return (EIO);
	}

	sense_page = (struct mode_speed *)
	    (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
	current_speed = sense_page->speed;

	switch (cmd) {
	case CDROMGDRVSPEED:
		/* Raw value 0x2 is the drive's encoding for 12x. */
		if (current_speed == 0x2) {
			current_speed = CDROM_TWELVE_SPEED;
		}
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0) {
			rval = EFAULT;
		}
		break;

	case CDROMSDRVSPEED:
		/* Validate the requested drive speed. */
		switch ((uchar_t)data) {
		case CDROM_TWELVE_SPEED:
			data = 0x2;	/* drive's encoding for 12x */
			/*FALLTHROUGH*/
		case CDROM_NORMAL_SPEED:
		case CDROM_DOUBLE_SPEED:
		case CDROM_QUAD_SPEED:
		case CDROM_MAXIMUM_SPEED:
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_change_speed: "
			    "Drive Speed '%d' Not Supported\n", (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
			return (EINVAL);
		}

		/* Nothing to do if already at the requested speed. */
		if (current_speed == data) {
			break;
		}

		/* Build the MODE SELECT page (no block descriptor). */
		select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
		select_mhp = (struct mode_header *)select;
		select_mhp->bdesc_length = 0;
		select_page =
		    (struct mode_speed *)(select + MODE_HEADER_LENGTH);
		select_page->mode_page.code = CDROM_MODE_SPEED;
		select_page->mode_page.length = 2;
		select_page->speed = (uchar_t)data;

		ssc = sd_ssc_init(un);
		rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
		    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
		    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
		sd_ssc_fini(ssc);
		if (rval != 0) {
			/*
			 * Mode select failed: best-effort attempt to
			 * restore the drive's previous speed.
			 */
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_drive_speed: Mode Select Failed\n");
			select_page->speed = sense_page->speed;
			ssc = sd_ssc_init(un);
			(void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
			    MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
			    SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
			sd_ssc_fini(ssc);
		}
		break;

	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_change_speed: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		break;
	}

	if (select) {
		kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
	}
	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
	}
	return (rval);
}
/*
 * Handler for the CDROMGDRVSPEED and CDROMSDRVSPEED ioctls on
 * ATAPI/MMC drives: read the speed from the CD capabilities mode page,
 * or set it via the SET CD SPEED command.
 *
 *   dev  - device number
 *   cmd  - CDROMGDRVSPEED (get) or CDROMSDRVSPEED (set)
 *   data - for get: user address receiving the speed multiple (int);
 *          for set: the requested CDROM_*_SPEED value
 *   flag - ioctl transfer flags
 *
 * Returns 0 on success; ENXIO, EINVAL, EFAULT, EIO, or an errno from
 * the commands on failure.
 *
 * Fixes: (1) the ddi_copyout() argument had been corrupted by a bad
 * HTML entity decode ("&curren" -> '¤'), mangling "&current_speed";
 * restored the address-of expression.  (2) the SET CD SPEED submission
 * passed a literal 0 as the dataspace argument while every sibling
 * call uses UIO_SYSSPACE; made it consistent (no behavior change here
 * since uscsi_buflen is 0).
 */
static int
sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
{
	struct sd_lun			*un;
	struct uscsi_cmd		*com = NULL;
	struct mode_header_grp2		*sense_mhp;
	uchar_t				*sense_page;
	uchar_t				*sense = NULL;
	char				cdb[CDB_GROUP5];
	int				bd_len;
	int				current_speed = 0;
	int				max_speed = 0;
	int				rval;
	sd_ssc_t			*ssc;

	ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	/* Fetch the CD capabilities page (Group 1 MODE SENSE). */
	sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
	    BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense Failed\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (rval);
	}

	/* Validate the block descriptor length before indexing past it. */
	sense_mhp = (struct mode_header_grp2 *)sense;
	bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
	if (bd_len > MODE_BLK_DESC_LENGTH) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Mode Sense returned invalid "
		    "block descriptor length\n");
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
		return (EIO);
	}

	/* Current speed is at page bytes 14-15, max speed at bytes 8-9. */
	sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
	current_speed = (sense_page[14] << 8) | sense_page[15];
	max_speed = (sense_page[8] << 8) | sense_page[9];

	switch (cmd) {
	case CDROMGDRVSPEED:
		/* Report the speed as a multiple of 1x to the caller. */
		current_speed /= SD_SPEED_1X;
		if (ddi_copyout(&current_speed, (void *)data,
		    sizeof (int), flag) != 0)
			rval = EFAULT;
		break;

	case CDROMSDRVSPEED:
		/* Convert the requested multiple into KB/s for the CDB. */
		switch ((uchar_t)data) {
		case CDROM_NORMAL_SPEED:
			current_speed = SD_SPEED_1X;
			break;
		case CDROM_DOUBLE_SPEED:
			current_speed = 2 * SD_SPEED_1X;
			break;
		case CDROM_QUAD_SPEED:
			current_speed = 4 * SD_SPEED_1X;
			break;
		case CDROM_TWELVE_SPEED:
			current_speed = 12 * SD_SPEED_1X;
			break;
		case CDROM_MAXIMUM_SPEED:
			current_speed = 0xffff;
			break;
		default:
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "sr_atapi_change_speed: invalid drive speed %d\n",
			    (uchar_t)data);
			kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
			return (EINVAL);
		}

		/* 0xffff (max) aside, the drive must support the speed. */
		if (current_speed != 0xffff) {
			if (current_speed > max_speed) {
				kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
				return (EINVAL);
			}
		}

		/* Issue a SET CD SPEED command (no data transfer). */
		bzero(cdb, sizeof (cdb));
		cdb[0] = (char)SCMD_SET_CDROM_SPEED;
		cdb[2] = (uchar_t)(current_speed >> 8);
		cdb[3] = (uchar_t)current_speed;
		com = kmem_zalloc(sizeof (*com), KM_SLEEP);
		com->uscsi_cdb = (caddr_t)cdb;
		com->uscsi_cdblen = CDB_GROUP5;
		com->uscsi_bufaddr = NULL;
		com->uscsi_buflen = 0;
		com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT;
		rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
		    SD_PATH_STANDARD);
		break;

	default:
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
	}

	if (sense) {
		kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
	}
	if (com) {
		kmem_free(com, sizeof (*com));
	}
	return (rval);
}
/*
 * Handler for the CDROMPAUSE and CDROMRESUME ioctls: issue a
 * PAUSE/RESUME command with the resume bit (CDB byte 8) set
 * accordingly.
 *
 * Returns 0 on success; ENXIO if no soft state, EINVAL for an
 * unsupported command, or the errno from the command transport.
 */
static int
sr_pause_resume(dev_t dev, int cmd)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*scmd;
	char			cdb[CDB_GROUP1];
	int			rval;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}

	scmd = kmem_zalloc(sizeof (*scmd), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_PAUSE_RESUME;

	/* CDB byte 8: resume bit (1 = resume, 0 = pause). */
	if (cmd == CDROMRESUME) {
		cdb[8] = 1;
	} else if (cmd == CDROMPAUSE) {
		cdb[8] = 0;
	} else {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:"
		    " Command '%x' Not Supported\n", cmd);
		rval = EINVAL;
		goto done;
	}

	scmd->uscsi_cdb = cdb;
	scmd->uscsi_cdblen = CDB_GROUP1;
	scmd->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT;

	rval = sd_send_scsi_cmd(dev, scmd, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);

done:
	kmem_free(scmd, sizeof (*scmd));
	return (rval);
}
/*
 * Handler for the CDROMPLAYMSF ioctl: start audio playback over the
 * MSF (minute/second/frame) range supplied by the caller, encoding the
 * CDB fields in BCD when the drive requires it.
 *
 * Returns 0 on success; EINVAL for a NULL argument, ENXIO if no soft
 * state, EFAULT on copyin failure, or the errno from the command.
 */
static int
sr_play_msf(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*scmd;
	struct cdrom_msf	msf_info;
	struct cdrom_msf	*msfp = &msf_info;
	char			cdb[CDB_GROUP1];
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (ENXIO);
	}
	if (ddi_copyin(data, msfp, sizeof (struct cdrom_msf), flag)) {
		return (EFAULT);
	}

	scmd = kmem_zalloc(sizeof (*scmd), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_PLAYAUDIO_MSF;

	if (un->un_f_cfg_playmsf_bcd != TRUE) {
		/* Drive takes plain binary MSF values. */
		cdb[3] = msfp->cdmsf_min0;
		cdb[4] = msfp->cdmsf_sec0;
		cdb[5] = msfp->cdmsf_frame0;
		cdb[6] = msfp->cdmsf_min1;
		cdb[7] = msfp->cdmsf_sec1;
		cdb[8] = msfp->cdmsf_frame1;
	} else {
		/* Drive expects BCD-encoded MSF values. */
		cdb[3] = BYTE_TO_BCD(msfp->cdmsf_min0);
		cdb[4] = BYTE_TO_BCD(msfp->cdmsf_sec0);
		cdb[5] = BYTE_TO_BCD(msfp->cdmsf_frame0);
		cdb[6] = BYTE_TO_BCD(msfp->cdmsf_min1);
		cdb[7] = BYTE_TO_BCD(msfp->cdmsf_sec1);
		cdb[8] = BYTE_TO_BCD(msfp->cdmsf_frame1);
	}

	scmd->uscsi_cdb = cdb;
	scmd->uscsi_cdblen = CDB_GROUP1;
	scmd->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT;

	rval = sd_send_scsi_cmd(dev, scmd, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	kmem_free(scmd, sizeof (*scmd));
	return (rval);
}
/*
 * Handler for the CDROMPLAYTRKIND ioctl: start audio playback over the
 * track/index range supplied by the caller (PLAY AUDIO TRACK/INDEX).
 *
 * Returns 0 on success; EINVAL for a NULL argument, EFAULT on copyin
 * failure, or the errno from the command transport.
 */
static int
sr_play_trkind(dev_t dev, caddr_t data, int flag)
{
	struct cdrom_ti		trkind;
	struct cdrom_ti		*tip = &trkind;
	struct uscsi_cmd	*scmd = NULL;
	char			cdb[CDB_GROUP1];
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}
	if (ddi_copyin(data, tip, sizeof (struct cdrom_ti), flag)) {
		return (EFAULT);
	}

	scmd = kmem_zalloc(sizeof (*scmd), KM_SLEEP);
	bzero(cdb, CDB_GROUP1);

	/* Starting track/index in bytes 4-5, ending in bytes 7-8. */
	cdb[0] = SCMD_PLAYAUDIO_TI;
	cdb[4] = tip->cdti_trk0;
	cdb[5] = tip->cdti_ind0;
	cdb[7] = tip->cdti_trk1;
	cdb[8] = tip->cdti_ind1;

	scmd->uscsi_cdb = cdb;
	scmd->uscsi_cdblen = CDB_GROUP1;
	scmd->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT;

	rval = sd_send_scsi_cmd(dev, scmd, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	kmem_free(scmd, sizeof (*scmd));
	return (rval);
}
/*
 * sr_read_all_subcodes - handler for the CDROMSUBCODE ioctl.
 *
 * Reads raw subchannel data from the device directly into the
 * caller-supplied user buffer (cdsc_addr).
 *
 *   dev  - device number
 *   data - user pointer to a struct cdrom_subcode
 *   flag - ioctl mode flags (used for data-model detection and copyin)
 *
 * Returns: 0 on success, EINVAL/ENXIO/EFAULT on argument errors,
 * otherwise the status from sd_send_scsi_cmd().
 */
static int
sr_read_all_subcodes(dev_t dev, caddr_t data, int flag)
{
struct sd_lun *un = NULL;
struct uscsi_cmd *com = NULL;
struct cdrom_subcode *subcode = NULL;
int rval;
size_t buflen;
char cdb[CDB_GROUP5];
#ifdef _MULTI_DATAMODEL
/* 32-bit view of the ioctl argument for ILP32 callers on an LP64 kernel */
struct cdrom_subcode32 cdrom_subcode32;
struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32;
#endif
if (data == NULL) {
return (EINVAL);
}
if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
return (ENXIO);
}
subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP);
#ifdef _MULTI_DATAMODEL
switch (ddi_model_convert_from(flag & FMODELS)) {
case DDI_MODEL_ILP32:
if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"sr_read_all_subcodes: ddi_copyin Failed\n");
kmem_free(subcode, sizeof (struct cdrom_subcode));
return (EFAULT);
}
/* Widen the 32-bit struct into the native form. */
cdrom_subcode32tocdrom_subcode(cdsc32, subcode);
break;
case DDI_MODEL_NONE:
if (ddi_copyin(data, subcode,
sizeof (struct cdrom_subcode), flag)) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"sr_read_all_subcodes: ddi_copyin Failed\n");
kmem_free(subcode, sizeof (struct cdrom_subcode));
return (EFAULT);
}
break;
}
#else
if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"sr_read_all_subcodes: ddi_copyin Failed\n");
kmem_free(subcode, sizeof (struct cdrom_subcode));
return (EFAULT);
}
#endif
/*
 * The transfer length field in the CDB is only 24 bits wide; reject
 * any request whose block count does not fit.
 */
if ((subcode->cdsc_length & 0xFF000000) != 0) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"sr_read_all_subcodes: "
"cdrom transfer length too large: %d (limit %d)\n",
subcode->cdsc_length, 0xFFFFFF);
kmem_free(subcode, sizeof (struct cdrom_subcode));
return (EINVAL);
}
/* Total byte count: cdsc_length blocks of raw subcode data. */
buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
com = kmem_zalloc(sizeof (*com), KM_SLEEP);
bzero(cdb, CDB_GROUP5);
if (un->un_f_mmc_cap == TRUE) {
/*
 * MMC drive: use READ CD with an all-ones LBA (bytes 2-5) and
 * a 24-bit transfer length in bytes 6-8; byte 10 selects the
 * subchannel data format.
 */
cdb[0] = (char)SCMD_READ_CD;
cdb[2] = (char)0xff;
cdb[3] = (char)0xff;
cdb[4] = (char)0xff;
cdb[5] = (char)0xff;
cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
cdb[10] = 1;
} else {
/* Non-MMC drive: vendor READ ALL SUBCODES, 32-bit length. */
cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
}
com->uscsi_cdb = cdb;
com->uscsi_cdblen = CDB_GROUP5;
/* Data lands directly in the caller's buffer; hence UIO_USERSPACE. */
com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
com->uscsi_buflen = buflen;
com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ;
rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
SD_PATH_STANDARD);
kmem_free(subcode, sizeof (struct cdrom_subcode));
kmem_free(com, sizeof (*com));
return (rval);
}
/*
 * sr_read_subchannel - handler for the CDROMSUBCHNL ioctl.
 *
 * Issues a READ SUB-CHANNEL command requesting the current-position
 * Q-subchannel data (16 bytes) and returns it to the caller in the
 * requested address format (LBA or MSF).
 *
 *   dev  - device number
 *   data - user pointer to a struct cdrom_subchnl (in: format; out: data)
 *   flag - ioctl mode flags (passed to ddi_copyin/ddi_copyout)
 *
 * Returns: 0 on success, EINVAL/ENXIO/EFAULT on argument errors,
 * otherwise the status from sd_send_scsi_cmd().
 */
static int
sr_read_subchannel(dev_t dev, caddr_t data, int flag)
{
struct sd_lun *un;
struct uscsi_cmd *com;
struct cdrom_subchnl subchanel;
struct cdrom_subchnl *subchnl = &subchanel;
char cdb[CDB_GROUP1];
caddr_t buffer;
int rval;
if (data == NULL) {
return (EINVAL);
}
if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
(un->un_state == SD_STATE_OFFLINE)) {
return (ENXIO);
}
if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) {
return (EFAULT);
}
/* 16-byte response: 4-byte header plus the Q-subchannel data block. */
buffer = kmem_zalloc((size_t)16, KM_SLEEP);
bzero(cdb, CDB_GROUP1);
cdb[0] = SCMD_READ_SUBCHANNEL;
/* Byte 1 bit 1 (MSF): set unless the caller asked for LBA format. */
cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02;
/* Byte 2: SubQ bit - return Q-subchannel data. */
cdb[2] = 0x40;
/* Byte 3: data format 0x01 - current position. */
cdb[3] = 0x01;
/* Byte 8: allocation length. */
cdb[8] = 0x10;
com = kmem_zalloc(sizeof (*com), KM_SLEEP);
com->uscsi_cdb = cdb;
com->uscsi_cdblen = CDB_GROUP1;
com->uscsi_bufaddr = buffer;
com->uscsi_buflen = 16;
com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ;
rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
SD_PATH_STANDARD);
if (rval != 0) {
kmem_free(buffer, 16);
kmem_free(com, sizeof (*com));
return (rval);
}
/* Unpack the response: audio status, ADR/control, track and index. */
subchnl->cdsc_audiostatus = buffer[1];
subchnl->cdsc_adr = (buffer[5] & 0xF0) >> 4;
subchnl->cdsc_ctrl = (buffer[5] & 0x0F);
subchnl->cdsc_trk = buffer[6];
subchnl->cdsc_ind = buffer[7];
if (subchnl->cdsc_format & CDROM_LBA) {
/* Absolute and relative addresses as big-endian 32-bit LBAs. */
subchnl->cdsc_absaddr.lba =
((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
subchnl->cdsc_reladdr.lba =
((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) +
((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]);
} else if (un->un_f_cfg_readsub_bcd == TRUE) {
/* Quirky drive reports MSF fields in BCD; convert to binary. */
subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]);
subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]);
subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]);
subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]);
subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]);
subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]);
} else {
subchnl->cdsc_absaddr.msf.minute = buffer[9];
subchnl->cdsc_absaddr.msf.second = buffer[10];
subchnl->cdsc_absaddr.msf.frame = buffer[11];
subchnl->cdsc_reladdr.msf.minute = buffer[13];
subchnl->cdsc_reladdr.msf.second = buffer[14];
subchnl->cdsc_reladdr.msf.frame = buffer[15];
}
kmem_free(buffer, 16);
kmem_free(com, sizeof (*com));
if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag)
!= 0) {
return (EFAULT);
}
return (rval);
}
/*
 * sr_read_tocentry - handler for the CDROMREADTOCENTRY ioctl.
 *
 * Reads a single TOC entry for the requested track and, for data
 * tracks, also reads the sector header to determine the data mode.
 *
 *   dev  - device number
 *   data - user pointer to a struct cdrom_tocentry (in: track/format;
 *          out: address, control, data mode)
 *   flag - ioctl mode flags (passed to ddi_copyin/ddi_copyout)
 *
 * Returns: 0 on success, EINVAL/ENXIO/EFAULT on argument errors,
 * otherwise the status of the failing command.
 */
static int
sr_read_tocentry(dev_t dev, caddr_t data, int flag)
{
struct sd_lun *un = NULL;
struct uscsi_cmd *com;
struct cdrom_tocentry toc_entry;
struct cdrom_tocentry *entry = &toc_entry;
caddr_t buffer;
int rval;
char cdb[CDB_GROUP1];
if (data == NULL) {
return (EINVAL);
}
if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
(un->un_state == SD_STATE_OFFLINE)) {
return (ENXIO);
}
if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
return (EFAULT);
}
/* Caller must request either LBA or MSF format. */
if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
return (EINVAL);
}
/* Track 0 is not a valid TOC track number. */
if (entry->cdte_track == 0) {
return (EINVAL);
}
/* 12-byte response: 4-byte TOC header plus one 8-byte descriptor. */
buffer = kmem_zalloc((size_t)12, KM_SLEEP);
com = kmem_zalloc(sizeof (*com), KM_SLEEP);
bzero(cdb, CDB_GROUP1);
cdb[0] = SCMD_READ_TOC;
/* Byte 1 bit 1 selects MSF format; clear for LBA. */
cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
/* Quirky drive expects the track number in BCD. */
cdb[6] = BYTE_TO_BCD(entry->cdte_track);
} else {
cdb[6] = entry->cdte_track;
}
/* Allocation length. */
cdb[8] = 12;
com->uscsi_cdb = cdb;
com->uscsi_cdblen = CDB_GROUP1;
com->uscsi_bufaddr = buffer;
com->uscsi_buflen = 0x0C;
com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
SD_PATH_STANDARD);
if (rval != 0) {
kmem_free(buffer, 12);
kmem_free(com, sizeof (*com));
return (rval);
}
/* ADR/control nibbles of the descriptor. */
entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
entry->cdte_ctrl = (buffer[5] & 0x0F);
if (entry->cdte_format & CDROM_LBA) {
/* Big-endian 32-bit LBA in bytes 8-11. */
entry->cdte_addr.lba =
((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
} else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
/*
 * Re-issue the command in LBA format so that bytes 8-11 of the
 * buffer hold an LBA for the READ HEADER command below.
 */
cdb[1] = 0;
rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
SD_PATH_STANDARD);
if (rval != 0) {
kmem_free(buffer, 12);
kmem_free(com, sizeof (*com));
return (rval);
}
} else {
entry->cdte_addr.msf.minute = buffer[9];
entry->cdte_addr.msf.second = buffer[10];
entry->cdte_addr.msf.frame = buffer[11];
/* Same as above: refetch in LBA form for the READ HEADER step. */
cdb[1] = 0;
rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
SD_PATH_STANDARD);
if (rval != 0) {
kmem_free(buffer, 12);
kmem_free(com, sizeof (*com));
return (rval);
}
}
/*
 * For data tracks (but not the lead-out) read the sector header at
 * the track's LBA to report the data mode; audio tracks and failures
 * report mode -1.
 */
if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
(entry->cdte_track != CDROM_LEADOUT)) {
bzero(cdb, CDB_GROUP1);
cdb[0] = SCMD_READ_HEADER;
cdb[2] = buffer[8];
cdb[3] = buffer[9];
cdb[4] = buffer[10];
cdb[5] = buffer[11];
cdb[8] = 0x08;
com->uscsi_buflen = 0x08;
rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
SD_PATH_STANDARD);
if (rval == 0) {
entry->cdte_datamode = buffer[0];
} else {
entry->cdte_datamode = (uchar_t)-1;
}
} else {
entry->cdte_datamode = (uchar_t)-1;
}
kmem_free(buffer, 12);
kmem_free(com, sizeof (*com));
if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
return (EFAULT);
return (rval);
}
/*
 * sr_read_tochdr - handler for the CDROMREADTOCHDR ioctl.
 *
 * Issues a READ TOC command requesting only the 4-byte TOC header and
 * returns the first/last track numbers to the caller.
 *
 *   dev  - device number
 *   data - user pointer to a struct cdrom_tochdr (output)
 *   flag - ioctl mode flags (passed to ddi_copyout)
 *
 * Returns: 0 on success, EINVAL/ENXIO/EFAULT on argument errors,
 * otherwise the status from sd_send_scsi_cmd().
 */
static int
sr_read_tochdr(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun *un;
	struct uscsi_cmd *com;
	struct cdrom_tochdr toc_header;
	struct cdrom_tochdr *hdr = &toc_header;
	char cdb[CDB_GROUP1];
	int rval;
	caddr_t buffer;

	if (data == NULL) {
		return (EINVAL);
	}
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}
	buffer = kmem_zalloc(4, KM_SLEEP);
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	cdb[6] = 0x00;		/* starting track: 0 (header only) */
	cdb[8] = 0x04;		/* allocation length: 4-byte TOC header */
	com = kmem_zalloc(sizeof (*com), KM_SLEEP);
	com->uscsi_cdb = cdb;
	com->uscsi_cdblen = CDB_GROUP1;
	com->uscsi_bufaddr = buffer;
	com->uscsi_buflen = 0x04;
	com->uscsi_timeout = 300;
	com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ;
	rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		/*
		 * Bug fix: the command status was previously ignored, so a
		 * zero-filled buffer was parsed and copied out to the user
		 * even when the READ TOC failed.  Fail the ioctl instead,
		 * consistent with sr_read_tocentry()/sr_read_subchannel().
		 */
		kmem_free(buffer, 4);
		kmem_free(com, sizeof (*com));
		return (rval);
	}
	if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
		/* Quirky drive reports track numbers in BCD. */
		hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
		hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
	} else {
		hdr->cdth_trk0 = buffer[2];
		hdr->cdth_trk1 = buffer[3];
	}
	kmem_free(buffer, 4);
	kmem_free(com, sizeof (*com));
	if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
		return (EFAULT);
	}
	return (rval);
}
/*
 * sr_read_mode1 - handler for the CDROMREADMODE1 ioctl.
 *
 * Reads mode-1 (2048 bytes/sector) data via the normal READ path into
 * the caller-supplied buffer.
 *
 *   dev  - device number
 *   data - user pointer to a struct cdrom_read
 *   flag - ioctl mode flags (used for data-model detection and copyin)
 *
 * Returns: 0 on success, EINVAL/ENXIO/EFAULT on argument errors,
 * otherwise the status from sd_send_scsi_READ().
 */
static int
sr_read_mode1(dev_t dev, caddr_t data, int flag)
{
struct sd_lun *un;
struct cdrom_read mode1_struct;
struct cdrom_read *mode1 = &mode1_struct;
int rval;
sd_ssc_t *ssc;
#ifdef _MULTI_DATAMODEL
/* 32-bit view of the ioctl argument for ILP32 callers on LP64 kernel */
struct cdrom_read32 cdrom_read32;
struct cdrom_read32 *cdrd32 = &cdrom_read32;
#endif
if (data == NULL) {
return (EINVAL);
}
if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
(un->un_state == SD_STATE_OFFLINE)) {
return (ENXIO);
}
SD_TRACE(SD_LOG_ATTACH_DETACH, un,
"sd_read_mode1: entry: un:0x%p\n", un);
#ifdef _MULTI_DATAMODEL
switch (ddi_model_convert_from(flag & FMODELS)) {
case DDI_MODEL_ILP32:
if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
return (EFAULT);
}
/* Widen the 32-bit struct into the native form. */
cdrom_read32tocdrom_read(cdrd32, mode1);
break;
case DDI_MODEL_NONE:
if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) {
return (EFAULT);
}
/* No break needed: last case in the switch. */
}
#else
if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) {
return (EFAULT);
}
#endif
ssc = sd_ssc_init(un);
rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr,
mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD);
sd_ssc_fini(ssc);
SD_TRACE(SD_LOG_ATTACH_DETACH, un,
"sd_read_mode1: exit: un:0x%p\n", un);
return (rval);
}
/*
 * sr_read_cd_mode2 - read mode-2 (2336 bytes/sector) data from an
 * MMC-capable drive using a READ CD command (CDROMREADMODE2 path for
 * drives that support it).
 *
 *   dev  - device number
 *   data - user pointer to a struct cdrom_read
 *   flag - ioctl mode flags (used for data-model detection and copyin)
 *
 * Returns: 0 on success, EINVAL/ENXIO/EFAULT on argument errors,
 * otherwise the status from sd_send_scsi_cmd().
 */
static int
sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
{
struct sd_lun *un;
struct uscsi_cmd *com;
struct cdrom_read mode2_struct;
struct cdrom_read *mode2 = &mode2_struct;
uchar_t cdb[CDB_GROUP5];
int nblocks;
int rval;
#ifdef _MULTI_DATAMODEL
/* 32-bit view of the ioctl argument for ILP32 callers on LP64 kernel */
struct cdrom_read32 cdrom_read32;
struct cdrom_read32 *cdrd32 = &cdrom_read32;
#endif
if (data == NULL) {
return (EINVAL);
}
if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
(un->un_state == SD_STATE_OFFLINE)) {
return (ENXIO);
}
#ifdef _MULTI_DATAMODEL
switch (ddi_model_convert_from(flag & FMODELS)) {
case DDI_MODEL_ILP32:
if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
return (EFAULT);
}
cdrom_read32tocdrom_read(cdrd32, mode2);
break;
case DDI_MODEL_NONE:
if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
return (EFAULT);
}
break;
}
#else
if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
return (EFAULT);
}
#endif
bzero(cdb, sizeof (cdb));
if (un->un_f_cfg_read_cd_xd4 == TRUE) {
/* Quirky drive needs the READ CD opcode with the high bit set. */
cdb[0] = SCMD_READ_CDD4;
} else {
cdb[0] = SCMD_READ_CD;
}
/* Expected sector type: mode 2. */
cdb[1] = CDROM_SECTOR_TYPE_MODE2;
/* Starting LBA, big-endian in bytes 2-5. */
cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF);
cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF);
cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);
/* Whole mode-2 sectors only; any partial-sector remainder is dropped. */
nblocks = mode2->cdread_buflen / 2336;
cdb[6] = (uchar_t)(nblocks >> 16);
cdb[7] = (uchar_t)(nblocks >> 8);
cdb[8] = (uchar_t)nblocks;
/* Byte 9: return user data only. */
cdb[9] = CDROM_READ_CD_USERDATA;
com = kmem_zalloc(sizeof (*com), KM_SLEEP);
com->uscsi_cdb = (caddr_t)cdb;
com->uscsi_cdblen = sizeof (cdb);
/* Data transfers directly into the caller's buffer (UIO_USERSPACE). */
com->uscsi_bufaddr = mode2->cdread_bufaddr;
com->uscsi_buflen = mode2->cdread_buflen;
com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ;
rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
SD_PATH_STANDARD);
kmem_free(com, sizeof (*com));
return (rval);
}
/*
 * sr_read_mode2 - handler for CDROMREADMODE2 on drives without READ CD
 * support: temporarily switch the device block size to the mode-2
 * sector size, perform a plain READ, then restore the original size.
 *
 *   dev  - device number
 *   data - user pointer to a struct cdrom_read
 *   flag - ioctl mode flags (used for data-model detection and copyin)
 *
 * Returns: 0 on success, EINVAL/ENXIO/EFAULT/EAGAIN/EIO on errors,
 * otherwise the status from sd_send_scsi_cmd().
 *
 * NOTE: the block-size switch affects the whole device, so this must
 * be the only command in flight (checked via un_ncmds_in_driver).
 */
static int
sr_read_mode2(dev_t dev, caddr_t data, int flag)
{
struct sd_lun *un;
struct cdrom_read mode2_struct;
struct cdrom_read *mode2 = &mode2_struct;
int rval;
uint32_t restore_blksize;
struct uscsi_cmd *com;
uchar_t cdb[CDB_GROUP0];
int nblocks;
#ifdef _MULTI_DATAMODEL
/* 32-bit view of the ioctl argument for ILP32 callers on LP64 kernel */
struct cdrom_read32 cdrom_read32;
struct cdrom_read32 *cdrd32 = &cdrom_read32;
#endif
if (data == NULL) {
return (EINVAL);
}
if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
(un->un_state == SD_STATE_OFFLINE)) {
return (ENXIO);
}
/*
 * Refuse unless this ioctl is the only active command: changing the
 * device block size under concurrent I/O would corrupt transfers.
 */
mutex_enter(SD_MUTEX(un));
if (un->un_ncmds_in_driver != 1) {
mutex_exit(SD_MUTEX(un));
return (EAGAIN);
}
mutex_exit(SD_MUTEX(un));
SD_TRACE(SD_LOG_ATTACH_DETACH, un,
"sd_read_mode2: entry: un:0x%p\n", un);
#ifdef _MULTI_DATAMODEL
switch (ddi_model_convert_from(flag & FMODELS)) {
case DDI_MODEL_ILP32:
if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
return (EFAULT);
}
cdrom_read32tocdrom_read(cdrd32, mode2);
break;
case DDI_MODEL_NONE:
if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
return (EFAULT);
}
break;
}
#else
if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
return (EFAULT);
}
#endif
/* Remember the current block size so it can be restored afterwards. */
restore_blksize = un->un_tgt_blocksize;
if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
rval = EIO;
goto done;
}
bzero(cdb, sizeof (cdb));
cdb[0] = SCMD_READ;
/*
 * Group-0 READ addresses in device blocks; the LBA was given in
 * 512-byte units, so divide by 4 for the 2336-byte sectors.
 * NOTE(review): assumes cdread_lba is in 512-byte units - confirm
 * against the ioctl contract.
 */
mode2->cdread_lba >>= 2;
cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F);
cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);
/* Whole mode-2 sectors only. */
nblocks = mode2->cdread_buflen / 2336;
cdb[4] = (uchar_t)nblocks & 0xFF;
com = kmem_zalloc(sizeof (*com), KM_SLEEP);
com->uscsi_cdb = (caddr_t)cdb;
com->uscsi_cdblen = sizeof (cdb);
com->uscsi_bufaddr = mode2->cdread_bufaddr;
com->uscsi_buflen = mode2->cdread_buflen;
com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ;
rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
SD_PATH_STANDARD);
kmem_free(com, sizeof (*com));
/* Always attempt to restore the original block size. */
if (sr_sector_mode(dev, restore_blksize) != 0) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"can't do switch back to mode 1\n");
/* Surface the failure unless the read itself already failed. */
if (rval == 0) {
rval = EIO;
}
}
done:
SD_TRACE(SD_LOG_ATTACH_DETACH, un,
"sd_read_mode2: exit: un:0x%p\n", un);
return (rval);
}
/*
 * sr_sector_mode - switch the device's logical block size via MODE
 * SENSE / MODE SELECT (used by sr_read_mode2 to toggle between the
 * mode-1 and mode-2 sector sizes).
 *
 *   dev     - device number
 *   blksize - new block size to program into the block descriptor
 *
 * Returns: 0 on success (and updates the driver's cached block info),
 * ENXIO if the unit is missing/offline, otherwise the command status.
 */
static int
sr_sector_mode(dev_t dev, uint32_t blksize)
{
struct sd_lun *un;
uchar_t *sense;
uchar_t *select;
int rval;
sd_ssc_t *ssc;
if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
(un->un_state == SD_STATE_OFFLINE)) {
return (ENXIO);
}
/* 20 bytes: mode header + block descriptor + page data. */
sense = kmem_zalloc(20, KM_SLEEP);
/*
 * Fetch page 0x81 first so the select data below can preserve the
 * drive's current settings in bytes 14/15.
 * NOTE(review): 0x81 appears to be a vendor-specific page - confirm.
 */
ssc = sd_ssc_init(un);
rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81,
SD_PATH_STANDARD);
sd_ssc_fini(ssc);
if (rval != 0) {
SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
"sr_sector_mode: Mode Sense failed\n");
kmem_free(sense, 20);
return (rval);
}
select = kmem_zalloc(20, KM_SLEEP);
/* Byte 3: block descriptor length. */
select[3] = 0x08;
/* Bytes 10-11: new block size (big-endian, low 16 bits). */
select[10] = ((blksize >> 8) & 0xff);
select[11] = (blksize & 0xff);
/* Bytes 12-13: page code / page length. */
select[12] = 0x01;
select[13] = 0x06;
/* Preserve the sensed page bytes; set bit 0 for mode-2 sectors. */
select[14] = sense[14];
select[15] = sense[15];
if (blksize == SD_MODE2_BLKSIZE) {
select[14] |= 0x01;
}
ssc = sd_ssc_init(un);
rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20,
SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
sd_ssc_fini(ssc);
if (rval != 0) {
SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
"sr_sector_mode: Mode Select failed\n");
} else {
/* Keep the driver's cached geometry in sync with the device. */
mutex_enter(SD_MUTEX(un));
sd_update_block_info(un, blksize, 0);
mutex_exit(SD_MUTEX(un));
}
kmem_free(sense, 20);
kmem_free(select, 20);
return (rval);
}
/*
 * sr_read_cdda - handler for the CDROMCDDA ioctl: read digital audio
 * (and optionally subcode) data directly into the caller's buffer.
 *
 *   dev  - device number
 *   data - user pointer to a struct cdrom_cdda
 *   flag - ioctl mode flags (used for data-model detection and copyin)
 *
 * Returns: 0 on success; EINVAL/ENXIO/EFAULT on argument errors;
 * ENOTTY for a subcode format the MMC READ CD path cannot express;
 * otherwise the status from sd_send_scsi_cmd().
 */
static int
sr_read_cdda(dev_t dev, caddr_t data, int flag)
{
struct sd_lun *un;
struct uscsi_cmd *com;
struct cdrom_cdda *cdda;
int rval;
size_t buflen;
char cdb[CDB_GROUP5];
#ifdef _MULTI_DATAMODEL
/* 32-bit view of the ioctl argument for ILP32 callers on LP64 kernel */
struct cdrom_cdda32 cdrom_cdda32;
struct cdrom_cdda32 *cdda32 = &cdrom_cdda32;
#endif
if (data == NULL) {
return (EINVAL);
}
if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
return (ENXIO);
}
cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP);
#ifdef _MULTI_DATAMODEL
switch (ddi_model_convert_from(flag & FMODELS)) {
case DDI_MODEL_ILP32:
if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"sr_read_cdda: ddi_copyin Failed\n");
kmem_free(cdda, sizeof (struct cdrom_cdda));
return (EFAULT);
}
/* Widen the 32-bit struct into the native form. */
cdrom_cdda32tocdrom_cdda(cdda32, cdda);
break;
case DDI_MODEL_NONE:
if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"sr_read_cdda: ddi_copyin Failed\n");
kmem_free(cdda, sizeof (struct cdrom_cdda));
return (EFAULT);
}
break;
}
#else
if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"sr_read_cdda: ddi_copyin Failed\n");
kmem_free(cdda, sizeof (struct cdrom_cdda));
return (EFAULT);
}
#endif
/* The CDB transfer-length field is 24 bits; reject larger requests. */
if ((cdda->cdda_length & 0xFF000000) != 0) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: "
"cdrom transfer length too large: %d (limit %d)\n",
cdda->cdda_length, 0xFFFFFF);
kmem_free(cdda, sizeof (struct cdrom_cdda));
return (EINVAL);
}
/* Per-block size depends on how much subcode data is interleaved. */
switch (cdda->cdda_subcode) {
case CDROM_DA_NO_SUBCODE:
buflen = CDROM_BLK_2352 * cdda->cdda_length;
break;
case CDROM_DA_SUBQ:
buflen = CDROM_BLK_2368 * cdda->cdda_length;
break;
case CDROM_DA_ALL_SUBCODE:
buflen = CDROM_BLK_2448 * cdda->cdda_length;
break;
case CDROM_DA_SUBCODE_ONLY:
buflen = CDROM_BLK_SUBCODE * cdda->cdda_length;
break;
default:
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"sr_read_cdda: Subcode '0x%x' Not Supported\n",
cdda->cdda_subcode);
kmem_free(cdda, sizeof (struct cdrom_cdda));
return (EINVAL);
}
com = kmem_zalloc(sizeof (*com), KM_SLEEP);
bzero(cdb, CDB_GROUP5);
if (un->un_f_cfg_cdda == TRUE) {
/*
 * MMC-style READ CD: expected sector type CDDA (byte 1),
 * big-endian LBA in bytes 2-5, 24-bit length in bytes 6-8,
 * user data flag in byte 9, subchannel selection in byte 10.
 */
cdb[0] = (char)SCMD_READ_CD;
cdb[1] = 0x04;
cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
cdb[8] = ((cdda->cdda_length) & 0x000000ff);
cdb[9] = 0x10;
/* Map the ioctl subcode selector onto the READ CD encoding. */
switch (cdda->cdda_subcode) {
case CDROM_DA_NO_SUBCODE :
cdb[10] = 0x0;
break;
case CDROM_DA_SUBQ :
cdb[10] = 0x2;
break;
case CDROM_DA_ALL_SUBCODE :
cdb[10] = 0x1;
break;
case CDROM_DA_SUBCODE_ONLY :
/* READ CD cannot return subcode-only data. */
default :
kmem_free(cdda, sizeof (struct cdrom_cdda));
kmem_free(com, sizeof (*com));
return (ENOTTY);
}
} else {
/* Vendor READ CDDA: 32-bit address and length fields. */
cdb[0] = (char)SCMD_READ_CDDA;
cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24);
cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
cdb[9] = ((cdda->cdda_length) & 0x000000ff);
cdb[10] = cdda->cdda_subcode;
}
com->uscsi_cdb = cdb;
com->uscsi_cdblen = CDB_GROUP5;
/* Data transfers directly into the caller's buffer (UIO_USERSPACE). */
com->uscsi_bufaddr = (caddr_t)cdda->cdda_data;
com->uscsi_buflen = buflen;
com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ;
rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
SD_PATH_STANDARD);
kmem_free(cdda, sizeof (struct cdrom_cdda));
kmem_free(com, sizeof (*com));
return (rval);
}
/*
 * sr_read_cdxa - handler for the CDROMCDXA ioctl: read CD-ROM XA
 * (mode-2 form 1/2) data directly into the caller's buffer.
 *
 *   dev  - device number
 *   data - user pointer to a struct cdrom_cdxa
 *   flag - ioctl mode flags (used for data-model detection and copyin)
 *
 * Returns: 0 on success, EINVAL/ENXIO/EFAULT on argument errors,
 * otherwise the status from sd_send_scsi_cmd().
 */
static int
sr_read_cdxa(dev_t dev, caddr_t data, int flag)
{
struct sd_lun *un;
struct uscsi_cmd *com;
struct cdrom_cdxa *cdxa;
int rval;
size_t buflen;
char cdb[CDB_GROUP5];
uchar_t read_flags;
#ifdef _MULTI_DATAMODEL
/* 32-bit view of the ioctl argument for ILP32 callers on LP64 kernel */
struct cdrom_cdxa32 cdrom_cdxa32;
struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32;
#endif
if (data == NULL) {
return (EINVAL);
}
if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
return (ENXIO);
}
cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP);
#ifdef _MULTI_DATAMODEL
switch (ddi_model_convert_from(flag & FMODELS)) {
case DDI_MODEL_ILP32:
if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) {
kmem_free(cdxa, sizeof (struct cdrom_cdxa));
return (EFAULT);
}
/* Widen the 32-bit struct into the native form. */
cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
break;
case DDI_MODEL_NONE:
if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
kmem_free(cdxa, sizeof (struct cdrom_cdxa));
return (EFAULT);
}
break;
}
#else
if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
kmem_free(cdxa, sizeof (struct cdrom_cdxa));
return (EFAULT);
}
#endif
/* The CDB transfer-length field is 24 bits; reject larger requests. */
if ((cdxa->cdxa_length & 0xFF000000) != 0) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
"cdrom transfer length too large: %d (limit %d)\n",
cdxa->cdxa_length, 0xFFFFFF);
kmem_free(cdxa, sizeof (struct cdrom_cdxa));
return (EINVAL);
}
/*
 * Per-block size and the READ CD flags byte both depend on the
 * requested XA data format.
 */
switch (cdxa->cdxa_format) {
case CDROM_XA_DATA:
buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
read_flags = 0x10;
break;
case CDROM_XA_SECTOR_DATA:
buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
read_flags = 0xf8;
break;
case CDROM_XA_DATA_W_ERROR:
buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
read_flags = 0xfc;
break;
default:
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"sr_read_cdxa: Format '0x%x' Not Supported\n",
cdxa->cdxa_format);
kmem_free(cdxa, sizeof (struct cdrom_cdxa));
return (EINVAL);
}
com = kmem_zalloc(sizeof (*com), KM_SLEEP);
bzero(cdb, CDB_GROUP5);
if (un->un_f_mmc_cap == TRUE) {
/*
 * MMC READ CD: big-endian LBA in bytes 2-5, 24-bit length in
 * bytes 6-8, format-dependent flags in byte 9.
 */
cdb[0] = (char)SCMD_READ_CD;
cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
cdb[9] = (char)read_flags;
} else {
/* Vendor READ CDXA: 32-bit address/length, format in byte 10. */
cdb[0] = (char)SCMD_READ_CDXA;
cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24);
cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
cdb[9] = ((cdxa->cdxa_length) & 0x000000ff);
cdb[10] = cdxa->cdxa_format;
}
com->uscsi_cdb = cdb;
com->uscsi_cdblen = CDB_GROUP5;
/* Data transfers directly into the caller's buffer (UIO_USERSPACE). */
com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data;
com->uscsi_buflen = buflen;
com->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ;
rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
SD_PATH_STANDARD);
kmem_free(cdxa, sizeof (struct cdrom_cdxa));
kmem_free(com, sizeof (*com));
return (rval);
}
/*
 * sr_eject - eject the media from a removable-media device.
 *
 * Unlocks the media via PREVENT/ALLOW MEDIUM REMOVAL, then issues a
 * START STOP UNIT with the eject bit set.  Only one eject may be in
 * flight per unit (guarded by un_f_ejecting).
 *
 *   dev - device number
 *
 * Returns: 0 on success, ENXIO if the unit is missing/offline, EAGAIN
 * if an eject is already in progress, otherwise the command status.
 */
static int
sr_eject(dev_t dev)
{
	struct sd_lun	*un;
	sd_ssc_t	*ssc;
	int		rval;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	/* Serialize: at most one eject per unit at a time. */
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_ejecting == TRUE) {
		mutex_exit(SD_MUTEX(un));
		return (EAGAIN);
	}
	un->un_f_ejecting = TRUE;
	mutex_exit(SD_MUTEX(un));

	/* The media must be unlocked before it can be ejected. */
	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
	    SD_PATH_STANDARD);
	sd_ssc_fini(ssc);
	if (rval != 0) {
		mutex_enter(SD_MUTEX(un));
		un->un_f_ejecting = FALSE;
		mutex_exit(SD_MUTEX(un));
		return (rval);
	}

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
	    SD_TARGET_EJECT, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);

	mutex_enter(SD_MUTEX(un));
	if (rval == 0) {
		/* Media is gone: invalidate cached state, wake waiters. */
		sr_ejected(un);
		un->un_mediastate = DKIO_EJECTED;
		cv_broadcast(&un->un_state_cv);
	}
	un->un_f_ejecting = FALSE;
	mutex_exit(SD_MUTEX(un));

	return (rval);
}
/*
 * sr_ejected - per-unit bookkeeping after media has been ejected.
 *
 * Invalidates the cached capacity/blocksize and the cmlb label, and
 * zeroes the capacity error statistic.
 *
 *   un - softstate for the unit (SD_MUTEX must be held on entry;
 *        it is dropped and reacquired around the cmlb call)
 */
static void
sr_ejected(struct sd_lun *un)
{
	struct sd_errstats	*stp;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* Media is gone, so the cached geometry is no longer valid. */
	un->un_f_blockcount_is_valid = FALSE;
	un->un_f_tgt_blocksize_is_valid = FALSE;

	/* cmlb_invalidate() must be called without SD_MUTEX held. */
	mutex_exit(SD_MUTEX(un));
	cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
	mutex_enter(SD_MUTEX(un));

	if (un->un_errstats == NULL)
		return;

	stp = (struct sd_errstats *)un->un_errstats->ks_data;
	stp->sd_capacity.value.ui64 = 0;
}
/*
 * sr_check_wp - report whether the media is write protected.
 *
 * Issues a MODE SENSE for all pages (header only) and inspects the
 * WRITE_PROTECT bit in the device-specific byte of the mode header.
 * ATAPI devices use the group-1 command and the grp2 header layout.
 *
 *   dev - device number
 *
 * Returns: TRUE if the media is write protected, FALSE otherwise
 * (including when the unit is missing or the command fails).
 */
static int
sr_check_wp(dev_t dev)
{
	struct sd_lun	*un;
	uchar_t		device_specific;
	uchar_t		*sense;
	int		hdrlen;
	int		is_atapi;
	int		rval = FALSE;
	int		status;
	sd_ssc_t	*ssc;

	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
		return (FALSE);
	}

	/* ATAPI devices require the group-1 MODE SENSE and grp2 header. */
	is_atapi = (un->un_f_cfg_is_atapi == TRUE);
	hdrlen = is_atapi ? MODE_HEADER_LENGTH_GRP2 : MODE_HEADER_LENGTH;
	sense = kmem_zalloc(hdrlen, KM_SLEEP);

	ssc = sd_ssc_init(un);
	status = sd_send_scsi_MODE_SENSE(ssc,
	    is_atapi ? CDB_GROUP1 : CDB_GROUP0, sense, hdrlen,
	    MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
	sd_ssc_fini(ssc);

	if (status == 0) {
		if (is_atapi) {
			device_specific = ((struct mode_header_grp2 *)
			    sense)->device_specific;
		} else {
			device_specific = ((struct mode_header *)
			    sense)->device_specific;
		}
		if (device_specific & WRITE_PROTECT) {
			rval = TRUE;
		}
	}

	kmem_free(sense, hdrlen);
	return (rval);
}
/*
 * sr_volume_ctrl - handler for the CDROMVOLCTRL ioctl: program the
 * audio output volume via the CD audio control mode page.
 *
 * Fetches the current audio-control page with MODE SENSE (to preserve
 * fields the caller does not set), builds a new page with the
 * requested channel-0/channel-1 volumes, and writes it back with
 * MODE SELECT.  ATAPI/MMC devices use group-1 commands with the grp2
 * mode header; legacy devices use group-0 commands.
 *
 *   dev  - device number
 *   data - user pointer to a struct cdrom_volctrl
 *   flag - ioctl mode flags (passed to ddi_copyin)
 *
 * Returns: 0 on success, EINVAL/ENXIO/EFAULT/EIO on errors, otherwise
 * the status of the failing mode command.
 */
static int
sr_volume_ctrl(dev_t dev, caddr_t data, int flag)
{
struct sd_lun *un;
struct cdrom_volctrl volume;
struct cdrom_volctrl *vol = &volume;
uchar_t *sense_page;
uchar_t *select_page;
uchar_t *sense;
uchar_t *select;
int sense_buflen;
int select_buflen;
int rval;
sd_ssc_t *ssc;
if (data == NULL) {
return (EINVAL);
}
if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
(un->un_state == SD_STATE_OFFLINE)) {
return (ENXIO);
}
if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) {
return (EFAULT);
}
if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
struct mode_header_grp2 *sense_mhp;
struct mode_header_grp2 *select_mhp;
int bd_len;
/* Group-1 path: grp2 mode header, possible block descriptor. */
sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN;
select_buflen = MODE_HEADER_LENGTH_GRP2 +
MODEPAGE_AUDIO_CTRL_LEN;
sense = kmem_zalloc(sense_buflen, KM_SLEEP);
select = kmem_zalloc(select_buflen, KM_SLEEP);
ssc = sd_ssc_init(un);
rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
sense_buflen, MODEPAGE_AUDIO_CTRL,
SD_PATH_STANDARD);
sd_ssc_fini(ssc);
if (rval != 0) {
SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
"sr_volume_ctrl: Mode Sense Failed\n");
kmem_free(sense, sense_buflen);
kmem_free(select, select_buflen);
return (rval);
}
sense_mhp = (struct mode_header_grp2 *)sense;
select_mhp = (struct mode_header_grp2 *)select;
bd_len = (sense_mhp->bdesc_length_hi << 8) |
sense_mhp->bdesc_length_lo;
if (bd_len > MODE_BLK_DESC_LENGTH) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"sr_volume_ctrl: Mode Sense returned invalid "
"block descriptor length\n");
kmem_free(sense, sense_buflen);
kmem_free(select, select_buflen);
return (EIO);
}
/*
 * Page data follows the header plus any block descriptor in
 * the sense data; the select data carries no block descriptor.
 */
sense_page = (uchar_t *)
(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2);
/* Length fields must be zero in MODE SELECT data. */
select_mhp->length_msb = 0;
select_mhp->length_lsb = 0;
select_mhp->bdesc_length_hi = 0;
select_mhp->bdesc_length_lo = 0;
} else {
struct mode_header *sense_mhp, *select_mhp;
/* Group-0 path: legacy 4-byte mode header. */
sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
sense = kmem_zalloc(sense_buflen, KM_SLEEP);
select = kmem_zalloc(select_buflen, KM_SLEEP);
ssc = sd_ssc_init(un);
rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
sense_buflen, MODEPAGE_AUDIO_CTRL,
SD_PATH_STANDARD);
sd_ssc_fini(ssc);
if (rval != 0) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"sr_volume_ctrl: Mode Sense Failed\n");
kmem_free(sense, sense_buflen);
kmem_free(select, select_buflen);
return (rval);
}
sense_mhp = (struct mode_header *)sense;
select_mhp = (struct mode_header *)select;
if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) {
scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
"sr_volume_ctrl: Mode Sense returned invalid "
"block descriptor length\n");
kmem_free(sense, sense_buflen);
kmem_free(select, select_buflen);
return (EIO);
}
sense_page = (uchar_t *)
(sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
select_page = (uchar_t *)(select + MODE_HEADER_LENGTH);
select_mhp->length = 0;
select_mhp->bdesc_length = 0;
}
/*
 * Build the audio control page: page code/length, then per-port
 * channel selection (0x01/0x02) and volume from the caller; the
 * remaining fields are preserved from the sensed page.
 */
select_page[0] = MODEPAGE_AUDIO_CTRL;
select_page[1] = 0xE;
select_page[2] = 0x04;
select_page[3] = 0x00;
select_page[4] = 0x00;
select_page[5] = sense_page[5];
select_page[6] = sense_page[6];
select_page[7] = sense_page[7];
select_page[8] = 0x01;
select_page[9] = vol->channel0;
select_page[10] = 0x02;
select_page[11] = vol->channel1;
select_page[12] = sense_page[12];
select_page[13] = sense_page[13];
select_page[14] = sense_page[14];
select_page[15] = sense_page[15];
ssc = sd_ssc_init(un);
if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select,
select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
} else {
rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
}
sd_ssc_fini(ssc);
kmem_free(sense, sense_buflen);
kmem_free(select, select_buflen);
return (rval);
}
/*
 * sr_read_sony_session_offset - read the Sony-specific multisession
 * offset (last-session start address) via a vendor READ TOC variant.
 *
 *   dev  - device number
 *   data - user pointer to an int receiving the session offset
 *   flag - ioctl mode flags (passed to ddi_copyout)
 *
 * Returns: 0 on success, EINVAL/ENXIO/EFAULT on argument errors,
 * otherwise the status from sd_send_scsi_cmd().
 */
static int
sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
{
	struct sd_lun		*un;
	struct uscsi_cmd	*ucmd;
	caddr_t			resp;
	char			cdb[CDB_GROUP1];
	int			session_offset = 0;
	int			rval;

	if (data == NULL) {
		return (EINVAL);
	}
	if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
	    (un->un_state == SD_STATE_OFFLINE)) {
		return (ENXIO);
	}

	resp = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);

	/* Vendor READ TOC: the key in byte 9 selects session-offset data. */
	bzero(cdb, CDB_GROUP1);
	cdb[0] = SCMD_READ_TOC;
	cdb[8] = SONY_SESSION_OFFSET_LEN;
	cdb[9] = SONY_SESSION_OFFSET_KEY;

	ucmd = kmem_zalloc(sizeof (*ucmd), KM_SLEEP);
	ucmd->uscsi_cdb = cdb;
	ucmd->uscsi_cdblen = CDB_GROUP1;
	ucmd->uscsi_bufaddr = resp;
	ucmd->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
	ucmd->uscsi_flags = USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ;

	rval = sd_send_scsi_cmd(dev, ucmd, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (rval != 0) {
		kmem_free(resp, SONY_SESSION_OFFSET_LEN);
		kmem_free(ucmd, sizeof (*ucmd));
		return (rval);
	}

	if (resp[1] == SONY_SESSION_OFFSET_VALID) {
		/* Big-endian 32-bit address in bytes 8-11 of the response. */
		session_offset =
		    ((uchar_t)resp[8] << 24) + ((uchar_t)resp[9] << 16) +
		    ((uchar_t)resp[10] << 8) + ((uchar_t)resp[11]);
		/* Scale the address for 512/1024-byte target block sizes. */
		if (un->un_tgt_blocksize == CDROM_BLK_512) {
			session_offset >>= 2;
		} else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
			session_offset >>= 1;
		}
	}

	if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
		rval = EFAULT;
	}

	kmem_free(resp, SONY_SESSION_OFFSET_LEN);
	kmem_free(ucmd, sizeof (*ucmd));
	return (rval);
}
/*
 * sd_wm_cache_constructor - kmem cache constructor for sd_w_map
 * objects: zero the map and initialize its condition variable.
 * The 'un' and 'flags' arguments are unused.
 */
static int
sd_wm_cache_constructor(void *wm, void *un, int flags)
{
	struct sd_w_map	*wmp = wm;

	bzero(wmp, sizeof (struct sd_w_map));
	cv_init(&wmp->wm_avail, NULL, CV_DRIVER, NULL);
	return (0);
}
/*
 * sd_wm_cache_destructor - kmem cache destructor for sd_w_map
 * objects: tear down the condition variable set up by the
 * constructor.  The 'un' argument is unused.
 */
static void
sd_wm_cache_destructor(void *wm, void *un)
{
	struct sd_w_map	*wmp = wm;

	cv_destroy(&wmp->wm_avail);
}
/*
 * sd_range_lock - acquire an exclusive lock on the block range
 * [startb, endb] for read-modify-write processing.
 *
 * Implements a small state machine:
 *   SD_WM_CHK_LIST   - look for an existing busy map overlapping the
 *                      range (only needed when RMW activity exists)
 *   SD_WM_LOCK_RANGE - allocate/initialize a map and link it in
 *   SD_WM_WAIT_MAP   - sleep until an overlapping busy map is released
 *   SD_WM_DONE       - range is locked
 *
 *   un     - softstate for the unit (SD_MUTEX must NOT be held)
 *   startb - first block of the range
 *   endb   - last block of the range
 *   typ    - lock type flags (e.g. SD_WTYPE_RMW)
 *
 * Returns: the busy sd_w_map describing the locked range (never NULL);
 * release with sd_range_unlock().
 */
static struct sd_w_map *
sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
{
struct sd_w_map *wmp = NULL;
struct sd_w_map *sl_wmp = NULL;
struct sd_w_map *tmp_wmp;
wm_state state = SD_WM_CHK_LIST;
ASSERT(un != NULL);
ASSERT(!mutex_owned(SD_MUTEX(un)));
mutex_enter(SD_MUTEX(un));
while (state != SD_WM_DONE) {
switch (state) {
case SD_WM_CHK_LIST:
/*
 * Overlap checking is only required when this request is
 * RMW or some RMW is already in progress on the unit.
 */
if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
state = SD_WM_LOCK_RANGE;
} else {
tmp_wmp = sd_get_range(un, startb, endb);
if (tmp_wmp != NULL) {
/* Drop any map we had linked in on a prior pass. */
if ((wmp != NULL) && ONLIST(un, wmp)) {
FREE_ONLIST_WMAP(un, wmp);
}
sl_wmp = tmp_wmp;
state = SD_WM_WAIT_MAP;
} else {
state = SD_WM_LOCK_RANGE;
}
}
break;
case SD_WM_LOCK_RANGE:
ASSERT(un->un_wm_cache);
/*
 * Try a non-blocking allocation first; if that fails, drop
 * the mutex for a sleeping allocation and re-run the list
 * check, since the list may have changed while unlocked.
 */
if (wmp == NULL)
wmp = kmem_cache_alloc(un->un_wm_cache,
KM_NOSLEEP);
if (wmp == NULL) {
mutex_exit(SD_MUTEX(un));
_NOTE(DATA_READABLE_WITHOUT_LOCK
(sd_lun::un_wm_cache))
wmp = kmem_cache_alloc(un->un_wm_cache,
KM_SLEEP);
mutex_enter(SD_MUTEX(un));
state = SD_WM_CHK_LIST;
} else {
/* Mark the range busy and link the map at the head. */
wmp->wm_start = startb;
wmp->wm_end = endb;
wmp->wm_flags = typ | SD_WM_BUSY;
if (typ & SD_WTYPE_RMW) {
un->un_rmw_count++;
}
if (!ONLIST(un, wmp)) {
wmp->wm_next = un->un_wm;
wmp->wm_prev = NULL;
if (wmp->wm_next)
wmp->wm_next->wm_prev = wmp;
un->un_wm = wmp;
}
state = SD_WM_DONE;
}
break;
case SD_WM_WAIT_MAP:
ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
/*
 * Sleep until the conflicting map is released; the waiter
 * count tells the releaser to hand over rather than free.
 */
sl_wmp->wm_wanted_count++;
cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
sl_wmp->wm_wanted_count--;
ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
if (sl_wmp->wm_wanted_count == 0) {
/* Last waiter: reuse the released map, free our spare. */
if (wmp != NULL) {
CHK_N_FREEWMP(un, wmp);
}
wmp = sl_wmp;
}
sl_wmp = NULL;
/* Re-check the list: other overlaps may remain. */
state = SD_WM_CHK_LIST;
break;
default:
panic("sd_range_lock: "
"Unknown state %d in sd_range_lock", state);
}
}
mutex_exit(SD_MUTEX(un));
ASSERT(wmp != NULL);
return (wmp);
}
/*
 * sd_get_range - find a busy write map that overlaps [startb, endb].
 *
 *   un     - softstate for the unit (caller holds SD_MUTEX)
 *   startb - first block of the range
 *   endb   - last block of the range
 *
 * Returns: the first busy map whose range contains either endpoint,
 * or NULL when no busy map overlaps.
 */
static struct sd_w_map *
sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
{
	struct sd_w_map	*mp;

	ASSERT(un != NULL);

	for (mp = un->un_wm; mp != NULL; mp = mp->wm_next) {
		/* Only busy maps can conflict. */
		if (!(mp->wm_flags & SD_WM_BUSY)) {
			continue;
		}
		/* Overlap when either endpoint lies within the busy map. */
		if ((startb >= mp->wm_start && startb <= mp->wm_end) ||
		    (endb >= mp->wm_start && endb <= mp->wm_end)) {
			break;
		}
	}
	return (mp);
}
/*
 * Unlink a write map from the unit's un_wm list and return it to the
 * kmem cache.  Caller must hold SD_MUTEX.
 */
static void
sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
{
	struct sd_w_map *nextp;

	ASSERT(un != NULL);

	nextp = wmp->wm_next;
	if (un->un_wm == wmp) {
		/* wmp is the list head */
		un->un_wm = nextp;
	} else {
		wmp->wm_prev->wm_next = nextp;
	}
	if (nextp != NULL) {
		nextp->wm_prev = wmp->wm_prev;
	}

	wmp->wm_next = wmp->wm_prev = NULL;

	kmem_cache_free(un->un_wm_cache, wmp);
}
/*
 * Release a write-map lock taken by sd_range_lock().  If other threads
 * are waiting on this map, clear its flags and wake them; otherwise
 * unlink and free the map.  An RMW lock also drops un_rmw_count.
 * Must be entered without SD_MUTEX held.
 */
static void
sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
{
	ASSERT(un != NULL);
	ASSERT(wm != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	if (wm->wm_flags & SD_WTYPE_RMW) {
		un->un_rmw_count--;
	}

	if (wm->wm_wanted_count == 0) {
		/* Nobody is waiting for this range: free the map */
		sd_free_inlist_wmap(un, wm);
	} else {
		/* Hand the map over to the waiters */
		wm->wm_flags = 0;
		cv_broadcast(&wm->wm_avail);
	}

	mutex_exit(SD_MUTEX(un));
}
/*
 * Taskq worker for the read-modify-write path: re-issue the buf down
 * the iostart chain one layer below the mapblocksize layer.
 */
static void
sd_read_modify_write_task(void *arg)
{
	struct buf *rmw_bp = arg;
	struct sd_xbuf *rmw_xp;
	struct sd_mapblocksize_info *rmw_bsp;
	struct sd_lun *rmw_un;

	ASSERT(rmw_bp != NULL);
	rmw_xp = SD_GET_XBUF(rmw_bp);
	ASSERT(rmw_xp != NULL);
	rmw_bsp = rmw_xp->xb_private;
	ASSERT(rmw_bsp != NULL);
	rmw_un = SD_GET_UN(rmw_bp);
	ASSERT(rmw_un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(rmw_un)));

	SD_TRACE(SD_LOG_IO_RMMEDIA, rmw_un,
	    "sd_read_modify_write_task: entry: buf:0x%p\n", rmw_bp);

	/* Restart the request at the layer index saved by mapblocksize */
	SD_NEXT_IOSTART(rmw_bsp->mbs_layer_index, rmw_un, rmw_bp);

	SD_TRACE(SD_LOG_IO_RMMEDIA, rmw_un,
	    "sd_read_modify_write_task: exit: buf:0x%p\n", rmw_bp);
}
/*
 * During a crash dump, perform a polled read of nblk target blocks at
 * blkno so the caller can do a read-modify-write of a partial block.
 *
 * On success returns 0 and hands back the filled buf in *bpp (caller
 * frees it with scsi_free_consistent_buf()); on failure returns ENOMEM
 * or EIO with *bpp set to NULL.  Entered and exited with SD_MUTEX held;
 * the mutex is dropped for the duration of the I/O.
 */
static int
sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
    struct buf **bpp)
{
	int err;
	int i;
	int rval;
	struct buf *bp;
	struct scsi_pkt *pkt = NULL;
	uint32_t target_blocksize;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	target_blocksize = un->un_tgt_blocksize;

	mutex_exit(SD_MUTEX(un));

	bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
	    (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
	if (bp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
	    blkno, nblk);
	if (rval != 0) {
		scsi_free_consistent_buf(bp);
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "no resources for dumping; giving up");
		err = ENOMEM;
		goto done;
	}

	/* Panic context: the command must run without interrupts */
	pkt->pkt_flags |= FLAG_NOINTR;

	err = EIO;
	for (i = 0; i < SD_NDUMP_RETRIES; i++) {
		SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");

		/* Full transfer with good status means we are done */
		if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
			err = 0;
			break;
		}

		/* Device disappeared: no point retrying */
		if (pkt->pkt_reason == CMD_DEV_GONE) {
			scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
			    "Error while dumping state with rmw..."
			    "Device is gone\n");
			break;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with CHECK, try # %d\n", i);
			/* Clear the check condition before retrying */
			if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) {
				(void) sd_send_polled_RQS(un);
			}
			continue;
		}

		if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
			int reset_retval = 0;

			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with BUSY, try # %d\n", i);

			/* Try a LUN reset first, falling back to target */
			if (un->un_f_lun_reset_enabled == TRUE) {
				reset_retval = scsi_reset(SD_ADDRESS(un),
				    RESET_LUN);
			}
			if (reset_retval == 0) {
				(void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
			}
			(void) sd_send_polled_RQS(un);
		} else {
			SD_INFO(SD_LOG_DUMP, un,
			    "sddump: read failed with 0x%x, try # %d\n",
			    SD_GET_PKT_STATUS(pkt), i);
			mutex_enter(SD_MUTEX(un));
			sd_reset_target(un, pkt);
			mutex_exit(SD_MUTEX(un));
		}

		/* Past half the retries: escalate to a bus reset */
		if (i > SD_NDUMP_RETRIES / 2) {
			(void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
			(void) sd_send_polled_RQS(un);
		}
	}
	scsi_destroy_pkt(pkt);

	if (err != 0) {
		scsi_free_consistent_buf(bp);
		*bpp = NULL;
	} else {
		*bpp = bp;
	}

done:
	mutex_enter(SD_MUTEX(un));
	return (err);
}
/*
 * Flush queued I/O once the unit has entered SD_FAILFAST_ACTIVE.
 *
 * Depending on sd_failfast_flushctl, either the entire wait queue or
 * only its B_FAILFAST bufs are migrated onto the failfast queue, which
 * is then drained by failing every buf with EIO.  Optionally the
 * layered xbuf queues are flushed as well through
 * sd_failfast_flushq_callback().  Caller must hold SD_MUTEX.
 */
static void
sd_failfast_flushq(struct sd_lun *un)
{
	struct buf *bp;
	struct buf *next_waitq_bp;
	struct buf *prev_waitq_bp = NULL;

	ASSERT(un != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));
	ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
	ASSERT(un->un_failfast_bp == NULL);

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: entry: un:0x%p\n", un);

	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
		/*
		 * Flush-all mode: splice the whole wait queue onto the
		 * tail of the failfast queue in one move.
		 */
		if (un->un_failfast_headp == NULL) {
			ASSERT(un->un_failfast_tailp == NULL);
			un->un_failfast_headp = un->un_waitq_headp;
		} else {
			ASSERT(un->un_failfast_tailp != NULL);
			un->un_failfast_tailp->av_forw = un->un_waitq_headp;
		}

		un->un_failfast_tailp = un->un_waitq_tailp;

		/* Account for every buf leaving the wait queue */
		for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) {
			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
		}

		/* The wait queue is now empty */
		un->un_waitq_headp = un->un_waitq_tailp = NULL;
	} else {
		/*
		 * Selective mode: unlink only the B_FAILFAST bufs,
		 * preserving the order of the remaining wait queue.
		 */
		for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
			next_waitq_bp = bp->av_forw;

			if ((bp->b_flags & B_FAILFAST) == 0) {
				/* Not failfast: remember it as predecessor */
				prev_waitq_bp = bp;
				continue;
			}

			/* Unlink bp from the wait queue */
			if (bp == un->un_waitq_headp) {
				un->un_waitq_headp = next_waitq_bp;
				if (un->un_waitq_headp == NULL) {
					un->un_waitq_tailp = NULL;
				}
			} else {
				ASSERT(un->un_waitq_headp != NULL);
				ASSERT(prev_waitq_bp != NULL);
				ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
				    == 0);
				if (bp == un->un_waitq_tailp) {
					ASSERT(next_waitq_bp == NULL);
					un->un_waitq_tailp = prev_waitq_bp;
				}
				prev_waitq_bp->av_forw = next_waitq_bp;
			}
			bp->av_forw = NULL;

			SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);

			/* Append bp to the tail of the failfast queue */
			if (un->un_failfast_headp == NULL) {
				ASSERT(un->un_failfast_tailp == NULL);
				un->un_failfast_headp =
				    un->un_failfast_tailp = bp;
			} else {
				ASSERT(un->un_failfast_tailp != NULL);
				ASSERT(un->un_failfast_tailp->b_flags &
				    B_FAILFAST);
				un->un_failfast_tailp->av_forw = bp;
				un->un_failfast_tailp = bp;
			}
		}
	}

	/* Drain the failfast queue, failing each buf with EIO */
	while ((bp = un->un_failfast_headp) != NULL) {
		un->un_failfast_headp = bp->av_forw;
		if (un->un_failfast_headp == NULL) {
			un->un_failfast_tailp = NULL;
		}
		sd_return_failed_command_no_restart(un, bp, EIO);
	}

	/* Optionally flush the queues of the layered drivers above us */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
		ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
	}

	SD_TRACE(SD_LOG_IO_FAILFAST, un,
	    "sd_failfast_flushq: exit: un:0x%p\n", un);
}
/*
 * Callback for ddi_xbuf_flushq(): decide whether a given buf should be
 * flushed.  Flush everything in flush-all mode, otherwise only bufs
 * marked B_FAILFAST.
 */
static int
sd_failfast_flushq_callback(struct buf *bp)
{
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
		return (TRUE);
	}
	if (bp->b_flags & B_FAILFAST) {
		return (TRUE);
	}
	return (FALSE);
}
/*
 * Set up the next DMA window of a partial transfer: the previous window
 * completed (pkt_resid == 0) but xb_dma_resid bytes of the original buf
 * remain.  Computes the restart block from what has already moved and
 * rebuilds the packet for the remainder.
 *
 * Returns 1 if the packet was set up and should be (re)started, 0 on
 * allocation failure (the buf is marked B_ERROR with full residual).
 */
static int
sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
    struct scsi_pkt *pkt, struct sd_xbuf *xp)
{
	ssize_t num_blks_not_xfered;
	daddr_t strt_blk_num;
	ssize_t bytes_not_xfered;
	int rval;

	ASSERT(pkt->pkt_resid == 0);

	/* Remaining byte count carried over from the previous window */
	bytes_not_xfered = xp->xb_dma_resid;

	num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);

	/* Restart block = original start + blocks already transferred */
	strt_blk_num = xp->xb_blkno +
	    SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);

	rval = sd_setup_next_rw_pkt(un, pkt, bp,
	    strt_blk_num, num_blks_not_xfered);

	if (rval == 0) {
		/* Success: remember any residual for the window after this */
		xp->xb_dma_resid = pkt->pkt_resid;
		pkt->pkt_resid = 0;
		return (1);
	}

	/* Only resource exhaustion is expected here */
	ASSERT(rval == SD_PKT_ALLOC_FAILURE);

	bp->b_resid = bp->b_bcount;
	bp->b_flags |= B_ERROR;

	scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
	    "Error setting up next portion of DMA transfer\n");

	return (0);
}
/* Room for the fixed "Reservation Conflict\nDisk: " text in panic_str */
#define	SD_RESV_CONFLICT_FMT_LEN 40
/*
 * Panic the system with a message naming the device whose SCSI
 * reservation conflict cannot be resolved.  Does not return.
 */
void
sd_panic_for_res_conflict(struct sd_lun *un)
{
	char panic_str[SD_RESV_CONFLICT_FMT_LEN + MAXPATHLEN];
	char path_str[MAXPATHLEN];

	(void) snprintf(panic_str, sizeof (panic_str),
	    "Reservation Conflict\nDisk: %s",
	    ddi_pathname(SD_DEVINFO(un), path_str));

	panic(panic_str);
}
#ifdef SD_FAULT_INJECTION
static uint_t sd_fault_injection_on = 0;
/*
 * Handle the SDIOC* fault-injection ioctls for a unit.
 *
 * The fault descriptors (pkt/xb/un/arq overlays) live in per-unit
 * circular fifos of SD_FI_MAX_ERROR slots; SDIOCINSERT* fills the slot
 * at sd_fi_fifo_end, SDIOCPUSH advances the end index to arm them, and
 * sd_faultinjection() consumes from sd_fi_fifo_start.  SDIOCRUN enables
 * injection globally; SDIOCSTART/SDIOCSTOP reset or tear down the
 * fifos.  SD_MUTEX is held across the whole operation; the injection
 * log is additionally protected by un_fi_mutex.
 */
static void
sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
{
	uint_t i = 0;
	uint_t rval;

	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");

	mutex_enter(SD_MUTEX(un));

	switch (cmd) {
	case SDIOCRUN:
		/* Arm injection: sd_faultinjection() starts consuming */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Run\n");

		sd_fault_injection_on = 1;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: run finished\n");
		break;

	case SDIOCSTART:
		/* Reset fifo indices/slots and clear the injection log */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Start\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0xFFFFFFFF;
		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		mutex_enter(&(un->un_fi_mutex));
		un->sd_fi_log[0] = '\0';
		un->sd_fi_buf_len = 0;
		mutex_exit(&(un->un_fi_mutex));

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: start finished\n");
		break;

	case SDIOCSTOP:
		/* Disarm injection and free every queued descriptor */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Stop\n");

		sd_fault_injection_on = 0;
		un->sd_injection_mask = 0x0;

		for (i = 0; i < SD_FI_MAX_ERROR; i++) {
			if (un->sd_fi_fifo_pkt[i] != NULL) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
			}
			if (un->sd_fi_fifo_xb[i] != NULL) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
			}
			if (un->sd_fi_fifo_un[i] != NULL) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
			}
			if (un->sd_fi_fifo_arq[i] != NULL) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
			}
			un->sd_fi_fifo_pkt[i] = NULL;
			un->sd_fi_fifo_un[i] = NULL;
			un->sd_fi_fifo_xb[i] = NULL;
			un->sd_fi_fifo_arq[i] = NULL;
		}
		un->sd_fi_fifo_start = 0;
		un->sd_fi_fifo_end = 0;

		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: stop finished\n");
		break;

	case SDIOCINSERTPKT:
		/* Copy in a scsi_pkt overlay into the current end slot */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		/* Replace any descriptor already in the slot */
		if (un->sd_fi_fifo_pkt[i] != NULL) {
			kmem_free(un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt));
		}
		if (arg != (uintptr_t)NULL) {
			un->sd_fi_fifo_pkt[i] =
			    kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
			if (un->sd_fi_fifo_pkt[i] == NULL) {
				/* Allocation failed: leave the slot empty */
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
			    sizeof (struct sd_fi_pkt), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_pkt[i],
				    sizeof (struct sd_fi_pkt));
				un->sd_fi_fifo_pkt[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: pkt null\n");
		}
		break;

	case SDIOCINSERTXB:
		/* Copy in an sd_xbuf overlay into the current end slot */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_xb[i] != NULL) {
			kmem_free(un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb));
			un->sd_fi_fifo_xb[i] = NULL;
		}
		if (arg != (uintptr_t)NULL) {
			un->sd_fi_fifo_xb[i] =
			    kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
			if (un->sd_fi_fifo_xb[i] == NULL) {
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
			    sizeof (struct sd_fi_xb), 0);

			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_xb[i],
				    sizeof (struct sd_fi_xb));
				un->sd_fi_fifo_xb[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: xb null\n");
		}
		break;

	case SDIOCINSERTUN:
		/* Copy in an sd_lun overlay into the current end slot */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");

		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_un[i] != NULL) {
			kmem_free(un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un));
			un->sd_fi_fifo_un[i] = NULL;
		}
		if (arg != (uintptr_t)NULL) {
			un->sd_fi_fifo_un[i] =
			    kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
			if (un->sd_fi_fifo_un[i] == NULL) {
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
			    sizeof (struct sd_fi_un), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_un[i],
				    sizeof (struct sd_fi_un));
				un->sd_fi_fifo_un[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: un null\n");
		}
		break;

	case SDIOCINSERTARQ:
		/* Copy in an auto-request-sense overlay into the end slot */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
		i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;

		sd_fault_injection_on = 0;

		if (un->sd_fi_fifo_arq[i] != NULL) {
			kmem_free(un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq));
			un->sd_fi_fifo_arq[i] = NULL;
		}
		if (arg != (uintptr_t)NULL) {
			un->sd_fi_fifo_arq[i] =
			    kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
			if (un->sd_fi_fifo_arq[i] == NULL) {
				break;
			}
			rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
			    sizeof (struct sd_fi_arq), 0);
			if (rval == -1) {
				kmem_free(un->sd_fi_fifo_arq[i],
				    sizeof (struct sd_fi_arq));
				un->sd_fi_fifo_arq[i] = NULL;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: arq null\n");
		}
		break;

	case SDIOCPUSH:
		/*
		 * Advance the fifo end index by the user-supplied count
		 * (or by one if no argument), arming the queued faults.
		 */
		sd_fault_injection_on = 0;

		if (arg != (uintptr_t)NULL) {
			rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
			if (rval != -1 &&
			    un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end += i;
			}
		} else {
			SD_INFO(SD_LOG_IOERR, un,
			    "sd_faultinjection_ioctl: push arg null\n");
			if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
				un->sd_fi_fifo_end++;
			}
		}
		SD_INFO(SD_LOG_IOERR, un,
		    "sd_faultinjection_ioctl: push to end=%d\n",
		    un->sd_fi_fifo_end);
		break;

	case SDIOCRETRIEVE:
		/* Copy the accumulated injection log out to user space */
		SD_INFO(SD_LOG_SDTEST, un,
		    "sd_faultinjection_ioctl: Injecting Fault Retreive");

		sd_fault_injection_on = 0;

		mutex_enter(&(un->un_fi_mutex));
		rval = ddi_copyout(un->sd_fi_log, (void *)arg,
		    un->sd_fi_buf_len+1, 0);
		mutex_exit(&(un->un_fi_mutex));

		if (rval == -1) {
			/*
			 * arg is a local copy; clearing it only flags the
			 * failed copyout, the caller's pointer is untouched.
			 */
			arg = (uintptr_t)NULL;
		}
		break;
	}

	mutex_exit(SD_MUTEX(un));
	SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n");
}
/*
 * Append up to 255 bytes of buf to the unit's fault-injection log,
 * keeping it NUL-terminated.  Silently drops the message if it would
 * not fit in the SD_FI_MAX_BUF log buffer.
 */
static void
sd_injection_log(char *buf, struct sd_lun *un)
{
	uint_t len;

	ASSERT(un != NULL);
	ASSERT(buf != NULL);

	mutex_enter(&(un->un_fi_mutex));

	len = min(strlen(buf), 255);
	if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
		/* Append at the current end of the log text */
		bcopy(buf, (char *)un->sd_fi_log +
		    strlen((char *)un->sd_fi_log), len);
		un->sd_fi_buf_len += len;
		un->sd_fi_log[un->sd_fi_buf_len] = '\0';
	}

	mutex_exit(&(un->un_fi_mutex));
}
/*
 * Apply the fault descriptors queued at sd_fi_fifo_start to a completing
 * command.  Invoked from the interrupt path with the packet in hand; the
 * SD_CONDSET macro conditionally overwrites each field of the packet,
 * xbuf, unit, or ARQ status from the corresponding descriptor.  The
 * consumed fifo slot is freed and sd_fi_fifo_start advanced.
 */
static void
sd_faultinjection(struct scsi_pkt *pktp)
{
	uint_t i;
	struct sd_fi_pkt *fi_pkt;
	struct sd_fi_xb *fi_xb;
	struct sd_fi_un *fi_un;
	struct sd_fi_arq *fi_arq;
	struct buf *bp;
	struct sd_xbuf *xb;
	struct sd_lun *un;

	ASSERT(pktp != NULL);

	/* Recover the buf/xbuf/unit this packet belongs to */
	bp = (struct buf *)pktp->pkt_private;
	xb = SD_GET_XBUF(bp);
	un = SD_GET_UN(bp);

	ASSERT(un != NULL);

	mutex_enter(SD_MUTEX(un));

	SD_TRACE(SD_LOG_SDTEST, un,
	    "sd_faultinjection: entry Injection from sdintr\n");

	/* Nothing to do if injection is off or the fifo is empty */
	if (sd_fault_injection_on == 0 ||
	    un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	SD_INFO(SD_LOG_SDTEST, un,
	    "sd_faultinjection: is working for copying\n");

	i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;

	fi_pkt = un->sd_fi_fifo_pkt[i];
	fi_xb = un->sd_fi_fifo_xb[i];
	fi_un = un->sd_fi_fifo_un[i];
	fi_arq = un->sd_fi_fifo_arq[i];

	if (fi_pkt != NULL) {
		/* Overwrite selected scsi_pkt fields */
		SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
		SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
		if (fi_pkt->pkt_cdbp != 0xff)
			SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
		SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
		SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
		SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");
	}

	if (fi_xb != NULL) {
		/* Overwrite selected sd_xbuf fields and the sense data */
		SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
		SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
		if (fi_xb->xb_retry_count != 0)
			SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
		SD_CONDSET(xb, xb, xb_victim_retry_count,
		    "xb_victim_retry_count");
		SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
		SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
		SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");

		bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH);

		/* Patch the extended-sense fields within the sense data */
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_code, "es_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_key, "es_key");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_add_code, "es_add_code");
		SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
		    xb, es_qual_code, "es_qual_code");
		struct scsi_extended_sense *esp;
		esp = (struct scsi_extended_sense *)xb->xb_sense_data;
		esp->es_class = CLASS_EXTENDED_SENSE;
	}

	if (fi_un != NULL) {
		/* Overwrite selected unit fields */
		SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
		SD_CONDSET(un, un, un_ctype, "un_ctype");
		SD_CONDSET(un, un, un_reset_retry_count,
		    "un_reset_retry_count");
		SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
		SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
		SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
		SD_CONDSET(un, un, un_f_allow_bus_device_reset,
		    "un_f_allow_bus_device_reset");
		SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");
	}

	if (fi_arq != NULL) {
		/* Replace the auto-request-sense status block wholesale */
		bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
	}

	/* Free the consumed fifo slot and advance the start index */
	if (un->sd_fi_fifo_pkt[i] != NULL) {
		kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
	}
	if (un->sd_fi_fifo_xb[i] != NULL) {
		kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
	}
	if (un->sd_fi_fifo_un[i] != NULL) {
		kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
	}
	if (un->sd_fi_fifo_arq[i] != NULL) {
		kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
	}

	un->sd_fi_fifo_pkt[i] = NULL;
	un->sd_fi_fifo_un[i] = NULL;
	un->sd_fi_fifo_xb[i] = NULL;
	un->sd_fi_fifo_arq[i] = NULL;

	un->sd_fi_fifo_start++;

	mutex_exit(SD_MUTEX(un));

	SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
}
#endif
/*
 * Initialize the per-unit feature flags at attach time, based on the
 * INQUIRY data (removable vs. fixed media), the "pm-capable" and
 * "enable-partition-kstats" properties, and hot-pluggability.
 */
static void
sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
{
	int pm_cap;

	ASSERT(un->un_sd);
	ASSERT(un->un_sd->sd_inq);

	/* Assume SYNCHRONIZE CACHE works until proven otherwise */
	un->un_f_sync_cache_supported = TRUE;
	un->un_f_sync_cache_required = FALSE;

	if (un->un_sd->sd_inq->inq_rmb) {
		/*
		 * Removable media: enable media monitoring, door lock,
		 * eject, start/stop checking, and power management.
		 */
		un->un_f_has_removable_media = TRUE;
		un->un_f_non_devbsize_supported = TRUE;
		un->un_f_doorlock_supported = TRUE;
		un->un_f_chk_wp_open = TRUE;
		un->un_f_monitor_media_state = TRUE;
		un->un_f_check_start_stop = TRUE;
		un->un_f_eject_media_supported = TRUE;
		un->un_f_pm_supported = TRUE;

		/* Advertise removability to the rest of the system */
		(void) ddi_prop_create(DDI_DEV_T_NONE, devi,
		    DDI_PROP_CANSLEEP, "removable-media", NULL, 0);
	} else {
		/* Fixed media: devid, spin-up at attach, etc. */
		un->un_f_devid_supported = TRUE;
		un->un_f_attach_spinup = TRUE;

		if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
			un->un_f_descr_format_supported = TRUE;
		}

		/* Per-partition kstats, on by default */
		un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
		    SD_DEVINFO(un), DDI_PROP_DONTPASS,
		    "enable-partition-kstats", 1));

		pm_cap = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
		    DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);

		if (SD_PM_CAPABLE_IS_UNDEFINED(pm_cap)) {
			/* No property: rely on log sense / SPC-4 probing */
			un->un_f_log_sense_supported = TRUE;
			if (!un->un_f_power_condition_disabled &&
			    SD_INQUIRY(un)->inq_ansi == 6) {
				un->un_f_power_condition_supported = TRUE;
			}
		} else {
			if (SD_PM_CAPABLE_IS_FALSE(pm_cap)) {
				un->un_f_log_sense_supported = FALSE;
			} else {
				/* Property says the device is pm-capable */
				un->un_f_pm_supported = TRUE;

				if (!un->un_f_power_condition_disabled &&
				    SD_PM_CAPABLE_IS_SPC_4(pm_cap)) {
					un->un_f_power_condition_supported =
					    TRUE;
				}
				if (SD_PM_CAP_LOG_SUPPORTED(pm_cap)) {
					un->un_f_log_sense_supported = TRUE;
					un->un_f_pm_log_sense_smart =
					    SD_PM_CAP_SMART_LOG(pm_cap);
				}
			}

			SD_INFO(SD_LOG_ATTACH_DETACH, un,
			    "sd_unit_attach: un:0x%p pm-capable "
			    "property set to %d.\n", un, un->un_f_pm_supported);
		}
	}

	if (un->un_f_is_hotpluggable) {
		/* Hotpluggable devices also need media-state monitoring */
		un->un_f_monitor_media_state = TRUE;
		un->un_f_check_start_stop = TRUE;
	}
}
/*
 * cmlb target-glue read/write entry point: transfer reqlength bytes of
 * label data at system-block address start_block.
 *
 * When the target block size differs from DEV_BSIZE (NOT_DEVBSIZE), the
 * request is rounded out to whole target blocks via a bounce buffer
 * (dkl) and, for writes, performed as read-modify-write.  tg_cookie
 * carries the SD_PATH_* flag.  Returns 0 or an errno.
 */
static int
sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
{
	struct sd_lun *un;
	int path_flag = (int)(uintptr_t)tg_cookie;
	char *dkl = NULL;
	diskaddr_t real_addr = start_block;
	diskaddr_t first_byte, end_block;

	size_t buffer_size = reqlength;
	int rval = 0;
	diskaddr_t cap;
	uint32_t lbasize;
	sd_ssc_t *ssc;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	if (cmd != TG_READ && cmd != TG_WRITE)
		return (EINVAL);

	ssc = sd_ssc_init(un);
	mutex_enter(SD_MUTEX(un));
	if (un->un_f_tgt_blocksize_is_valid == FALSE) {
		/* Learn the target geometry before any conversion */
		mutex_exit(SD_MUTEX(un));
		rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
		    &lbasize, path_flag);
		if (rval != 0)
			goto done1;
		mutex_enter(SD_MUTEX(un));
		sd_update_block_info(un, lbasize, cap);
		if ((un->un_f_tgt_blocksize_is_valid == FALSE)) {
			/* Still invalid: skip the ereport assessment path */
			mutex_exit(SD_MUTEX(un));
			rval = EIO;
			goto done;
		}
	}

	if (NOT_DEVBSIZE(un)) {
		/*
		 * Convert the system-block request into whole target
		 * blocks; allocate a bounce buffer if it is unaligned.
		 */
		first_byte = SD_SYSBLOCKS2BYTES(start_block);
		real_addr = first_byte / un->un_tgt_blocksize;

		end_block = (first_byte + reqlength +
		    un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;

		/* round up buffer size to multiple of target block size */
		buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;

		SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
		    "label_addr: 0x%x allocation size: 0x%x\n",
		    real_addr, buffer_size);

		if (((first_byte % un->un_tgt_blocksize) != 0) ||
		    (reqlength % un->un_tgt_blocksize) != 0)
			dkl = kmem_zalloc(buffer_size, KM_SLEEP);
	}

	/*
	 * Reads near the end of a CD get priority pathing (presumably to
	 * dodge failover delays while reading the backup label -
	 * NOTE(review): inferred from the DIRECT_PRIORITY flag, confirm).
	 */
	if (ISCD(un) && (cmd == TG_READ) &&
	    (un->un_f_blockcount_is_valid == TRUE) &&
	    ((start_block == (un->un_blockcount - 1)) ||
	    (start_block == (un->un_blockcount - 2)))) {
		path_flag = SD_PATH_DIRECT_PRIORITY;
	}

	mutex_exit(SD_MUTEX(un));
	if (cmd == TG_READ) {
		rval = sd_send_scsi_READ(ssc, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
		if (dkl != NULL)
			bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), bufaddr, reqlength);
	} else {
		if (dkl) {
			/* Unaligned write: read-modify-write via dkl */
			rval = sd_send_scsi_READ(ssc, dkl, buffer_size,
			    real_addr, path_flag);
			if (rval) {
				goto done1;
			}
			bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
			    real_addr), reqlength);
		}
		rval = sd_send_scsi_WRITE(ssc, (dkl != NULL) ? dkl : bufaddr,
		    buffer_size, real_addr, path_flag);
	}

done1:
	if (dkl != NULL)
		kmem_free(dkl, buffer_size);

	if (rval != 0) {
		if (rval == EIO)
			sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
		else
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
done:
	sd_ssc_fini(ssc);
	return (rval);
}
/*
 * cmlb target-glue info entry point: report geometry, capacity, block
 * size, or media attributes for the unit.  Capacity and block size are
 * read from the cached un fields when valid, otherwise refreshed with
 * READ CAPACITY.  tg_cookie carries the SD_PATH_* flag.
 * Returns 0, an errno, or ENOTTY for unknown commands.
 */
static int
sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
{
	struct sd_lun *un;
	diskaddr_t cap;
	uint32_t lbasize;
	int path_flag = (int)(uintptr_t)tg_cookie;
	int ret = 0;

	un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
	if (un == NULL)
		return (ENXIO);

	switch (cmd) {
	case TG_GETPHYGEOM:
	case TG_GETVIRTGEOM:
	case TG_GETCAPACITY:
	case TG_GETBLOCKSIZE:
		mutex_enter(SD_MUTEX(un));

		if ((un->un_f_blockcount_is_valid == TRUE) &&
		    (un->un_f_tgt_blocksize_is_valid == TRUE)) {
			/* Cached values are good: use them directly */
			cap = un->un_blockcount;
			lbasize = un->un_tgt_blocksize;
			mutex_exit(SD_MUTEX(un));
		} else {
			/* Refresh capacity/blocksize from the device */
			sd_ssc_t *ssc;
			mutex_exit(SD_MUTEX(un));
			ssc = sd_ssc_init(un);
			ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
			    &lbasize, path_flag);
			if (ret != 0) {
				if (ret == EIO)
					sd_ssc_assessment(ssc,
					    SD_FMT_STATUS_CHECK);
				else
					sd_ssc_assessment(ssc,
					    SD_FMT_IGNORE);
				sd_ssc_fini(ssc);
				return (ret);
			}
			sd_ssc_fini(ssc);
			mutex_enter(SD_MUTEX(un));
			sd_update_block_info(un, lbasize, cap);
			if ((un->un_f_blockcount_is_valid == FALSE) ||
			    (un->un_f_tgt_blocksize_is_valid == FALSE)) {
				mutex_exit(SD_MUTEX(un));
				return (EIO);
			}
			mutex_exit(SD_MUTEX(un));
		}

		if (cmd == TG_GETCAPACITY) {
			*(diskaddr_t *)arg = cap;
			return (0);
		}

		if (cmd == TG_GETBLOCKSIZE) {
			*(uint32_t *)arg = lbasize;
			return (0);
		}

		if (cmd == TG_GETPHYGEOM)
			ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
			    cap, lbasize, path_flag);
		else
			/* TG_GETVIRTGEOM */
			ret = sd_get_virtual_geometry(un,
			    (cmlb_geom_t *)arg, cap, lbasize);

		return (ret);

	case TG_GETATTR:
		mutex_enter(SD_MUTEX(un));
		((tg_attribute_t *)arg)->media_is_writable =
		    un->un_f_mmc_writable_media;
		((tg_attribute_t *)arg)->media_is_solid_state =
		    un->un_f_is_solid_state;
		((tg_attribute_t *)arg)->media_is_rotational =
		    un->un_f_is_rotational;
		mutex_exit(SD_MUTEX(un));
		return (0);
	default:
		return (ENOTTY);

	}
}
/* Expand to a devid name/value pair, substituting "unknown" for NULL */
#define	DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ? (d) : "unknown"

/*
 * Post an FMA ereport describing the outcome of the command captured in
 * ssc, classified by the driver's assessment (drv_assess).
 *
 * The ereport class is chosen from the captured state:
 *   cmd.disk.recovered  - driver recovered the command
 *   cmd.disk.dev.uderr  - the captured data itself was invalid/undecodable
 *   cmd.disk.tran       - transport-level failure (pkt_reason != CMD_CMPLT)
 *   cmd.disk.dev.rqs.merr / cmd.disk.dev.rqs.derr - sense data available
 *                         (medium error vs. other device error)
 *   cmd.disk.dev.serr   - bad SCSI status with no sense data
 * Nothing is posted while panicking, suspended, or dumping.
 */
static void
sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess)
{
	int uscsi_path_instance = 0;
	uchar_t	uscsi_pkt_reason;
	uint32_t uscsi_pkt_state;
	uint32_t uscsi_pkt_statistics;
	uint64_t uscsi_ena;
	uchar_t op_code;
	uint8_t *sensep;
	union scsi_cdb *cdbp;
	uint_t cdblen = 0;
	uint_t senlen = 0;
	struct sd_lun *un;
	dev_info_t *dip;
	char *devid;
	int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON |
	    SSC_FLAGS_INVALID_STATUS |
	    SSC_FLAGS_INVALID_SENSE |
	    SSC_FLAGS_INVALID_DATA;
	char assessment[16];

	ASSERT(ssc != NULL);
	ASSERT(ssc->ssc_uscsi_cmd != NULL);
	ASSERT(ssc->ssc_uscsi_info != NULL);

	un = ssc->ssc_un;
	ASSERT(un != NULL);

	dip = un->un_sd->sd_dev;

	/* devid may be NULL; DEVID_IF_KNOWN handles that case */
	devid = DEVI(dip)->devi_devid_str;

	/* Never post ereports from panic/suspend/dump context */
	if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
	    (un->un_state == SD_STATE_DUMPING))
		return;

	uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason;
	uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance;
	uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state;
	uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics;
	uscsi_ena = ssc->ssc_uscsi_info->ui_ena;

	sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
	cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb;
	if (cdbp == NULL) {
		scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
		    "sd_ssc_ereport_post meet empty cdb\n");
		return;
	}

	op_code = cdbp->scc_cmd;

	cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen;
	/* Valid sense length = requested length minus residual */
	senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
	    ssc->ssc_uscsi_cmd->uscsi_rqresid);

	if (senlen > 0)
		ASSERT(sensep != NULL);

	/* Map the driver assessment onto the ereport string */
	switch (drv_assess) {
	case SD_FM_DRV_RECOVERY:
		(void) sprintf(assessment, "%s", "recovered");
		break;
	case SD_FM_DRV_RETRY:
		(void) sprintf(assessment, "%s", "retry");
		break;
	case SD_FM_DRV_NOTICE:
		(void) sprintf(assessment, "%s", "info");
		break;
	case SD_FM_DRV_FATAL:
	default:
		(void) sprintf(assessment, "%s", "unknown");
	}

	if (drv_assess == SD_FM_DRV_RECOVERY) {
		/* Command was recovered: post and return immediately */
		scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
		    "cmd.disk.recovered", uscsi_ena, devid, NULL,
		    DDI_NOSLEEP, NULL,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
		    DEVID_IF_KNOWN(devid),
		    "driver-assessment", DATA_TYPE_STRING, assessment,
		    "op-code", DATA_TYPE_UINT8, op_code,
		    "cdb", DATA_TYPE_UINT8_ARRAY,
		    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
		    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
		    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
		    "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
		    NULL);
		return;
	}

	if (ssc->ssc_flags & ssc_invalid_flags) {
		/* Captured data is suspect: post an undecodable-error class */
		if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) {
			/* Include the (invalid) sense bytes for analysis */
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    NULL, "cmd.disk.dev.uderr", uscsi_ena, devid,
			    NULL, DDI_NOSLEEP, NULL,
			    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    DEVID_IF_KNOWN(devid),
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb", DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats", DATA_TYPE_UINT32,
			    uscsi_pkt_statistics,
			    "stat-code", DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    "un-decode-info", DATA_TYPE_STRING,
			    ssc->ssc_info,
			    "un-decode-value", DATA_TYPE_UINT8_ARRAY,
			    senlen, sensep,
			    NULL);
		} else {
			/* No sense data to attach: empty un-decode-value */
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    NULL,
			    "cmd.disk.dev.uderr", uscsi_ena, devid,
			    NULL, DDI_NOSLEEP, NULL,
			    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    DEVID_IF_KNOWN(devid),
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ?
			    "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb", DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
			    "pkt-stats", DATA_TYPE_UINT32,
			    uscsi_pkt_statistics,
			    "stat-code", DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    "un-decode-info", DATA_TYPE_STRING,
			    ssc->ssc_info,
			    "un-decode-value", DATA_TYPE_UINT8_ARRAY,
			    0, NULL,
			    NULL);
		}
		/* Invalid flags are one-shot: clear them after posting */
		ssc->ssc_flags &= ~ssc_invalid_flags;
		return;
	}

	if (uscsi_pkt_reason != CMD_CMPLT ||
	    (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) {
		/* Transport-level failure */
		if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)
			ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT;
		scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
		    "cmd.disk.tran", uscsi_ena, NULL, NULL, DDI_NOSLEEP, NULL,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
		    DEVID_IF_KNOWN(devid),
		    "driver-assessment", DATA_TYPE_STRING,
		    drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
		    "op-code", DATA_TYPE_UINT8, op_code,
		    "cdb", DATA_TYPE_UINT8_ARRAY,
		    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
		    "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
		    "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state,
		    "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
		    NULL);
	} else {
		if (senlen > 0) {
			/* Device returned sense data: classify by sense key */
			uint8_t sense_key = scsi_sense_key(sensep);
			uint8_t sense_asc = scsi_sense_asc(sensep);
			uint8_t sense_ascq = scsi_sense_ascq(sensep);

			/* Recovered 00/1d (mis-compare verify): suppress */
			if (sense_key == KEY_RECOVERABLE_ERROR &&
			    sense_asc == 0x00 && sense_ascq == 0x1d)
				return;

			if (sense_key == KEY_MEDIUM_ERROR) {
				/* Medium error: include lba in the report */
				scsi_fm_ereport_post(un->un_sd,
				    uscsi_path_instance, NULL,
				    "cmd.disk.dev.rqs.merr",
				    uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL,
				    FM_VERSION, DATA_TYPE_UINT8,
				    FM_EREPORT_VERS0,
				    DEVID_IF_KNOWN(devid),
				    "driver-assessment",
				    DATA_TYPE_STRING,
				    drv_assess == SD_FM_DRV_FATAL ?
				    "fatal" : assessment,
				    "op-code",
				    DATA_TYPE_UINT8, op_code,
				    "cdb",
				    DATA_TYPE_UINT8_ARRAY, cdblen,
				    ssc->ssc_uscsi_cmd->uscsi_cdb,
				    "pkt-reason",
				    DATA_TYPE_UINT8, uscsi_pkt_reason,
				    "pkt-state",
				    DATA_TYPE_UINT8, uscsi_pkt_state,
				    "pkt-stats",
				    DATA_TYPE_UINT32,
				    uscsi_pkt_statistics,
				    "stat-code",
				    DATA_TYPE_UINT8,
				    ssc->ssc_uscsi_cmd->uscsi_status,
				    "key",
				    DATA_TYPE_UINT8,
				    scsi_sense_key(sensep),
				    "asc",
				    DATA_TYPE_UINT8,
				    scsi_sense_asc(sensep),
				    "ascq",
				    DATA_TYPE_UINT8,
				    scsi_sense_ascq(sensep),
				    "sense-data",
				    DATA_TYPE_UINT8_ARRAY,
				    senlen, sensep,
				    "lba",
				    DATA_TYPE_UINT64,
				    ssc->ssc_uscsi_info->ui_lba,
				    NULL);
			} else {
				/* Other device error with sense data */
				scsi_fm_ereport_post(un->un_sd,
				    uscsi_path_instance, NULL,
				    "cmd.disk.dev.rqs.derr",
				    uscsi_ena, devid,
				    NULL, DDI_NOSLEEP, NULL,
				    FM_VERSION,
				    DATA_TYPE_UINT8, FM_EREPORT_VERS0,
				    DEVID_IF_KNOWN(devid),
				    "driver-assessment",
				    DATA_TYPE_STRING,
				    drv_assess == SD_FM_DRV_FATAL ?
				    (sense_key == 0x4 ?
				    "fatal" : "fail") : assessment,
				    "op-code",
				    DATA_TYPE_UINT8, op_code,
				    "cdb",
				    DATA_TYPE_UINT8_ARRAY, cdblen,
				    ssc->ssc_uscsi_cmd->uscsi_cdb,
				    "pkt-reason",
				    DATA_TYPE_UINT8, uscsi_pkt_reason,
				    "pkt-state",
				    DATA_TYPE_UINT8, uscsi_pkt_state,
				    "pkt-stats",
				    DATA_TYPE_UINT32,
				    uscsi_pkt_statistics,
				    "stat-code",
				    DATA_TYPE_UINT8,
				    ssc->ssc_uscsi_cmd->uscsi_status,
				    "key",
				    DATA_TYPE_UINT8,
				    scsi_sense_key(sensep),
				    "asc",
				    DATA_TYPE_UINT8,
				    scsi_sense_asc(sensep),
				    "ascq",
				    DATA_TYPE_UINT8,
				    scsi_sense_ascq(sensep),
				    "sense-data",
				    DATA_TYPE_UINT8_ARRAY,
				    senlen, sensep,
				    NULL);
			}
		} else {
			/* No sense data: nothing to report on good status */
			if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD)
				return;

			/* Bad SCSI status without sense data */
			scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
			    NULL,
			    "cmd.disk.dev.serr", uscsi_ena,
			    devid, NULL, DDI_NOSLEEP, NULL,
			    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
			    DEVID_IF_KNOWN(devid),
			    "driver-assessment", DATA_TYPE_STRING,
			    drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
			    "op-code", DATA_TYPE_UINT8, op_code,
			    "cdb",
			    DATA_TYPE_UINT8_ARRAY,
			    cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
			    "pkt-reason",
			    DATA_TYPE_UINT8, uscsi_pkt_reason,
			    "pkt-state",
			    DATA_TYPE_UINT8, uscsi_pkt_state,
			    "pkt-stats",
			    DATA_TYPE_UINT32, uscsi_pkt_statistics,
			    "stat-code",
			    DATA_TYPE_UINT8,
			    ssc->ssc_uscsi_cmd->uscsi_status,
			    NULL);
		}
	}
}
/*
 * Capture the state of a completed command (cdb, sense data, status,
 * packet reason/state/statistics, lba) from pktp/bp/xp into ssc, so
 * sd_ssc_ereport_post() can later build an ereport from it.  An ENA is
 * generated (once per xbuf) only when the command did not complete
 * cleanly.  Caller must hold SD_MUTEX.
 */
static void
sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp,
    struct buf *bp, struct sd_xbuf *xp)
{
	size_t senlen = 0;
	union scsi_cdb *cdbp;
	int path_instance;
	/*
	 * Need scsi_cdb_size array to determine the cdb length.
	 */
	extern uchar_t	scsi_cdb_size[];

	ASSERT(un != NULL);
	ASSERT(pktp != NULL);
	ASSERT(bp != NULL);
	ASSERT(xp != NULL);
	ASSERT(ssc != NULL);
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* Record the cdb and its group-derived length */
	cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
	ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)];
	ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp;

	/*
	 * Work out how much valid sense data is present: extended ARQ
	 * (MAX_SENSE_LENGTH), normal ARQ (SENSE_LENGTH), or manually
	 * transferred sense on a CHECK CONDITION.
	 */
	if ((xp->xb_sense_state & STATE_XARQ_DONE) ||
	    (xp->xb_sense_state & STATE_ARQ_DONE)) {
		if (xp->xb_sense_state & STATE_XARQ_DONE) {
			senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
		} else {
			senlen = SENSE_LENGTH;
		}
	} else {
		if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
		    (xp->xb_sense_state & STATE_XFERRED_DATA)) {
			senlen = SENSE_LENGTH - xp->xb_sense_resid;
		}
	}

	ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
	ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
	ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;
	ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);

	/* Path instance is only meaningful for correctly allocated pkts */
	path_instance = pktp->pkt_path_instance;
	if (scsi_pkt_allocated_correctly(pktp) && path_instance)
		ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
	else
		ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;

	ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
	ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
	ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
	ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);

	/* Clean completion with no sense data: no ENA needed */
	if ((pktp->pkt_reason == CMD_CMPLT) &&
	    (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) &&
	    (senlen == 0)) {
		return;
	}

	/* Generate an ENA once per xbuf for fault correlation */
	if (xp->xb_ena == 0)
		xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
	ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
}
/*
 * Query the Block Device Characteristics VPD page (0xB1) to determine
 * whether the device is rotational or solid state, setting
 * un_f_is_rotational / un_f_is_solid_state accordingly (defaults:
 * rotational TRUE, solid-state FALSE).  Per the page, a medium rotation
 * rate field of 0 means "not reported" and 1 means non-rotating; for
 * SSDs disksort is also disabled.  Skipped entirely for CD devices or
 * when the page is unsupported.
 */
static void
sd_check_bdc_vpd(sd_ssc_t *ssc)
{
	int rval = 0;
	uchar_t *inqb1 = NULL;
	size_t inqb1_len = MAX_INQUIRY_SIZE;
	size_t inqb1_resid = 0;
	struct sd_lun *un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));
	un->un_f_is_rotational = TRUE;
	un->un_f_is_solid_state = FALSE;

	if (ISCD(un)) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	if (sd_check_vpd_page_support(ssc) == 0 &&
	    un->un_vpd_page_mask & SD_VPD_DEV_CHARACTER_PG) {
		mutex_exit(SD_MUTEX(un));
		/* Fetch VPD page 0xB1 (EVPD bit set) */
		inqb1 = kmem_zalloc(inqb1_len, KM_SLEEP);
		rval = sd_send_scsi_INQUIRY(ssc, inqb1, inqb1_len,
		    0x01, 0xB1, &inqb1_resid);

		if (rval == 0 && (inqb1_len - inqb1_resid > 5)) {
			SD_TRACE(SD_LOG_COMMON, un,
			    "sd_check_bdc_vpd: \
			    successfully get VPD page: %x \
			    PAGE LENGTH: %x BYTE 4: %x \
			    BYTE 5: %x", inqb1[1], inqb1[3], inqb1[4],
			    inqb1[5]);

			mutex_enter(SD_MUTEX(un));
			/*
			 * Bytes 4/5 hold the MEDIUM ROTATION RATE:
			 * 0 = not reported, 1 = non-rotating (SSD).
			 */
			if (inqb1[4] == 0) {
				if (inqb1[5] == 0) {
					un->un_f_is_rotational = FALSE;
				} else if (inqb1[5] == 1) {
					un->un_f_is_rotational = FALSE;
					un->un_f_is_solid_state = TRUE;
					/* solid state drives don't need disksort */
					un->un_f_disksort_disabled = TRUE;
				}
			}
			mutex_exit(SD_MUTEX(un));
		} else if (rval != 0) {
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}

		kmem_free(inqb1, inqb1_len);
	} else {
		mutex_exit(SD_MUTEX(un));
	}
}
/*
 * Function: sd_check_emulation_mode
 *
 * Description: Determine the physical block size to use for the device.
 *     If READ CAPACITY (16) is supported, adopt the reported physical
 *     block size when it is sane; otherwise fall back to DEV_BSIZE.
 *     Devices listed in sd_flash_dev_table are forced to SSD_SECSIZE,
 *     and solid state devices whose physical size differs from the
 *     target block size get read-modify-write enabled.
 *
 *   Arguments: ssc - ssc contains a pointer to the soft state of the
 *                    target device.
 *
 *     Context: Called with SD_MUTEX(un) not held; may block on the
 *              READ CAPACITY (16) command.
 */
static void
sd_check_emulation_mode(sd_ssc_t *ssc)
{
	int		rv = 0;
	uint64_t	cap;
	uint_t		lbs;
	uint_t		pbs;
	int		idx;
	int		id_len;
	struct sd_lun	*un;

	ASSERT(ssc != NULL);
	un = ssc->ssc_un;
	ASSERT(un != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	mutex_enter(SD_MUTEX(un));

	/* Emulation handling does not apply to CD-ROM devices. */
	if (ISCD(un)) {
		mutex_exit(SD_MUTEX(un));
		return;
	}

	if (un->un_f_descr_format_supported) {
		/* Drop the mutex across the blocking SCSI command. */
		mutex_exit(SD_MUTEX(un));
		rv = sd_send_scsi_READ_CAPACITY_16(ssc, &cap, &lbs,
		    &pbs, SD_PATH_DIRECT);
		mutex_enter(SD_MUTEX(un));

		if (rv != 0) {
			un->un_phy_blocksize = DEV_BSIZE;
		} else if (pbs == 0 || !ISP2(pbs % DEV_BSIZE)) {
			/* Implausible physical size; use the default. */
			un->un_phy_blocksize = DEV_BSIZE;
		} else if (pbs > un->un_phy_blocksize) {
			/* Only ever grow the physical block size. */
			un->un_phy_blocksize = pbs;
		}
	}

	/* Known flash devices are forced to the fixed SSD sector size. */
	for (idx = 0; idx < sd_flash_dev_table_size; idx++) {
		id_len = (int)strlen(sd_flash_dev_table[idx]);
		if (sd_sdconf_id_match(un, sd_flash_dev_table[idx],
		    id_len) != SD_SUCCESS)
			continue;
		un->un_phy_blocksize = SSD_SECSIZE;
		/* Mismatched sizes on an SSD require read-modify-write. */
		if (un->un_f_is_solid_state &&
		    un->un_phy_blocksize != un->un_tgt_blocksize)
			un->un_f_enable_rmw = TRUE;
	}

	mutex_exit(SD_MUTEX(un));
}