#include <sys/scsi/scsi.h>
#include <sys/types.h>
#include <sys/varargs.h>
#include <sys/devctl.h>
#include <sys/thread.h>
#include <sys/thread.h>
#include <sys/open.h>
#include <sys/file.h>
#include <sys/sunndi.h>
#include <sys/console.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/utsname.h>
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/ndi_impldefs.h>
#include <sys/byteorder.h>
#include <sys/fs/dv_node.h>
#include <sys/ctype.h>
#include <sys/sunmdi.h>
#include <sys/fibre-channel/fc.h>
#include <sys/fibre-channel/impl/fc_ulpif.h>
#include <sys/fibre-channel/ulp/fcpvar.h>
static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
cred_t *credp, int *rval);
static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
fc_attach_cmd_t cmd, uint32_t s_id);
static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
fc_detach_cmd_t cmd);
static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
uint32_t claimed);
static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
fc_unsol_buf_t *buf, uint32_t claimed);
static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
fc_unsol_buf_t *buf, uint32_t claimed);
static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
uint32_t dev_cnt, uint32_t port_sid);
static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int fcp_scsi_reset(struct scsi_address *ap, int level);
static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
int whom);
static void fcp_pkt_teardown(struct scsi_pkt *pkt);
static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
void (*callback)(caddr_t), caddr_t arg);
static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
char *name, ddi_eventcookie_t *event_cookiep);
static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
ddi_eventcookie_t eventid, void (*callback)(), void *arg,
ddi_callback_id_t *cb_id);
static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
ddi_callback_id_t cb_id);
static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
ddi_eventcookie_t eventid, void *impldata);
static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
ddi_bus_config_op_t op, void *arg);
static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
int mode, int *rval);
static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
int mode, int *rval);
static int fcp_copyin_scsi_cmd(caddr_t base_addr,
struct fcp_scsi_cmd *fscsi, int mode);
static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
caddr_t base_addr, int mode);
static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
la_wwn_t *pwwn, int *ret_val, int *fc_status, int *fc_pkt_state,
int *fc_pkt_reason, int *fc_pkt_action);
static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
static int fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status,
int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
static void fcp_handle_devices(struct fcp_port *pptr,
fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
fcp_map_tag_t *map_tag, int cause);
static int fcp_handle_mapflags(struct fcp_port *pptr,
struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
int tgt_cnt, int cause);
static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause);
static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
int cause);
static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
uint32_t state);
static struct fcp_port *fcp_get_port(opaque_t port_handle);
static void fcp_unsol_callback(fc_packet_t *fpkt);
static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
uchar_t r_ctl, uchar_t type);
static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
int nodma, int flags);
static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
uchar_t *wwn);
static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
uint32_t d_id);
static void fcp_icmd_callback(fc_packet_t *fpkt);
static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
int len, int lcount, int tcount, int cause, uint32_t rscn_count);
static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
static void fcp_scsi_callback(fc_packet_t *fpkt);
static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
uint16_t lun_num);
static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
int link_cnt, int tgt_cnt, int cause);
static void fcp_finish_init(struct fcp_port *pptr);
static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
int tgt_cnt, int cause);
static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
int link_cnt, int tgt_cnt, int nowait, int flags);
static void fcp_offline_target_now(struct fcp_port *pptr,
struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
int tgt_cnt, int flags);
static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
int nowait, int flags);
static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
int tgt_cnt);
static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
int tgt_cnt, int flags);
static void fcp_scan_offline_luns(struct fcp_port *pptr);
static void fcp_scan_offline_tgts(struct fcp_port *pptr);
static void fcp_update_offline_flags(struct fcp_lun *plun);
static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
static void fcp_abort_commands(struct fcp_pkt *head, struct
fcp_port *pptr);
static void fcp_cmd_callback(fc_packet_t *fpkt);
static void fcp_complete_pkt(fc_packet_t *fpkt);
static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
struct fcp_port *pptr);
static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
static void fcp_dealloc_lun(struct fcp_lun *plun);
static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
fc_portmap_t *map_entry, int link_cnt);
static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
int internal);
static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
uint32_t s_id, int instance);
static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
int instance);
static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
int);
static void fcp_kmem_cache_destructor(struct scsi_pkt *, scsi_hba_tran_t *);
static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
int flags);
static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
static int fcp_reset_target(struct scsi_address *ap, int level);
static int fcp_commoncap(struct scsi_address *ap, char *cap,
int val, int tgtonly, int doset);
static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
int sleep);
static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
uint32_t s_id, fc_attach_cmd_t cmd, int instance);
static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
int lcount, int tcount);
static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
int tgt_cnt);
static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
dev_info_t *pdip, caddr_t name);
static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
int lcount, int tcount, int flags);
static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
int lcount, int tcount, int flags);
static void fcp_remove_child(struct fcp_lun *plun);
static void fcp_watch(void *arg);
static void fcp_check_reset_delay(struct fcp_port *pptr);
static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
struct fcp_lun *rlun, int tgt_cnt);
struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
uchar_t *wwn, uint16_t lun);
static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
struct fcp_lun *plun);
static void fcp_post_callback(struct fcp_pkt *cmd);
static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
static struct fcp_port *fcp_dip2port(dev_info_t *dip);
struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
child_info_t *cip);
static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
int tgt_cnt, int flags);
static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
int tgt_cnt, int flags, int wait);
static void fcp_retransport_cmd(struct fcp_port *pptr,
struct fcp_pkt *cmd);
static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
uint_t statistics);
static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
static void fcp_update_targets(struct fcp_port *pptr,
fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
static int fcp_call_finish_init(struct fcp_port *pptr,
struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
static int fcp_call_finish_init_held(struct fcp_port *pptr,
struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
static void fcp_reconfigure_luns(void * tgt_handle);
static void fcp_free_targets(struct fcp_port *pptr);
static void fcp_free_target(struct fcp_tgt *ptgt);
static int fcp_is_retryable(struct fcp_ipkt *icmd);
static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
static void fcp_print_error(fc_packet_t *fpkt);
static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
uint32_t *dev_cnt);
static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
struct fcp_ioctl *, struct fcp_port **);
static char *fcp_get_lun_path(struct fcp_lun *plun);
static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
int *rval);
static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
static char *fcp_get_lun_path(struct fcp_lun *plun);
static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
int *rval);
static void fcp_reconfig_wait(struct fcp_port *pptr);
static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
int tcount);
static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
dev_info_t *pdip);
static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
int what);
static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
fc_packet_t *fpkt);
static int fcp_symmetric_device_probe(struct fcp_lun *plun);
static void fcp_read_blacklist(dev_info_t *dip,
struct fcp_black_list_entry **pplun_blacklist);
static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
struct fcp_black_list_entry **pplun_blacklist);
static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
struct fcp_black_list_entry **pplun_blacklist);
static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
static struct scsi_pkt *fcp_pseudo_init_pkt(
struct scsi_address *ap, struct scsi_pkt *pkt,
struct buf *bp, int cmdlen, int statuslen,
int tgtlen, int flags, int (*callback)(), caddr_t arg);
static void fcp_pseudo_destroy_pkt(
struct scsi_address *ap, struct scsi_pkt *pkt);
static void fcp_pseudo_sync_pkt(
struct scsi_address *ap, struct scsi_pkt *pkt);
static int fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static void fcp_pseudo_dmafree(
struct scsi_address *ap, struct scsi_pkt *pkt);
extern struct mod_ops mod_driverops;
extern int modrootloaded;
extern char *sense_keys[];
extern dev_info_t *scsi_vhci_dip;
#define FCP_LUN_ADDRESSING 0x80
#define FCP_PD_ADDRESSING 0x00
#define FCP_VOLUME_ADDRESSING 0x40
#define FCP_SVE_THROTTLE 0x28
#define MAX_INT_DMA 0x7fffffff
#define NODE_WWN_PROP (char *)fcp_node_wwn_prop
#define PORT_WWN_PROP (char *)fcp_port_wwn_prop
#define TARGET_PROP (char *)fcp_target_prop
#define LUN_PROP (char *)fcp_lun_prop
#define SAM_LUN_PROP (char *)fcp_sam_lun_prop
#define CONF_WWN_PROP (char *)fcp_conf_wwn_prop
#define OBP_BOOT_WWN (char *)fcp_obp_boot_wwn
#define MANUAL_CFG_ONLY (char *)fcp_manual_config_only
#define INIT_PORT_PROP (char *)fcp_init_port_prop
#define TGT_PORT_PROP (char *)fcp_tgt_port_prop
#define LUN_BLACKLIST_PROP (char *)fcp_lun_blacklist_prop
#define LUN_PORT (plun->lun_tgt->tgt_port)
#define LUN_TGT (plun->lun_tgt)
#define FCP_ATOB(x) (((x) >= '0' && (x) <= '9') ? ((x) - '0') : \
((x) >= 'a' && (x) <= 'f') ? \
((x) - 'a' + 10) : ((x) - 'A' + 10))
#define FCP_MAX(a, b) ((a) > (b) ? (a) : (b))
#define FCP_N_NDI_EVENTS \
(sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
#define FCP_LINK_STATE_CHANGED(p, c) \
((p)->port_link_cnt != (c)->ipkt_link_cnt)
#define FCP_TGT_STATE_CHANGED(t, c) \
((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
#define FCP_STATE_CHANGED(p, t, c) \
(FCP_TGT_STATE_CHANGED(t, c))
#define FCP_MUST_RETRY(fpkt) \
((fpkt)->pkt_state == FC_PKT_LOCAL_BSY || \
(fpkt)->pkt_state == FC_PKT_LOCAL_RJT || \
(fpkt)->pkt_state == FC_PKT_TRAN_BSY || \
(fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS || \
(fpkt)->pkt_state == FC_PKT_NPORT_BSY || \
(fpkt)->pkt_state == FC_PKT_FABRIC_BSY || \
(fpkt)->pkt_state == FC_PKT_PORT_OFFLINE || \
(fpkt)->pkt_reason == FC_REASON_OFFLINE)
#define FCP_SENSE_REPORTLUN_CHANGED(es) \
((es)->es_key == KEY_UNIT_ATTENTION && \
(es)->es_add_code == 0x3f && \
(es)->es_qual_code == 0x0e)
#define FCP_SENSE_NO_LUN(es) \
((es)->es_key == KEY_ILLEGAL_REQUEST && \
(es)->es_add_code == 0x25 && \
(es)->es_qual_code == 0x0)
#define FCP_VERSION "20091208-1.192"
#define FCP_NAME_VERSION "SunFC FCP v" FCP_VERSION
#define FCP_NUM_ELEMENTS(array) \
(sizeof (array) / sizeof ((array)[0]))
/*
 * Size (in bytes) of the fc_trace log queue allocated in _init().
 * Parenthesized so the macro expands safely inside any expression
 * (e.g. `x % FCP_LOG_SIZE` would previously parse as `(x % 1024) * 1024`).
 */
#define	FCP_LOG_SIZE		(1024 * 1024)
#define FCP_LEVEL_1 0x00001
#define FCP_LEVEL_2 0x00002
#define FCP_LEVEL_3 0x00004
#define FCP_LEVEL_4 0x00008
#define FCP_LEVEL_5 0x00010
#define FCP_LEVEL_6 0x00020
#define FCP_LEVEL_7 0x00040
#define FCP_LEVEL_8 0x00080
#define FCP_LEVEL_9 0x00100
#define FCP_MSG_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
#define FCP_MSG_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
#define FCP_MSG_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
#define FCP_MSG_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
#define FCP_MSG_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
#define FCP_MSG_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
#define FCP_MSG_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
#define FCP_MSG_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
#define FCP_MSG_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
#define FCP_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
#define FCP_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
#define FCP_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
#define FCP_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
#define FCP_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
#define FCP_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
#define FCP_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
#define FCP_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
#define FCP_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
#define FCP_MSG_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF | \
FC_TRACE_LOG_MSG)
#define FCP_MSG_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF | \
FC_TRACE_LOG_MSG)
#define FCP_MSG_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF | \
FC_TRACE_LOG_MSG)
#define FCP_MSG_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF | \
FC_TRACE_LOG_MSG)
#define FCP_MSG_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF | \
FC_TRACE_LOG_MSG)
#define FCP_MSG_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF | \
FC_TRACE_LOG_MSG)
#define FCP_MSG_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF | \
FC_TRACE_LOG_MSG)
#define FCP_MSG_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF | \
FC_TRACE_LOG_MSG)
#define FCP_MSG_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF | \
FC_TRACE_LOG_MSG)
#ifdef DEBUG
#define FCP_DTRACE fc_trace_debug
#else
#define FCP_DTRACE
#endif
#define FCP_TRACE fc_trace_debug
/*
 * Character device entry points for the "fcp" pseudo node.  Only
 * open, close and ioctl are implemented; everything else is
 * nodev/nochpoll.
 */
static struct cb_ops fcp_cb_ops = {
fcp_open,	/* open */
fcp_close,	/* close */
nodev,	/* strategy */
nodev,	/* print */
nodev,	/* dump */
nodev,	/* read */
nodev,	/* write */
fcp_ioctl,	/* ioctl */
nodev,	/* devmap */
nodev,	/* mmap */
nodev,	/* segmap */
nochpoll,	/* chpoll */
ddi_prop_op,	/* cb_prop_op */
0,	/* streamtab */
D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
CB_REV,	/* cb_rev */
nodev,	/* async read */
nodev	/* async write */
};
/*
 * Device operations vector; attach/detach plus the cb_ops above.
 */
static struct dev_ops fcp_ops = {
DEVO_REV,	/* devo_rev */
0,	/* refcnt */
ddi_getinfo_1to1,	/* getinfo */
nulldev,	/* identify */
nulldev,	/* probe */
fcp_attach,	/* attach */
fcp_detach,	/* detach */
nodev,	/* reset */
&fcp_cb_ops,	/* cb_ops */
NULL,	/* bus_ops */
NULL,	/* power */
};
char *fcp_version = FCP_NAME_VERSION;	/* exported version string */
/* Module linkage: fcp is a device driver (mod_driverops). */
static struct modldrv modldrv = {
&mod_driverops,	/* loadable-module type */
FCP_NAME_VERSION,	/* module name/version shown by modinfo */
&fcp_ops	/* driver ops */
};
static struct modlinkage modlinkage = {
MODREV_1,
&modldrv,
NULL
};
/*
 * ULP registration record handed to fc_ulp_add() in _init().  It
 * identifies fcp as the SCSI-FCP FC4 type and wires up the transport
 * callbacks for port attach/detach, port ioctls, unsolicited ELS and
 * data frames, and link state changes.
 */
static fc_ulp_modinfo_t fcp_modinfo = {
&fcp_modinfo,	/* ulp_handle */
FCTL_ULP_MODREV_4,	/* ulp_rev */
FC4_SCSI_FCP,	/* ulp_type */
"fcp",	/* ulp_name */
FCP_STATEC_MASK,	/* ulp_statec_mask */
fcp_port_attach,	/* ulp_port_attach */
fcp_port_detach,	/* ulp_port_detach */
fcp_port_ioctl,	/* ulp_port_ioctl */
fcp_els_callback,	/* ulp_els_callback */
fcp_data_callback,	/* ulp_data_callback */
fcp_statec_callback	/* ulp_statec_callback */
};
#ifdef DEBUG
#define FCP_TRACE_DEFAULT (FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
FCP_LEVEL_2 | FCP_LEVEL_3 | \
FCP_LEVEL_4 | FCP_LEVEL_5 | \
FCP_LEVEL_6 | FCP_LEVEL_7)
#else
#define FCP_TRACE_DEFAULT (FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
FCP_LEVEL_2 | FCP_LEVEL_3 | \
FCP_LEVEL_4 | FCP_LEVEL_5 | \
FCP_LEVEL_6 | FCP_LEVEL_7)
#endif
/* Tunables and module-global state. */
int fcp_bus_config_debug = 0;
static int fcp_log_size = FCP_LOG_SIZE;	/* trace logq size, see _init() */
static int fcp_trace = FCP_TRACE_DEFAULT;	/* trace level mask */
static fc_trace_logq_t *fcp_logq = NULL;	/* allocated in _init() */
static struct fcp_black_list_entry *fcp_lun_blacklist = NULL;
static int fcp_enable_auto_configuration = 1;
static int fcp_max_bus_config_retries = 4;
static int fcp_lun_ready_retry = 300;
static int fcp_max_target_retries = 50;
/* Watchdog bookkeeping -- see fcp_watch(). */
static int fcp_watchdog_init = 0;
static int fcp_watchdog_time = 0;
static int fcp_watchdog_timeout = 1;
static int fcp_watchdog_tick;
/*
 * Seconds a device may stay offline; fcp_attach() warns when the
 * "fcp_offline_delay" property lies outside 10..60.
 */
unsigned int fcp_offline_delay = FCP_OFFLINE_DELAY;
static void *fcp_softstate = NULL;	/* per-port soft-state anchor */
static uchar_t fcp_oflag = FCP_IDLE;	/* open state of the fcp node */
static kmutex_t fcp_global_mutex;	/* protects fcp_port_head et al. */
static kmutex_t fcp_ioctl_mutex;	/* serializes FCP_TGT_SEND_SCSI */
static dev_info_t *fcp_global_dip = NULL;	/* dip of the pseudo node */
static timeout_id_t fcp_watchdog_id;
/* Property-name strings referenced via the *_PROP macros above. */
const char *fcp_lun_prop = "lun";
const char *fcp_sam_lun_prop = "sam-lun";
const char *fcp_target_prop = "target";
const char *fcp_node_wwn_prop = "node-wwn";
const char *fcp_port_wwn_prop = "port-wwn";
const char *fcp_conf_wwn_prop = "fc-port-wwn";
const char *fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
const char *fcp_manual_config_only = "manual_configuration_only";
const char *fcp_init_port_prop = "initiator-port";
const char *fcp_tgt_port_prop = "target-port";
const char *fcp_lun_blacklist_prop = "pwwn-lun-blacklist";
static struct fcp_port *fcp_port_head = NULL;	/* list of attached ports */
static ddi_eventcookie_t fcp_insert_eid;
static ddi_eventcookie_t fcp_remove_eid;
/* NDI event definitions for FC-AL device insertion and removal. */
static ndi_event_definition_t fcp_ndi_event_defs[] = {
{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
};
/*
 * SCSI opcodes associated with the ioctl path.  NOTE(review):
 * presumably the allow-list for FCP_TGT_SEND_SCSI pass-through --
 * confirm against fcp_setup_scsi_ioctl()/fcp_send_scsi_ioctl().
 */
static uint8_t scsi_ioctl_list[] = {
SCMD_INQUIRY,
SCMD_REPORT_LUN,
SCMD_READ_CAPACITY
};
/*
 * Canned 16-byte buffer laid out like a REPORT LUNS response for a
 * single LUN 0: bytes 0-3 list length (8), bytes 4-7 reserved,
 * bytes 8-15 one LUN entry using peripheral device addressing.
 */
static uchar_t fcp_dummy_lun[] = {
0x00,	/* LUN list length, MSB */
0x00,
0x00,
0x08,	/* LUN list length: one 8-byte entry */
0x00,	/* reserved */
0x00,
0x00,
0x00,
FCP_PD_ADDRESSING,	/* LUN 0, peripheral addressing */
0x00,
0x00,
0x00,
0x00,
0x00,
0x00,
0x00
};
/*
 * Translation table from an FC-AL ALPA (arbitrated loop physical
 * address, used as the array index) to a loop index; 0x00 marks
 * index positions that are not valid ALPA values.  NOTE(review):
 * values appear to follow the FC-AL valid-ALPA ordering -- do not
 * edit individual entries without checking against the spec.
 */
static uchar_t fcp_alpa_to_switch[] = {
0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
/*
 * NOTE(review): "SESS01 " looks like an INQUIRY product-id string
 * (cf. "SUN     SESS01" in fcp_symmetric_disk_table) -- confirm where
 * this is compared before relying on it.  The name "pid" is easily
 * confused with a process id.
 */
static caddr_t pid = "SESS01          ";
#if !defined(lint)
_NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
fcp_port::fcp_next fcp_watchdog_id))
_NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))
_NOTE(SCHEME_PROTECTS_DATA("Unshared",
fcp_insert_eid
fcp_remove_eid
fcp_watchdog_time))
_NOTE(SCHEME_PROTECTS_DATA("Unshared",
fcp_cb_ops
fcp_ops
callb_cpr))
#endif
/*
 * INQUIRY vendor/product prefixes of devices treated as symmetric.
 * NOTE(review): presumably consulted by fcp_symmetric_device_probe()
 * to skip per-LUN probing -- confirm against that function.
 */
char *fcp_symmetric_disk_table[] = {
"SEAGATE ST",
"IBM     DDYFT",
"SUNW    SUNWGS",	/* Daktari enclosure */
"SUN     SENA",	/* SES device */
"SUN     SESS01"	/* VICOM SVE box */
};
int fcp_symmetric_disk_table_size =
sizeof (fcp_symmetric_disk_table)/sizeof (char *);
/* DMA attributes for the pseudo FCA path (see fcp_pseudo_init_pkt()). */
static ddi_dma_attr_t pseudo_fca_dma_attr = {
DMA_ATTR_V0,	/* dma_attr_version */
0,	/* low DMA address range */
0xffffffff,	/* high DMA address range */
0x00ffffff,	/* max DMA counter register */
1,	/* address alignment */
0x3f,	/* burst sizes */
1,	/* minimum transfer size */
0xffffffff,	/* maximum transfer size */
(1 << 24) - 1,	/* maximum segment size */
1,	/* scatter/gather list length */
512,	/* granularity */
0	/* DMA flags */
};
/*
 * _init
 *
 * Loadable-module entry point.  Initializes the per-port soft-state
 * container and global mutexes, registers fcp with the FC transport
 * (fc_ulp_add), allocates the trace log queue and finally installs
 * the module.  Each failure path unwinds exactly what was set up
 * before it.
 *
 * Returns: 0 on success, EINVAL or ENODEV on failure.
 */
int
_init(void)
{
int rval;
/* Soft-state container for struct fcp_port instances. */
if (ddi_soft_state_init(&fcp_softstate,
sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
return (EINVAL);
}
mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
/* Register this ULP with the FC transport. */
if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
mutex_destroy(&fcp_global_mutex);
mutex_destroy(&fcp_ioctl_mutex);
ddi_soft_state_fini(&fcp_softstate);
return (ENODEV);
}
fcp_logq = fc_trace_alloc_logq(fcp_log_size);
if ((rval = mod_install(&modlinkage)) != 0) {
/* mod_install failed: unwind everything done above. */
fc_trace_free_logq(fcp_logq);
(void) fc_ulp_remove(&fcp_modinfo);
mutex_destroy(&fcp_global_mutex);
mutex_destroy(&fcp_ioctl_mutex);
ddi_soft_state_fini(&fcp_softstate);
rval = ENODEV;
}
return (rval);
}
/*
 * _fini
 *
 * Loadable-module unload entry point.  mod_remove() fails while the
 * driver is still in use; in that case nothing has been torn down and
 * the error is simply propagated.  On success, release every global
 * resource acquired in _init().
 */
int
_fini(void)
{
	int	ret;

	ret = mod_remove(&modlinkage);
	if (ret != 0) {
		return (ret);
	}

	/* Module is gone; tear down the globals set up in _init(). */
	(void) fc_ulp_remove(&fcp_modinfo);
	ddi_soft_state_fini(&fcp_softstate);
	mutex_destroy(&fcp_global_mutex);
	mutex_destroy(&fcp_ioctl_mutex);
	fc_trace_free_logq(fcp_logq);

	return (ret);
}
/*
 * _info
 *
 * Loadable-module entry point: report module information via modinfo.
 */
int
_info(struct modinfo *modinfop)
{
return (mod_info(&modlinkage, modinfop));
}
/*
 * fcp_attach
 *
 * attach(9E) entry point for the fcp pseudo device.  Only DDI_ATTACH
 * is acted upon; any other command returns DDI_SUCCESS unchanged.
 * Creates the "fcp" minor node used by the ioctl interface and reads
 * the "fcp_offline_delay" property, warning when the configured value
 * lies outside the recommended 10..60 second range (the value is kept
 * as configured either way).
 */
static int
fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int	ret = DDI_SUCCESS;

	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);

	if (cmd != DDI_ATTACH) {
		return (ret);
	}

	/* Publish the global dip before creating the minor node. */
	mutex_enter(&fcp_global_mutex);
	fcp_global_dip = devi;
	mutex_exit(&fcp_global_mutex);

	if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
	    0, DDI_PSEUDO, 0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "FCP: Cannot create minor node");
		mutex_enter(&fcp_global_mutex);
		fcp_global_dip = NULL;
		mutex_exit(&fcp_global_mutex);
		ret = DDI_FAILURE;
	} else {
		ddi_report_dev(fcp_global_dip);
	}

	fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
	    devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    "fcp_offline_delay", FCP_OFFLINE_DELAY);

	if ((fcp_offline_delay < 10) || (fcp_offline_delay > 60)) {
		cmn_err(CE_WARN, "Setting fcp_offline_delay "
		    "to %d second(s). This is outside the "
		    "recommended range of 10..60 seconds.",
		    fcp_offline_delay);
	}

	return (ret);
}
/*
 * fcp_detach
 *
 * detach(9E) entry point.  DDI_DETACH succeeds only when no FCP ports
 * remain on the global port list; with ports still attached the
 * driver refuses to detach.  Any other command is a successful no-op.
 */
static int
fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int	ret = DDI_SUCCESS;

	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_8, 0, "module detach: cmd=0x%x", cmd);

	if (cmd == DDI_DETACH) {
		mutex_enter(&fcp_global_mutex);
		FCP_DTRACE(fcp_logq, "fcp",
		    fcp_trace, FCP_BUF_LEVEL_8, 0, "port_head=%p",
		    (void *) fcp_port_head);

		if (fcp_port_head != NULL) {
			/* Ports still attached -- cannot unload. */
			mutex_exit(&fcp_global_mutex);
			ret = DDI_FAILURE;
		} else {
			ddi_remove_minor_node(fcp_global_dip, NULL);
			fcp_global_dip = NULL;
			mutex_exit(&fcp_global_mutex);
		}
	}

	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_8, 0, "module detach returning %d", ret);

	return (ret);
}
/*
 * fcp_open
 *
 * open(9E) for the fcp control node.  Character opens only, and only
 * for privileged callers.  Tracks open state in fcp_oflag under
 * fcp_global_mutex: an exclusive holder blocks all opens, and FEXCL
 * is granted only while nobody else has the node open.
 */
static int
fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	if (drv_priv(credp) != 0) {
		return (EPERM);
	}

	mutex_enter(&fcp_global_mutex);

	/* An existing exclusive open blocks everyone. */
	if (fcp_oflag & FCP_EXCL) {
		mutex_exit(&fcp_global_mutex);
		return (EBUSY);
	}

	if (flag & FEXCL) {
		/* Exclusive access requires that nobody has it open. */
		if (fcp_oflag & FCP_OPEN) {
			mutex_exit(&fcp_global_mutex);
			return (EBUSY);
		}
		fcp_oflag |= FCP_EXCL;
	}

	fcp_oflag |= FCP_OPEN;
	mutex_exit(&fcp_global_mutex);

	return (0);
}
/*
 * fcp_close
 *
 * close(9E) for the fcp control node.  Resets the open-state flag to
 * FCP_IDLE, dropping both FCP_OPEN and FCP_EXCL in one assignment
 * (there is a single minor, so last-close semantics are trivial).
 */
static int
fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	mutex_enter(&fcp_global_mutex);
	if ((fcp_oflag & FCP_OPEN) == 0) {
		/* Closing a device that was never opened. */
		mutex_exit(&fcp_global_mutex);
		return (ENODEV);
	}
	fcp_oflag = FCP_IDLE;
	mutex_exit(&fcp_global_mutex);

	return (0);
}
/*
 * fcp_ioctl
 *
 * ioctl(9E) dispatcher for the fcp control node.  Rejects requests
 * when the node is not open, then fans out to the per-command setup
 * routines.  FCP_TGT_SEND_SCSI is serialized with fcp_ioctl_mutex;
 * unknown opcodes are logged and rejected with EINVAL.
 */
static int
fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
    int *rval)
{
	int	rv = 0;

	mutex_enter(&fcp_global_mutex);
	if ((fcp_oflag & FCP_OPEN) == 0) {
		mutex_exit(&fcp_global_mutex);
		return (ENXIO);
	}
	mutex_exit(&fcp_global_mutex);

	switch (cmd) {
	case FCP_TGT_INQUIRY:
	case FCP_TGT_CREATE:
	case FCP_TGT_DELETE:
		rv = fcp_setup_device_data_ioctl(cmd,
		    (struct fcp_ioctl *)data, mode, rval);
		break;

	case FCP_TGT_SEND_SCSI:
		/* Serialize SCSI pass-through requests. */
		mutex_enter(&fcp_ioctl_mutex);
		rv = fcp_setup_scsi_ioctl(
		    (struct fcp_scsi_cmd *)data, mode, rval);
		mutex_exit(&fcp_ioctl_mutex);
		break;

	case FCP_STATE_COUNT:
		rv = fcp_get_statec_count((struct fcp_ioctl *)data,
		    mode, rval);
		break;

	case FCP_GET_TARGET_MAPPINGS:
		rv = fcp_get_target_mappings((struct fcp_ioctl *)data,
		    mode, rval);
		break;

	default:
		fcp_log(CE_WARN, NULL,
		    "!Invalid ioctl opcode = 0x%x", cmd);
		rv = EINVAL;
	}

	return (rv);
}
static int
fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
int *rval)
{
struct fcp_port *pptr;
struct device_data *dev_data;
uint32_t link_cnt;
la_wwn_t *wwn_ptr = NULL;
struct fcp_tgt *ptgt = NULL;
struct fcp_lun *plun = NULL;
int i, error;
struct fcp_ioctl fioctl;
#ifdef _MULTI_DATAMODEL
switch (ddi_model_convert_from(mode & FMODELS)) {
case DDI_MODEL_ILP32: {
struct fcp32_ioctl f32_ioctl;
if (ddi_copyin((void *)data, (void *)&f32_ioctl,
sizeof (struct fcp32_ioctl), mode)) {
return (EFAULT);
}
fioctl.fp_minor = f32_ioctl.fp_minor;
fioctl.listlen = f32_ioctl.listlen;
fioctl.list = (caddr_t)(long)f32_ioctl.list;
break;
}
case DDI_MODEL_NONE:
if (ddi_copyin((void *)data, (void *)&fioctl,
sizeof (struct fcp_ioctl), mode)) {
return (EFAULT);
}
break;
}
#else
if (ddi_copyin((void *)data, (void *)&fioctl,
sizeof (struct fcp_ioctl), mode)) {
return (EFAULT);
}
#endif
mutex_enter(&fcp_global_mutex);
pptr = fcp_port_head;
while (pptr) {
if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
break;
} else {
pptr = pptr->port_next;
}
}
mutex_exit(&fcp_global_mutex);
if (pptr == NULL) {
return (ENXIO);
}
mutex_enter(&pptr->port_mutex);
if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
fioctl.listlen, KM_NOSLEEP)) == NULL) {
mutex_exit(&pptr->port_mutex);
return (ENOMEM);
}
if (ddi_copyin(fioctl.list, dev_data,
(sizeof (struct device_data)) * fioctl.listlen, mode)) {
kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
mutex_exit(&pptr->port_mutex);
return (EFAULT);
}
link_cnt = pptr->port_link_cnt;
if (cmd == FCP_TGT_INQUIRY) {
wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
sizeof (wwn_ptr->raw_wwn)) == 0) {
mutex_exit(&pptr->port_mutex);
dev_data[0].dev0_type = DTYPE_UNKNOWN;
dev_data[0].dev_status = 0;
if (ddi_copyout(dev_data, fioctl.list,
(sizeof (struct device_data)) * fioctl.listlen,
mode)) {
kmem_free(dev_data,
sizeof (*dev_data) * fioctl.listlen);
return (EFAULT);
}
kmem_free(dev_data,
sizeof (*dev_data) * fioctl.listlen);
#ifdef _MULTI_DATAMODEL
switch (ddi_model_convert_from(mode & FMODELS)) {
case DDI_MODEL_ILP32: {
struct fcp32_ioctl f32_ioctl;
f32_ioctl.fp_minor = fioctl.fp_minor;
f32_ioctl.listlen = fioctl.listlen;
f32_ioctl.list = (caddr32_t)(long)fioctl.list;
if (ddi_copyout((void *)&f32_ioctl,
(void *)data,
sizeof (struct fcp32_ioctl), mode)) {
return (EFAULT);
}
break;
}
case DDI_MODEL_NONE:
if (ddi_copyout((void *)&fioctl, (void *)data,
sizeof (struct fcp_ioctl), mode)) {
return (EFAULT);
}
break;
}
#else
if (ddi_copyout((void *)&fioctl, (void *)data,
sizeof (struct fcp_ioctl), mode)) {
return (EFAULT);
}
#endif
return (0);
}
}
if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
mutex_exit(&pptr->port_mutex);
return (ENXIO);
}
for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
i++) {
wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
dev_data[i].dev0_type = DTYPE_UNKNOWN;
dev_data[i].dev_status = ENXIO;
if ((ptgt = fcp_lookup_target(pptr,
(uchar_t *)wwn_ptr)) == NULL) {
mutex_exit(&pptr->port_mutex);
if (fc_ulp_get_remote_port(pptr->port_fp_handle,
wwn_ptr, &error, 0) == NULL) {
dev_data[i].dev_status = ENODEV;
mutex_enter(&pptr->port_mutex);
continue;
} else {
dev_data[i].dev_status = EAGAIN;
mutex_enter(&pptr->port_mutex);
continue;
}
} else {
mutex_enter(&ptgt->tgt_mutex);
if (ptgt->tgt_state & (FCP_TGT_MARK |
FCP_TGT_BUSY)) {
dev_data[i].dev_status = EAGAIN;
mutex_exit(&ptgt->tgt_mutex);
continue;
}
if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
dev_data[i].dev_status = ENOTSUP;
} else {
dev_data[i].dev_status = ENXIO;
}
mutex_exit(&ptgt->tgt_mutex);
continue;
}
switch (cmd) {
case FCP_TGT_INQUIRY:
dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
dev_data[i].dev_status = 0;
mutex_exit(&ptgt->tgt_mutex);
if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
dev_data[i].dev0_type = DTYPE_UNKNOWN;
} else {
dev_data[i].dev0_type = plun->lun_type;
}
mutex_enter(&ptgt->tgt_mutex);
break;
case FCP_TGT_CREATE:
mutex_exit(&ptgt->tgt_mutex);
mutex_exit(&pptr->port_mutex);
mutex_enter(&fcp_global_mutex);
if (fcp_oflag & FCP_BUSY) {
mutex_exit(&fcp_global_mutex);
if (dev_data) {
kmem_free(dev_data,
sizeof (*dev_data) *
fioctl.listlen);
}
return (EBUSY);
}
fcp_oflag |= FCP_BUSY;
mutex_exit(&fcp_global_mutex);
dev_data[i].dev_status =
fcp_create_on_demand(pptr,
wwn_ptr->raw_wwn);
if (dev_data[i].dev_status != 0) {
char buf[25];
for (i = 0; i < FC_WWN_SIZE; i++) {
(void) sprintf(&buf[i << 1],
"%02x",
wwn_ptr->raw_wwn[i]);
}
fcp_log(CE_WARN, pptr->port_dip,
"!Failed to create nodes for"
" pwwn=%s; error=%x", buf,
dev_data[i].dev_status);
}
mutex_enter(&fcp_global_mutex);
fcp_oflag &= ~FCP_BUSY;
mutex_exit(&fcp_global_mutex);
mutex_enter(&pptr->port_mutex);
mutex_enter(&ptgt->tgt_mutex);
break;
case FCP_TGT_DELETE:
break;
default:
fcp_log(CE_WARN, pptr->port_dip,
"!Invalid device data ioctl "
"opcode = 0x%x", cmd);
}
mutex_exit(&ptgt->tgt_mutex);
}
}
mutex_exit(&pptr->port_mutex);
if (ddi_copyout(dev_data, fioctl.list,
(sizeof (struct device_data)) * fioctl.listlen, mode)) {
kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
return (EFAULT);
}
kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
#ifdef _MULTI_DATAMODEL
switch (ddi_model_convert_from(mode & FMODELS)) {
case DDI_MODEL_ILP32: {
struct fcp32_ioctl f32_ioctl;
f32_ioctl.fp_minor = fioctl.fp_minor;
f32_ioctl.listlen = fioctl.listlen;
f32_ioctl.list = (caddr32_t)(long)fioctl.list;
if (ddi_copyout((void *)&f32_ioctl, (void *)data,
sizeof (struct fcp32_ioctl), mode)) {
return (EFAULT);
}
break;
}
case DDI_MODEL_NONE:
if (ddi_copyout((void *)&fioctl, (void *)data,
sizeof (struct fcp_ioctl), mode)) {
return (EFAULT);
}
break;
}
#else
if (ddi_copyout((void *)&fioctl, (void *)data,
sizeof (struct fcp_ioctl), mode)) {
return (EFAULT);
}
#endif
return (0);
}
/*
 * fcp_get_target_mappings
 *
 * Ioctl backend that reports the OS-path <-> FC-target mapping table of
 * one port.  Copies in a struct fcp_ioctl (32-/64-bit data-model aware),
 * resolves fp_minor to a port, then walks every online LUN of every
 * target on that port, filling one fc_hba_mapping_entry_t per LUN into a
 * kernel buffer that is finally copied out to fioctl.list.
 *
 * Returns 0 on success, or EFAULT/ENXIO/ENOMEM on failure.
 */
static int
fcp_get_target_mappings(struct fcp_ioctl *data,
    int mode, int *rval)
{
	struct fcp_port			*pptr;
	fc_hba_target_mappings_t	*mappings;
	fc_hba_mapping_entry_t		*map;
	struct fcp_tgt			*ptgt = NULL;
	struct fcp_lun			*plun = NULL;
	int				i, mapIndex, mappingSize;
	int				listlen;
	struct fcp_ioctl		fioctl;
	char				*path;
	fcp_ent_addr_t			sam_lun_addr;

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		/* 32-bit caller: copy the ILP32 image and widen it. */
		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		fioctl.fp_minor = f32_ioctl.fp_minor;
		fioctl.listlen = f32_ioctl.listlen;
		fioctl.list = (caddr_t)(long)f32_ioctl.list;
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin((void *)data, (void *)&fioctl,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
#else	/* _MULTI_DATAMODEL */
	if (ddi_copyin((void *)data, (void *)&fioctl,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	/* Locate the port instance named by the caller's minor number. */
	mutex_enter(&fcp_global_mutex);
	pptr = fcp_port_head;
	while (pptr) {
		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
			break;
		} else {
			pptr = pptr->port_next;
		}
	}
	mutex_exit(&fcp_global_mutex);
	if (pptr == NULL) {
		cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
		    fioctl.fp_minor);
		return (ENXIO);
	}

	/*
	 * listlen becomes the number of mapping entries the user buffer
	 * can hold (buffer = header + N entries).
	 * NOTE(review): mappingSize comes straight from userland and is
	 * passed unchecked to kmem_zalloc(KM_SLEEP) below -- confirm an
	 * upper bound is enforced somewhere in the caller.
	 */
	mappingSize = fioctl.listlen;
	listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
	    - sizeof (fc_hba_target_mappings_t);
	if (listlen <= 0) {
		cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
		return (ENXIO);
	}
	listlen = listlen / sizeof (fc_hba_mapping_entry_t);

	/* KM_SLEEP never returns NULL; the check is purely defensive. */
	if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
		return (ENOMEM);
	}
	mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;

	/* Walk the target hash table, recording every online LUN. */
	mapIndex = 0;
	mutex_enter(&pptr->port_mutex);
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			mutex_enter(&ptgt->tgt_mutex);
			for (plun = ptgt->tgt_lun; plun != NULL;
			    plun = plun->lun_next) {
				/* Skip LUNs with no devfs path or offline. */
				if (plun->lun_state & FCP_LUN_OFFLINE) {
					continue;
				}
				path = fcp_get_lun_path(plun);
				if (path == NULL) {
					continue;
				}
				/*
				 * Keep counting past the end of the user
				 * buffer so numLuns reports the real total.
				 */
				if (mapIndex >= listlen) {
					mapIndex ++;
					kmem_free(path, MAXPATHLEN);
					continue;
				}
				map = &mappings->entries[mapIndex++];
				bcopy(path, map->targetDriver,
				    sizeof (map->targetDriver));
				map->d_id = ptgt->tgt_d_id;
				map->busNumber = 0;
				map->targetNumber = ptgt->tgt_d_id;
				map->osLUN = plun->lun_num;

				/*
				 * Report the SAM LUN address in big-endian
				 * (wire) order.
				 */
				sam_lun_addr.ent_addr_0 =
				    BE_16(plun->lun_addr.ent_addr_0);
				sam_lun_addr.ent_addr_1 =
				    BE_16(plun->lun_addr.ent_addr_1);
				sam_lun_addr.ent_addr_2 =
				    BE_16(plun->lun_addr.ent_addr_2);
				sam_lun_addr.ent_addr_3 =
				    BE_16(plun->lun_addr.ent_addr_3);
				bcopy(&sam_lun_addr, &map->samLUN,
				    FCP_LUN_SIZE);

				bcopy(ptgt->tgt_node_wwn.raw_wwn,
				    map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
				bcopy(ptgt->tgt_port_wwn.raw_wwn,
				    map->PortWWN.raw_wwn, sizeof (la_wwn_t));

				if (plun->lun_guid) {
					/* convert ascii wwn to bytes */
					fcp_ascii_to_wwn(plun->lun_guid,
					    map->guid, sizeof (map->guid));

					if ((sizeof (map->guid)) <
					    plun->lun_guid_size / 2) {
						cmn_err(CE_WARN,
						    "fcp_get_target_mappings:"
						    "guid copy space "
						    "insufficient."
						    "Copy Truncation - "
						    "available %d; need %d",
						    (int)sizeof (map->guid),
						    (int)
						    plun->lun_guid_size / 2);
					}
				}
				kmem_free(path, MAXPATHLEN);
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
	mutex_exit(&pptr->port_mutex);
	mappings->numLuns = mapIndex;

	if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
		kmem_free(mappings, mappingSize);
		return (EFAULT);
	}
	kmem_free(mappings, mappingSize);

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		/* Narrow the structure back to the ILP32 layout. */
		f32_ioctl.fp_minor = fioctl.fp_minor;
		f32_ioctl.listlen = fioctl.listlen;
		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyout((void *)&fioctl, (void *)data,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
#else	/* _MULTI_DATAMODEL */
	if (ddi_copyout((void *)&fioctl, (void *)data,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	return (0);
}
/*
 * fcp_setup_scsi_ioctl
 *
 * Prepares and runs a SCSI pass-through ioctl request.  The user's
 * fcp_scsi_cmd is copied in, its three data buffers (CDB, data,
 * request-sense) are shadowed with kernel buffers, the command is
 * executed via fcp_send_scsi_ioctl(), and the results are copied back
 * out to the caller.
 *
 *	u_fscsi	user-space address of the request structure
 *	mode	ioctl mode flags (data model, FKIOCTL, ...)
 *	rval	unused; present to match the ioctl handler signature
 *
 * Returns 0 on success or an errno (EINVAL, ENOMEM, EFAULT, or whatever
 * fcp_send_scsi_ioctl() reports).
 */
static int
fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
    int mode, int *rval)
{
	struct fcp_scsi_cmd	k_fscsi;
	caddr_t			cdb_kbuf = NULL;
	caddr_t			data_kbuf = NULL;
	caddr_t			rq_kbuf = NULL;
	caddr_t			cdb_ubuf;
	caddr_t			data_ubuf;
	caddr_t			rq_ubuf;
	int			status;
	int			copyout_status;

	status = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode);
	if (status != 0) {
		return (status);
	}

	/* All three buffer lengths must be strictly positive. */
	if ((k_fscsi.scsi_cdblen <= 0) ||
	    (k_fscsi.scsi_buflen <= 0) ||
	    (k_fscsi.scsi_rqlen <= 0)) {
		return (EINVAL);
	}

	/* Remember the caller's buffer addresses before they are swapped. */
	cdb_ubuf = k_fscsi.scsi_cdbbufaddr;
	data_ubuf = k_fscsi.scsi_bufaddr;
	rq_ubuf = k_fscsi.scsi_rqbufaddr;

	/* Shadow each user buffer with a kernel buffer. */
	cdb_kbuf = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
	data_kbuf = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
	rq_kbuf = kmem_alloc(k_fscsi.scsi_rqlen, KM_NOSLEEP);
	if (cdb_kbuf == NULL || data_kbuf == NULL || rq_kbuf == NULL) {
		status = ENOMEM;
	}

	if (status == 0 &&
	    (ddi_copyin(cdb_ubuf, cdb_kbuf, k_fscsi.scsi_cdblen, mode) ||
	    ddi_copyin(data_ubuf, data_kbuf, k_fscsi.scsi_buflen, mode) ||
	    ddi_copyin(rq_ubuf, rq_kbuf, k_fscsi.scsi_rqlen, mode))) {
		status = EFAULT;
	}

	if (status == 0) {
		/* Point the command at the kernel-side buffers ... */
		k_fscsi.scsi_cdbbufaddr = cdb_kbuf;
		k_fscsi.scsi_bufaddr = data_kbuf;
		k_fscsi.scsi_rqbufaddr = rq_kbuf;

		status = fcp_send_scsi_ioctl(&k_fscsi);

		/* ... then restore the user addresses for the copyout. */
		k_fscsi.scsi_cdbbufaddr = cdb_ubuf;
		k_fscsi.scsi_bufaddr = data_ubuf;
		k_fscsi.scsi_rqbufaddr = rq_ubuf;
	}

	if (status == 0 &&
	    (ddi_copyout(cdb_kbuf, cdb_ubuf, k_fscsi.scsi_cdblen, mode) ||
	    ddi_copyout(data_kbuf, data_ubuf, k_fscsi.scsi_buflen, mode) ||
	    ddi_copyout(rq_kbuf, rq_ubuf, k_fscsi.scsi_rqlen, mode))) {
		status = EFAULT;
	}

	/* kmem_free() must never be handed a NULL pointer. */
	if (cdb_kbuf != NULL) {
		kmem_free(cdb_kbuf, k_fscsi.scsi_cdblen);
	}
	if (data_kbuf != NULL) {
		kmem_free(data_kbuf, k_fscsi.scsi_buflen);
	}
	if (rq_kbuf != NULL) {
		kmem_free(rq_kbuf, k_fscsi.scsi_rqlen);
	}

	/*
	 * The command structure is always copied back, even on failure, so
	 * the caller sees the status fields and residues.
	 */
	copyout_status = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
	if (copyout_status != 0) {
		status = copyout_status;
	}

	return (status);
}
/*
 * fcp_copyin_scsi_cmd
 *
 * Copies a struct fcp_scsi_cmd in from user space at base_addr,
 * converting from the ILP32 layout when the caller is a 32-bit process
 * (_MULTI_DATAMODEL kernels).
 *
 * Returns 0 on success, EFAULT if the copyin faults.
 */
static int
fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
{
#ifdef _MULTI_DATAMODEL
	struct fcp32_scsi_cmd	f32scsi;

	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32:
		/* Copy the 32-bit image, then widen it into *fscsi. */
		if (ddi_copyin((void *)base_addr,
		    &f32scsi,
		    sizeof (struct fcp32_scsi_cmd),
		    mode)) {
			return (EFAULT);
		}
		FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
		break;
	case DDI_MODEL_NONE:
		/* Native (64-bit) caller: copy directly. */
		if (ddi_copyin((void *)base_addr,
		    fscsi,
		    sizeof (struct fcp_scsi_cmd),
		    mode)) {
			return (EFAULT);
		}
		break;
	}
#else	/* _MULTI_DATAMODEL */
	if (ddi_copyin((void *)base_addr,
	    fscsi,
	    sizeof (struct fcp_scsi_cmd),
	    mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	return (0);
}
/*
 * fcp_copyout_scsi_cmd
 *
 * Copies a struct fcp_scsi_cmd back out to user space at base_addr,
 * narrowing to the ILP32 layout when the caller is a 32-bit process
 * (_MULTI_DATAMODEL kernels).  Mirror image of fcp_copyin_scsi_cmd().
 *
 * Returns 0 on success, EFAULT if the copyout faults.
 */
static int
fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
{
#ifdef _MULTI_DATAMODEL
	struct fcp32_scsi_cmd	f32scsi;

	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32:
		/* Narrow to the 32-bit image, then copy it out. */
		FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
		if (ddi_copyout(&f32scsi,
		    (void *)base_addr,
		    sizeof (struct fcp32_scsi_cmd),
		    mode)) {
			return (EFAULT);
		}
		break;
	case DDI_MODEL_NONE:
		/* Native (64-bit) caller: copy directly. */
		if (ddi_copyout(fscsi,
		    (void *)base_addr,
		    sizeof (struct fcp_scsi_cmd),
		    mode)) {
			return (EFAULT);
		}
		break;
	}
#else	/* _MULTI_DATAMODEL */
	if (ddi_copyout(fscsi,
	    (void *)base_addr,
	    sizeof (struct fcp_scsi_cmd),
	    mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	return (0);
}
/*
 * fcp_send_scsi_ioctl
 *
 * Issues a pass-through SCSI READ command (only FCP_SCSI_READ is
 * supported) to the target named by fscsi->scsi_fc_pwwn on the port
 * selected by fscsi->scsi_fc_port_num.  The cdb/data/sense buffer
 * addresses in fscsi are kernel virtual addresses at this point
 * (fcp_setup_scsi_ioctl() swapped them in).  If the target is unknown
 * it is created on demand via PLOGI/PRLI before the command is sent.
 * A REPORT_LUN response, or sense data indicating a LUN inventory
 * change, may schedule an asynchronous LUN reconfiguration.
 *
 * Returns 0 on success or an errno; FC transport details are passed
 * back through fscsi->scsi_fc_status / scsi_pkt_state / scsi_pkt_reason
 * / scsi_pkt_action.
 *
 * Fixes relative to the previous revision:
 *  - fcp_global_mutex is no longer released twice when
 *    fc_ulp_busy_port() fails (the old code exited it in the lookup
 *    branch and again after the work block).
 *  - fcp_port_create_tgt() is called with its out-parameters in
 *    signature order (state, reason, action); reason and action were
 *    previously swapped, so callers saw them exchanged.
 *  - rsp_sense is NULL-initialized and only dereferenced when sense
 *    data is actually present.
 *  - The duplicated target-release if/else at the end is collapsed.
 */
static int
fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
{
	struct fcp_lun	*plun = NULL;
	struct fcp_port	*pptr = NULL;
	struct fcp_tgt	*ptgt = NULL;
	fc_packet_t	*fpkt = NULL;
	struct fcp_ipkt	*icmd = NULL;
	fc_frame_hdr_t	*hp;
	struct fcp_cmd	fcp_cmd;
	struct fcp_cmd	*fcmd;
	union scsi_cdb	*scsi_cdb;
	la_wwn_t	*wwn_ptr;
	int		nodma;
	struct fcp_rsp	*rsp;
	struct fcp_rsp_info	*rsp_info;
	caddr_t		rsp_sense = NULL;	/* valid iff sense_len > 0 */
	int		buf_len;
	int		info_len;
	int		sense_len;
	struct scsi_extended_sense	*sense_to = NULL;
	timeout_id_t	tid;
	uint8_t		reconfig_lun = FALSE;
	uint8_t		reconfig_pending = FALSE;
	uint8_t		scsi_cmd;
	int		rsp_len;
	int		cmd_index;
	int		fc_status;
	int		pkt_state;
	int		pkt_action;
	int		pkt_reason;
	int		ret, xport_retval = ~FC_SUCCESS;
	int		lcount;
	int		tcount;
	int		reconfig_status = FALSE;
	int		port_busy = FALSE;
	uchar_t		*lun_string;

	/* The first CDB byte must be one of the permitted opcodes. */
	scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
	ret = EINVAL;
	for (cmd_index = 0;
	    cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
	    ret != 0;
	    cmd_index++) {
		if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
			ret = 0;
		}
	}

	/* Only read-type pass-through commands are supported. */
	if (fscsi->scsi_flags != FCP_SCSI_READ) {
		ret = EINVAL;
	} else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
		ret = EINVAL;
	}

	if (ret == 0) {
		/* Find the port and mark it busy for the duration. */
		mutex_enter(&fcp_global_mutex);
		pptr = fcp_port_head;
		while (pptr) {
			if (pptr->port_instance ==
			    (uint32_t)fscsi->scsi_fc_port_num) {
				break;
			} else {
				pptr = pptr->port_next;
			}
		}

		if (pptr == NULL) {
			ret = ENXIO;
			mutex_exit(&fcp_global_mutex);
		} else {
			/*
			 * fc_ulp_busy_port() may block, so the global
			 * mutex cannot be held across it.  From here on
			 * the global mutex is only re-taken inside the
			 * success block below, keeping lock/unlock
			 * balanced on every path.
			 */
			mutex_exit(&fcp_global_mutex);
			ret = fc_ulp_busy_port(pptr->port_fp_handle);
		}

		if (ret == 0) {
			port_busy = TRUE;
			fcp_reconfig_wait(pptr);
			mutex_enter(&fcp_global_mutex);
			mutex_enter(&pptr->port_mutex);
			nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
			    ? 1 : 0;
			if (pptr->port_state & (FCP_STATE_INIT |
			    FCP_STATE_OFFLINE)) {
				ret = ENXIO;
			} else if (pptr->port_state & FCP_STATE_ONLINING) {
				ret = EBUSY;
			} else {
				/* Look up the target; create it if unknown. */
				wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
				ptgt = fcp_lookup_target(pptr,
				    (uchar_t *)wwn_ptr);
				if (ptgt == NULL) {
					mutex_exit(&pptr->port_mutex);
					/*
					 * Out-parameters are passed in the
					 * signature's order: state, reason,
					 * action.
					 */
					ptgt = fcp_port_create_tgt(pptr,
					    wwn_ptr, &ret, &fc_status,
					    &pkt_state, &pkt_reason,
					    &pkt_action);
					mutex_enter(&pptr->port_mutex);

					fscsi->scsi_fc_status = fc_status;
					fscsi->scsi_pkt_state =
					    (uchar_t)pkt_state;
					fscsi->scsi_pkt_reason = pkt_reason;
					fscsi->scsi_pkt_action =
					    (uchar_t)pkt_action;

					if (ptgt == NULL && ret == 0) {
						ret = ENOMEM;
					}
				}

				if (ret == 0) {
					/*
					 * Reserve the target so nothing
					 * else reconfigures it while the
					 * pass-through command is active.
					 */
					mutex_enter(&ptgt->tgt_mutex);
					if (ptgt->tgt_state &
					    (FCP_TGT_MARK |
					    FCP_TGT_BUSY)) {
						ret = EBUSY;
					} else {
						ptgt->tgt_state |=
						    FCP_TGT_BUSY;
					}
					lcount = pptr->port_link_cnt;
					tcount = ptgt->tgt_change_cnt;
					mutex_exit(&ptgt->tgt_mutex);
				}
			}
			mutex_exit(&pptr->port_mutex);
			mutex_exit(&fcp_global_mutex);
		}
	}

	if (ret == 0) {
		uint64_t belun = BE_64(fscsi->scsi_lun);

		/* Validate the LUN address and resolve it to a LUN. */
		mutex_enter(&pptr->port_mutex);
		wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
		if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
			/* Remote port is an initiator, not a target. */
			fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
			ret = ENXIO;
		} else if ((belun << 16) != 0) {
			/* Only single-level LUN addressing is supported. */
			fscsi->scsi_fc_status = FC_INVALID_LUN;
			cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
			    " method 0x%02x with LUN number 0x%016" PRIx64,
			    (uint8_t)(belun >> 62), belun);
			ret = ENXIO;
		} else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
		    (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
			ret = ENXIO;
		}
		mutex_exit(&pptr->port_mutex);
	}

	if (ret == 0) {
		/*
		 * Allocate an internal packet large enough for the FCP
		 * command, the response (+ optional info and sense), and
		 * the read data.
		 */
		rsp_len = sizeof (struct fcp_rsp) +
		    sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;
		icmd = fcp_icmd_alloc(pptr, ptgt,
		    sizeof (struct fcp_cmd),
		    rsp_len,
		    fscsi->scsi_buflen,
		    nodma,
		    lcount,
		    tcount,
		    0,
		    FC_INVALID_RSCN_COUNT);
		if (icmd == NULL) {
			ret = ENOMEM;
		} else {
			/* Completion will post the packet's semaphore. */
			fcp_ipkt_sema_init(icmd);
		}
	}

	if (ret == 0) {
		/* Build the FCP_CMND payload and the FC frame header. */
		fpkt = icmd->ipkt_fpkt;
		fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		fpkt->pkt_timeout = fscsi->scsi_timeout;

		/* With no DVMA space the command is built in place. */
		if (nodma) {
			fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
		} else {
			fcmd = &fcp_cmd;
		}
		bzero(fcmd, sizeof (struct fcp_cmd));

		ptgt = plun->lun_tgt;

		/* FCP entity address = the 8-byte LUN, 16 bits at a time. */
		lun_string = (uchar_t *)&fscsi->scsi_lun;
		fcmd->fcp_ent_addr.ent_addr_0 =
		    BE_16(*(uint16_t *)&(lun_string[0]));
		fcmd->fcp_ent_addr.ent_addr_1 =
		    BE_16(*(uint16_t *)&(lun_string[2]));
		fcmd->fcp_ent_addr.ent_addr_2 =
		    BE_16(*(uint16_t *)&(lun_string[4]));
		fcmd->fcp_ent_addr.ent_addr_3 =
		    BE_16(*(uint16_t *)&(lun_string[6]));

		icmd->ipkt_lun = plun;
		icmd->ipkt_restart = 0;
		icmd->ipkt_retries = 0;
		icmd->ipkt_opcode = 0;

		hp = &fpkt->pkt_cmd_fhdr;
		hp->s_id = pptr->port_id;
		hp->d_id = ptgt->tgt_d_id;
		hp->r_ctl = R_CTL_COMMAND;
		hp->type = FC_TYPE_SCSI_FCP;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->rsvd = 0;
		hp->seq_id = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;
		hp->rx_id = 0xffff;
		hp->ro = 0;

		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
		fcmd->fcp_cntl.cntl_read_data = 1;	/* read only */
		fcmd->fcp_cntl.cntl_write_data = 0;
		fcmd->fcp_data_len = fscsi->scsi_buflen;

		scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
		bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
		    fscsi->scsi_cdblen);

		if (!nodma) {
			FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
			    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
		}

		if (ret == 0) {
			/*
			 * Abort if the target changed since we reserved it;
			 * otherwise hand the packet to the transport.
			 */
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				mutex_exit(&ptgt->tgt_mutex);
				fscsi->scsi_fc_status = xport_retval =
				    fc_ulp_transport(pptr->port_fp_handle,
				    fpkt);
				if (fscsi->scsi_fc_status != FC_SUCCESS) {
					ret = EIO;
				}
			} else {
				mutex_exit(&ptgt->tgt_mutex);
				ret = EBUSY;
			}
		}
	}

	/* Block until the completion callback posts the semaphore. */
	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
		ret = fcp_ipkt_sema_wait(icmd);
	}

	rsp = NULL;
	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!SCSI command to d_id=0x%x lun=0x%x"
			    " failed, Bad FCP response values:"
			    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
			    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->reserved_0, rsp->reserved_1,
			    rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);
			ret = EIO;
		}
	}

	if ((ret == 0) && (rsp != NULL)) {
		sense_len = 0;
		info_len = 0;

		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			info_len = rsp->fcp_response_len;
		}

		/* Optional rsp_info immediately follows the fcp_rsp. */
		rsp_info = (struct fcp_rsp_info *)
		    ((uint8_t *)rsp + sizeof (struct fcp_rsp));

		fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;

		if (fscsi->scsi_bufstatus != STATUS_GOOD &&
		    rsp->fcp_u.fcp_status.sense_len_set) {
			/* Sense data follows the rsp_info, if any. */
			sense_len = rsp->fcp_sense_len;
			rsp_sense = (caddr_t)((uint8_t *)rsp_info + info_len);
			sense_to = (struct scsi_extended_sense *)rsp_sense;
			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				reconfig_lun = TRUE;
			}
		}

		/*
		 * A successful REPORT_LUN (or sense indicating LUN change)
		 * may require the LUN inventory to be rebuilt.
		 */
		if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
		    (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
			if (reconfig_lun == FALSE) {
				reconfig_status =
				    fcp_is_reconfig_needed(ptgt, fpkt);
			}

			if ((reconfig_lun == TRUE) ||
			    (reconfig_status == TRUE)) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					/* Schedule async LUN rediscovery. */
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));
					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;
					ret = EBUSY;
					reconfig_pending = TRUE;
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
		}

		if (ret == 0) {
			/* Compute the data residue. */
			buf_len = fscsi->scsi_buflen;
			fscsi->scsi_bufresid = 0;
			if (rsp->fcp_u.fcp_status.resid_under) {
				if (rsp->fcp_resid <= fscsi->scsi_buflen) {
					fscsi->scsi_bufresid = rsp->fcp_resid;
				} else {
					cmn_err(CE_WARN, "fcp: bad residue %x "
					    "for txfer len %x", rsp->fcp_resid,
					    fscsi->scsi_buflen);
					fscsi->scsi_bufresid =
					    fscsi->scsi_buflen;
				}
				buf_len -= fscsi->scsi_bufresid;
			}
			if (rsp->fcp_u.fcp_status.resid_over) {
				fscsi->scsi_bufresid = -rsp->fcp_resid;
			}

			/* Clamp sense to the caller's sense buffer. */
			fscsi->scsi_rqresid = fscsi->scsi_rqlen - sense_len;
			if (fscsi->scsi_rqlen < sense_len) {
				sense_len = fscsi->scsi_rqlen;
			}

			fscsi->scsi_fc_rspcode = 0;
			if (rsp->fcp_u.fcp_status.rsp_len_set) {
				fscsi->scsi_fc_rspcode = rsp_info->rsp_code;
			}
			fscsi->scsi_pkt_state = fpkt->pkt_state;
			fscsi->scsi_pkt_action = fpkt->pkt_action;
			fscsi->scsi_pkt_reason = fpkt->pkt_reason;

			if (buf_len) {
				FCP_CP_IN(fpkt->pkt_data,
				    fscsi->scsi_bufaddr,
				    fpkt->pkt_data_acc,
				    buf_len);
			}

			/*
			 * Copy sense data only when the device supplied
			 * some; rsp_sense is NULL otherwise.
			 */
			if (sense_len > 0) {
				bcopy((void *)rsp_sense,
				    (void *)fscsi->scsi_rqbufaddr,
				    sense_len);
			}
		}
	}

	if (icmd != NULL) {
		fcp_ipkt_sema_cleanup(icmd);
	}

	if (port_busy) {
		fc_ulp_idle_port(pptr->port_fp_handle);
	}

	if ((ptgt != NULL) && !reconfig_pending) {
		/*
		 * Release the target; created and pre-existing targets
		 * need identical handling here.
		 */
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_state &= ~FCP_TGT_BUSY;
		mutex_exit(&ptgt->tgt_mutex);
	}

	return (ret);
}
/*
 * fcp_is_reconfig_needed
 *
 * Compares the REPORT_LUN payload carried by fpkt against the LUNs
 * currently known for ptgt and returns TRUE if they disagree (so a LUN
 * reconfiguration should be scheduled), FALSE otherwise.  LUNs masked
 * by the blacklist are treated as present.
 */
static int
fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
    fc_packet_t *fpkt)
{
	uchar_t			*lun_string;
	uint16_t		lun_num, i;
	int			num_luns;
	int			actual_luns;
	int			num_masked_luns;
	int			lun_buflen;
	struct fcp_lun		*plun = NULL;
	struct fcp_reportlun_resp	*report_lun;
	uint8_t			reconfig_needed = FALSE;
	uint8_t			lun_exists = FALSE;
	fcp_port_t		*pptr = ptgt->tgt_port;

	/* Pull the REPORT_LUN response out of the packet's data buffer. */
	report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);
	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
	    fpkt->pkt_datalen);

	/* The LUN list length field is in bytes; each entry is 8 bytes. */
	num_luns = BE_32(report_lun->num_lun) >> 3;

	/* How many 8-byte LUN strings the response buffer can hold. */
	lun_buflen = (fpkt->pkt_datalen -
	    2 * sizeof (uint32_t)) / sizeof (longlong_t);

	/* Never walk beyond the LUNs actually present in the buffer. */
	if (num_luns <= lun_buflen) {
		actual_luns = num_luns;
	} else {
		actual_luns = lun_buflen;
	}

	mutex_enter(&ptgt->tgt_mutex);

	/* Count the masked (blacklisted) LUNs in the response. */
	num_masked_luns = 0;
	if (fcp_lun_blacklist != NULL) {
		for (i = 0; i < actual_luns; i++) {
			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
			switch (lun_string[0] & 0xC0) {
			case FCP_LUN_ADDRESSING:
			case FCP_PD_ADDRESSING:
			case FCP_VOLUME_ADDRESSING:
				lun_num = ((lun_string[0] & 0x3F) << 8)
				    | lun_string[1];
				if (fcp_should_mask(&ptgt->tgt_port_wwn,
				    lun_num) == TRUE) {
					num_masked_luns++;
				}
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Quick check: if the reported count (masked LUNs included)
	 * differs from what we currently track, reconfigure.
	 */
	if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
		mutex_exit(&ptgt->tgt_mutex);
		kmem_free(report_lun, fpkt->pkt_datalen);
		return (TRUE);
	}

	/*
	 * Same count: walk the reported LUNs and reconfigure if any
	 * unmasked one is unknown to us.
	 */
	for (i = 0; i < actual_luns; i++) {
		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
		lun_exists = FALSE;
		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
		case FCP_VOLUME_ADDRESSING:
			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];

			/* Masked LUNs count as present. */
			if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
			    &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
				lun_exists = TRUE;
				break;
			}

			for (plun = ptgt->tgt_lun; plun;
			    plun = plun->lun_next) {
				if (plun->lun_num == lun_num) {
					lun_exists = TRUE;
					break;
				}
			}
			break;
		default:
			/* Unsupported addressing method: ignore entry. */
			break;
		}

		if (lun_exists == FALSE) {
			reconfig_needed = TRUE;
			break;
		}
	}

	mutex_exit(&ptgt->tgt_mutex);
	kmem_free(report_lun, fpkt->pkt_datalen);

	return (reconfig_needed);
}
/*
 * fcp_symmetric_device_probe
 *
 * Checks whether the LUN's INQUIRY vendor id matches an entry of the
 * known-symmetric disk table.  Returns 0 when the device is listed as
 * symmetric, 1 otherwise.
 */
static int
fcp_symmetric_device_probe(struct fcp_lun *plun)
{
	struct scsi_inquiry	*inq = &plun->lun_inq;
	int			idx;

	for (idx = 0; idx < fcp_symmetric_disk_table_size; idx++) {
		const char	*vid = fcp_symmetric_disk_table[idx];

		/* Compare only the table entry's own length. */
		if (bcmp(inq->inq_vid, vid, (int)strlen(vid)) == 0) {
			return (0);
		}
	}

	return (1);
}
/*
 * fcp_get_statec_count
 *
 * Ioctl backend that reports a port's link state change counter.
 * listlen must be exactly 1; the uint32_t counter is copied out to
 * fioctl.list and the (unmodified) fcp_ioctl structure is copied back
 * to the caller in its native data model.
 *
 * Returns 0 on success, or EINVAL/ENXIO/EFAULT on failure.
 */
static int
fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
{
	int			ret;
	uint32_t		link_cnt;
	struct fcp_ioctl	fioctl;
	struct fcp_port		*pptr = NULL;

	if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
	    &pptr)) != 0) {
		return (ret);
	}

	ASSERT(pptr != NULL);

	/* The caller's buffer holds exactly one uint32_t. */
	if (fioctl.listlen != 1) {
		return (EINVAL);
	}

	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & FCP_STATE_OFFLINE) {
		mutex_exit(&pptr->port_mutex);
		return (ENXIO);
	}

	/* Still initializing with discovery outstanding: not ready. */
	if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
		mutex_exit(&pptr->port_mutex);
		return (ENXIO);
	}
	link_cnt = pptr->port_link_cnt;
	mutex_exit(&pptr->port_mutex);

	if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
		return (EFAULT);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		/* Narrow the structure back to the ILP32 layout. */
		f32_ioctl.fp_minor = fioctl.fp_minor;
		f32_ioctl.listlen = fioctl.listlen;
		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyout((void *)&fioctl, (void *)data,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
#else	/* _MULTI_DATAMODEL */
	if (ddi_copyout((void *)&fioctl, (void *)data,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	return (0);
}
/*
 * fcp_copyin_fcp_ioctl_data
 *
 * Common ioctl helper: copies in a struct fcp_ioctl (32-/64-bit data
 * model aware) and resolves its fp_minor field to the matching fcp
 * port, returned through *pptr.
 *
 * Returns 0 on success, EFAULT on a copyin fault, ENXIO when no port
 * matches.  rval is unused here.
 */
static int
fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
    struct fcp_ioctl *fioctl, struct fcp_port **pptr)
{
	struct fcp_port	*t_pptr;

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		/* 32-bit caller: copy the ILP32 image and widen it. */
		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		fioctl->fp_minor = f32_ioctl.fp_minor;
		fioctl->listlen = f32_ioctl.listlen;
		fioctl->list = (caddr_t)(long)f32_ioctl.list;
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin((void *)data, (void *)fioctl,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
#else	/* _MULTI_DATAMODEL */
	if (ddi_copyin((void *)data, (void *)fioctl,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	/* Resolve fp_minor to a port instance under the global lock. */
	mutex_enter(&fcp_global_mutex);
	t_pptr = fcp_port_head;
	while (t_pptr) {
		if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
			break;
		} else {
			t_pptr = t_pptr->port_next;
		}
	}
	*pptr = t_pptr;
	mutex_exit(&fcp_global_mutex);
	if (t_pptr == NULL) {
		return (ENXIO);
	}

	return (0);
}
/*
 * fcp_port_create_tgt
 *
 * Creates a target structure on demand for the remote port with port
 * WWN pwwn and logs into it (PLOGI, then PRLI), for use by the SCSI
 * pass-through ioctl path when the target is not already known.
 *
 * NOTE(review): this function drops fcp_global_mutex before the ELS
 * exchanges and re-acquires it before returning, so it appears to
 * require the caller to hold fcp_global_mutex on entry -- confirm
 * against all callers.
 *
 * On return, *ret_val is 0 on success or an errno; the fc_* out
 * parameters carry transport status/state/reason/action from the
 * login exchanges.  Returns the target pointer, or NULL on failure.
 */
static struct fcp_tgt *
fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
    int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_tgt	*ptgt = NULL;
	fc_portmap_t	devlist;
	int		lcount;
	int		error;

	*ret_val = 0;

	/* The remote port must already be known to the transport. */
	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
	    &error, 1) == NULL) {
		*ret_val = EIO;
	} else {
		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
		    &devlist) != FC_SUCCESS) {
			*ret_val = EIO;
		}
	}
	devlist.map_type = PORT_DEVICE_USER_CREATE;

	if (*ret_val == 0) {
		/* Allocate the target and seed it from the port map. */
		lcount = pptr->port_link_cnt;
		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			*ret_val = ENOMEM;
		} else {
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_statec_cause = FCP_CAUSE_TGT_CHANGE;
			ptgt->tgt_tmp_cnt = 1;
			ptgt->tgt_d_id = devlist.map_did.port_id;
			ptgt->tgt_hard_addr =
			    devlist.map_hard_addr.hard_addr;
			ptgt->tgt_pd_handle = devlist.map_pd;
			ptgt->tgt_fca_dev = NULL;
			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
			    FC_WWN_SIZE);
			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
			    FC_WWN_SIZE);
			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	/* Drop the global mutex across the (blocking) ELS exchanges. */
	mutex_exit(&fcp_global_mutex);

	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	mutex_enter(&fcp_global_mutex);

	return (ptgt);
}
/*
 * fcp_tgt_send_plogi
 *
 * Sends an ELS PLOGI to the target and synchronously waits for the
 * response (via the internal packet's semaphore).  On completion the
 * packet state/reason/action are passed back through the fc_* out
 * parameters; *fc_status receives the fc_ulp_login() return value.
 *
 * Returns 0 on success, ENOMEM if the packet cannot be allocated, EIO
 * if the login submission fails, or the errno mapped from the packet
 * completion state by fcp_ipkt_sema_wait().
 */
static int
fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_port	*pptr;
	struct fcp_ipkt	*icmd;
	struct fc_packet	*fpkt;
	fc_frame_hdr_t	*hp;
	struct la_els_logi	logi;
	int		tcount;
	int		lcount;
	int		ret, login_retval = ~FC_SUCCESS;

	ret = 0;

	pptr = ptgt->tgt_port;
	lcount = pptr->port_link_cnt;
	tcount = ptgt->tgt_change_cnt;

	/* Internal packet sized for the PLOGI payload and its ACC. */
	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
	    sizeof (la_els_logi_t), 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT);
	if (icmd == NULL) {
		ret = ENOMEM;
	} else {
		/* Completion posts the semaphore we wait on below. */
		fcp_ipkt_sema_init(icmd);
		icmd->ipkt_lun = NULL;
		icmd->ipkt_restart = 0;
		icmd->ipkt_retries = 0;
		icmd->ipkt_opcode = LA_ELS_PLOGI;

		/* Build the ELS request frame header. */
		fpkt = icmd->ipkt_fpkt;
		fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
		fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

		hp = &fpkt->pkt_cmd_fhdr;
		hp->s_id = pptr->port_id;
		hp->d_id = ptgt->tgt_d_id;
		hp->r_ctl = R_CTL_ELS_REQ;
		hp->type = FC_TYPE_EXTENDED_LS;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->seq_id = 0;
		hp->rsvd = 0;
		hp->df_ctl = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;
		hp->rx_id = 0xffff;
		hp->ro = 0;

		/* PLOGI payload: only the ls_code needs to be filled in. */
		bzero(&logi, sizeof (struct la_els_logi));
		logi.ls_code.ls_code = LA_ELS_PLOGI;

		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		*fc_status = login_retval =
		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
		if (*fc_status != FC_SUCCESS) {
			ret = EIO;
		}
	}

	/* Wait for completion only if the login was actually submitted. */
	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
		ret = fcp_ipkt_sema_wait(icmd);

		*fc_pkt_state = fpkt->pkt_state;
		*fc_pkt_reason = fpkt->pkt_reason;
		*fc_pkt_action = fpkt->pkt_action;
	}

	if (icmd != NULL) {
		fcp_ipkt_sema_cleanup(icmd);
	}

	return (ret);
}
/*
 * fcp_tgt_send_prli
 *
 * Placeholder: no PRLI is actually issued here; the exchange is
 * reported as successful unconditionally.  Out parameters are left
 * untouched.
 */
static int
fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
	return (0);
}
/*
 * fcp_ipkt_sema_init
 *
 * Arms an internal packet for synchronous use: creates its semaphore
 * (initially unavailable) and routes the packet completion through
 * fcp_ipkt_sema_callback(), which posts it.
 */
static void
fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
{
	sema_init(&icmd->ipkt_sema, 0, NULL, SEMA_DRIVER, NULL);
	icmd->ipkt_fpkt->pkt_comp = fcp_ipkt_sema_callback;
}
/*
 * fcp_ipkt_sema_wait
 *
 * Blocks until the packet's completion callback posts the semaphore,
 * then maps the FC packet state (and, for rejects, the reason code)
 * to an errno: 0 on success, EAGAIN for timeouts, EBUSY for the
 * various busy conditions, and EIO for everything else.
 */
static int
fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
{
	struct fc_packet	*pkt = icmd->ipkt_fpkt;
	int			rval = EIO;	/* default mapping */

	sema_p(&icmd->ipkt_sema);

	switch (pkt->pkt_state) {
	case FC_PKT_SUCCESS:
		rval = 0;
		break;

	case FC_PKT_TIMEOUT:
		rval = EAGAIN;
		break;

	case FC_PKT_LOCAL_BSY:
	case FC_PKT_TRAN_BSY:
	case FC_PKT_NPORT_BSY:
	case FC_PKT_FABRIC_BSY:
		rval = EBUSY;
		break;

	case FC_PKT_LOCAL_RJT:
		/* Timeout-flavored rejects are retryable. */
		if (pkt->pkt_reason == FC_REASON_SEQ_TIMEOUT ||
		    pkt->pkt_reason == FC_REASON_RX_BUF_TIMEOUT) {
			rval = EAGAIN;
		} else if (pkt->pkt_reason == FC_REASON_PKT_BUSY) {
			rval = EBUSY;
		}
		break;

	case FC_PKT_LS_RJT:
	case FC_PKT_BA_RJT:
		if (pkt->pkt_reason == FC_REASON_LOGICAL_BSY) {
			rval = EBUSY;
		}
		break;

	case FC_PKT_FS_RJT:
		if (pkt->pkt_reason == FC_REASON_FS_LOGICAL_BUSY) {
			rval = EBUSY;
		}
		break;

	default:
		break;
	}

	return (rval);
}
/*
 * fcp_ipkt_sema_callback
 *
 * Packet completion handler installed by fcp_ipkt_sema_init(): posts
 * the semaphore to wake the thread blocked in fcp_ipkt_sema_wait().
 */
static void
fcp_ipkt_sema_callback(struct fc_packet *fpkt)
{
	sema_v(&((struct fcp_ipkt *)fpkt->pkt_ulp_private)->ipkt_sema);
}
/*
 * fcp_ipkt_sema_cleanup
 *
 * Tears down a synchronous internal packet: destroys its semaphore
 * (serialized on the target mutex, as the original code did) and
 * returns the packet to the port's pool.
 */
static void
fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
{
	struct fcp_tgt	*tgt = icmd->ipkt_tgt;
	struct fcp_port	*port = icmd->ipkt_port;

	mutex_enter(&tgt->tgt_mutex);
	sema_destroy(&icmd->ipkt_sema);
	mutex_exit(&tgt->tgt_mutex);

	fcp_icmd_free(port, icmd);
}
/*
 * fcp_port_attach
 *
 * ULP entry point invoked by the FC transport when a port is attached,
 * resumed, or powered up.  Dispatches to fcp_handle_port_attach() or
 * fcp_handle_port_resume() and translates their DDI result into
 * FC_SUCCESS/FC_FAILURE for the transport.
 *
 * Fix: the unknown-command trace message read "unknown cmdcommand";
 * corrected to "unknown command".
 */
static int
fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
    fc_attach_cmd_t cmd, uint32_t s_id)
{
	int	instance;
	int	res = FC_FAILURE;	/* default result */

	ASSERT(pinfo != NULL);

	instance = ddi_get_instance(pinfo->port_dip);

	switch (cmd) {
	case FC_CMD_ATTACH:
		if (fcp_handle_port_attach(ulph, pinfo, s_id,
		    instance) == DDI_SUCCESS) {
			res = FC_SUCCESS;
		} else {
			/* A failed attach must leave no soft state behind. */
			ASSERT(ddi_get_soft_state(fcp_softstate,
			    instance) == NULL);
		}
		break;

	case FC_CMD_RESUME:
	case FC_CMD_POWER_UP:
		/* Resume and power-up share one handler. */
		if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
		    instance) == DDI_SUCCESS) {
			res = FC_SUCCESS;
		}
		break;

	default:
		FCP_TRACE(fcp_logq, "fcp",
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "port_attach: unknown command: %d", cmd);
		break;
	}

	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);

	return (res);
}
/*
 * fcp_port_detach
 *
 * ULP entry point invoked when a port is suspended, powered down, or
 * detached.  Maps the command to the corresponding FCP_STATE_* flag
 * and hands off to fcp_handle_port_detach().  Returns FC_FAILURE for
 * an unrecognized command, otherwise the handler's result.
 */
static int
fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
    fc_detach_cmd_t cmd)
{
	int		flag;
	int		instance;
	struct fcp_port	*pptr;

	instance = ddi_get_instance(info->port_dip);
	pptr = ddi_get_soft_state(fcp_softstate, instance);

	switch (cmd) {
	case FC_CMD_SUSPEND:
		FCP_DTRACE(fcp_logq, "fcp",
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "port suspend called for port %d", instance);
		flag = FCP_STATE_SUSPENDED;
		break;

	case FC_CMD_POWER_DOWN:
		FCP_DTRACE(fcp_logq, "fcp",
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "port power down called for port %d", instance);
		flag = FCP_STATE_POWER_DOWN;
		break;

	case FC_CMD_DETACH:
		FCP_DTRACE(fcp_logq, "fcp",
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "port detach called for port %d", instance);
		flag = FCP_STATE_DETACHING;
		break;

	default:
		/* Unknown detach command: refuse. */
		return (FC_FAILURE);
	}

	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");

	return (fcp_handle_port_detach(pptr, flag, instance));
}
static int
fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
{
int retval = FC_UNCLAIMED;
struct fcp_port *pptr = NULL;
struct devctl_iocdata *dcp = NULL;
dev_info_t *cdip;
mdi_pathinfo_t *pip = NULL;
char *ndi_nm;
char *ndi_addr;
int is_mpxio;
boolean_t enteredv;
int devi_entered = 0;
clock_t end_time;
ASSERT(rval != NULL);
FCP_DTRACE(fcp_logq, "fcp",
fcp_trace, FCP_BUF_LEVEL_8, 0,
"fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
if (claimed) {
return (retval);
}
if ((pptr = fcp_get_port(port_handle)) == NULL) {
fcp_log(CE_WARN, NULL,
"!fcp:Invalid port handle handle in ioctl");
*rval = ENXIO;
return (retval);
}
is_mpxio = pptr->port_mpxio;
switch (cmd) {
case DEVCTL_BUS_GETSTATE:
case DEVCTL_BUS_QUIESCE:
case DEVCTL_BUS_UNQUIESCE:
case DEVCTL_BUS_RESET:
case DEVCTL_BUS_RESETALL:
case DEVCTL_BUS_DEV_CREATE:
if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
return (retval);
}
break;
case DEVCTL_DEVICE_GETSTATE:
case DEVCTL_DEVICE_OFFLINE:
case DEVCTL_DEVICE_ONLINE:
case DEVCTL_DEVICE_REMOVE:
case DEVCTL_DEVICE_RESET:
if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
return (retval);
}
ASSERT(dcp != NULL);
if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
FCP_TRACE(fcp_logq, pptr->port_instbuf,
fcp_trace, FCP_BUF_LEVEL_2, 0,
"ioctl: can't get name (%s) or addr (%s)",
ndi_nm ? ndi_nm : "<null ptr>",
ndi_addr ? ndi_addr : "<null ptr>");
ndi_dc_freehdl(dcp);
return (retval);
}
ASSERT(pptr != NULL);
if (is_mpxio) {
mdi_devi_enter(pptr->port_dip, &enteredv);
} else {
ndi_devi_enter(pptr->port_dip);
}
devi_entered = 1;
if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
ndi_addr)) == NULL) {
pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
if (pip == NULL ||
((cdip = mdi_pi_get_client(pip)) == NULL)) {
*rval = ENXIO;
goto out;
}
}
break;
default:
*rval = ENOTTY;
return (retval);
}
retval = FC_SUCCESS;
*rval = 0;
FCP_DTRACE(fcp_logq, pptr->port_instbuf,
fcp_trace, FCP_BUF_LEVEL_8, 0,
"ioctl: claiming this one");
switch (cmd) {
case DEVCTL_DEVICE_GETSTATE:
ASSERT(cdip != NULL);
ASSERT(dcp != NULL);
if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
*rval = EFAULT;
}
break;
case DEVCTL_DEVICE_REMOVE:
case DEVCTL_DEVICE_OFFLINE: {
int flag = 0;
int lcount;
int tcount;
struct fcp_pkt *head = NULL;
struct fcp_lun *plun;
child_info_t *cip = CIP(cdip);
int all = 1;
struct fcp_lun *tplun;
struct fcp_tgt *ptgt;
ASSERT(pptr != NULL);
ASSERT(cdip != NULL);
mutex_enter(&pptr->port_mutex);
if (pip != NULL) {
cip = CIP(pip);
}
if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
mutex_exit(&pptr->port_mutex);
*rval = ENXIO;
break;
}
head = fcp_scan_commands(plun);
if (head != NULL) {
fcp_abort_commands(head, LUN_PORT);
}
lcount = pptr->port_link_cnt;
tcount = plun->lun_tgt->tgt_change_cnt;
mutex_exit(&pptr->port_mutex);
if (cmd == DEVCTL_DEVICE_REMOVE) {
flag = NDI_DEVI_REMOVE;
}
if (is_mpxio) {
mdi_devi_exit(pptr->port_dip, enteredv);
} else {
ndi_devi_exit(pptr->port_dip);
}
devi_entered = 0;
*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
FCP_OFFLINE, lcount, tcount, flag);
if (*rval != NDI_SUCCESS) {
*rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
break;
}
fcp_update_offline_flags(plun);
ptgt = plun->lun_tgt;
mutex_enter(&ptgt->tgt_mutex);
for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
tplun->lun_next) {
mutex_enter(&tplun->lun_mutex);
if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
all = 0;
}
mutex_exit(&tplun->lun_mutex);
}
if (all) {
ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
if (FC_TOP_EXTERNAL(pptr->port_topology) &&
fcp_enable_auto_configuration) {
ptgt->tgt_manual_config_only = 1;
}
}
mutex_exit(&ptgt->tgt_mutex);
break;
}
case DEVCTL_DEVICE_ONLINE: {
int lcount;
int tcount;
struct fcp_lun *plun;
child_info_t *cip = CIP(cdip);
ASSERT(cdip != NULL);
ASSERT(pptr != NULL);
mutex_enter(&pptr->port_mutex);
if (pip != NULL) {
cip = CIP(pip);
}
if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
mutex_exit(&pptr->port_mutex);
*rval = ENXIO;
break;
}
lcount = pptr->port_link_cnt;
tcount = plun->lun_tgt->tgt_change_cnt;
mutex_exit(&pptr->port_mutex);
mutex_enter(&LUN_TGT->tgt_mutex);
plun->lun_state |= FCP_LUN_ONLINING;
mutex_exit(&LUN_TGT->tgt_mutex);
if (is_mpxio) {
mdi_devi_exit(pptr->port_dip, enteredv);
} else {
ndi_devi_exit(pptr->port_dip);
}
devi_entered = 0;
*rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
FCP_ONLINE, lcount, tcount, 0);
if (*rval != NDI_SUCCESS) {
mutex_enter(&LUN_TGT->tgt_mutex);
plun->lun_state &= ~FCP_LUN_ONLINING;
mutex_exit(&LUN_TGT->tgt_mutex);
*rval = EIO;
break;
}
mutex_enter(&LUN_TGT->tgt_mutex);
plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
FCP_LUN_ONLINING);
mutex_exit(&LUN_TGT->tgt_mutex);
break;
}
case DEVCTL_BUS_DEV_CREATE: {
uchar_t *bytes = NULL;
uint_t nbytes;
struct fcp_tgt *ptgt = NULL;
struct fcp_lun *plun = NULL;
dev_info_t *useless_dip = NULL;
*rval = ndi_dc_devi_create(dcp, pptr->port_dip,
DEVCTL_CONSTRUCT, &useless_dip);
if (*rval != 0 || useless_dip == NULL) {
break;
}
if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
&nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
*rval = EINVAL;
(void) ndi_devi_free(useless_dip);
if (bytes != NULL) {
ddi_prop_free(bytes);
}
break;
}
*rval = fcp_create_on_demand(pptr, bytes);
if (*rval == 0) {
mutex_enter(&pptr->port_mutex);
ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
if (ptgt) {
mutex_enter(&ptgt->tgt_mutex);
plun = ptgt->tgt_lun;
while (plun &&
plun->lun_state & FCP_LUN_OFFLINE) {
plun = plun->lun_next;
}
mutex_exit(&ptgt->tgt_mutex);
}
mutex_exit(&pptr->port_mutex);
}
if (*rval == 0 && ptgt && plun) {
mutex_enter(&plun->lun_mutex);
end_time = ddi_get_lbolt() +
SEC_TO_TICK(fcp_lun_ready_retry);
while (ddi_get_lbolt() < end_time) {
retval = FC_SUCCESS;
if (plun->lun_cip) {
if (plun->lun_mpxio == 0) {
cdip = DIP(plun->lun_cip);
} else {
cdip = mdi_pi_get_client(
PIP(plun->lun_cip));
}
if (cdip == NULL) {
*rval = ENXIO;
break;
}
if (!i_ddi_devi_attached(cdip)) {
mutex_exit(&plun->lun_mutex);
delay(drv_usectohz(1000000));
mutex_enter(&plun->lun_mutex);
} else {
mutex_exit(&plun->lun_mutex);
plun = plun->lun_next;
while (plun && (plun->lun_state
& FCP_LUN_OFFLINE)) {
plun = plun->lun_next;
}
if (!plun) {
break;
}
mutex_enter(&plun->lun_mutex);
}
} else {
*rval = ENXIO;
break;
}
}
if (plun) {
mutex_exit(&plun->lun_mutex);
} else {
char devnm[MAXNAMELEN];
int nmlen;
nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
ddi_node_name(cdip),
ddi_get_name_addr(cdip));
if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
0) {
*rval = EFAULT;
}
}
} else {
int i;
char buf[25];
for (i = 0; i < FC_WWN_SIZE; i++) {
(void) sprintf(&buf[i << 1], "%02x", bytes[i]);
}
fcp_log(CE_WARN, pptr->port_dip,
"!Failed to create nodes for pwwn=%s; error=%x",
buf, *rval);
}
(void) ndi_devi_free(useless_dip);
ddi_prop_free(bytes);
break;
}
case DEVCTL_DEVICE_RESET: {
struct fcp_lun *plun;
child_info_t *cip = CIP(cdip);
ASSERT(cdip != NULL);
ASSERT(pptr != NULL);
mutex_enter(&pptr->port_mutex);
if (pip != NULL) {
cip = CIP(pip);
}
if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
mutex_exit(&pptr->port_mutex);
*rval = ENXIO;
break;
}
mutex_exit(&pptr->port_mutex);
mutex_enter(&plun->lun_tgt->tgt_mutex);
if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
mutex_exit(&plun->lun_tgt->tgt_mutex);
*rval = ENXIO;
break;
}
if (plun->lun_sd == NULL) {
mutex_exit(&plun->lun_tgt->tgt_mutex);
*rval = ENXIO;
break;
}
mutex_exit(&plun->lun_tgt->tgt_mutex);
if (fcp_scsi_reset(&plun->lun_sd->sd_address,
RESET_TARGET) == FALSE) {
*rval = EIO;
}
break;
}
case DEVCTL_BUS_GETSTATE:
ASSERT(dcp != NULL);
ASSERT(pptr != NULL);
ASSERT(pptr->port_dip != NULL);
if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
NDI_SUCCESS) {
*rval = EFAULT;
}
break;
case DEVCTL_BUS_QUIESCE:
case DEVCTL_BUS_UNQUIESCE:
*rval = ENOTSUP;
break;
case DEVCTL_BUS_RESET:
case DEVCTL_BUS_RESETALL:
ASSERT(pptr != NULL);
(void) fcp_linkreset(pptr, NULL, KM_SLEEP);
break;
default:
ASSERT(dcp != NULL);
*rval = ENOTTY;
break;
}
out: if (devi_entered) {
if (is_mpxio) {
mdi_devi_exit(pptr->port_dip, enteredv);
} else {
ndi_devi_exit(pptr->port_dip);
}
}
if (dcp != NULL) {
ndi_dc_freehdl(dcp);
}
return (retval);
}
/*
 * fcp_els_callback
 *
 * ULP callback invoked by the FC transport when an unsolicited ELS frame
 * arrives on a port.  fcp only services unsolicited PRLI requests; every
 * other frame is left for another ULP to claim.
 *
 * Returns FC_SUCCESS when the PRLI was accepted and answered,
 * FC_UNCLAIMED otherwise.
 */
static int
fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
    uint32_t claimed)
{
	uchar_t r_ctl;
	uchar_t ls_code;
	struct fcp_port *pptr;

	/* Ignore frames for unknown ports or frames another ULP claimed. */
	if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
		return (FC_UNCLAIMED);
	}

	/* Don't service requests while the port is going away. */
	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & (FCP_STATE_DETACHING |
	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
		mutex_exit(&pptr->port_mutex);
		return (FC_UNCLAIMED);
	}
	mutex_exit(&pptr->port_mutex);

	r_ctl = buf->ub_frame.r_ctl;

	switch (r_ctl & R_CTL_ROUTING) {
	case R_CTL_EXTENDED_SVC:
		if (r_ctl == R_CTL_ELS_REQ) {
			/* First payload byte carries the ELS command code. */
			ls_code = buf->ub_buffer[0];

			switch (ls_code) {
			case LA_ELS_PRLI:
				/*
				 * Answer the unsolicited PRLI with an ACC;
				 * claim the buffer only on success.
				 */
				if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
					return (FC_SUCCESS);
				}
				return (FC_UNCLAIMED);
			default:
				break;
			}
		}
		/* FALLTHROUGH -- anything but a PRLI request is unclaimed */
	default:
		return (FC_UNCLAIMED);
	}
}
/*
 * fcp_data_callback
 *
 * ULP callback for unsolicited data frames.  fcp never consumes
 * unsolicited data, so the frame is always left unclaimed.
 */
static int
fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
    uint32_t claimed)
{
	return (FC_UNCLAIMED);
}
/*
 * fcp_statec_callback
 *
 * Link/port state-change callback registered with the FC transport.
 * Records the new physical port state and S_ID, then dispatches on the
 * masked state:
 *  - OFFLINE/RESET_REQUESTED: mark LUNs busy or take the port offline
 *  - ONLINE/LIP: restart discovery for the reported device list
 *  - DEVICE_CHANGE: reconcile existing targets against the new map
 *  - RESET: wait for the follow-up offline/online callback
 *
 * port_mutex is acquired here and released on every exit path of the
 * switch; it is also dropped/retaken around blocking operations.
 */
static void
fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
    uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
    uint32_t dev_cnt, uint32_t port_sid)
{
	uint32_t link_count;
	int map_len = 0;
	struct fcp_port *pptr;
	fcp_map_tag_t *map_tag = NULL;

	if ((pptr = fcp_get_port(port_handle)) == NULL) {
		fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
		return;		/* nothing to work with */
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_statec_callback: port state/dev_cnt/top ="
	    "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
	    dev_cnt, port_top);

	mutex_enter(&pptr->port_mutex);

	/* Ignore state changes while detaching, suspended or powered down. */
	if (pptr->port_state & (FCP_STATE_DETACHING |
	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
		mutex_exit(&pptr->port_mutex);
		return;
	}

	/*
	 * Flag a device-change callback in progress so other paths can tell
	 * discovery work is pending; cleared before every return below.
	 */
	if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
		pptr->port_state |= FCP_STATE_IN_CB_DEVC;
	}

	pptr->port_phys_state = port_state;

	if (dev_cnt) {
		/*
		 * One change-count tag per reported device.  The port mutex
		 * is dropped for the allocation; if it fails the state
		 * change cannot be processed at all.
		 */
		mutex_exit(&pptr->port_mutex);
		map_len = sizeof (*map_tag) * dev_cnt;
		map_tag = kmem_alloc(map_len, KM_NOSLEEP);
		if (map_tag == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp%d: failed to allocate for map tags; "
			    " state change will not be processed",
			    pptr->port_instance);
			mutex_enter(&pptr->port_mutex);
			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
			mutex_exit(&pptr->port_mutex);
			return;
		}
		mutex_enter(&pptr->port_mutex);
	}

	/* Pick up a new S_ID if the fabric assigned one. */
	if (pptr->port_id != port_sid) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
		    port_sid);
		pptr->port_id = port_sid;
	}

	switch (FC_PORT_STATE_MASK(port_state)) {
	case FC_STATE_OFFLINE:
	case FC_STATE_RESET_REQUESTED:
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "link went offline");
		if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
			/*
			 * Already offline but devices were reported:
			 * restart initialization for them.
			 */
			pptr->port_tmp_cnt += dev_cnt;
			pptr->port_state &= ~FCP_STATE_OFFLINE;
			pptr->port_state |= FCP_STATE_INIT;
			link_count = pptr->port_link_cnt;
			fcp_handle_devices(pptr, devlist, dev_cnt,
			    link_count, map_tag, FCP_CAUSE_LINK_DOWN);
		} else {
			/*
			 * Fresh offline transition: bump the link generation
			 * so in-flight work from the old link is discarded,
			 * and mark all LUNs busy.
			 */
			pptr->port_link_cnt++;
			ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
			fcp_update_state(pptr, (FCP_LUN_BUSY |
			    FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
			if (pptr->port_mpxio) {
				fcp_update_mpxio_path_verifybusy(pptr);
			}
			pptr->port_state |= FCP_STATE_OFFLINE;
			pptr->port_state &=
			    ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
			pptr->port_tmp_cnt = 0;
		}
		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_ONLINE:
	case FC_STATE_LIP:
	case FC_STATE_LIP_LBIT_SET:
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "link went online");
		pptr->port_link_cnt++;
		/*
		 * Let outstanding internal packets drain before starting a
		 * new discovery cycle (drops/retakes port_mutex while
		 * sleeping one second at a time).
		 */
		while (pptr->port_ipkt_cnt) {
			mutex_exit(&pptr->port_mutex);
			delay(drv_usectohz(1000000));
			mutex_enter(&pptr->port_mutex);
		}
		pptr->port_topology = port_top;
		fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
		    FCP_CAUSE_LINK_CHANGE);
		pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
		pptr->port_state |= FCP_STATE_ONLINING;
		pptr->port_tmp_cnt = dev_cnt;
		link_count = pptr->port_link_cnt;
		pptr->port_deadline = fcp_watchdog_time +
		    FCP_ICMD_DEADLINE;
		if (!dev_cnt) {
			/* Nothing out there: go straight to ONLINE. */
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "No remote ports discovered");
			pptr->port_state &= ~FCP_STATE_ONLINING;
			pptr->port_state |= FCP_STATE_ONLINE;
		}
		switch (port_top) {
		case FC_TOP_FABRIC:
		case FC_TOP_PUBLIC_LOOP:
		case FC_TOP_PRIVATE_LOOP:
		case FC_TOP_PT_PT:
			/* Retry a failed name-server registration first. */
			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
				fcp_retry_ns_registry(pptr, port_sid);
			}
			fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
			    map_tag, FCP_CAUSE_LINK_CHANGE);
			break;
		default:
			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
				pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
			}
			pptr->port_tmp_cnt -= dev_cnt;
			fcp_log(CE_WARN, pptr->port_dip,
			    "!unknown/unsupported topology (0x%x)", port_top);
			break;
		}
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "Notify ssd of the reset to reinstate the reservations");
		/*
		 * Let SCSI target drivers re-establish reservations lost
		 * across the link event.
		 */
		scsi_hba_reset_notify_callback(&pptr->port_mutex,
		    &pptr->port_reset_notify_listf);
		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_RESET:
		/* A reset implies the port already went offline. */
		ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "RESET state, waiting for Offline/Online state_cb");
		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_DEVICE_CHANGE:
		/*
		 * If the port is offline or still initializing, the change
		 * will be absorbed by the in-progress cycle.
		 */
		if (pptr->port_state & (FCP_STATE_OFFLINE |
		    FCP_STATE_INIT)) {
			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
			mutex_exit(&pptr->port_mutex);
			break;
		}
		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
			fcp_retry_ns_registry(pptr, port_sid);
		}
		/* Only restart the deadline when no internal cmds pend. */
		if (!pptr->port_ipkt_cnt) {
			pptr->port_deadline = fcp_watchdog_time +
			    FCP_ICMD_DEADLINE;
		}
		fcp_update_targets(pptr, devlist, dev_cnt,
		    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
		link_count = pptr->port_link_cnt;
		fcp_handle_devices(pptr, devlist, dev_cnt,
		    link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
		pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_TARGET_PORT_RESET:
		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
			fcp_retry_ns_registry(pptr, port_sid);
		}
		mutex_exit(&pptr->port_mutex);
		break;

	default:
		fcp_log(CE_WARN, pptr->port_dip,
		    "!Invalid state change=0x%x", port_state);
		mutex_exit(&pptr->port_mutex);
		break;
	}

	if (map_tag) {
		kmem_free(map_tag, map_len);
	}
}
/*
 * fcp_handle_devices
 *
 * Core of device discovery: given the transport's port map for a link
 * event, tag the targets that are still present, offline targets that
 * vanished (on a link change), and dispatch each map entry to the
 * appropriate create/change/offline handler.
 *
 * Called with pptr->port_mutex held; it is dropped and reacquired around
 * the drain wait in the PORT_DEVICE_OLD path.  Work is abandoned if
 * port_link_cnt moves past link_cnt (a newer link event superseded us).
 */
static void
fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
    uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
{
	int i;
	int check_finish_init = 0;
	fc_portmap_t *map_entry;
	struct fcp_tgt *ptgt = NULL;

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "fcp_handle_devices: called for %d dev(s)", dev_cnt);

	if (dev_cnt) {
		ASSERT(map_tag != NULL);
	}

	/*
	 * Pass 1: snapshot each known target's change count into map_tag
	 * and, on a link change, tag it as present in the new map.
	 * NOTE(review): map_tag[i] stays uninitialized for entries whose
	 * target is not yet known; the FCP_TGT_TRACE calls below may read
	 * such a tag -- confirm the trace macro tolerates that.
	 */
	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
		map_entry = &(devlist[i]);
		ptgt = fcp_lookup_target(pptr,
		    (uchar_t *)&(map_entry->map_pwwn));
		if (ptgt) {
			map_tag[i] = ptgt->tgt_change_cnt;
			if (cause == FCP_CAUSE_LINK_CHANGE) {
				ptgt->tgt_aux_state = FCP_TGT_TAGGED;
			}
		}
	}

	/*
	 * Pass 2: on a link change, any known target that was not tagged
	 * above disappeared from the map -- offline it now.
	 */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			mutex_enter(&ptgt->tgt_mutex);
			if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
			    (cause == FCP_CAUSE_LINK_CHANGE) &&
			    !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
				fcp_offline_target_now(pptr, ptgt,
				    link_cnt, ptgt->tgt_change_cnt, 0);
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	/* Pass 3: dispatch each map entry according to its map_type. */
	for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
		if (check_finish_init) {
			/* Close out accounting for the previous entry. */
			ASSERT(i > 0);
			(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
			    map_tag[i - 1], cause);
			check_finish_init = 0;
		}
		map_entry = &(devlist[i]);
		/* Skip duplicate entries for the same remote port. */
		if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
			continue;
		}
		ptgt = fcp_lookup_target(pptr,
		    (uchar_t *)&(map_entry->map_pwwn));
		if (ptgt) {
			ptgt->tgt_aux_state = 0;	/* clear pass-1 tag */
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "handle_devices: map did/state/type/flags = "
			    "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
			    "tgt_state=%d",
			    map_entry->map_did.port_id, map_entry->map_state,
			    map_entry->map_type, map_entry->map_flags,
			    ptgt->tgt_d_id, ptgt->tgt_state);
		}
		if (map_entry->map_type == PORT_DEVICE_OLD ||
		    map_entry->map_type == PORT_DEVICE_NEW ||
		    map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED ||
		    map_entry->map_type == PORT_DEVICE_CHANGED) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "map_type=%x, did = %x",
			    map_entry->map_type,
			    map_entry->map_did.port_id);
		}

		switch (map_entry->map_type) {
		case PORT_DEVICE_NOCHANGE:
		case PORT_DEVICE_USER_CREATE:
		case PORT_DEVICE_USER_LOGIN:
		case PORT_DEVICE_NEW:
		case PORT_DEVICE_REPORTLUN_CHANGED:
			/* New or (re)validated device: start login/PRLI. */
			FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);
			if (fcp_handle_mapflags(pptr, ptgt, map_entry,
			    link_cnt, (ptgt) ? map_tag[i] : 0,
			    cause) == TRUE) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_2);
				check_finish_init++;
			}
			break;

		case PORT_DEVICE_OLD:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_3);
				mutex_enter(&ptgt->tgt_mutex);
				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
					/*
					 * Wait for in-flight internal and
					 * LUN commands to drain (dropping
					 * port_mutex while sleeping), then
					 * offline the target.
					 */
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);
					mutex_enter(&ptgt->tgt_mutex);
					while (ptgt->tgt_ipkt_cnt ||
					    fcp_outstanding_lun_cmds(ptgt)
					    == FC_SUCCESS) {
						mutex_exit(&ptgt->tgt_mutex);
						delay(drv_usectohz(1000000));
						mutex_enter(&ptgt->tgt_mutex);
					}
					mutex_exit(&ptgt->tgt_mutex);
					mutex_enter(&pptr->port_mutex);
					mutex_enter(&ptgt->tgt_mutex);
					(void) fcp_offline_target(pptr, ptgt,
					    link_cnt, map_tag[i], 0, 0);
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
			check_finish_init++;
			break;

		case PORT_DEVICE_USER_DELETE:
		case PORT_DEVICE_USER_LOGOUT:
			/* Administrative removal: offline immediately. */
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_4);
				mutex_enter(&ptgt->tgt_mutex);
				if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
					(void) fcp_offline_target(pptr, ptgt,
					    link_cnt, map_tag[i], 1, 0);
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
			check_finish_init++;
			break;

		case PORT_DEVICE_CHANGED:
			if (ptgt != NULL) {
				FCP_TGT_TRACE(ptgt, map_tag[i],
				    FCP_TGT_TRACE_5);
				if (fcp_device_changed(pptr, ptgt,
				    map_entry, link_cnt, map_tag[i],
				    cause) == TRUE) {
					check_finish_init++;
				}
			} else {
				/* Unknown target: treat as a new device. */
				if (fcp_handle_mapflags(pptr, ptgt,
				    map_entry, link_cnt, 0, cause) == TRUE) {
					check_finish_init++;
				}
			}
			break;

		default:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Invalid map_type=0x%x", map_entry->map_type);
			check_finish_init++;
			break;
		}
	}

	if (check_finish_init && pptr->port_link_cnt == link_cnt) {
		ASSERT(i > 0);
		(void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
		    map_tag[i-1], cause);
	} else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
		/* Empty map: everything that was there is now gone. */
		fcp_offline_all(pptr, link_cnt, cause);
	}
}
/*
 * fcp_handle_reportlun_changed
 *
 * Rebuild a target's LUN inventory after a REPORTLUN-changed event:
 * ensure LUN 0 exists (allocating it if needed), mark it busy, collapse
 * the target's LUN count to 1, and issue a fresh SCMD_REPORT_LUN.
 *
 * Returns TRUE when the caller must treat the operation as finished
 * (allocation or send failure), FALSE when the REPORT LUN command was
 * issued and completion is asynchronous.
 */
static int
fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause)
{
	struct fcp_lun *plun;
	struct fcp_port *pptr;
	int rscn_count;
	int lun0_newalloc;
	int ret = TRUE;

	ASSERT(ptgt);
	pptr = ptgt->tgt_port;
	lun0_newalloc = 0;

	/* LUN 0 must exist to address the REPORT LUN command at. */
	if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
		plun = fcp_alloc_lun(ptgt);
		if (plun == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to allocate lun 0 for"
			    " D_ID=%x", ptgt->tgt_d_id);
			return (ret);
		}
		lun0_newalloc = 1;
	}

	mutex_enter(&ptgt->tgt_mutex);
	/*
	 * A freshly allocated or previously offline LUN 0 may not map to a
	 * real device; note that so later processing can tell.
	 */
	if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) {
		plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
	}
	plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
	plun->lun_state &= ~FCP_LUN_OFFLINE;
	/* Restart enumeration from a single known LUN. */
	ptgt->tgt_lun_cnt = 1;
	ptgt->tgt_report_lun_cnt = 0;
	mutex_exit(&ptgt->tgt_mutex);

	rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle);
	if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
	    sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt,
	    ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN "
		    "to D_ID=%x", ptgt->tgt_d_id);
	} else {
		ret = FALSE;	/* command in flight; finish async */
	}

	return (ret);
}
/*
 * fcp_handle_mapflags
 *
 * Handle one "device present" port-map entry: allocate target soft state
 * if needed, refresh the target's identity (D_ID, hard address, WWNs,
 * pd handle), decide its node state (auto-configure vs. on-demand), and
 * kick off a PLOGI or PRLI to (re)establish the FCP session.
 *
 * Called with pptr->port_mutex held; it is temporarily dropped around
 * blocking allocation, the REPORTLUN-changed path, and the ELS send.
 *
 * Returns TRUE when the caller must finish accounting itself (error or
 * synchronous completion), FALSE when an ELS was issued and completion
 * is asynchronous.
 */
static int
fcp_handle_mapflags(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
{
	int lcount;
	int tcount;
	int ret = TRUE;
	int alloc;
	struct fcp_ipkt *icmd;
	struct fcp_lun *pseq_lun = NULL;
	uchar_t opcode;
	int valid_ptgt_was_passed = FALSE;

	ASSERT(mutex_owned(&pptr->port_mutex));

	if (ptgt == NULL) {
		/* Brand-new device: allocate target state (may block). */
		mutex_exit(&pptr->port_mutex);
		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
		mutex_enter(&pptr->port_mutex);
		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			return (ret);
		}
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_statec_cause = cause;
		ptgt->tgt_tmp_cnt = 1;
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		valid_ptgt_was_passed = TRUE;
	}

	/*
	 * Refresh the target's addressing info from the map entry; the
	 * cached FCA device handle is invalidated.
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_d_id = map_entry->map_did.port_id;
	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
	ptgt->tgt_pd_handle = map_entry->map_pd;
	ptgt->tgt_fca_dev = NULL;
	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
	    FC_WWN_SIZE);
	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
	    FC_WWN_SIZE);

	/*
	 * For an unchanged, still-logged-in target with an online
	 * sequential-access (tape) LUN, skip rediscovery and just clear
	 * the busy/mark bits -- presumably to avoid disturbing an active
	 * tape session with a fresh PRLI; confirm against PRLI semantics.
	 */
	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
	    valid_ptgt_was_passed) {
		for (pseq_lun = ptgt->tgt_lun;
		    pseq_lun != NULL;
		    pseq_lun = pseq_lun->lun_next) {
			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_MARK);
				mutex_exit(&ptgt->tgt_mutex);
				return (ret);
			}
		}
	}

	/* REPORTLUN change: re-enumerate LUNs without a fresh login. */
	if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
		ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
		ret = fcp_handle_reportlun_changed(ptgt, cause);
		mutex_enter(&pptr->port_mutex);
		return (ret);
	}

	switch (ptgt->tgt_node_state) {
	case FCP_TGT_NODE_NONE:
	case FCP_TGT_NODE_ON_DEMAND:
		/*
		 * On external (fabric/public-loop) topologies, devices are
		 * only auto-configured when allowed; otherwise they stay
		 * "on demand" until an explicit user create request.
		 */
		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    !fcp_enable_auto_configuration &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    fcp_enable_auto_configuration &&
		    (ptgt->tgt_manual_config_only == 1) &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			/* Target was flagged for manual config only. */
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
		break;

	case FCP_TGT_NODE_PRESENT:
		break;
	}

	/* The boot device is always configured. */
	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
		if (bcmp((caddr_t)pptr->port_boot_wwn,
		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
		    sizeof (ptgt->tgt_port_wwn)) == 0) {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
	}
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
	    map_entry->map_rscn_info.ulp_rscn_count);

	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);
	/* 0 means "no tag was snapshotted"; use the live change count. */
	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
	lcount = link_cnt;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/* Already logged in with a valid pd: PRLI suffices; else PLOGI. */
	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;
	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));
	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
	    cause, map_entry->map_rscn_info.ulp_rscn_count);
	if (icmd == NULL) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
		mutex_enter(&pptr->port_mutex);
		return (FALSE);
	}

	ret = FALSE;
	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
	    lcount, tcount, cause)) == DDI_SUCCESS) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
	} else {
		/* Send failed; release the packet and finish synchronously. */
		fcp_icmd_free(pptr, icmd);
		ret = TRUE;
	}
	mutex_enter(&pptr->port_mutex);

	return (ret);
}
/*
 * fcp_send_els
 *
 * Build and issue a PLOGI or PRLI extended link service to a target.
 * If no internal packet is supplied, one is allocated here ("internal")
 * and freed on failure; a caller-supplied packet is left for the caller
 * to free on failure.
 *
 * Before issuing, the target's state generation is checked under
 * port_mutex (FCP_TGT_STATE_CHANGED); a stale request is silently
 * dropped and DDI_FAILURE returned.
 *
 * Returns DDI_SUCCESS when the ELS was handed to the transport,
 * DDI_FAILURE otherwise.
 */
static int
fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
{
	fc_packet_t *fpkt;
	fc_frame_hdr_t *hp;
	int internal = 0;	/* nonzero => we own icmd's lifetime */
	int alloc;
	int cmd_len;
	int resp_len;
	int res = DDI_FAILURE;
	int rval = DDI_FAILURE;

	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
	ASSERT(ptgt->tgt_port == pptr);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");

	if (opcode == LA_ELS_PLOGI) {
		cmd_len = sizeof (la_els_logi_t);
		resp_len = sizeof (la_els_logi_t);
	} else {
		ASSERT(opcode == LA_ELS_PRLI);
		cmd_len = sizeof (la_els_prli_t);
		resp_len = sizeof (la_els_prli_t);
	}

	if (icmd == NULL) {
		/* Size for the larger of the two ELS payloads. */
		alloc = FCP_MAX(sizeof (la_els_logi_t),
		    sizeof (la_els_prli_t));
		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
		    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
		if (icmd == NULL) {
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
			return (res);
		}
		internal++;
	}
	fpkt = icmd->ipkt_fpkt;

	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = 0;
	icmd->ipkt_retries = 0;

	/* Common frame header for an outbound ELS request. */
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

	hp = &fpkt->pkt_cmd_fhdr;
	hp->r_ctl = R_CTL_ELS_REQ;
	hp->s_id = pptr->port_id;
	hp->d_id = ptgt->tgt_d_id;
	hp->type = FC_TYPE_EXTENDED_LS;
	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	hp->seq_id = 0;
	hp->rsvd = 0;
	hp->df_ctl = 0;
	hp->seq_cnt = 0;
	hp->ox_id = 0xffff;	/* transport assigns real exchange IDs */
	hp->rx_id = 0xffff;
	hp->ro = 0;

	switch (opcode) {
	case LA_ELS_PLOGI: {
		struct la_els_logi logi;

		bzero(&logi, sizeof (struct la_els_logi));
		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;
		logi.ls_code.ls_code = LA_ELS_PLOGI;
		logi.ls_code.mbz = 0;
		/* Copy the payload out to the (possibly DMA) cmd buffer. */
		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
		icmd->ipkt_opcode = LA_ELS_PLOGI;

		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
			mutex_exit(&pptr->port_mutex);
			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);
			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PLOGI");
		} else {
			/* Target generation moved on; drop the request. */
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "fcp_send_els1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
		}
		break;
	}

	case LA_ELS_PRLI: {
		struct la_els_prli prli;
		struct fcp_prli *fprli;

		bzero(&prli, sizeof (struct la_els_prli));
		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;

		/* Fill in the PRLI header and FCP service parameter page. */
		prli.ls_code = LA_ELS_PRLI;
		prli.page_length = 0x10;
		prli.payload_length = sizeof (struct la_els_prli);
		icmd->ipkt_opcode = LA_ELS_PRLI;

		fprli = (struct fcp_prli *)prli.service_params;
		fprli->type = 0x08;	/* FCP type code */
		fprli->resvd1 = 0;
		fprli->orig_process_assoc_valid = 0;
		fprli->resp_process_assoc_valid = 0;
		fprli->establish_image_pair = 1;
		fprli->resvd2 = 0;
		fprli->resvd3 = 0;
		fprli->obsolete_1 = 0;
		fprli->obsolete_2 = 0;
		fprli->data_overlay_allowed = 0;
		fprli->initiator_fn = 1;
		fprli->confirmed_compl_allowed = 1;
		/* Advertise target function only when the ltct ULP exists. */
		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
			fprli->target_fn = 1;
		} else {
			fprli->target_fn = 0;
		}
		fprli->retry = 1;
		fprli->read_xfer_rdy_disabled = 1;
		fprli->write_xfer_rdy_disabled = 0;

		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
			mutex_exit(&pptr->port_mutex);
			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);
			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PRLI");
		} else {
			/* Target generation moved on; drop the request. */
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
		}
		break;
	}

	default:
		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
		break;
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: returning %d", res);

	if (res != DDI_SUCCESS) {
		/* Only free a packet we allocated ourselves. */
		if (internal) {
			fcp_icmd_free(pptr, icmd);
		}
	}

	return (res);
}
/*
 * fcp_update_state
 *
 * Walk every target hashed off the port and set the given state bits on
 * it (and, via fcp_update_tgt_state, on its LUNs), while resetting the
 * per-target discovery bookkeeping for a new state-change cycle.
 *
 * The caller must hold pptr->port_mutex.
 */
void
fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
{
	struct fcp_tgt	*tgt;
	int		bucket;

	ASSERT(mutex_owned(&pptr->port_mutex));

	for (bucket = 0; bucket < FCP_NUM_HASH; bucket++) {
		tgt = pptr->port_tgt_hash_table[bucket];
		while (tgt != NULL) {
			mutex_enter(&tgt->tgt_mutex);
			fcp_update_tgt_state(tgt, FCP_SET, state);
			/* Start a new change generation for this target. */
			tgt->tgt_change_cnt++;
			tgt->tgt_statec_cause = cause;
			tgt->tgt_tmp_cnt = 1;
			tgt->tgt_done = 0;
			mutex_exit(&tgt->tgt_mutex);
			tgt = tgt->tgt_next;
		}
	}
}
/*
 * fcp_offline_all
 *
 * Drive the finish-init path for every target on the port, which takes
 * them all offline.  Does nothing if the port has no targets.
 *
 * The caller must hold pptr->port_mutex.
 */
static void
fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
{
	struct fcp_tgt	*tgt;
	int		bucket;
	int		devcount = 0;

	ASSERT(mutex_owned(&pptr->port_mutex));

	/* First pass: count the targets; an empty port needs no work. */
	for (bucket = 0; bucket < FCP_NUM_HASH; bucket++) {
		for (tgt = pptr->port_tgt_hash_table[bucket]; tgt != NULL;
		    tgt = tgt->tgt_next) {
			devcount++;
		}
	}
	if (devcount == 0) {
		return;
	}

	pptr->port_tmp_cnt = devcount;

	/* Second pass: run finish-init accounting for each target. */
	for (bucket = 0; bucket < FCP_NUM_HASH; bucket++) {
		for (tgt = pptr->port_tgt_hash_table[bucket]; tgt != NULL;
		    tgt = tgt->tgt_next) {
			(void) fcp_call_finish_init_held(pptr, tgt,
			    lcount, tgt->tgt_change_cnt, cause);
		}
	}
}
/*
 * fcp_update_tgt_state
 *
 * Set (FCP_SET) or clear the given state bits on a target and on each of
 * its LUNs.  Offline targets and offline LUNs are left untouched.  When
 * setting, the target/LUN trace indices are reset as well.
 *
 * The caller must hold ptgt->tgt_mutex.
 */
void
fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
{
	struct fcp_lun *lun;

	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	/* An offline target is not updated at all. */
	if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
		return;
	}

	if (flag == FCP_SET) {
		ptgt->tgt_state |= state;
		ptgt->tgt_trace = 0;
	} else {
		ptgt->tgt_state &= ~state;
	}

	for (lun = ptgt->tgt_lun; lun != NULL; lun = lun->lun_next) {
		if (lun->lun_state & FCP_LUN_OFFLINE) {
			continue;	/* offline LUNs keep their state */
		}
		if (flag == FCP_SET) {
			lun->lun_state |= state;
			lun->lun_trace = 0;
		} else {
			lun->lun_state &= ~state;
		}
	}
}
/*
 * fcp_update_lun_state
 *
 * Set (FCP_SET) or clear the given state bits on a single LUN, unless
 * the LUN is offline, in which case its state is left untouched.
 *
 * The caller must hold the owning target's tgt_mutex.
 */
void
fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
{
	struct fcp_tgt *ptgt = plun->lun_tgt;

	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	/*
	 * Fix: test lun_state against the LUN offline flag, not the
	 * target flag.  The original checked FCP_TGT_OFFLINE against
	 * lun_state, which is only correct if the two constants happen
	 * to share a value.
	 */
	if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
		if (flag == FCP_SET) {
			plun->lun_state |= state;
		} else {
			plun->lun_state &= ~state;
		}
	}
}
/*
 * fcp_get_port
 *
 * Map a transport port handle back to the fcp soft-state structure by
 * scanning the global port list under fcp_global_mutex.
 *
 * Returns the matching fcp_port, or NULL if the handle is unknown.
 */
static struct fcp_port *
fcp_get_port(opaque_t port_handle)
{
	struct fcp_port *port;

	ASSERT(port_handle != NULL);

	mutex_enter(&fcp_global_mutex);
	port = fcp_port_head;
	while (port != NULL && port->port_fp_handle != port_handle) {
		port = port->port_next;
	}
	mutex_exit(&fcp_global_mutex);

	return (port);
}
/*
 * fcp_unsol_callback
 *
 * Completion handler for responses posted to unsolicited requests.
 * Logs transport-reported diagnostics if the response could not be
 * delivered, then frees the internal packet in every case.
 */
static void
fcp_unsol_callback(fc_packet_t *fpkt)
{
	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
	struct fcp_port *pptr = icmd->ipkt_port;

	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
		caddr_t state, reason, action, expln;

		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
		    &action, &expln);
		/*
		 * Fix: pass rx_id/ox_id in the order named by the format
		 * string; the original swapped the two arguments, so the
		 * warning labeled each ID with the other's value.
		 */
		fcp_log(CE_WARN, pptr->port_dip,
		    "!couldn't post response to unsolicited request: "
		    " state=%s reason=%s rx_id=%x ox_id=%x",
		    state, reason, fpkt->pkt_cmd_fhdr.rx_id,
		    fpkt->pkt_cmd_fhdr.ox_id);
	}
	fcp_icmd_free(pptr, icmd);
}
/*
 * fcp_unsol_resp_init
 *
 * Prepare the frame header and packet fields of an outbound response to
 * an unsolicited frame: swap the received frame's source and destination
 * IDs, carry over its sequence/exchange identifiers, and hook up the
 * unsolicited-response completion callback.
 */
static void
fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
    uchar_t r_ctl, uchar_t type)
{
	fc_frame_hdr_t *fh = &pkt->pkt_cmd_fhdr;

	/* Address the response back at the originator. */
	fh->r_ctl = r_ctl;
	fh->type = type;
	fh->d_id = buf->ub_frame.s_id;
	fh->s_id = buf->ub_frame.d_id;
	fh->f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;

	/* Stay within the originator's sequence and exchange. */
	fh->seq_id = buf->ub_frame.seq_id;
	fh->df_ctl = buf->ub_frame.df_ctl;
	fh->seq_cnt = buf->ub_frame.seq_cnt;
	fh->ox_id = buf->ub_frame.ox_id;
	fh->rx_id = buf->ub_frame.rx_id;
	fh->ro = 0;
	fh->rsvd = 0;

	pkt->pkt_comp = fcp_unsol_callback;
	pkt->pkt_pd = NULL;
	pkt->pkt_ub_resp_token = (opaque_t)buf;
}
/*
 * fcp_unsol_prli
 *
 * Build and issue a PRLI ACC in response to an unsolicited PRLI request.
 * The response echoes the originator's process-associator parameters and
 * advertises this port's FCP service parameters.
 *
 * Returns FC_SUCCESS when the ACC was issued (or queued for retry on a
 * busy/offline link) and the unsolicited buffer consumed; FC_FAILURE on
 * allocation/issue failure or if the link state changed underneath us.
 */
static int
fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
{
	fc_packet_t *fpkt;
	struct la_els_prli prli;
	struct fcp_prli *fprli;
	struct fcp_ipkt *icmd;
	struct la_els_prli *from;
	struct fcp_prli *orig;
	struct fcp_tgt *ptgt;
	int tcount = 0;
	int lcount;

	from = (struct la_els_prli *)buf->ub_buffer;
	orig = (struct fcp_prli *)from->service_params;

	/* Snapshot the sender's change count if we already know it. */
	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
	    NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		tcount = ptgt->tgt_change_cnt;
		mutex_exit(&ptgt->tgt_mutex);
	}

	mutex_enter(&pptr->port_mutex);
	lcount = pptr->port_link_cnt;
	mutex_exit(&pptr->port_mutex);

	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
	    sizeof (la_els_prli_t), 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT)) == NULL) {
		return (FC_FAILURE);
	}

	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
	fpkt->pkt_rsplen = 0;
	fpkt->pkt_datalen = 0;

	icmd->ipkt_opcode = LA_ELS_PRLI;

	/* Build the PRLI ACC with our FCP service parameters. */
	bzero(&prli, sizeof (struct la_els_prli));
	fprli = (struct fcp_prli *)prli.service_params;
	prli.ls_code = LA_ELS_ACC;
	prli.page_length = 0x10;
	prli.payload_length = sizeof (struct la_els_prli);

	fprli->type = 0x08;	/* FCP type code */
	fprli->resvd1 = 0;
	/* Echo the originator's process-associator information. */
	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
	fprli->orig_process_associator = orig->orig_process_associator;
	fprli->resp_process_assoc_valid = 0;
	fprli->establish_image_pair = 1;
	fprli->resvd2 = 0;
	fprli->resvd3 = 0;
	fprli->obsolete_1 = 0;
	fprli->obsolete_2 = 0;
	fprli->data_overlay_allowed = 0;
	fprli->initiator_fn = 1;
	fprli->confirmed_compl_allowed = 1;
	/* Advertise target function only when the ltct ULP exists. */
	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
		fprli->target_fn = 1;
	} else {
		fprli->target_fn = 0;
	}
	fprli->retry = 1;
	fprli->read_xfer_rdy_disabled = 1;
	fprli->write_xfer_rdy_disabled = 0;

	/* Keep a copy of the request in the response buffer. */
	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));
	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);

	mutex_enter(&pptr->port_mutex);
	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
		int rval;
		mutex_exit(&pptr->port_mutex);

		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
		    FC_SUCCESS) {
			/*
			 * Busy/offline link: queue the packet for retry.
			 * NOTE(review): the unsolicited buffer is not
			 * released on this path; presumably it is released
			 * when the queued packet completes via
			 * pkt_ub_resp_token -- confirm.
			 */
			if ((rval == FC_STATEC_BUSY || rval == FC_OFFLINE) &&
			    ptgt != NULL) {
				fcp_queue_ipkt(pptr, fpkt);
				return (FC_SUCCESS);
			}
			/* Other issue failures: give up and free. */
			fcp_icmd_free(pptr, icmd);
			return (FC_FAILURE);
		}
	} else {
		/* Link generation moved on; drop the response. */
		mutex_exit(&pptr->port_mutex);
		fcp_icmd_free(pptr, icmd);
		return (FC_FAILURE);
	}

	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);

	return (FC_SUCCESS);
}
/*
 * Function: fcp_icmd_alloc
 *
 * Description: Allocates and initializes an internal command packet
 *     (struct fcp_ipkt) used by the driver for its own ELS and SCSI
 *     traffic.  One kmem_zalloc covers three contiguous regions:
 *     the fcp_ipkt itself, the DMA cookie array
 *     (pptr->port_dmacookie_sz bytes) and the FCA private area
 *     (pptr->port_priv_pkt_len bytes).  DMA resources are then set up
 *     via fcp_alloc_dma() and the packet is registered with the FCA
 *     through fc_ulp_init_packet().
 *
 * Argument: pptr        Port the packet belongs to.
 *     ptgt        Target the packet is aimed at (may be NULL).
 *     cmd_len     Command buffer length.
 *     resp_len    Response buffer length.
 *     data_len    Data buffer length (0 for no data phase).
 *     nodma       Non-zero: cmd/resp buffers come from kmem, not DVMA.
 *     lcount      Link state change counter snapshot.
 *     tcount      Target state change counter snapshot.
 *     cause       What triggered this allocation (recorded for retry
 *                 policy).
 *     rscn_count  RSCN counter snapshot to hand to the transport, or
 *                 FC_INVALID_RSCN_COUNT if not applicable.
 *
 * Return Value: Pointer to the new fcp_ipkt, or NULL on any failure
 *     (allocation, DMA setup, FCA init, or port going down).
 */
static struct fcp_ipkt *
fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
    int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
    uint32_t rscn_count)
{
    int dma_setup = 0;
    fc_packet_t *fpkt;
    struct fcp_ipkt *icmd = NULL;

    /* Single allocation: ipkt + cookie array + FCA private area. */
    icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
        pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
        KM_NOSLEEP);
    if (icmd == NULL) {
        fcp_log(CE_WARN, pptr->port_dip,
            "!internal packet allocation failed");
        return (NULL);
    }

    /*
     * Initialize the internal packet fields.  The link/change counter
     * snapshots let completion code detect a state change that
     * happened while the command was in flight.
     */
    icmd->ipkt_nodma = nodma;
    icmd->ipkt_next = icmd->ipkt_prev = NULL;
    icmd->ipkt_lun = NULL;
    icmd->ipkt_link_cnt = lcount;
    icmd->ipkt_change_cnt = tcount;
    icmd->ipkt_cause = cause;
    /*
     * NOTE(review): taking port_mutex just to store the back-pointer
     * looks redundant (the ipkt is not yet visible to other threads),
     * but is kept as-is — presumably for memory-visibility paranoia.
     */
    mutex_enter(&pptr->port_mutex);
    icmd->ipkt_port = pptr;
    mutex_exit(&pptr->port_mutex);
    icmd->ipkt_cmdlen = cmd_len;
    icmd->ipkt_resplen = resp_len;
    icmd->ipkt_datalen = data_len;

    /* Wire up the embedded fc_packet and its private-area pointers. */
    icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);
    icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;
    icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
        ((char *)icmd + sizeof (struct fcp_ipkt) +
        pptr->port_dmacookie_sz);
    fpkt = icmd->ipkt_fpkt;
    /* Cookie array lives immediately after the fcp_ipkt. */
    fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
        sizeof (struct fcp_ipkt));
    if (ptgt != NULL) {
        icmd->ipkt_tgt = ptgt;
        fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
    }
    fpkt->pkt_comp = fcp_icmd_callback;
    fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
    fpkt->pkt_cmdlen = cmd_len;
    fpkt->pkt_rsplen = resp_len;
    fpkt->pkt_datalen = data_len;

    /*
     * Optionally attach the RSCN counter snapshot; allocation failure
     * here is non-fatal, the transport just won't get the info.
     */
    fpkt->pkt_ulp_rscn_infop = NULL;
    if (rscn_count != FC_INVALID_RSCN_COUNT) {
        fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
            sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
        if (fpkt->pkt_ulp_rscn_infop == NULL) {
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_6, 0,
                "Failed to alloc memory to pass rscn info");
        }
    }
    if (fpkt->pkt_ulp_rscn_infop != NULL) {
        fc_ulp_rscn_info_t *rscnp;

        rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
        rscnp->ulp_rscn_count = rscn_count;
    }

    if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
        goto fail;
    }
    dma_setup++;

    /*
     * Register the packet with the FCA.  When a target is known its
     * port device handle is copied in under tgt_mutex so it cannot
     * change while fc_ulp_init_packet() looks at it.
     */
    if (ptgt != NULL) {
        mutex_enter(&ptgt->tgt_mutex);
        fpkt->pkt_pd = ptgt->tgt_pd_handle;
        if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
            != FC_SUCCESS) {
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_6, 0,
                "fc_ulp_init_packet failed");
            mutex_exit(&ptgt->tgt_mutex);
            goto fail;
        }
        mutex_exit(&ptgt->tgt_mutex);
    } else {
        if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
            != FC_SUCCESS) {
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_6, 0,
                "fc_ulp_init_packet failed");
            goto fail;
        }
    }

    /*
     * Bail out (and undo the FCA registration) if the port started
     * detaching/suspending while we were setting up.
     */
    mutex_enter(&pptr->port_mutex);
    if (pptr->port_state & (FCP_STATE_DETACHING |
        FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
        int rval;

        mutex_exit(&pptr->port_mutex);

        rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
        ASSERT(rval == FC_SUCCESS);

        goto fail;
    }

    /* Account for the outstanding internal packet. */
    if (ptgt != NULL) {
        mutex_enter(&ptgt->tgt_mutex);
        ptgt->tgt_ipkt_cnt++;
        mutex_exit(&ptgt->tgt_mutex);
    }
    pptr->port_ipkt_cnt++;
    mutex_exit(&pptr->port_mutex);

    return (icmd);

fail:
    /* Unwind in reverse order of acquisition. */
    if (fpkt->pkt_ulp_rscn_infop != NULL) {
        kmem_free(fpkt->pkt_ulp_rscn_infop,
            sizeof (fc_ulp_rscn_info_t));
        fpkt->pkt_ulp_rscn_infop = NULL;
    }
    if (dma_setup) {
        fcp_free_dma(pptr, icmd);
    }
    kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
        (size_t)pptr->port_dmacookie_sz);

    return (NULL);
}
static void
fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
{
struct fcp_tgt *ptgt = icmd->ipkt_tgt;
(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
icmd->ipkt_fpkt);
if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
sizeof (fc_ulp_rscn_info_t));
}
fcp_free_dma(pptr, icmd);
kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
(size_t)pptr->port_dmacookie_sz);
mutex_enter(&pptr->port_mutex);
if (ptgt) {
mutex_enter(&ptgt->tgt_mutex);
ptgt->tgt_ipkt_cnt--;
mutex_exit(&ptgt->tgt_mutex);
}
pptr->port_ipkt_cnt--;
mutex_exit(&pptr->port_mutex);
}
/*
 * Function: fcp_alloc_dma
 *
 * Description: Allocates the command, response and (optionally) data
 *     buffers for an internal packet.  With nodma set, cmd/resp come
 *     straight from kmem; otherwise they are carved out of the port's
 *     DVMA space via fcp_alloc_cmd_resp().  A non-zero data length
 *     gets a full DDI DMA handle/memory/binding (unless the FCA is a
 *     no-DMA FCA, in which case plain kmem is used).  On any failure
 *     everything acquired so far is released.
 *
 * Argument: pptr    Port providing DMA attributes and DVMA space.
 *     icmd    Internal packet whose embedded fc_packet is filled in.
 *     nodma   Non-zero: use kmem for cmd/resp instead of DVMA.
 *     flags   kmem allocation flags (KM_SLEEP / KM_NOSLEEP).
 *
 * Return Value: FC_SUCCESS
 *     FC_NOMEM on any allocation/binding failure.
 */
static int
fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
    int nodma, int flags)
{
    int rval;
    size_t real_size;
    uint_t ccount;
    int bound = 0;          /* data DMA handle is bound */
    int cmd_resp = 0;       /* cmd/resp DVMA was allocated */
    fc_packet_t *fpkt;
    ddi_dma_cookie_t pkt_data_cookie;
    ddi_dma_cookie_t *cp;
    uint32_t cnt;

    fpkt = &icmd->ipkt_fc_packet;

    /* fcp_icmd_alloc() zeroed the packet; nothing may be set yet. */
    ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
        fpkt->pkt_resp_dma == NULL);

    icmd->ipkt_nodma = nodma;

    if (nodma) {
        fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
        if (fpkt->pkt_cmd == NULL) {
            goto fail;
        }
        fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
        if (fpkt->pkt_resp == NULL) {
            goto fail;
        }
    } else {
        ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);

        rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
        if (rval == FC_FAILURE) {
            ASSERT(fpkt->pkt_cmd_dma == NULL &&
                fpkt->pkt_resp_dma == NULL);
            goto fail;
        }
        cmd_resp++;
    }

    if ((fpkt->pkt_datalen != 0) &&
        !(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
        /*
         * Full DDI DMA sequence for the data buffer:
         * handle -> memory -> bind -> harvest cookies.
         */
        if (ddi_dma_alloc_handle(pptr->port_dip,
            &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
            NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
            goto fail;
        }

        if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
            &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
            DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
            &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
            goto fail;
        }

        /* Rounding up can only give us more, never less. */
        if (real_size < fpkt->pkt_datalen) {
            goto fail;
        }

        /*
         * Internal commands only ever read from the device, hence
         * DDI_DMA_READ.
         */
        if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
            NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
            DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
            &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
            goto fail;
        }
        bound++;

        /* Must fit in the scatter/gather list the FCA supports. */
        if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
            goto fail;
        }

        fpkt->pkt_data_cookie_cnt = ccount;

        /*
         * Copy all cookies into the array set up by
         * fcp_icmd_alloc() right after the fcp_ipkt.
         */
        cp = fpkt->pkt_data_cookie;
        *cp = pkt_data_cookie;
        cp++;

        for (cnt = 1; cnt < ccount; cnt++, cp++) {
            ddi_dma_nextcookie(fpkt->pkt_data_dma,
                &pkt_data_cookie);
            *cp = pkt_data_cookie;
        }
    } else if (fpkt->pkt_datalen != 0) {
        /* No-DMA FCA: plain kernel memory suffices. */
        fpkt->pkt_data = kmem_alloc(fpkt->pkt_datalen, flags);
        if (fpkt->pkt_data == NULL) {
            goto fail;
        }
    }

    return (FC_SUCCESS);

fail:
    /* Release in reverse order; checks mirror what was acquired. */
    if (bound) {
        (void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
    }

    if (fpkt->pkt_data_dma) {
        if (fpkt->pkt_data) {
            ddi_dma_mem_free(&fpkt->pkt_data_acc);
        }
        ddi_dma_free_handle(&fpkt->pkt_data_dma);
    } else {
        if (fpkt->pkt_data) {
            kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
        }
    }

    if (nodma) {
        if (fpkt->pkt_cmd) {
            kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
        }
        if (fpkt->pkt_resp) {
            kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
        }
    } else {
        if (cmd_resp) {
            fcp_free_cmd_resp(pptr, fpkt);
        }
    }

    return (FC_NOMEM);
}
/*
 * Function: fcp_free_dma
 *
 * Description: Releases the cmd, resp and data buffers of an internal
 *     packet — the exact mirror of fcp_alloc_dma().  Which release
 *     path is used is decided the same way the allocation was: a
 *     non-NULL data DMA handle means the DDI path, ipkt_nodma selects
 *     kmem versus DVMA for cmd/resp.
 *
 * Argument: pptr   Port the packet belongs to.
 *     icmd   Internal packet whose buffers are freed.
 *
 * Return Value: None
 */
static void
fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
{
    fc_packet_t *pkt = icmd->ipkt_fpkt;

    /* Data buffer: DDI unbind/free chain, or plain kmem. */
    if (pkt->pkt_data_dma != NULL) {
        (void) ddi_dma_unbind_handle(pkt->pkt_data_dma);
        if (pkt->pkt_data != NULL) {
            ddi_dma_mem_free(&pkt->pkt_data_acc);
        }
        ddi_dma_free_handle(&pkt->pkt_data_dma);
    } else if (pkt->pkt_data != NULL) {
        kmem_free(pkt->pkt_data, pkt->pkt_datalen);
    }

    /* Cmd/resp buffers: kmem when nodma, DVMA otherwise. */
    if (icmd->ipkt_nodma) {
        if (pkt->pkt_cmd != NULL) {
            kmem_free(pkt->pkt_cmd, icmd->ipkt_cmdlen);
        }
        if (pkt->pkt_resp != NULL) {
            kmem_free(pkt->pkt_resp, icmd->ipkt_resplen);
        }
    } else {
        ASSERT(pkt->pkt_resp_dma != NULL && pkt->pkt_cmd_dma != NULL);

        fcp_free_cmd_resp(pptr, pkt);
    }
}
/*
 * Function: fcp_lookup_target
 *
 * Description: Looks a target up in the port's WWN hash table by port
 *     WWN.  Orphaned targets are skipped.  The caller must hold
 *     port_mutex, which protects the hash table.
 *
 * Argument: pptr   Port whose hash table is searched.
 *     wwn    Port WWN (raw bytes) to match.
 *
 * Return Value: Matching fcp_tgt, or NULL if none is found.
 */
static struct fcp_tgt *
fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
{
    struct fcp_tgt *tgt;

    ASSERT(mutex_owned(&pptr->port_mutex));

    tgt = pptr->port_tgt_hash_table[FCP_HASH(wwn)];
    while (tgt != NULL) {
        if (!(tgt->tgt_state & FCP_TGT_ORPHAN) &&
            bcmp((caddr_t)wwn,
            (caddr_t)&tgt->tgt_port_wwn.raw_wwn[0],
            sizeof (tgt->tgt_port_wwn)) == 0) {
            break;
        }
        tgt = tgt->tgt_next;
    }

    return (tgt);
}
/*
 * Function: fcp_get_target_by_did
 *
 * Description: Resolves a D_ID to a target by first asking the FC
 *     transport for the port WWN behind that D_ID, then looking the
 *     WWN up in the port's target hash table.
 *
 * Argument: pptr   Port to search on.
 *     d_id   24-bit fabric address of the target.
 *
 * Return Value: Matching fcp_tgt, or NULL if the transport does not
 *     know the D_ID or no target carries that WWN.
 */
static struct fcp_tgt *
fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
{
    struct fcp_tgt *tgt = NULL;
    la_wwn_t wwn;
    fc_portid_t pid;

    pid.priv_lilp_posit = 0;
    pid.port_id = d_id;

    if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, pid,
        &wwn) == FC_SUCCESS) {
        mutex_enter(&pptr->port_mutex);
        tgt = fcp_lookup_target(pptr, wwn.raw_wwn);
        mutex_exit(&pptr->port_mutex);
    }

    return (tgt);
}
/*
 * Function: fcp_icmd_callback
 *
 * Description: Completion callback for internal ELS packets (PLOGI and
 *     PRLI).  On a successful PLOGI it chains a PRLI; on a successful
 *     PRLI it sets up LUN 0 and launches REPORT LUNS discovery.  On
 *     failure it retries (re-issue or requeue) when the error and the
 *     retry budget allow, otherwise logs and gives up.  Unless the
 *     packet was requeued or handed off, the common "fail" tail
 *     finishes the init accounting and frees the packet.
 *
 *     NOTE(review): ptgt is dereferenced unconditionally, so ELS
 *     packets are presumably always allocated with a target — confirm
 *     against fcp_send_els callers.
 *
 * Argument: fpkt   Completed fc_packet; pkt_ulp_private is the
 *                  owning fcp_ipkt.
 *
 * Return Value: None
 */
static void
fcp_icmd_callback(fc_packet_t *fpkt)
{
    struct fcp_ipkt *icmd;
    struct fcp_port *pptr;
    struct fcp_tgt *ptgt;
    struct la_els_prli *prli;
    struct la_els_prli prli_s;
    struct fcp_prli *fprli;
    struct fcp_lun *plun;
    int free_pkt = 1;       /* cleared if the packet was handed off */
    int rval;
    ls_code_t resp;
    uchar_t prli_acc = 0;
    uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
    int lun0_newalloc;

    icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;

    /* Response chunk of the FC packet */
    pptr = icmd->ipkt_port;
    ptgt = icmd->ipkt_tgt;

    FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));

    if (icmd->ipkt_opcode == LA_ELS_PRLI) {
        /*
         * The command buffer of an unsolicited PRLI we answered
         * carries LA_ELS_ACC; treat that the same as an accepted
         * response.
         */
        FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
            sizeof (prli_s));
        prli_acc = (prli_s.ls_code == LA_ELS_ACC);
    }

    FCP_TRACE(fcp_logq, pptr->port_instbuf,
        fcp_trace, FCP_BUF_LEVEL_2, 0,
        "ELS (%x) callback state=0x%x reason=0x%x for %x",
        icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
        ptgt->tgt_d_id);

    if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
        ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {

        /* Latch the port device handle the first time we see it. */
        mutex_enter(&ptgt->tgt_mutex);
        if (ptgt->tgt_pd_handle == NULL) {
            /*
             * Set the PD handle
             */
            ASSERT(fpkt->pkt_pd != NULL);
            ptgt->tgt_pd_handle = fpkt->pkt_pd;
        }
        mutex_exit(&ptgt->tgt_mutex);

        switch (icmd->ipkt_opcode) {
        case LA_ELS_PLOGI:
            /* PLOGI accepted: advance the login to PRLI. */
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_5, 0,
                "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
                ptgt->tgt_d_id,
                *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
                *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));

            FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
                FCP_TGT_TRACE_15);

            if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
                icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
                icmd->ipkt_cause) != DDI_SUCCESS) {
                FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
                    FCP_TGT_TRACE_16);
                goto fail;
            }
            break;

        case LA_ELS_PRLI:
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_5, 0,
                "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);

            FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
                FCP_TGT_TRACE_17);

            prli = &prli_s;

            FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
                sizeof (prli_s));

            fprli = (struct fcp_prli *)prli->service_params;

            /* Record the capabilities the peer advertised. */
            mutex_enter(&ptgt->tgt_mutex);
            ptgt->tgt_icap = fprli->initiator_fn;
            ptgt->tgt_tcap = fprli->target_fn;
            mutex_exit(&ptgt->tgt_mutex);

            /*
             * Only enumerate devices that speak FCP (type 0x08)
             * and offer SCSI target function.
             */
            if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
                FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
                    FCP_TGT_TRACE_18);
                goto fail;
            }
            if (fprli->retry == 1) {
                fc_ulp_disable_relogin(pptr->port_fp_handle,
                    &ptgt->tgt_port_wwn);
            }

            /* Bail if the link/target state changed in flight. */
            mutex_enter(&pptr->port_mutex);
            mutex_enter(&ptgt->tgt_mutex);
            if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
                ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
                    FCP_TGT_MARK);
            } else {
                FCP_TRACE(fcp_logq, pptr->port_instbuf,
                    fcp_trace, FCP_BUF_LEVEL_2, 0,
                    "fcp_icmd_callback,1: state change "
                    " occured for D_ID=0x%x", ptgt->tgt_d_id);
                mutex_exit(&ptgt->tgt_mutex);
                mutex_exit(&pptr->port_mutex);
                goto fail;
            }
            mutex_exit(&ptgt->tgt_mutex);
            mutex_exit(&pptr->port_mutex);

            /* Ensure LUN 0 exists; discovery starts through it. */
            lun0_newalloc = 0;
            if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
                plun = fcp_alloc_lun(ptgt);
                if (plun == NULL) {
                    fcp_log(CE_WARN, pptr->port_dip,
                        "!Failed to allocate lun 0 for"
                        " D_ID=%x", ptgt->tgt_d_id);
                    goto fail;
                }
                lun0_newalloc = 1;
            }

            mutex_enter(&ptgt->tgt_mutex);
            /*
             * A fresh or previously-offline LUN 0 is not yet
             * known to be connected to a real device.
             */
            if ((plun->lun_state & FCP_LUN_OFFLINE) ||
                lun0_newalloc) {
                plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
            }
            plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
            plun->lun_state &= ~FCP_LUN_OFFLINE;
            ptgt->tgt_lun_cnt = 1;
            ptgt->tgt_report_lun_cnt = 0;
            mutex_exit(&ptgt->tgt_mutex);

            /* Propagate the RSCN snapshot to the next command. */
            if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
                rscn_count = ((fc_ulp_rscn_info_t *)
                    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
                    ->ulp_rscn_count;
            } else {
                rscn_count = FC_INVALID_RSCN_COUNT;
            }

            if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
                sizeof (struct fcp_reportlun_resp),
                icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
                icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
                mutex_enter(&pptr->port_mutex);
                if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
                    fcp_log(CE_WARN, pptr->port_dip,
                        "!Failed to send REPORT LUN to"
                        " D_ID=%x", ptgt->tgt_d_id);
                } else {
                    FCP_TRACE(fcp_logq,
                        pptr->port_instbuf, fcp_trace,
                        FCP_BUF_LEVEL_5, 0,
                        "fcp_icmd_callback,2:state change"
                        " occured for D_ID=0x%x",
                        ptgt->tgt_d_id);
                }
                mutex_exit(&pptr->port_mutex);

                FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
                    FCP_TGT_TRACE_19);

                goto fail;
            } else {
                /*
                 * REPORT LUNS is in flight and will finish the
                 * init accounting itself — free only the ipkt.
                 */
                free_pkt = 0;
                fcp_icmd_free(pptr, icmd);
            }
            break;

        default:
            fcp_log(CE_WARN, pptr->port_dip,
                "!fcp_icmd_callback Invalid opcode");
            goto fail;
        }

        return;
    }

    /*
     * Failure path.  PRLI (and anything non-PLOGI) may be re-issued
     * directly; PLOGI failures are only requeued via FCP_MUST_RETRY.
     */
    if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
        if (fcp_is_retryable(icmd) &&
            icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
            if (FCP_MUST_RETRY(fpkt)) {
                fcp_queue_ipkt(pptr, fpkt);
                return;
            }
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_2, 0,
                "ELS PRLI is retried for d_id=0x%x, state=%x,"
                " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
                fpkt->pkt_reason);

            mutex_enter(&pptr->port_mutex);
            if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
                caddr_t msg;

                mutex_exit(&pptr->port_mutex);

                ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);

                /* Give a timed-out command a longer leash. */
                if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
                    fpkt->pkt_timeout +=
                        FCP_TIMEOUT_DELTA;
                }

                rval = fc_ulp_issue_els(pptr->port_fp_handle,
                    fpkt);
                if (rval == FC_SUCCESS) {
                    return;
                }
                if (rval == FC_STATEC_BUSY ||
                    rval == FC_OFFLINE) {
                    fcp_queue_ipkt(pptr, fpkt);
                    return;
                }
                (void) fc_ulp_error(rval, &msg);

                fcp_log(CE_NOTE, pptr->port_dip,
                    "!ELS 0x%x failed to d_id=0x%x;"
                    " %s", icmd->ipkt_opcode,
                    ptgt->tgt_d_id, msg);
            } else {
                FCP_TRACE(fcp_logq, pptr->port_instbuf,
                    fcp_trace, FCP_BUF_LEVEL_2, 0,
                    "fcp_icmd_callback,3: state change "
                    " occured for D_ID=0x%x", ptgt->tgt_d_id);
                mutex_exit(&pptr->port_mutex);
            }
        }
    } else {
        if (fcp_is_retryable(icmd) &&
            icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
            if (FCP_MUST_RETRY(fpkt)) {
                fcp_queue_ipkt(pptr, fpkt);
                return;
            }
        }
        mutex_enter(&pptr->port_mutex);
        if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
            fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
            mutex_exit(&pptr->port_mutex);
            fcp_print_error(fpkt);
        } else {
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_2, 0,
                "fcp_icmd_callback,4: state change occured"
                " for D_ID=0x%x", ptgt->tgt_d_id);
            mutex_exit(&pptr->port_mutex);
        }
    }

fail:
    if (free_pkt) {
        (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
            icmd->ipkt_change_cnt, icmd->ipkt_cause);
        fcp_icmd_free(pptr, icmd);
    }
}
/*
 * Function: fcp_send_scsi
 *
 * Description: Builds and transports an internal SCSI command (one of
 *     SCMD_INQUIRY, SCMD_INQUIRY_PAGE83, SCMD_REPORT_LUN) to a LUN as
 *     part of device discovery.  Completion is handled by
 *     fcp_scsi_callback().
 *
 * Argument: plun        LUN the command is addressed to.
 *     opcode      One of the three discovery opcodes above.
 *     alloc_len   Data buffer size (SCSI allocation length).
 *     lcount      Link state change counter snapshot.
 *     tcount      Target state change counter snapshot.
 *     cause       What triggered this command.
 *     rscn_count  RSCN counter snapshot (FC_INVALID_RSCN_COUNT if
 *                 not applicable).
 *
 * Return Value: DDI_SUCCESS — command handed to the transport.
 *     DDI_FAILURE — allocation, state change, or transport failure.
 */
static int
fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
    int lcount, int tcount, int cause, uint32_t rscn_count)
{
    int nodma;
    struct fcp_ipkt *icmd;
    struct fcp_tgt *ptgt;
    struct fcp_port *pptr;
    fc_frame_hdr_t *hp;
    fc_packet_t *fpkt;
    struct fcp_cmd fcp_cmd;
    struct fcp_cmd *fcmd;
    union scsi_cdb *scsi_cdb;

    ASSERT(plun != NULL);

    ptgt = plun->lun_tgt;
    ASSERT(ptgt != NULL);

    pptr = ptgt->tgt_port;
    ASSERT(pptr != NULL);

    FCP_TRACE(fcp_logq, pptr->port_instbuf,
        fcp_trace, FCP_BUF_LEVEL_5, 0,
        "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);

    /* No DVMA space means cmd/resp live in plain kernel memory. */
    nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
    icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
        FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
        rscn_count);
    if (icmd == NULL) {
        return (DDI_FAILURE);
    }

    fpkt = icmd->ipkt_fpkt;
    fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
    icmd->ipkt_retries = 0;
    icmd->ipkt_opcode = opcode;
    icmd->ipkt_lun = plun;

    /*
     * With nodma the FCP_CMD IU is built directly in the command
     * buffer; otherwise it is built on the stack and copied out
     * through the access handle at the end.
     */
    if (nodma) {
        fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
    } else {
        fcmd = &fcp_cmd;
    }
    bzero(fcmd, sizeof (struct fcp_cmd));

    fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;

    /* Fill in the FC frame header for an FCP command frame. */
    hp = &fpkt->pkt_cmd_fhdr;

    hp->s_id = pptr->port_id;
    hp->d_id = ptgt->tgt_d_id;
    hp->r_ctl = R_CTL_COMMAND;
    hp->type = FC_TYPE_SCSI_FCP;
    hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
    hp->rsvd = 0;
    hp->seq_id = 0;
    hp->seq_cnt = 0;
    hp->ox_id = 0xffff;
    hp->rx_id = 0xffff;
    hp->ro = 0;

    /* LUN addressing and task attributes of the FCP_CMD IU. */
    bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);

    /*
     * Indicate the command being executed.
     */
    fcmd->fcp_cntl.cntl_reserved_0 = 0;
    fcmd->fcp_cntl.cntl_reserved_1 = 0;
    fcmd->fcp_cntl.cntl_reserved_2 = 0;
    fcmd->fcp_cntl.cntl_reserved_3 = 0;
    fcmd->fcp_cntl.cntl_reserved_4 = 0;
    fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
    scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;

    /* Build the CDB; all three opcodes are device-to-host reads. */
    switch (opcode) {
    case SCMD_INQUIRY_PAGE83:
        /* EVPD page 0x83 (device identification). */
        fcmd->fcp_cntl.cntl_read_data = 1;
        fcmd->fcp_cntl.cntl_write_data = 0;
        fcmd->fcp_data_len = alloc_len;

        fpkt->pkt_tran_type = FC_PKT_FCP_READ;
        fpkt->pkt_comp = fcp_scsi_callback;

        scsi_cdb->scc_cmd = SCMD_INQUIRY;
        scsi_cdb->g0_addr2 = 0x01;      /* EVPD bit */
        scsi_cdb->g0_addr1 = 0x83;      /* page code */
        scsi_cdb->g0_count0 = (uchar_t)alloc_len;
        break;

    case SCMD_INQUIRY:
        fcmd->fcp_cntl.cntl_read_data = 1;
        fcmd->fcp_cntl.cntl_write_data = 0;
        fcmd->fcp_data_len = alloc_len;

        fpkt->pkt_tran_type = FC_PKT_FCP_READ;
        fpkt->pkt_comp = fcp_scsi_callback;

        scsi_cdb->scc_cmd = SCMD_INQUIRY;
        scsi_cdb->g0_count0 = SUN_INQSIZE;
        break;

    case SCMD_REPORT_LUN: {
        fc_portid_t d_id;
        opaque_t fca_dev;

        ASSERT(alloc_len >= 16);

        /* Refresh the FCA device handle for this D_ID. */
        d_id.priv_lilp_posit = 0;
        d_id.port_id = ptgt->tgt_d_id;

        fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);

        mutex_enter(&ptgt->tgt_mutex);
        ptgt->tgt_fca_dev = fca_dev;
        mutex_exit(&ptgt->tgt_mutex);

        fcmd->fcp_cntl.cntl_read_data = 1;
        fcmd->fcp_cntl.cntl_write_data = 0;
        fcmd->fcp_data_len = alloc_len;

        fpkt->pkt_tran_type = FC_PKT_FCP_READ;
        fpkt->pkt_comp = fcp_scsi_callback;

        /* REPORT LUNS has a 4-byte big-endian allocation length. */
        scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
        scsi_cdb->scc5_count0 = alloc_len & 0xff;
        scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
        scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
        scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
        break;
    }

    default:
        /*
         * NOTE(review): an invalid opcode is only logged — the
         * zeroed CDB is still transported below.  Callers only
         * pass the three opcodes above.
         */
        fcp_log(CE_WARN, pptr->port_dip,
            "!fcp_send_scsi Invalid opcode");
        break;
    }

    if (!nodma) {
        FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
            fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
    }

    /* Only send if nothing changed since the ipkt was created. */
    mutex_enter(&pptr->port_mutex);
    if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

        mutex_exit(&pptr->port_mutex);
        if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
            FC_SUCCESS) {
            fcp_icmd_free(pptr, icmd);
            return (DDI_FAILURE);
        }
        return (DDI_SUCCESS);
    } else {
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_2, 0,
            "fcp_send_scsi,1: state change occured"
            " for D_ID=0x%x", ptgt->tgt_d_id);
        mutex_exit(&pptr->port_mutex);
        fcp_icmd_free(pptr, icmd);
        return (DDI_FAILURE);
    }
}
/*
 * Function: fcp_reportlun_fake_good
 *
 * Description: Helper for fcp_check_reportlun().  Rewrites the FCP
 *     response to STATUS_GOOD (clearing the rsp/sense length bits) and
 *     substitutes the canned single-LUN-0 REPORT LUNS payload
 *     (fcp_dummy_lun) in the data buffer, so discovery can proceed as
 *     if the target had answered with just LUN 0.
 *
 * Argument: icmd   Internal packet (selects in-place vs copy-in/out).
 *     rsp    Decoded FCP response (points into pkt_resp when
 *            ipkt_nodma is set).
 *     fpkt   The FC packet whose resp/data buffers are rewritten.
 *
 * Return Value: None
 */
static void
fcp_reportlun_fake_good(struct fcp_ipkt *icmd, struct fcp_rsp *rsp,
    fc_packet_t *fpkt)
{
    if (icmd->ipkt_nodma) {
        /* rsp aliases pkt_resp: patch it in place. */
        rsp->fcp_u.fcp_status.rsp_len_set = 0;
        rsp->fcp_u.fcp_status.sense_len_set = 0;
        rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
    } else {
        fcp_rsp_t new_resp;

        /* DMA-backed resp: copy in, patch, copy back out. */
        FCP_CP_IN(fpkt->pkt_resp, &new_resp,
            fpkt->pkt_resp_acc, sizeof (new_resp));

        new_resp.fcp_u.fcp_status.rsp_len_set = 0;
        new_resp.fcp_u.fcp_status.sense_len_set = 0;
        new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;

        FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
            fpkt->pkt_resp_acc, sizeof (new_resp));
    }
    FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
        fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
}

/*
 * Function: fcp_check_reportlun
 *
 * Description: Examines a failed REPORT LUNS response and decides
 *     whether discovery can continue anyway.  Three conditions are
 *     papered over with a faked good response naming only LUN 0:
 *     reservation conflict, ILLEGAL REQUEST / invalid opcode
 *     (ASC 0x20 — device doesn't implement REPORT LUNS), and
 *     NOT READY / ASC 0x04.  "REPORT LUNS data changed" and
 *     "LU not supported" sense trigger a deferred LUN
 *     reconfiguration via timeout(fcp_reconfigure_luns).
 *
 * Argument: rsp    Decoded FCP response for the REPORT LUNS.
 *     fpkt   The completed FC packet.
 *
 * Return Value: DDI_SUCCESS — caller may proceed (response possibly
 *     rewritten); DDI_FAILURE — genuine failure, caller should retry.
 */
static int
fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
{
    uchar_t rqlen;
    int rval = DDI_FAILURE;
    struct scsi_extended_sense sense_info, *sense;
    struct fcp_ipkt *icmd = (struct fcp_ipkt *)
        fpkt->pkt_ulp_private;
    struct fcp_tgt *ptgt = icmd->ipkt_tgt;
    struct fcp_port *pptr = ptgt->tgt_port;

    ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);

    /* Reservation conflict: pretend the target reported only LUN 0. */
    if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
        fcp_reportlun_fake_good(icmd, rsp, fpkt);
        return (DDI_SUCCESS);
    }

    sense = &sense_info;

    /* Without valid sense of useful length there is nothing to parse. */
    if (!rsp->fcp_u.fcp_status.sense_len_set) {
        return (rval);
    }

    rqlen = (uchar_t)min(rsp->fcp_sense_len,
        sizeof (struct scsi_extended_sense));

    if (rqlen < 14) {
        return (rval);
    }

    /* Locate/copy the sense bytes that follow the FCP_RSP IU. */
    if (icmd->ipkt_nodma) {
        sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
            sizeof (struct fcp_rsp) + rsp->fcp_response_len);
    } else {
        FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
            rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
            sizeof (struct scsi_extended_sense));
    }

    /* Any sense other than "LU not supported" clears the ILLREQ mark. */
    if (!FCP_SENSE_NO_LUN(sense)) {
        mutex_enter(&ptgt->tgt_mutex);
        ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
        mutex_exit(&ptgt->tgt_mutex);
    }

    /* ILLEGAL REQUEST / ASC 0x20: REPORT LUNS not implemented. */
    if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
        (sense->es_add_code == 0x20)) {
        fcp_reportlun_fake_good(icmd, rsp, fpkt);
        return (DDI_SUCCESS);
    }

    /* NOT READY / ASC 0x04: still fall back to LUN 0 discovery. */
    if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
        fcp_reportlun_fake_good(icmd, rsp, fpkt);
        return (DDI_SUCCESS);
    }

    if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
        (FCP_SENSE_NO_LUN(sense))) {
        mutex_enter(&ptgt->tgt_mutex);
        if ((FCP_SENSE_NO_LUN(sense)) &&
            (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
            /*
             * Second consecutive "LU not supported": give up
             * instead of looping on reconfiguration.
             */
            ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
            mutex_exit(&ptgt->tgt_mutex);
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_3, 0,
                "!FCP: Unable to obtain Report Lun data"
                " target=%x", ptgt->tgt_d_id);
        } else {
            /* Schedule an asynchronous LUN reconfiguration. */
            if (ptgt->tgt_tid == NULL) {
                timeout_id_t tid;

                tid = timeout(fcp_reconfigure_luns,
                    (caddr_t)ptgt, (clock_t)drv_usectohz(1));
                ptgt->tgt_tid = tid;
                ptgt->tgt_state |= FCP_TGT_BUSY;
            }
            if (FCP_SENSE_NO_LUN(sense)) {
                ptgt->tgt_state |= FCP_TGT_ILLREQ;
            }
            mutex_exit(&ptgt->tgt_mutex);
            if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
                FCP_TRACE(fcp_logq, pptr->port_instbuf,
                    fcp_trace, FCP_BUF_LEVEL_3, 0,
                    "!FCP:Report Lun Has Changed"
                    " target=%x", ptgt->tgt_d_id);
            } else if (FCP_SENSE_NO_LUN(sense)) {
                FCP_TRACE(fcp_logq, pptr->port_instbuf,
                    fcp_trace, FCP_BUF_LEVEL_3, 0,
                    "!FCP:LU Not Supported"
                    " target=%x", ptgt->tgt_d_id);
            }
        }
        rval = DDI_SUCCESS;
    }

    FCP_TRACE(fcp_logq, pptr->port_instbuf,
        fcp_trace, FCP_BUF_LEVEL_5, 0,
        "D_ID=%x, sense=%x, status=%x",
        fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
        rsp->fcp_u.fcp_status.scsi_status);

    return (rval);
}
/*
 * Function: fcp_scsi_callback
 *
 * Description: Completion callback for the internal discovery SCSI
 *     commands sent by fcp_send_scsi() (INQUIRY, INQUIRY page 0x83,
 *     REPORT LUNS).  Validates transport state and the FCP response,
 *     retries or requeues on recoverable errors, applies REPORT LUNS
 *     workarounds via fcp_check_reportlun(), and finally dispatches
 *     to the per-opcode handler.  Every exit path either hands the
 *     packet off (retry/queue/handler) or finishes the init
 *     accounting and frees the ipkt.
 *
 * Argument: fpkt   Completed fc_packet; pkt_ulp_private is the
 *                  owning fcp_ipkt.
 *
 * Return Value: None
 */
static void
fcp_scsi_callback(fc_packet_t *fpkt)
{
    struct fcp_ipkt *icmd = (struct fcp_ipkt *)
        fpkt->pkt_ulp_private;
    struct fcp_rsp_info fcp_rsp_err, *bep;
    struct fcp_port *pptr;
    struct fcp_tgt *ptgt;
    struct fcp_lun *plun;
    struct fcp_rsp response, *rsp;

    ptgt = icmd->ipkt_tgt;
    pptr = ptgt->tgt_port;
    plun = icmd->ipkt_lun;

    /* With nodma the response can be read in place. */
    if (icmd->ipkt_nodma) {
        rsp = (struct fcp_rsp *)fpkt->pkt_resp;
    } else {
        rsp = &response;
        FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
            sizeof (struct fcp_rsp));
    }

    FCP_TRACE(fcp_logq, pptr->port_instbuf,
        fcp_trace, FCP_BUF_LEVEL_2, 0,
        "SCSI callback state=0x%x for %x, op_code=0x%x, "
        "status=%x, lun num=%x",
        fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
        rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);

    /*
     * Devices that answer page 0x83 probes inconsistently but probe
     * as symmetric get a GUID synthesized from the node WWN.
     */
    if ((plun->lun_guid_size == 0) &&
        (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
        (fcp_symmetric_device_probe(plun) == 0)) {

        char ascii_wwn[FC_WWN_SIZE*2+1];
        fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
        (void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
    }

    /*
     * An overrun on REPORT LUNS just means our buffer was smaller
     * than the device's list; the data we did get is usable.
     */
    if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
        (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
        (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
        fpkt->pkt_state = FC_PKT_SUCCESS;
    }

    if (fpkt->pkt_state != FC_PKT_SUCCESS) {
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_2, 0,
            "icmd failed with state=0x%x for %x", fpkt->pkt_state,
            ptgt->tgt_d_id);

        if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
            /*
             * CRC error on a page-0x83 probe: let the page83
             * handler finish without the (corrupt) page data.
             */
            if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
                (void) fcp_handle_page83(fpkt, icmd, 1);
                return;
            }
        }

        if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
            FCP_MUST_RETRY(fpkt)) {
            fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
            fcp_retry_scsi_cmd(fpkt);
            return;
        }

        FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
            FCP_TGT_TRACE_20);
        mutex_enter(&pptr->port_mutex);
        mutex_enter(&ptgt->tgt_mutex);
        if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
            mutex_exit(&ptgt->tgt_mutex);
            mutex_exit(&pptr->port_mutex);
            fcp_print_error(fpkt);
        } else {
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_2, 0,
                "fcp_scsi_callback,1: state change occured"
                " for D_ID=0x%x", ptgt->tgt_d_id);
            mutex_exit(&ptgt->tgt_mutex);
            mutex_exit(&pptr->port_mutex);
        }
        (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
            icmd->ipkt_change_cnt, icmd->ipkt_cause);
        fcp_icmd_free(pptr, icmd);
        return;
    }

    FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);

    /* Drop stale completions after a link/target state change. */
    mutex_enter(&pptr->port_mutex);
    mutex_enter(&ptgt->tgt_mutex);
    if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_2, 0,
            "fcp_scsi_callback,2: state change occured"
            " for D_ID=0x%x", ptgt->tgt_d_id);
        mutex_exit(&ptgt->tgt_mutex);
        mutex_exit(&pptr->port_mutex);
        (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
            icmd->ipkt_change_cnt, icmd->ipkt_cause);
        fcp_icmd_free(pptr, icmd);
        return;
    }
    ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);

    mutex_exit(&ptgt->tgt_mutex);
    mutex_exit(&pptr->port_mutex);

    /* FCP_RSP_INFO (if any) immediately follows the FCP_RSP IU. */
    if (icmd->ipkt_nodma) {
        bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
            sizeof (struct fcp_rsp));
    } else {
        bep = &fcp_rsp_err;
        FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
            fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
    }

    if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
        fcp_retry_scsi_cmd(fpkt);
        return;
    }

    /* A set rsp_len with a non-zero rsp_code is a transport error. */
    if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
        FCP_NO_FAILURE) {
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_2, 0,
            "rsp_code=0x%x, rsp_len_set=0x%x",
            bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
        fcp_retry_scsi_cmd(fpkt);
        return;
    }

    /* QFULL/BUSY: requeue for a later attempt rather than retry now. */
    if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
        rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
        fcp_queue_ipkt(pptr, fpkt);
        return;
    }

    /*
     * Check condition on a page-0x83 probe: non-ESI devices are
     * enumerated without a GUID (possibly non-mpxio); ESI devices
     * get the page83 failure path.
     */
    if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
        (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_3, 0,
            "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
            "check condition. May enumerate as non-mpxio device",
            ptgt->tgt_d_id, plun->lun_type);

        if (plun->lun_type == DTYPE_ESI) {
            fpkt->pkt_state = FC_PKT_LOCAL_RJT;
            (void) fcp_handle_page83(fpkt, icmd, 1);
            return;
        }
        mutex_enter(&ptgt->tgt_mutex);
        plun->lun_state &= ~(FCP_LUN_OFFLINE |
            FCP_LUN_MARK | FCP_LUN_BUSY);
        mutex_exit(&ptgt->tgt_mutex);

        (void) fcp_call_finish_init(pptr, ptgt,
            icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
            icmd->ipkt_cause);
        fcp_icmd_free(pptr, icmd);

        return;
    }

    if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
        int rval = DDI_FAILURE;

        /*
         * REPORT LUNS failures may be repaired in place by
         * fcp_check_reportlun(); re-read the (possibly rewritten)
         * response afterwards.
         */
        if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
            rval = fcp_check_reportlun(rsp, fpkt);
            if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
                rsp = &response;
                FCP_CP_IN(fpkt->pkt_resp, rsp,
                    fpkt->pkt_resp_acc,
                    sizeof (struct fcp_rsp));
            }
        }
        if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
            if (rval == DDI_SUCCESS) {
                (void) fcp_call_finish_init(pptr, ptgt,
                    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
                    icmd->ipkt_cause);
                fcp_icmd_free(pptr, icmd);
            } else {
                fcp_retry_scsi_cmd(fpkt);
            }
            return;
        }
    } else {
        if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
            mutex_enter(&ptgt->tgt_mutex);
            ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
            mutex_exit(&ptgt->tgt_mutex);
        }
    }

    ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);

    /* Make the DMA'd data visible to the CPU before parsing it. */
    if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
        (void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0,
            DDI_DMA_SYNC_FORCPU);
    }

    /* Dispatch to the per-opcode handler; each one frees the ipkt. */
    switch (icmd->ipkt_opcode) {
    case SCMD_INQUIRY:
        FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
        fcp_handle_inquiry(fpkt, icmd);
        break;

    case SCMD_REPORT_LUN:
        FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
            FCP_TGT_TRACE_22);
        fcp_handle_reportlun(fpkt, icmd);
        break;

    case SCMD_INQUIRY_PAGE83:
        FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
        (void) fcp_handle_page83(fpkt, icmd, 0);
        break;

    default:
        fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
        (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
            icmd->ipkt_change_cnt, icmd->ipkt_cause);
        fcp_icmd_free(pptr, icmd);
        break;
    }
}
/*
 * Function: fcp_retry_scsi_cmd
 *
 * Description: Decides the fate of a failed internal SCSI command.
 *     If the retry budget and the failure reason allow it — and the
 *     target state has not changed since the command was built — the
 *     packet is requeued for another attempt.  In every other case
 *     the command is abandoned: the error is reported (or a state
 *     change traced), the init accounting finished, and the internal
 *     packet freed.
 *
 * Argument: fpkt   Completed fc_packet; pkt_ulp_private is the
 *                  owning fcp_ipkt.
 *
 * Return Value: None
 */
static void
fcp_retry_scsi_cmd(fc_packet_t *fpkt)
{
    struct fcp_ipkt *icmd = (struct fcp_ipkt *)
        fpkt->pkt_ulp_private;
    struct fcp_tgt *ptgt = icmd->ipkt_tgt;
    struct fcp_port *pptr = ptgt->tgt_port;
    int requeued = 0;

    if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
        fcp_is_retryable(icmd)) {
        mutex_enter(&pptr->port_mutex);
        if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
            mutex_exit(&pptr->port_mutex);
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_3, 0,
                "Retrying %s to %x; state=%x, reason=%x",
                (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
                "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
                fpkt->pkt_state, fpkt->pkt_reason);

            fcp_queue_ipkt(pptr, fpkt);
            requeued = 1;
        } else {
            /* Target changed under us; the retry is pointless. */
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_3, 0,
                "fcp_retry_scsi_cmd,1: state change occured"
                " for D_ID=0x%x", ptgt->tgt_d_id);
            mutex_exit(&pptr->port_mutex);
        }
    } else {
        /* Retry budget exhausted or error not retryable. */
        fcp_print_error(fpkt);
    }

    if (!requeued) {
        (void) fcp_call_finish_init(pptr, ptgt,
            icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
            icmd->ipkt_cause);
        fcp_icmd_free(pptr, icmd);
    }
}
/*
 * Function: fcp_handle_page83
 *
 * Description: Consumes the result of an INQUIRY VPD page 0x83 probe.
 *     On success it encodes a devid from the page data, derives the
 *     GUID, detects GUID changes (stashing the old GUID), and decides
 *     whether the LUN may be managed by mpxio.  A DDI_NOT_WELL_FORMED
 *     encode triggers a fresh retry of the probe.  On command failure
 *     (or when the caller asks the page data to be ignored) the LUN is
 *     simply brought online without a GUID.  Always finishes the init
 *     accounting and frees the internal packet unless the probe was
 *     re-issued.
 *
 *     Fix: the failure-path FCP_TRACE had its format string split by
 *     a stray comma, so "ignore_page83: %d" was passed as the first
 *     vararg and consumed by "%x", misprinting pkt_state/pkt_reason
 *     and dropping ignore_page83_data.  The string fragments are now
 *     concatenated into one format string.
 *
 * Argument: fpkt                Completed fc_packet.
 *     icmd                Owning internal packet.
 *     ignore_page83_data  Non-zero: treat a failed probe as benign
 *                         (no GUID, but not a LUN failure).
 *
 * Return Value: None
 */
static void
fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
    int ignore_page83_data)
{
    struct fcp_port *pptr;
    struct fcp_lun *plun;
    struct fcp_tgt *ptgt;
    uchar_t dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
    int fail = 0;
    ddi_devid_t devid;
    char *guid = NULL;
    int ret;

    ASSERT(icmd != NULL && fpkt != NULL);

    pptr = icmd->ipkt_port;
    ptgt = icmd->ipkt_tgt;
    plun = icmd->ipkt_lun;

    if (fpkt->pkt_state == FC_PKT_SUCCESS) {
        FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);

        FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
            SCMD_MAX_INQUIRY_PAGE83_SIZE);

        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_5, 0,
            "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
            "dtype=0x%x, lun num=%x",
            pptr->port_instance, ptgt->tgt_d_id,
            dev_id_page[0], plun->lun_num);

        /* Build a devid from std inquiry data + page 0x83. */
        ret = ddi_devid_scsi_encode(
            DEVID_SCSI_ENCODE_VERSION_LATEST,
            NULL,               /* driver name */
            (unsigned char *) &plun->lun_inq, /* standard inquiry */
            sizeof (plun->lun_inq), /* size of standard inquiry */
            NULL,               /* page 80 data */
            0,                  /* page 80 len */
            dev_id_page,        /* page 83 data */
            SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
            &devid);

        if (ret == DDI_SUCCESS) {

            guid = ddi_devid_to_guid(devid);

            if (guid) {
                /*
                 * Check our current guid.  If it's non null
                 * and it has changed, we need to copy it into
                 * lun_old_guid since we might still need it.
                 */
                if (plun->lun_guid &&
                    strcmp(guid, plun->lun_guid)) {
                    unsigned int len;

                    /*
                     * If the guid of the LUN changes,
                     * reconfiguration should be triggered
                     * to reflect the changes.
                     * i.e. we should offline the LUN with
                     * the old guid, and online the LUN with
                     * the new guid.
                     */
                    plun->lun_state |= FCP_LUN_CHANGED;

                    if (plun->lun_old_guid) {
                        kmem_free(plun->lun_old_guid,
                            plun->lun_old_guid_size);
                    }

                    len = plun->lun_guid_size;
                    plun->lun_old_guid_size = len;

                    plun->lun_old_guid = kmem_zalloc(len,
                        KM_NOSLEEP);

                    if (plun->lun_old_guid) {
                        /*
                         * The alloc was successful then
                         * let's do the copy.
                         */
                        bcopy(plun->lun_guid,
                            plun->lun_old_guid, len);
                    } else {
                        fail = 1;
                        plun->lun_old_guid_size = 0;
                    }
                }
                if (!fail) {
                    if (fcp_copy_guid_2_lun_block(
                        plun, guid)) {
                        fail = 1;
                    }
                }
                ddi_devid_free_guid(guid);

            } else {
                FCP_TRACE(fcp_logq, pptr->port_instbuf,
                    fcp_trace, FCP_BUF_LEVEL_2, 0,
                    "fcp_handle_page83: unable to create "
                    "GUID");

                /* couldn't create good guid from devid */
                fail = 1;
            }
            ddi_devid_free(devid);

        } else if (ret == DDI_NOT_WELL_FORMED) {
            /* NULL filled data for page 83 */
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_2, 0,
                "fcp_handle_page83: retry GUID");

            icmd->ipkt_retries = 0;
            fcp_retry_scsi_cmd(fpkt);
            return;
        } else {
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_2, 0,
                "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
                ret);
            /*
             * Since the page83 validation
             * introduced late, we are being
             * tolerant to the existing devices
             * that already found to be working
             * under mpxio, like A5200's SES device,
             * its page83 response will not be standard-compliant,
             * but we still want it to be enumerated under mpxio.
             */
            if (fcp_symmetric_device_probe(plun) != 0) {
                fail = 1;
            }
        }

    } else {
        /* bad packet state */
        FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);

        /*
         * For some special devices (A5K SES and Daktari's SES card),
         * they should be enumerated under mpxio
         * or "luxadm dis" will fail
         */
        if (ignore_page83_data) {
            fail = 0;
        } else {
            fail = 1;
        }
        /*
         * FIX: the three string fragments form ONE format string;
         * previously "ignore_page83: %d" was erroneously passed as
         * a vararg, misaligning every argument after it.
         */
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_2, 0,
            "!Devid page cmd failed. "
            "fpkt_state: %x fpkt_reason: %x "
            "ignore_page83: %d",
            fpkt->pkt_state, fpkt->pkt_reason,
            ignore_page83_data);
    }

    mutex_enter(&pptr->port_mutex);
    mutex_enter(&plun->lun_mutex);
    /*
     * If lun_cip is not NULL, then we needn't update lun_mpxio to avoid
     * mismatch between lun_cip and lun_mpxio.
     */
    if (plun->lun_cip == NULL) {
        /*
         * If we don't have a guid for this lun it's because we were
         * unable to glean one from the page 83 response.  Set the
         * control flag to 0 here to make sure that we don't attempt to
         * enumerate it under mpxio.
         */
        if (fail || pptr->port_mpxio == 0) {
            plun->lun_mpxio = 0;
        } else {
            plun->lun_mpxio = 1;
        }
    }
    mutex_exit(&plun->lun_mutex);
    mutex_exit(&pptr->port_mutex);

    mutex_enter(&ptgt->tgt_mutex);
    plun->lun_state &=
        ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
    mutex_exit(&ptgt->tgt_mutex);

    (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
        icmd->ipkt_change_cnt, icmd->ipkt_cause);

    fcp_icmd_free(pptr, icmd);
}
/*
 * Function: fcp_handle_inquiry
 *
 * Description: Called by fcp_scsi_callback to handle the response to an
 *		INQUIRY command.  Copies the inquiry data into the LUN,
 *		rejects devices with a non-zero peripheral qualifier,
 *		requests transport throttling for certain products and then
 *		issues the INQUIRY PAGE83 (device identification) command.
 *		The icmd is freed on every path; on the reject/state-change
 *		paths fcp_call_finish_init() is invoked first.
 *
 * Argument: *fpkt	FC packet used to convey the command.
 *	     *icmd	Original fcp_ipkt structure.
 *
 * Return Value: None
 */
static void
fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
{
	struct fcp_port	*pptr;
	struct fcp_lun	*plun;
	struct fcp_tgt	*ptgt;
	uchar_t		dtype;
	uchar_t		pqual;
	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;

	ASSERT(icmd != NULL && fpkt != NULL);

	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;
	plun = icmd->ipkt_lun;

	/* Latch the raw inquiry data into the LUN structure. */
	FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
	    sizeof (struct scsi_inquiry));

	dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
	pqual = plun->lun_inq.inq_dtype >> 5;

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
	    "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
	    plun->lun_num, dtype, pqual);

	/*
	 * A non-zero peripheral qualifier means the LUN is not connected
	 * or not capable; do not probe it any further.
	 */
	if (pqual != 0) {
		fcp_log(CE_CONT, pptr->port_dip,
		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
		    "Device type=0x%x Peripheral qual=0x%x\n",
		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
		    "Device type=0x%x Peripheral qual=0x%x\n",
		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/* A device-type change on an initialized LUN must be propagated. */
	if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
		plun->lun_state |= FCP_LUN_CHANGED;
	}
	plun->lun_type = plun->lun_inq.inq_dtype;

	/*
	 * Ask the transport (once per port) to throttle commands to devices
	 * whose product id matches the global "pid" string.
	 */
	mutex_enter(&pptr->port_mutex);
	if (!pptr->port_notify) {
		if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
			/*
			 * FC_NOTIFY_THROTTLE opcode in the low byte, the
			 * throttle value in the next byte.  The original
			 * expression OR'ed these into a known-zero word
			 * using misleading operator precedence; this is
			 * the equivalent value, stated directly.
			 */
			uint32_t cmd =
			    FC_NOTIFY_THROTTLE | (FCP_SVE_THROTTLE << 8);

			pptr->port_notify = 1;
			/* fc_ulp_port_notify() may block; drop the mutex. */
			mutex_exit(&pptr->port_mutex);
			(void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
			mutex_enter(&pptr->port_mutex);
		}
	}

	if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_handle_inquiry,1:state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&pptr->port_mutex);

		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
	mutex_exit(&pptr->port_mutex);

	/* Carry the RSCN count forward so stale responses can be detected. */
	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
		rscn_count = ((fc_ulp_rscn_info_t *)
		    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
	} else {
		rscn_count = FC_INVALID_RSCN_COUNT;
	}

	/* Next step of discovery: the device identification VPD page. */
	if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
	    SCMD_MAX_INQUIRY_PAGE83_SIZE,
	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
		fcp_log(CE_WARN, NULL, "!failed to send page 83");
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
	}

	/* The icmd carrying the INQUIRY is done in either case. */
	fcp_icmd_free(pptr, icmd);
}
/*
 * Function: fcp_handle_reportlun
 *
 * Description: Called by fcp_scsi_callback to handle the response to a
 *		REPORT_LUN command.  The returned LUN list is reconciled
 *		with the target's current LUN list: LUNs no longer reported
 *		are flagged as disappeared, reappearing LUNs are noted, and
 *		an INQUIRY is issued to every reported LUN.  If the target
 *		claims more LUNs than the response buffer could hold, the
 *		REPORT_LUN is reissued with a buffer sized for the claim
 *		(bounded by FCP_MAX_REPORTLUNS_ATTEMPTS retries).
 *
 * Argument: *fpkt	FC packet used to convey the command.
 *	     *icmd	Original fcp_ipkt structure.
 *
 * Return Value: None
 */
static void
fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
{
	int	i;
	int	nluns_claimed;	/* number of LUNs the target reports */
	int	nluns_bufmax;	/* number of LUNs our buffer can hold */
	int	len;
	uint16_t	lun_num;
	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	struct fcp_reportlun_resp	*report_lun;

	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;
	len = fpkt->pkt_datalen;

	/* Bail out on a short response or if no memory for a local copy. */
	if ((len < FCP_LUN_HEADER) ||
	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
	    fpkt->pkt_datalen);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
	    pptr->port_instance, ptgt->tgt_d_id);

	/* The header length field is in bytes; each LUN entry is 8 bytes. */
	nluns_claimed = BE_32(report_lun->num_lun) >> 3;
	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;

	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
		kmem_free(report_lun, len);
		fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
		    " 0x%x number of LUNs for target=%x", nluns_claimed,
		    ptgt->tgt_d_id);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * The target reports more LUNs than fit in the buffer we sent:
	 * reissue REPORT_LUN with a buffer sized for the claimed count.
	 */
	if ((nluns_claimed > nluns_bufmax) &&
	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {
		/* Shadows the outer plun; only used for the ASSERT below. */
		struct fcp_lun *plun;

		ptgt->tgt_report_lun_cnt++;
		plun = ptgt->tgt_lun;
		ASSERT(plun != NULL);
		mutex_exit(&ptgt->tgt_mutex);

		kmem_free(report_lun, len);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "!Dynamically discovered %d LUNs for D_ID=%x",
		    nluns_claimed, ptgt->tgt_d_id);

		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
			rscn_count = ((fc_ulp_rscn_info_t *)
			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
			    ulp_rscn_count;
		} else {
			rscn_count = FC_INVALID_RSCN_COUNT;
		}

		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
			(void) fcp_call_finish_init(pptr, ptgt,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause);
		}

		fcp_icmd_free(pptr, icmd);
		return;
	}

	/* Retries exhausted: process what fits and report the loss. */
	if (nluns_claimed > nluns_bufmax) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
		    " Number of LUNs lost=%x",
		    ptgt->tgt_port_wwn.raw_wwn[0],
		    ptgt->tgt_port_wwn.raw_wwn[1],
		    ptgt->tgt_port_wwn.raw_wwn[2],
		    ptgt->tgt_port_wwn.raw_wwn[3],
		    ptgt->tgt_port_wwn.raw_wwn[4],
		    ptgt->tgt_port_wwn.raw_wwn[5],
		    ptgt->tgt_port_wwn.raw_wwn[6],
		    ptgt->tgt_port_wwn.raw_wwn[7],
		    nluns_claimed - nluns_bufmax);

		nluns_claimed = nluns_bufmax;
	}
	ptgt->tgt_lun_cnt = nluns_claimed;

	/*
	 * Pass 1: walk the LUNs we already know about and check whether
	 * each still appears in the target's reported list.
	 */
	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
		int offline;
		int exists = 0;

		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;

		for (i = 0; i < nluns_claimed && exists == 0; i++) {
			uchar_t *lun_string;

			lun_string = (uchar_t *)&(report_lun->lun_string[i]);

			switch (lun_string[0] & 0xC0) {
			case FCP_LUN_ADDRESSING:
			case FCP_PD_ADDRESSING:
			case FCP_VOLUME_ADDRESSING:
				lun_num = ((lun_string[0] & 0x3F) << 8) |
				    lun_string[1];
				if (plun->lun_num == lun_num) {
					exists++;
					break;
				}
				break;

			default:
				break;
			}
		}

		if (!exists && !offline) {
			/*
			 * A known online LUN is no longer reported.  Take
			 * port_mutex (dropping/retaking tgt_mutex to respect
			 * lock order) and recheck for a state change.
			 */
			mutex_exit(&ptgt->tgt_mutex);

			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);

			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					plun->lun_state |= FCP_LUN_DISAPPEARED;
				}
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x disappeared",
					    plun->lun_num, ptgt->tgt_d_id);
				}
				mutex_enter(&ptgt->tgt_mutex);
			} else {
				/* Stale response: abandon processing. */
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_5, 0,
				    "fcp_handle_reportlun,1: state change"
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				kmem_free(report_lun, len);
				(void) fcp_call_finish_init(pptr, ptgt,
				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
				    icmd->ipkt_cause);
				fcp_icmd_free(pptr, icmd);
				return;
			}
		} else if (exists) {
			/*
			 * Still reported: clear "not connected" and note a
			 * reappearance if it was flagged as disappeared.
			 */
			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
				plun->lun_state &=
				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			if (offline || plun->lun_num == 0) {
				if (plun->lun_state & FCP_LUN_DISAPPEARED) {
					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
					mutex_exit(&ptgt->tgt_mutex);
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x reappeared",
					    plun->lun_num, ptgt->tgt_d_id);
					mutex_enter(&ptgt->tgt_mutex);
				}
			}
		}
	}

	/* Counts the INQUIRYs still in flight (at least 1 to finish init). */
	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);

	/* Pass 2: issue an INQUIRY to every reported LUN. */
	for (i = 0; i < nluns_claimed; i++) {
		uchar_t	*lun_string;

		lun_string = (uchar_t *)&(report_lun->lun_string[i]);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
		    lun_string[0]);

		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
		case FCP_VOLUME_ADDRESSING:
			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];

			/* Skip LUNs the administrator has blacklisted. */
			if (fcp_lun_blacklist != NULL) {
				mutex_enter(&ptgt->tgt_mutex);
				if (fcp_should_mask(&ptgt->tgt_port_wwn,
				    lun_num) == TRUE) {
					ptgt->tgt_lun_cnt--;
					mutex_exit(&ptgt->tgt_mutex);
					break;
				}
				mutex_exit(&ptgt->tgt_mutex);
			}

			/* Reuse the existing LUN structure or allocate one. */
			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun allocation failed"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, lun_num);
					break;
				}
			}

			mutex_enter(&plun->lun_tgt->tgt_mutex);
			/* Record the full 8-byte FCP LUN address. */
			plun->lun_addr.ent_addr_0 =
			    BE_16(*(uint16_t *)&(lun_string[0]));
			plun->lun_addr.ent_addr_1 =
			    BE_16(*(uint16_t *)&(lun_string[2]));
			plun->lun_addr.ent_addr_2 =
			    BE_16(*(uint16_t *)&(lun_string[4]));
			plun->lun_addr.ent_addr_3 =
			    BE_16(*(uint16_t *)&(lun_string[6]));

			plun->lun_num = lun_num;
			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			mutex_exit(&plun->lun_tgt->tgt_mutex);

			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
				    ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_tgt->tgt_mutex);
				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!failed to send INQUIRY"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, plun->lun_num);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_handle_reportlun,2: state"
					    " change occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&plun->lun_tgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
			} else {
				/*
				 * The INQUIRY was sent; its callback will
				 * account for this LUN, so skip the
				 * fcp_call_finish_init() below.
				 */
				continue;
			}
			break;

		default:
			fcp_log(CE_WARN, NULL,
			    "!Unsupported LUN Addressing method %x "
			    "in response to REPORT_LUN", lun_string[0]);
			break;
		}

		/* Reached only when no INQUIRY is outstanding for entry i. */
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	/* Empty LUN list: balance the tgt_tmp_cnt of 1 set above. */
	if (i == 0) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	kmem_free(report_lun, len);
	fcp_icmd_free(pptr, icmd);
}
/*
 * Function: fcp_get_lun
 *
 * Description: Looks up the LUN with the given number on a target.  The
 *		target's LUN list is walked under tgt_mutex; the mutex is
 *		released before returning.
 *
 * Argument: ptgt	Target to search.
 *	     lun_num	LUN number to look for.
 *
 * Return Value: Matching fcp_lun structure, or NULL if not found.
 */
static struct fcp_lun *
fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
{
	struct fcp_lun	*lun;
	struct fcp_lun	*found = NULL;

	mutex_enter(&ptgt->tgt_mutex);
	for (lun = ptgt->tgt_lun; lun != NULL; lun = lun->lun_next) {
		if (lun->lun_num == lun_num) {
			found = lun;
			break;
		}
	}
	mutex_exit(&ptgt->tgt_mutex);

	return (found);
}
/*
 * Function: fcp_finish_tgt
 *
 * Description: Completes discovery for one target: either offlines it (if
 *		it was marked) or clears its busy state and creates/updates
 *		its LUN device nodes.  Stale invocations (link or target
 *		generation counters no longer match) are ignored.
 *
 * Argument: *pptr	FCP port the target hangs off.
 *	     *ptgt	Target to finish.
 *	     link_cnt	Link state change counter this work belongs to.
 *	     tgt_cnt	Target state change counter (0 = don't check).
 *	     cause	What caused this discovery pass.
 *
 * Return Value: 1 on success (or no-op), 0 if the counters were stale or
 *		 fcp_offline_target() reported failure.
 *
 * Context: Caller must hold pptr->port_mutex.
 */
static int
fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int cause)
{
	int	rval = 1;

	ASSERT(pptr != NULL);
	ASSERT(ptgt != NULL);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
	    ptgt->tgt_state);

	ASSERT(mutex_owned(&pptr->port_mutex));

	/* Drop stale work: the link or target has changed since queued. */
	if ((pptr->port_link_cnt != link_cnt) ||
	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);
		return (0);
	} else {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
	}

	mutex_enter(&ptgt->tgt_mutex);
	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
		/*
		 * FCP_TGT_MARK means the target was not rediscovered in
		 * this pass: take it offline.  Otherwise clear busy and
		 * create the LUN nodes (unless the node is on-demand only).
		 */
		if (ptgt->tgt_state & FCP_TGT_MARK) {
			ptgt->tgt_state &= ~FCP_TGT_MARK;
			rval = fcp_offline_target(pptr, ptgt, link_cnt,
			    tgt_cnt, 0, 0);
		} else {
			ptgt->tgt_state &= ~FCP_TGT_BUSY;
			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
				    cause);
				ptgt->tgt_device_created = 1;
			} else {
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_BUSY);
			}
		}
	}
	mutex_exit(&ptgt->tgt_mutex);

	return (rval);
}
static void
fcp_finish_init(struct fcp_port *pptr)
{
#ifdef DEBUG
bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
FCP_STACK_DEPTH);
#endif
ASSERT(mutex_owned(&pptr->port_mutex));
FCP_TRACE(fcp_logq, pptr->port_instbuf,
fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
" entering; ipkt count=%d", pptr->port_ipkt_cnt);
if ((pptr->port_state & FCP_STATE_ONLINING) &&
!(pptr->port_state & (FCP_STATE_SUSPENDED |
FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
pptr->port_state &= ~FCP_STATE_ONLINING;
pptr->port_state |= FCP_STATE_ONLINE;
}
cv_broadcast(&pptr->port_config_cv);
}
/*
 * Function: fcp_create_luns
 *
 * Description: Walks a target's LUN list and, for each LUN that survived
 *		discovery, clears its busy state and hands an ONLINE
 *		request to the hotplug thread.  Marked LUNs (not seen in
 *		this discovery pass) are offlined instead.
 *
 * Argument: *ptgt	Target whose LUNs are to be created/onlined.
 *	     link_cnt	Link state change counter.
 *	     tgt_cnt	Target state change counter.
 *	     cause	What caused this discovery pass.
 *
 * Return Value: None
 *
 * Context: Caller must hold ptgt->tgt_mutex.
 */
static void
fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
{
	struct fcp_lun	*plun;
	struct fcp_port	*pptr;
	child_info_t	*cip = NULL;

	ASSERT(ptgt != NULL);
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	pptr = ptgt->tgt_port;

	ASSERT(pptr != NULL);

	/* Scan all the LUNs for this target. */
	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
		if (plun->lun_state & FCP_LUN_OFFLINE) {
			continue;
		}

		/* MARK still set: the LUN was not rediscovered; offline it. */
		if (plun->lun_state & FCP_LUN_MARK) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: offlining marked LUN!");
			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
			continue;
		}

		plun->lun_state &= ~FCP_LUN_BUSY;

		/* For mpxio LUNs, ask the hotplug thread to unbusy the path. */
		if (plun->lun_mpxio && plun->lun_cip &&
		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0))) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: enable lun %p failed!",
			    plun);
		}

		/* Already initialized and unchanged: nothing to online. */
		if (plun->lun_state & FCP_LUN_INIT &&
		    !(plun->lun_state & FCP_LUN_CHANGED)) {
			continue;
		}

		if (cause == FCP_CAUSE_USER_CREATE) {
			continue;
		}

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_6, 0,
		    "create_luns: passing ONLINE elem to HP thread");

		/*
		 * A changed LUN must first be offlined so that its device
		 * node can be recreated with the new identity.
		 */
		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
		}

		/* Try the ONLINE with attach first, then without. */
		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {
			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
			    link_cnt, tgt_cnt, 0, 0)) {
				fcp_log(CE_CONT, pptr->port_dip,
				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
				    plun->lun_tgt->tgt_d_id, plun->lun_num);
			}
		}
	}
}
/*
 * Function: fcp_trigger_lun
 *
 * Description: Executes an online/offline (or mpxio path busy/unbusy)
 *		request against a LUN's child device node.  For an offline,
 *		any attached child's devfs nodes are cleaned first.  The
 *		port is busied around the NDI/MDI operation.
 *
 * Argument: *plun	LUN to act on.
 *	     *cip	Child node (dev_info or path_info) the request
 *			refers to; may be NULL.
 *	     old_mpxio	Expected value of plun->lun_mpxio when the request
 *			was queued (0 = don't check).
 *	     online	FCP_ONLINE, FCP_OFFLINE, or an mpxio path busy op.
 *	     lcount	Link state change counter.
 *	     tcount	Target state change counter.
 *	     flags	Flags passed to the online/offline operation.
 *
 * Return Value: NDI_SUCCESS or NDI_FAILURE.
 *
 * Context: Kernel context only (asserts !servicing_interrupt()).
 */
static int
fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
    int online, int lcount, int tcount, int flags)
{
	int		rval = NDI_FAILURE;
	boolean_t	enteredv;
	child_info_t	*ccip;
	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
	int		is_mpxio = pptr->port_mpxio;
	/*
	 * BUGFIX: cdip must start out NULL.  On the FCP_OFFLINE path below
	 * it is only conditionally assigned (mpxio with neither else-if arm
	 * matching leaves it unset), yet it is then tested with "if (cdip)";
	 * reading it uninitialized is undefined behavior.
	 */
	dev_info_t	*cdip = NULL, *pdip;
	char		*devname;

	/* The LUN's mpxio mode changed since this work was queued: drop it. */
	if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
		    "plun: %p, cip: %p, what:%d", plun, cip, online);
		return (rval);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
	    "flags=%x mpxio=%x\n",
	    plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
	    plun->lun_mpxio);

	/* mpxio path busy/unbusy requests are handled separately. */
	if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
	    online == FCP_MPXIO_PATH_SET_BUSY) {
		if (plun->lun_mpxio) {
			rval = fcp_update_mpxio_path(plun, cip, online);
		} else {
			rval = NDI_SUCCESS;
		}
		return (rval);
	}

	/* devfs_clean() and the NDI/MDI calls below can block. */
	ASSERT(!servicing_interrupt());

	if (online == FCP_OFFLINE) {
		/* Resolve the dev_info node the offline applies to. */
		if (plun->lun_mpxio == 0) {
			if (plun->lun_cip == cip) {
				cdip = DIP(plun->lun_cip);
			} else {
				cdip = DIP(cip);
			}
		} else if ((plun->lun_cip == cip) && plun->lun_cip) {
			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
		} else if ((plun->lun_cip != cip) && cip) {
			cdip = mdi_pi_get_client(PIP(cip));
		}

		/* Clean up the attached child's /devices entries first. */
		if (cdip) {
			if (i_ddi_devi_attached(cdip)) {
				pdip = ddi_get_parent(cdip);
				devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
				ndi_devi_enter(pdip);
				(void) ddi_deviname(cdip, devname);
				ndi_devi_exit(pdip);
				/*
				 * Skip the leading '/' so devfs_clean() gets
				 * a name relative to the parent.
				 */
				(void) devfs_clean(pdip, devname + 1,
				    DV_CLEAN_FORCE);
				kmem_free(devname, MAXNAMELEN + 1);
			}
		}
	}

	/* Hold the port busy across the node state change. */
	if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
		return (NDI_FAILURE);
	}

	if (is_mpxio) {
		mdi_devi_enter(pptr->port_dip, &enteredv);
	} else {
		ndi_devi_enter(pptr->port_dip);
	}

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);

	if (online == FCP_ONLINE) {
		ccip = fcp_get_cip(plun, cip, lcount, tcount);
		if (ccip == NULL) {
			goto fail;
		}
	} else {
		if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
			goto fail;
		}
		ccip = cip;
	}

	if (online == FCP_ONLINE) {
		rval = fcp_online_child(plun, ccip, lcount, tcount, flags);
		fc_ulp_log_device_event(pptr->port_fp_handle,
		    FC_ULP_DEVICE_ONLINE);
	} else {
		rval = fcp_offline_child(plun, ccip, lcount, tcount, flags);
		fc_ulp_log_device_event(pptr->port_fp_handle,
		    FC_ULP_DEVICE_OFFLINE);
	}

fail:	mutex_exit(&plun->lun_mutex);
	mutex_exit(&pptr->port_mutex);

	if (is_mpxio) {
		mdi_devi_exit(pptr->port_dip, enteredv);
	} else {
		ndi_devi_exit(pptr->port_dip);
	}

	fc_ulp_idle_port(pptr->port_fp_handle);

	return (rval);
}
/*
 * Function: fcp_offline_target
 *
 * Description: Offlines a target, either immediately or by queuing it on
 *		the port's delayed-offline list (so a briefly bouncing
 *		device does not thrash its device nodes).  Stale requests
 *		(generation counters no longer match) are dropped.
 *
 * Argument: *pptr	FCP port.
 *	     *ptgt	Target to offline.
 *	     link_cnt	Link state change counter.
 *	     tgt_cnt	Target state change counter (0 = don't check; the
 *			current counter is then used for the queue element).
 *	     nowait	Non-zero to skip the fcp_offline_delay grace period.
 *	     flags	Flags for the eventual offline operation.
 *
 * Return Value: 1 if the offline was done or queued, 0 if the request was
 *		 stale.
 *
 * Context: Caller must hold pptr->port_mutex and ptgt->tgt_mutex.
 */
static int
fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int nowait, int flags)
{
	struct fcp_tgt_elem	*elem;

	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));
	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));

	/*
	 * Stale request.  tgt_mutex is dropped around the trace macro
	 * because FCP_TGT_TRACE must not run with it held.
	 */
	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
	    ptgt->tgt_change_cnt)) {
		mutex_exit(&ptgt->tgt_mutex);
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
		mutex_enter(&ptgt->tgt_mutex);

		return (0);
	}

	ptgt->tgt_pd_handle = NULL;
	mutex_exit(&ptgt->tgt_mutex);
	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
	mutex_enter(&ptgt->tgt_mutex);

	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;

	/*
	 * If the target supports the tape capability, queue a delayed
	 * offline; otherwise (or if no memory) offline it right now.
	 */
	if (ptgt->tgt_tcap &&
	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
		elem->flags = flags;
		elem->time = fcp_watchdog_time;
		if (nowait == 0) {
			elem->time += fcp_offline_delay;
		}
		elem->ptgt = ptgt;
		elem->link_cnt = link_cnt;
		elem->tgt_cnt = tgt_cnt;
		elem->next = pptr->port_offline_tgts;
		pptr->port_offline_tgts = elem;
	} else {
		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
	}

	return (1);
}
/*
 * Function: fcp_offline_target_now
 *
 * Description: Offlines a target immediately: re-enables transport relogin
 *		for its port WWN, marks the target offline, clears its port
 *		device handle and offlines all of its LUNs.
 *
 * Argument: *pptr	FCP port.
 *	     *ptgt	Target to offline.
 *	     link_cnt	Link state change counter.
 *	     tgt_cnt	Target state change counter.
 *	     flags	Flags for the LUN offline operations.
 *
 * Return Value: None
 *
 * Context: Caller must hold pptr->port_mutex and ptgt->tgt_mutex.
 */
static void
fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int flags)
{
	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
	ptgt->tgt_state = FCP_TGT_OFFLINE;
	ptgt->tgt_pd_handle = NULL;
	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
}
/*
 * Function: fcp_offline_tgt_luns
 *
 * Description: Immediately offlines (nowait) every LUN of a target that is
 *		not already offline.
 *
 * Argument: *ptgt	Target whose LUNs are to be offlined.
 *	     link_cnt	Link state change counter.
 *	     tgt_cnt	Target state change counter.
 *	     flags	Flags for the offline operations.
 *
 * Return Value: None
 *
 * Context: Caller must hold the target's port mutex and tgt_mutex.
 */
static void
fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
    int flags)
{
	struct fcp_lun	*lun;

	ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	lun = ptgt->tgt_lun;
	while (lun != NULL) {
		if ((lun->lun_state & FCP_LUN_OFFLINE) == 0) {
			fcp_offline_lun(lun, link_cnt, tgt_cnt, 1, flags);
		}
		lun = lun->lun_next;
	}
}
/*
 * Function: fcp_offline_lun
 *
 * Description: Offlines a LUN, either immediately (nowait) or by queuing
 *		it on the port's delayed-offline list, where the watchdog
 *		will pick it up after fcp_offline_delay.  If the queue
 *		element cannot be allocated the LUN is offlined right away.
 *
 * Argument: *plun	LUN to offline.
 *	     link_cnt	Link state change counter.
 *	     tgt_cnt	Target state change counter.
 *	     nowait	Non-zero to offline immediately.
 *	     flags	Flags for the offline operation.
 *
 * Return Value: None
 *
 * Context: Caller must hold the LUN's target mutex.
 */
static void
fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
    int nowait, int flags)
{
	struct fcp_port		*pptr = plun->lun_tgt->tgt_port;
	struct fcp_lun_elem	*elem;

	ASSERT(plun != NULL);
	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	/* Immediate offline requested: bypass the delayed-offline queue. */
	if (nowait) {
		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
		return;
	}

	if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
		elem->flags = flags;
		/*
		 * nowait is known to be zero here (handled above), so the
		 * offline is always deferred by fcp_offline_delay.  (The
		 * original re-tested nowait == 0 redundantly.)
		 */
		elem->time = fcp_watchdog_time + fcp_offline_delay;
		elem->plun = plun;
		elem->link_cnt = link_cnt;
		elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
		elem->next = pptr->port_offline_luns;
		pptr->port_offline_luns = elem;
	} else {
		/* No memory for the queue element: offline right now. */
		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
	}
}
/*
 * Function: fcp_prepare_offline_lun
 *
 * Description: Prepares a LUN for offlining: aborts all of its queued
 *		(not-yet-issued) commands and, for an mpxio LUN, unbusies
 *		and then disables its path.  The target mutex is dropped
 *		around the blocking operations.
 *
 * Argument: *plun	LUN being prepared for offline.
 *	     link_cnt	Link state change counter.
 *	     tgt_cnt	Target state change counter.
 *
 * Return Value: None
 *
 * Context: Caller must hold the LUN's target mutex (temporarily released
 *	    inside).
 */
static void
fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
{
	struct fcp_pkt	*head = NULL;

	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	/* Pull this LUN's queued commands and fail them back. */
	mutex_exit(&LUN_TGT->tgt_mutex);
	head = fcp_scan_commands(plun);
	if (head != NULL) {
		fcp_abort_commands(head, LUN_PORT);
	}
	mutex_enter(&LUN_TGT->tgt_mutex);

	if (plun->lun_cip && plun->lun_mpxio) {
		/* Clear the path busy state before disabling the path. */
		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0)) {
			fcp_log(CE_NOTE, LUN_PORT->port_dip,
			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
		/* mdi_pi_disable_path() can block: drop the mutex. */
		mutex_exit(&LUN_TGT->tgt_mutex);
		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
		mutex_enter(&LUN_TGT->tgt_mutex);
	}
}
/*
 * Function: fcp_offline_lun_now
 *
 * Description: Offlines a LUN immediately: updates its state flags,
 *		aborts its queued commands and hands an OFFLINE request to
 *		the hotplug thread for its child node.
 *
 * Argument: *plun	LUN to offline.
 *	     link_cnt	Link state change counter.
 *	     tgt_cnt	Target state change counter.
 *	     flags	Flags for the offline operation.
 *
 * Return Value: None
 *
 * Context: Caller must hold the LUN's target mutex (temporarily released
 *	    inside).
 */
static void
fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
    int flags)
{
	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	/* fcp_update_offline_flags() takes the mutexes itself. */
	mutex_exit(&LUN_TGT->tgt_mutex);
	fcp_update_offline_flags(plun);
	mutex_enter(&LUN_TGT->tgt_mutex);

	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);

	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_4, 0,
	    "offline_lun: passing OFFLINE elem to HP thread");

	if (plun->lun_cip) {
		fcp_log(CE_NOTE, LUN_PORT->port_dip,
		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
		    LUN_TGT->tgt_trace);

		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
		    link_cnt, tgt_cnt, flags, 0)) {
			fcp_log(CE_CONT, LUN_PORT->port_dip,
			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
	}
}
/*
 * Function: fcp_scan_offline_luns
 *
 * Description: Watchdog helper that walks the port's delayed-offline LUN
 *		list and offlines every entry whose grace period expired,
 *		unless the link/target generation counters changed in the
 *		meantime (the device came back).  Consumed entries are
 *		unlinked and freed.
 *
 * Argument: *pptr	FCP port whose list is scanned.
 *
 * Return Value: None
 *
 * Context: Caller must hold pptr->port_mutex.
 */
static void
fcp_scan_offline_luns(struct fcp_port *pptr)
{
	struct fcp_lun_elem	*elem;
	struct fcp_lun_elem	*prev;
	struct fcp_lun_elem	*next;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	prev = NULL;
	elem = pptr->port_offline_luns;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int		changed = 1;
			struct fcp_tgt	*ptgt = elem->plun->lun_tgt;

			mutex_enter(&ptgt->tgt_mutex);
			if (pptr->port_link_cnt == elem->link_cnt &&
			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
				changed = 0;
			}

			/*
			 * NOTE(review): lun_state is tested against
			 * FCP_TGT_OFFLINE (a target flag), not
			 * FCP_LUN_OFFLINE.  This presumably works only if
			 * the two flags share a bit value — confirm against
			 * fcpvar.h before changing.
			 */
			if (!changed &&
			    !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
				fcp_offline_lun_now(elem->plun,
				    elem->link_cnt, elem->tgt_cnt, elem->flags);
			}
			mutex_exit(&ptgt->tgt_mutex);

			/* Unlink and free the consumed element. */
			kmem_free(elem, sizeof (*elem));

			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_luns = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}
/*
 * Function: fcp_scan_offline_tgts
 *
 * Description: Watchdog helper that walks the port's delayed-offline
 *		target list and offlines every entry whose grace period
 *		expired, unless the generation counters show the entry is
 *		outdated.  A single link-down bump of both counters is
 *		still considered current.  Consumed entries are unlinked
 *		and freed.
 *
 * Argument: *pptr	FCP port whose list is scanned.
 *
 * Return Value: None
 *
 * Context: Caller must hold pptr->port_mutex.
 */
static void
fcp_scan_offline_tgts(struct fcp_port *pptr)
{
	struct fcp_tgt_elem	*elem;
	struct fcp_tgt_elem	*prev;
	struct fcp_tgt_elem	*next;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	prev = NULL;
	elem = pptr->port_offline_tgts;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int		outdated = 1;
			struct fcp_tgt	*ptgt = elem->ptgt;

			mutex_enter(&ptgt->tgt_mutex);

			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
				/* No change on the target since queued. */
				outdated = 0;
			} else if (ptgt->tgt_change_cnt == elem->tgt_cnt + 1 &&
			    pptr->port_link_cnt == elem->link_cnt + 1 &&
			    ptgt->tgt_statec_cause == FCP_CAUSE_LINK_DOWN) {
				/* Exactly one link-down event: still valid. */
				outdated = 0;
			}

			if (!outdated && !(ptgt->tgt_state &
			    FCP_TGT_OFFLINE)) {
				fcp_offline_target_now(pptr,
				    ptgt, elem->link_cnt, elem->tgt_cnt,
				    elem->flags);
			}

			mutex_exit(&ptgt->tgt_mutex);

			/* Unlink and free the consumed element. */
			kmem_free(elem, sizeof (*elem));

			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_tgts = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}
/*
 * Function: fcp_update_offline_flags
 *
 * Description: Marks a LUN offline (clearing INIT/BUSY/MARK) and, if a
 *		child node was initialized for it, fires the FCAL_REMOVE
 *		NDI event on that node so upper layers learn of the
 *		removal.
 *
 * Argument: *plun	LUN being offlined.
 *
 * Return Value: None
 *
 * Context: Takes the target and LUN mutexes itself; caller must NOT hold
 *	    them.  Note the asymmetric unlock paths below.
 */
static void
fcp_update_offline_flags(struct fcp_lun *plun)
{
	struct fcp_port	*pptr = LUN_PORT;

	ASSERT(plun != NULL);

	mutex_enter(&LUN_TGT->tgt_mutex);
	plun->lun_state |= FCP_LUN_OFFLINE;
	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);

	mutex_enter(&plun->lun_mutex);
	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
		dev_info_t *cdip = NULL;

		mutex_exit(&LUN_TGT->tgt_mutex);

		/* Resolve the dev_info node (direct or via the mpxio path). */
		if (plun->lun_mpxio == 0) {
			cdip = DIP(plun->lun_cip);
		} else if (plun->lun_cip) {
			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
		}

		mutex_exit(&plun->lun_mutex);
		if (cdip) {
			/* Deliver the FCAL removal event to the child. */
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
			    &fcp_remove_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(
			    pptr->port_ndi_event_hdl, cdip,
			    fcp_remove_eid, NULL);
		}
	} else {
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&LUN_TGT->tgt_mutex);
	}
}
/*
 * Function: fcp_scan_commands
 *
 * Description: Removes from the port's packet queue every queued (not yet
 *		issued, not FLAG_NOINTR) command addressed to the given LUN
 *		and returns them as a singly linked list, preserving queue
 *		order.  The port queue's head/tail pointers are fixed up as
 *		entries are unlinked.
 *
 * Argument: *plun	LUN whose commands are to be pulled off the queue.
 *
 * Return Value: Head of the list of removed commands, or NULL if none.
 *
 * Context: Takes pptr->port_pkt_mutex itself.
 */
static struct fcp_pkt *
fcp_scan_commands(struct fcp_lun *plun)
{
	struct fcp_port	*pptr = LUN_PORT;
	struct fcp_pkt	*cmd = NULL;	/* pkt cmd */
	struct fcp_pkt	*ncmd = NULL;	/* next pkt cmd */
	struct fcp_pkt	*pcmd = NULL;	/* the previous command (retained) */
	struct fcp_pkt	*head = NULL;	/* head of the list of removed cmds */
	struct fcp_pkt	*tail = NULL;	/* tail of that list */
	int		cmds_found = 0;

	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *tlun =
		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);

		ncmd = cmd->cmd_next;	/* set next command */

		/*
		 * Keep commands for other LUNs, already-issued commands,
		 * and polled (FLAG_NOINTR) commands on the queue.
		 */
		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
			pcmd = cmd;
			continue;
		}
		cmds_found++;

		/* Unlink cmd from the queue. */
		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = cmd->cmd_next;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = cmd->cmd_next;
		}

		/* Removing the tail: the retained predecessor becomes tail. */
		if (cmd == pptr->port_pkt_tail) {
			pptr->port_pkt_tail = pcmd;
			if (pcmd) {
				pcmd->cmd_next = NULL;
			}
		}

		/* Append cmd to the result list. */
		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);
			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "scan commands: %d cmd(s) found", cmds_found);

	return (head);
}
/*
 * Function: fcp_abort_commands
 *
 * Description: Fails back a list of commands (as built by
 *		fcp_scan_commands) with CMD_DEV_GONE and runs each
 *		command's completion callback.  port_mutex is released
 *		around every callback invocation.
 *
 * Argument: *head	Head of the linked list of commands to abort.
 *	     *pptr	FCP port the commands belong to.
 *
 * Return Value: None
 *
 * Context: Caller must hold pptr->port_mutex (dropped/retaken inside).
 */
static void
fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
{
	struct fcp_pkt	*cur;
	struct fcp_pkt	*next;

	ASSERT(mutex_owned(&pptr->port_mutex));

	for (cur = head; cur != NULL; cur = next) {
		struct scsi_pkt	*pkt = cur->cmd_pkt;

		next = cur->cmd_next;
		ASSERT(pkt != NULL);

		/* Report the command as failed because the device is gone. */
		pkt->pkt_reason = CMD_DEV_GONE;
		pkt->pkt_statistics = 0;
		pkt->pkt_state = 0;

		cur->cmd_flags &= ~CFLAG_IN_QUEUE;
		cur->cmd_state = FCP_PKT_IDLE;

		ASSERT(pkt->pkt_comp != NULL);

		/* The completion callback must not run under port_mutex. */
		mutex_exit(&pptr->port_mutex);
		fcp_post_callback(cur);
		mutex_enter(&pptr->port_mutex);
	}
}
/*
 * Function: fcp_cmd_callback
 *
 * Description: Transport completion callback for regular SCSI commands.
 *		Translates the FC completion into SCSI packet status (via
 *		fcp_complete_pkt) and posts the target driver's callback.
 *
 * Argument: *fpkt	Completed FC packet.
 *
 * Return Value: None
 */
static void
fcp_cmd_callback(fc_packet_t *fpkt)
{
	struct fcp_pkt	*cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
	struct scsi_pkt	*pkt = cmd->cmd_pkt;
	struct fcp_port	*pptr = ADDR2FCP(&pkt->pkt_address);

	ASSERT(cmd->cmd_state != FCP_PKT_IDLE);

	/* Duplicate of the ASSERT so non-DEBUG kernels also catch this. */
	if (cmd->cmd_state == FCP_PKT_IDLE) {
		cmn_err(CE_PANIC, "Packet already completed %p",
		    (void *)cmd);
	}

	/*
	 * A command being aborted will be completed by the abort path;
	 * do not complete it twice.
	 */
	if (cmd->cmd_state == FCP_PKT_ABORTING) {
		fcp_log(CE_CONT, pptr->port_dip,
		    "!FCP: Pkt completed while aborting\n");
		return;
	}
	cmd->cmd_state = FCP_PKT_IDLE;

	fcp_complete_pkt(fpkt);

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	fcp_post_callback(cmd);
}
/*
 * Function: fcp_complete_pkt
 *
 * Description: Translates the completion status of an FC packet into the
 *		corresponding scsi_pkt fields for the target driver.  On
 *		FC success the FCP response IU is decoded (status, residual,
 *		response info, autosense); on FC failure the FC state and
 *		reason codes are mapped to pkt_reason/pkt_statistics.  Some
 *		sense keys (REPORT LUNS data changed / LU not supported)
 *		schedule a LUN reconfiguration of the target.
 *
 * Argument: *fpkt	Completed FC packet.
 *
 * Return Value: None
 */
static void
fcp_complete_pkt(fc_packet_t *fpkt)
{
	int			error = 0;
	struct fcp_pkt		*cmd = (struct fcp_pkt *)
	    fpkt->pkt_ulp_private;
	struct scsi_pkt		*pkt = cmd->cmd_pkt;
	struct fcp_port		*pptr = ADDR2FCP(&pkt->pkt_address);
	struct fcp_lun		*plun;
	struct fcp_tgt		*ptgt;
	struct fcp_rsp		*rsp;
	/* DEBUG only: for the address-unchanged ASSERT at the end. */
	struct scsi_address	save;

#ifdef	DEBUG
	save = pkt->pkt_address;
#endif /* DEBUG */

	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;

	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
		/* Copy the FCP response IU out of DMA space if needed. */
		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));
		}

		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS;

		pkt->pkt_resid = 0;
		if (fpkt->pkt_datalen) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
			/* Data residual: decode the rsp fields below. */
			if (fpkt->pkt_data_resid) {
				error++;
			}
		}

		/* Hand the SCSI status byte back to the target driver. */
		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
			/*
			 * Bad status with no response/sense info: treat the
			 * whole transfer as not done.
			 */
			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
			    !rsp->fcp_u.fcp_status.sense_len_set) {
				pkt->pkt_state &= ~STATE_XFERRED_DATA;
				pkt->pkt_resid = cmd->cmd_dmacount;
			}
		}

		/* Fast path: clean completion, nothing more to decode. */
		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
			return;
		}

		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/* Update the transfer resid, if appropriate. */
		if (rsp->fcp_u.fcp_status.resid_over ||
		    rsp->fcp_u.fcp_status.resid_under) {
			pkt->pkt_resid = rsp->fcp_resid;
		}

		/* FCP response information present? */
		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			struct fcp_rsp_info	*bep;

			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
			    sizeof (struct fcp_rsp));

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp), bep,
				    fpkt->pkt_resp_acc,
				    sizeof (struct fcp_rsp_info));
			}

			/* Non-zero rsp_code means the command failed. */
			if (bep->rsp_code != FCP_NO_FAILURE) {
				child_info_t	*cip;

				pkt->pkt_reason = CMD_TRAN_ERR;

				mutex_enter(&plun->lun_mutex);
				cip = plun->lun_cip;
				mutex_exit(&plun->lun_mutex);

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "FCP response error on cmd=%p"
				    " target=0x%x, cip=%p", cmd,
				    ptgt->tgt_d_id, cip);
			}
		}

		/* Autosense data present? */
		if (rsp->fcp_u.fcp_status.sense_len_set) {
			uchar_t				rqlen;
			caddr_t				sense_from;
			child_info_t			*cip;
			timeout_id_t			tid;
			struct scsi_arq_status		*arq;
			struct scsi_extended_sense	*sense_to;

			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
			sense_to = &arq->sts_sensedata;

			/* Clip the sense data to what the arq can hold. */
			rqlen = (uchar_t)min(rsp->fcp_sense_len,
			    sizeof (struct scsi_extended_sense));

			/* Sense follows the rsp header and response info. */
			sense_from = (caddr_t)fpkt->pkt_resp +
			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(sense_from, sense_to,
				    fpkt->pkt_resp_acc, rqlen);
			} else {
				bcopy(sense_from, sense_to, rqlen);
			}

			/*
			 * The target's LUN inventory changed: schedule a
			 * REPORT_LUN-driven reconfiguration (once).
			 */
			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));

					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;
				}
				mutex_exit(&ptgt->tgt_mutex);
				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: Report Lun Has Changed"
					    " target=%x", ptgt->tgt_d_id);
				} else if (FCP_SENSE_NO_LUN(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: LU Not Supported"
					    " target=%x", ptgt->tgt_d_id);
				}
			}

			/* Fill in the auto request sense status. */
			ASSERT(pkt->pkt_scbp != NULL);

			pkt->pkt_state |= STATE_ARQ_DONE;

			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;

			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
			arq->sts_rqpkt_reason = 0;
			arq->sts_rqpkt_statistics = 0;

			arq->sts_rqpkt_state = STATE_GOT_BUS |
			    STATE_GOT_TARGET | STATE_SENT_CMD |
			    STATE_GOT_STATUS | STATE_ARQ_DONE |
			    STATE_XFERRED_DATA;

			mutex_enter(&plun->lun_mutex);
			cip = plun->lun_cip;
			mutex_exit(&plun->lun_mutex);

			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "SCSI Check condition on cmd=%p target=0x%x"
			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
			    cmd->cmd_fcp_cmd.fcp_cdb[0],
			    rsp->fcp_u.fcp_status.scsi_status,
			    sense_to->es_key, sense_to->es_add_code,
			    sense_to->es_qual_code);
		}
	} else {
		/* FC transport level failure: map it to SCSI semantics. */
		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;

		switch (fpkt->pkt_state) {
		case FC_PKT_TRAN_ERROR:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OVERRUN:
				pkt->pkt_reason = CMD_CMD_OVR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_XCHG_BSY: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */

				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_ABORTED:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_ABORT_FAILED:
				pkt->pkt_reason = CMD_ABORT_FAIL;
				break;

			case FC_REASON_NO_SEQ_INIT:
			case FC_REASON_CRC_ERROR:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				break;
			}
			break;

		case FC_PKT_PORT_OFFLINE: {
			dev_info_t	*cdip = NULL;
			caddr_t		ptr;

			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_8, 0,
				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
				    ptgt->tgt_d_id);
			}

			mutex_enter(&plun->lun_mutex);
			if (plun->lun_mpxio == 0) {
				cdip = DIP(plun->lun_cip);
			} else if (plun->lun_cip) {
				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
			}
			mutex_exit(&plun->lun_mutex);

			/* Tell the child node the device was removed. */
			if (cdip) {
				(void) ndi_event_retrieve_cookie(
				    pptr->port_ndi_event_hdl, cdip,
				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
				    NDI_EVENT_NOPASS);
				(void) ndi_event_run_callbacks(
				    pptr->port_ndi_event_hdl, cdip,
				    fcp_remove_eid, NULL);
			}

			/*
			 * Tape and changer devices get BUSY back so their
			 * drivers retry rather than fail the job outright.
			 */
			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
			    (plun->lun_type == DTYPE_CHANGER)) {
				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
			} else {
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;
			}
			break;
		}

		case FC_PKT_TRAN_BSY:
			/*
			 * Use the ssd Qfull handling scheme.
			 */
			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
			pkt->pkt_state = STATE_GOT_BUS;
			break;

		case FC_PKT_TIMEOUT:
			pkt->pkt_reason = CMD_TIMEOUT;
			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
				pkt->pkt_statistics |= STAT_TIMEOUT;
			} else {
				pkt->pkt_statistics |= STAT_ABORTED;
			}
			break;

		case FC_PKT_LOCAL_RJT:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OFFLINE: {
				dev_info_t	*cdip = NULL;

				mutex_enter(&plun->lun_mutex);
				if (plun->lun_mpxio == 0) {
					cdip = DIP(plun->lun_cip);
				} else if (plun->lun_cip) {
					cdip = mdi_pi_get_client(
					    PIP(plun->lun_cip));
				}
				mutex_exit(&plun->lun_mutex);

				if (cdip) {
					(void) ndi_event_retrieve_cookie(
					    pptr->port_ndi_event_hdl, cdip,
					    FCAL_REMOVE_EVENT,
					    &fcp_remove_eid,
					    NDI_EVENT_NOPASS);
					(void) ndi_event_run_callbacks(
					    pptr->port_ndi_event_hdl,
					    cdip, fcp_remove_eid, NULL);
				}

				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;

				break;
			}

			case FC_REASON_NOMEM:
			case FC_REASON_QFULL: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_DMA_ERROR:
				pkt->pkt_reason = CMD_DMA_DERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_CRC_ERROR:
			case FC_REASON_UNDERRUN: {
				uchar_t		status;
				/*
				 * Work around for Bugid: 4240945.
				 * IB on A5k doesn't set the Underrun bit
				 * in the fcp status, when it is transferring
				 * less than requested amount of data. Work
				 * around the ses problem to keep luxadm
				 * happy till ibfirmware is fixed.
				 */
				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
					FCP_CP_IN(fpkt->pkt_resp, rsp,
					    fpkt->pkt_resp_acc,
					    sizeof (struct fcp_rsp));
				}
				status = rsp->fcp_u.fcp_status.scsi_status;
				if (((plun->lun_type & DTYPE_MASK) ==
				    DTYPE_ESI) && (status == STATUS_GOOD)) {
					pkt->pkt_reason = CMD_CMPLT;
					*pkt->pkt_scbp = status;
					pkt->pkt_resid = 0;
				} else {
					pkt->pkt_reason = CMD_TRAN_ERR;
					pkt->pkt_statistics |= STAT_ABORTED;
				}
				break;
			}

			case FC_REASON_NO_CONNECTION:
			case FC_REASON_UNSUPPORTED:
			case FC_REASON_ILLEGAL_REQ:
			case FC_REASON_BAD_SID:
			case FC_REASON_DIAG_BUSY:
			case FC_REASON_FCAL_OPN_FAIL:
			case FC_REASON_BAD_XID:
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;
			}
			break;

		case FC_PKT_NPORT_RJT:
		case FC_PKT_FABRIC_RJT:
		case FC_PKT_NPORT_BSY:
		case FC_PKT_FABRIC_BSY:
		default:
			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "FC Status 0x%x, reason 0x%x",
			    fpkt->pkt_state, fpkt->pkt_reason);
			pkt->pkt_reason = CMD_TRAN_ERR;
			pkt->pkt_statistics |= STAT_ABORTED;
			break;
		}

		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_9, 0,
		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
		    fpkt->pkt_reason);
	}

	/* The completion must not have altered the packet's address. */
	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
}
/*
 * fcp_validate_fcp_response
 *	Sanity-check an FCP response IU before its lengths are trusted.
 *	Non-zero reserved fields are only logged; an advertised response
 *	or sense length that cannot fit in the response IU is fatal.
 *
 *	Returns FC_SUCCESS when the lengths are usable, FC_FAILURE
 *	otherwise.
 */
static int
fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
{
	/* Reserved fields should be zero; complain but do not fail. */
	if (rsp->reserved_0 || rsp->reserved_1 ||
	    rsp->fcp_u.fcp_status.reserved_0 ||
	    rsp->fcp_u.fcp_status.reserved_1) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_5, 0,
		    "Got fcp response packet with non-zero reserved fields "
		    "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
		    "status.reserved_0:0x%x, status.reserved_1:0x%x",
		    rsp->reserved_0, rsp->reserved_1,
		    rsp->fcp_u.fcp_status.reserved_0,
		    rsp->fcp_u.fcp_status.reserved_1);
	}

	/* The response payload must fit in the IU after the header. */
	if (rsp->fcp_u.fcp_status.rsp_len_set) {
		if (rsp->fcp_response_len >
		    (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp))) {
			return (FC_FAILURE);
		}
	}

	/* Sense data must fit in whatever space the response left over. */
	if (rsp->fcp_u.fcp_status.sense_len_set) {
		if (rsp->fcp_sense_len >
		    (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
		    sizeof (struct fcp_rsp))) {
			return (FC_FAILURE);
		}
	}

	return (FC_SUCCESS);
}
/*
 * fcp_device_changed
 *	Reconcile a target with a new port map entry.  If the D_ID (or,
 *	on an external topology, the hard address) changed, the target is
 *	first taken offline before the map flags are processed.
 *
 *	Returns the result of fcp_handle_mapflags().
 *	The caller must hold pptr->port_mutex.
 */
static int
fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
{
	int	d_id_changed;
	int	hard_addr_changed;

	ASSERT(mutex_owned(&pptr->port_mutex));

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "Starting fcp_device_changed...");

	d_id_changed = (ptgt->tgt_d_id != map_entry->map_did.port_id);
	hard_addr_changed = (FC_TOP_EXTERNAL(pptr->port_topology) &&
	    (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr));

	if (d_id_changed || hard_addr_changed) {
		/* The remote port moved; offline the stale instance. */
		mutex_enter(&ptgt->tgt_mutex);
		if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
			(void) fcp_offline_target(pptr, ptgt, link_cnt,
			    0, 1, NDI_DEVI_REMOVE);
		}
		mutex_exit(&ptgt->tgt_mutex);

		fcp_log(CE_NOTE, pptr->port_dip,
		    "Change in target properties: Old D_ID=%x New D_ID=%x"
		    " Old HA=%x New HA=%x", ptgt->tgt_d_id,
		    map_entry->map_did.port_id, ptgt->tgt_hard_addr,
		    map_entry->map_hard_addr.hard_addr);
	}

	return (fcp_handle_mapflags(pptr, ptgt, map_entry,
	    link_cnt, tgt_cnt, cause));
}
/*
 * fcp_alloc_lun
 *	Allocate a zeroed fcp_lun, initialize its mutex, and push it onto
 *	the head of the target's LUN list under tgt_mutex.
 *
 *	Returns the new LUN, or NULL when KM_NOSLEEP allocation fails.
 */
static struct fcp_lun *
fcp_alloc_lun(struct fcp_tgt *ptgt)
{
	struct fcp_lun	*new_lun;

	new_lun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
	if (new_lun == NULL) {
		return (NULL);
	}

	mutex_init(&new_lun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
	new_lun->lun_tgt = ptgt;

	mutex_enter(&ptgt->tgt_mutex);
	new_lun->lun_next = ptgt->tgt_lun;
	ptgt->tgt_lun = new_lun;
	new_lun->lun_old_guid = NULL;
	new_lun->lun_old_guid_size = 0;
	mutex_exit(&ptgt->tgt_mutex);

	return (new_lun);
}
static void
fcp_dealloc_lun(struct fcp_lun *plun)
{
mutex_enter(&plun->lun_mutex);
if (plun->lun_cip) {
fcp_remove_child(plun);
}
mutex_exit(&plun->lun_mutex);
mutex_destroy(&plun->lun_mutex);
if (plun->lun_guid) {
kmem_free(plun->lun_guid, plun->lun_guid_size);
}
if (plun->lun_old_guid) {
kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
}
kmem_free(plun, sizeof (*plun));
}
/*
 * fcp_alloc_tgt
 *	Allocate a zeroed fcp_tgt for a port map entry and insert it into
 *	the port's WWN hash table under port_mutex.  If the link count
 *	changed while allocating (link bounced), the allocation is undone.
 *
 *	Returns the new target, or NULL on allocation failure or a stale
 *	link count.
 */
static struct fcp_tgt *
fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
{
	struct fcp_tgt	*new_tgt;
	uchar_t		*pwwn;
	int		bucket;

	new_tgt = kmem_zalloc(sizeof (*new_tgt), KM_NOSLEEP);
	if (new_tgt == NULL) {
		return (NULL);
	}

	mutex_enter(&pptr->port_mutex);
	if (link_cnt != pptr->port_link_cnt) {
		/* The link bounced while we were allocating; give up. */
		mutex_exit(&pptr->port_mutex);
		kmem_free(new_tgt, sizeof (*new_tgt));
		return (NULL);
	}

	mutex_init(&new_tgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);

	/* Hash by port WWN and link into the head of the bucket. */
	pwwn = (uchar_t *)&map_entry->map_pwwn;
	bucket = FCP_HASH(pwwn);
	new_tgt->tgt_next = pptr->port_tgt_hash_table[bucket];
	pptr->port_tgt_hash_table[bucket] = new_tgt;

	new_tgt->tgt_port = pptr;
	new_tgt->tgt_change_cnt = 1;
	new_tgt->tgt_manual_config_only =
	    fcp_enable_auto_configuration ? 0 : 1;
	mutex_exit(&pptr->port_mutex);

	return (new_tgt);
}
/*
 * fcp_dealloc_tgt
 *	Release a target structure allocated by fcp_alloc_tgt().  The
 *	caller must already have unlinked it from the port's hash table.
 */
static void
fcp_dealloc_tgt(struct fcp_tgt *ptgt)
{
mutex_destroy(&ptgt->tgt_mutex);
kmem_free(ptgt, sizeof (*ptgt));
}
/*
 * fcp_queue_ipkt
 *	Queue an internal packet on the port's retry list.  If the port
 *	or target state changed since the packet was issued, the packet
 *	is dropped and freed instead of being queued.
 */
static void
fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);

	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
		/* State moved underneath us; discard the stale command. */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_queue_ipkt,1:state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	mutex_exit(&ptgt->tgt_mutex);

	/* Each retry pushes the restart time one more tick out. */
	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;

	/* Insert at the head of the doubly linked retry list. */
	icmd->ipkt_prev = NULL;
	icmd->ipkt_next = pptr->port_ipkt_list;
	if (icmd->ipkt_next != NULL) {
		icmd->ipkt_next->ipkt_prev = icmd;
	}
	pptr->port_ipkt_list = icmd;

	mutex_exit(&pptr->port_mutex);
}
/*
 * fcp_transport
 *	Hand a packet to the FC transport.  Transient failures (busy,
 *	offline, login required, ...) are absorbed: internal commands go
 *	through the ipkt error handler, external commands are requeued
 *	unless the caller set FLAG_NOQUEUE.
 *
 *	Returns FC_SUCCESS when the packet was sent or queued, otherwise
 *	the transport error code.
 */
static int
fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
{
	int	rval;

	rval = fc_ulp_transport(port_handle, fpkt);
	if (rval == FC_SUCCESS) {
		return (rval);
	}

	/* Only transient conditions are worth retrying. */
	switch (rval) {
	case FC_STATEC_BUSY:
	case FC_OFFLINE:
	case FC_LOGINREQ:
	case FC_DEVICE_BUSY:
	case FC_DEVICE_BUSY_NEW_RSCN:
	case FC_TRAN_BUSY:
		break;
	default:
		return (rval);
	}

	if (internal) {
		struct fcp_ipkt	*icmd;
		char		*op;

		icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
		switch (icmd->ipkt_opcode) {
		case SCMD_REPORT_LUN:
			op = "REPORT LUN";
			break;
		case SCMD_INQUIRY:
			op = "INQUIRY";
			break;
		case SCMD_INQUIRY_PAGE83:
			op = "INQUIRY-83";
			break;
		default:
			op = "Internal SCSI COMMAND";
			break;
		}

		if (fcp_handle_ipkt_errors(icmd->ipkt_port,
		    icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
			rval = FC_SUCCESS;
		}
	} else {
		struct fcp_pkt	*cmd;
		struct fcp_port	*pptr;

		cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
		cmd->cmd_state = FCP_PKT_IDLE;
		pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);

		if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
			/* Caller refused queueing; report busy upward. */
			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_9, 0,
			    "fcp_transport: xport busy for pkt %p",
			    cmd->cmd_pkt);
			rval = FC_TRAN_BUSY;
		} else {
			fcp_queue_pkt(pptr, cmd);
			rval = FC_SUCCESS;
		}
	}

	return (rval);
}
/*
 * fcp_log
 *	Driver-wide logging helper: format the message into a local
 *	buffer and hand it to scsi_log() under the "fcp" label.
 *
 *	level	CE_NOTE/CE_WARN/... severity, passed to scsi_log().
 *	dip	Device to log against; NULL means the global fcp devinfo.
 *	fmt	printf-style format string followed by its arguments.
 *
 *	Fix: the previous vsprintf() wrote into the fixed 256-byte stack
 *	buffer with no bound, so a long formatted message (e.g. one
 *	containing a device path) could overrun the kernel stack.  Use
 *	vsnprintf() so oversized messages are truncated instead.
 */
static void
fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
{
	char	buf[256];
	va_list	ap;

	if (dip == NULL) {
		dip = fcp_global_dip;
	}

	va_start(ap, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, ap);
	va_end(ap);

	/* buf is passed as scsi_log()'s format, as before. */
	scsi_log(dip, "fcp", level, buf);
}
/*
 * fcp_retry_ns_registry
 *	Retry a previously failed name-server registration.  Nothing is
 *	done unless the FCP_STATE_NS_REG_FAILED flag is set and the
 *	topology actually has a name server (fabric or public loop);
 *	otherwise the stale flag is simply cleared.
 *
 *	The caller holds port_mutex; it is dropped across the name-server
 *	round trip and reacquired.
 */
static void
fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
{
	int	rval;
	int	has_ns;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	has_ns = (pptr->port_topology == FC_TOP_FABRIC) ||
	    (pptr->port_topology == FC_TOP_PUBLIC_LOOP);

	if (!(pptr->port_state & FCP_STATE_NS_REG_FAILED) || !has_ns) {
		/* Nothing to retry; drop any stale failure flag. */
		pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
		return;
	}

	/* port_mutex cannot be held across the fabric round trip. */
	mutex_exit(&pptr->port_mutex);
	rval = fcp_do_ns_registry(pptr, s_id);
	mutex_enter(&pptr->port_mutex);

	if (rval == 0) {
		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
	}
}
/*
 * fcp_do_ns_registry
 *	Register this port's FC-4 type (SCSI-FCP) with the fabric name
 *	server via an RFT_ID request.
 *
 *	Returns 0 on success, 1 on failure (already logged).
 */
static int
fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
{
	fc_ns_cmd_t	ns_cmd;
	ns_rfc_type_t	rfc;
	uint32_t	types[8];

	/* Build the FC-4 type bitmap advertising SCSI-FCP support. */
	bzero(types, sizeof (types));
	types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
	    (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));

	bzero(&rfc, sizeof (rfc));
	rfc.rfc_port_id.port_id = s_id;
	bcopy(types, rfc.rfc_types, sizeof (types));

	/* RFT_ID takes the request payload; no response expected. */
	ns_cmd.ns_flags = 0;
	ns_cmd.ns_cmd = NS_RFT_ID;
	ns_cmd.ns_req_len = sizeof (rfc);
	ns_cmd.ns_req_payload = (caddr_t)&rfc;
	ns_cmd.ns_resp_len = 0;
	ns_cmd.ns_resp_payload = NULL;

	if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd) != 0) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!ns_registry: failed name server registration");
		return (1);
	}

	return (0);
}
/*
 * fcp_handle_port_attach
 *	Bring up one FC port instance: allocate the per-instance soft
 *	state, build the SCSI HBA transport vector, set up the NDI event
 *	set and MPxIO pHCI registration, link the port onto the global
 *	list (starting the watchdog if this is the first port), register
 *	our FC-4 type with the fabric name server, and kick off initial
 *	device discovery based on the port's physical state.
 *
 *	ulph		FC transport's ULP handle (forwarded to the state
 *			change callback for initial discovery).
 *	pinfo		port description supplied by the FC transport.
 *	s_id		this port's source ID on the fabric.
 *	instance	soft-state instance number for this port.
 *
 *	Returns DDI_SUCCESS or DDI_FAILURE; on failure everything set up
 *	so far is unwound under the "fail" label, in reverse order.
 */
int
fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
uint32_t s_id, int instance)
{
int res = DDI_FAILURE;
scsi_hba_tran_t *tran;
int mutex_initted = FALSE;
int hba_attached = FALSE;
int soft_state_linked = FALSE;
int event_bind = FALSE;
struct fcp_port *pptr;
fc_portmap_t *tmp_list = NULL;
uint32_t max_cnt, alloc_cnt;
uchar_t *boot_wwn = NULL;
uint_t nbytes;
int manual_cfg;
FCP_TRACE(fcp_logq, "fcp", fcp_trace,
FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);
/* Allocate and fetch the zeroed per-instance soft state. */
if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
cmn_err(CE_WARN, "fcp: Softstate struct alloc failed"
"parent dip: %p; instance: %d", (void *)pinfo->port_dip,
instance);
return (res);
}
if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
ddi_soft_state_free(fcp_softstate, instance);
cmn_err(CE_WARN, "fcp: bad soft state");
return (res);
}
(void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);
(void) fcp_cp_pinfo(pptr, pinfo);
/*
 * A driver.conf MANUAL_CFG_ONLY property set to 1 disables automatic
 * device configuration globally (fcp_enable_auto_configuration).
 */
if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
MANUAL_CFG_ONLY,
-1)) != -1) {
if (manual_cfg == 1) {
char *pathname;
pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
(void) ddi_pathname(pptr->port_dip, pathname);
cmn_err(CE_NOTE,
"%s (%s%d) %s is enabled via %s.conf.",
pathname,
ddi_driver_name(pptr->port_dip),
ddi_get_instance(pptr->port_dip),
MANUAL_CFG_ONLY,
ddi_driver_name(pptr->port_dip));
fcp_enable_auto_configuration = 0;
kmem_free(pathname, MAXPATHLEN);
}
}
_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
pptr->port_link_cnt = 1;
_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
pptr->port_id = s_id;
pptr->port_instance = instance;
_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state));
pptr->port_state = FCP_STATE_INIT;
/* A NULL access attribute marks an FCA that does no DMA at all. */
if (pinfo->port_acc_attr == NULL) {
pptr->port_state |= FCP_STATE_FCA_IS_NODMA;
}
_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state));
/* Size of the per-packet DMA cookie array appended to each fcp_pkt. */
if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
pptr->port_dmacookie_sz = sizeof (ddi_dma_cookie_t) *
pptr->port_data_dma_attr.dma_attr_sgllen;
}
mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
mutex_initted++;
/* Build the SCSA transport vector for this HBA instance. */
if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
fcp_log(CE_WARN, pptr->port_dip,
"!fcp%d: scsi_hba_tran_alloc failed", instance);
goto fail;
}
pptr->port_tran = tran;
tran->tran_hba_private = pptr;
tran->tran_tgt_init = fcp_scsi_tgt_init;
tran->tran_tgt_probe = NULL;
tran->tran_tgt_free = fcp_scsi_tgt_free;
tran->tran_start = fcp_scsi_start;
tran->tran_reset = fcp_scsi_reset;
tran->tran_abort = fcp_scsi_abort;
tran->tran_getcap = fcp_scsi_getcap;
tran->tran_setcap = fcp_scsi_setcap;
tran->tran_init_pkt = NULL;
tran->tran_destroy_pkt = NULL;
tran->tran_dmafree = NULL;
tran->tran_sync_pkt = NULL;
tran->tran_reset_notify = fcp_scsi_reset_notify;
tran->tran_get_bus_addr = fcp_scsi_get_bus_addr;
tran->tran_get_name = fcp_scsi_get_name;
tran->tran_clear_aca = NULL;
tran->tran_clear_task_set = NULL;
tran->tran_terminate_task = NULL;
tran->tran_get_eventcookie = fcp_scsi_bus_get_eventcookie;
tran->tran_add_eventcall = fcp_scsi_bus_add_eventcall;
tran->tran_remove_eventcall = fcp_scsi_bus_remove_eventcall;
tran->tran_post_event = fcp_scsi_bus_post_event;
tran->tran_quiesce = NULL;
tran->tran_unquiesce = NULL;
tran->tran_bus_reset = NULL;
tran->tran_bus_config = fcp_scsi_bus_config;
tran->tran_bus_unconfig = fcp_scsi_bus_unconfig;
tran->tran_bus_power = NULL;
tran->tran_interconnect_type = INTERCONNECT_FABRIC;
tran->tran_pkt_constructor = fcp_kmem_cache_constructor;
tran->tran_pkt_destructor = fcp_kmem_cache_destructor;
tran->tran_setup_pkt = fcp_pkt_setup;
tran->tran_teardown_pkt = fcp_pkt_teardown;
/* Per-packet HBA area: fcp_pkt + cookie array + FCA private space. */
tran->tran_hba_len = pptr->port_priv_pkt_len +
sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
/* No-DMA FCAs use the pseudo entry points and DMA attributes. */
if (pptr->port_state & FCP_STATE_FCA_IS_NODMA) {
tran->tran_start = fcp_pseudo_start;
tran->tran_init_pkt = fcp_pseudo_init_pkt;
tran->tran_destroy_pkt = fcp_pseudo_destroy_pkt;
tran->tran_sync_pkt = fcp_pseudo_sync_pkt;
tran->tran_dmafree = fcp_pseudo_dmafree;
tran->tran_setup_pkt = NULL;
tran->tran_teardown_pkt = NULL;
tran->tran_pkt_constructor = NULL;
tran->tran_pkt_destructor = NULL;
pptr->port_data_dma_attr = pseudo_fca_dma_attr;
}
/*
 * Clone the driver-wide NDI event definitions for this port and
 * bind them to an event handle so child events can be posted.
 */
pptr->port_ndi_event_defs = (ndi_event_definition_t *)
kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);
bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
sizeof (fcp_ndi_event_defs));
(void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
&pptr->port_ndi_event_hdl, NDI_SLEEP);
pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;
if (DEVI_IS_ATTACHING(pptr->port_dip) &&
(ndi_event_bind_set(pptr->port_ndi_event_hdl,
&pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
goto fail;
}
event_bind++;
if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
!= DDI_SUCCESS) {
fcp_log(CE_WARN, pptr->port_dip,
"!fcp%d: scsi_hba_attach_setup failed", instance);
goto fail;
}
hba_attached++;
/* Register as an MPxIO pHCI; port_mpxio records success. */
pptr->port_mpxio = 0;
if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
MDI_SUCCESS) {
pptr->port_mpxio++;
}
/*
 * Link the port onto the global list; the first port also loads
 * the LUN blacklist and starts the watchdog timer.
 */
mutex_enter(&fcp_global_mutex);
if (fcp_port_head == NULL) {
fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
}
pptr->port_next = fcp_port_head;
fcp_port_head = pptr;
soft_state_linked++;
if (fcp_watchdog_init++ == 0) {
fcp_watchdog_tick = fcp_watchdog_timeout *
drv_usectohz(1000000);
fcp_watchdog_id = timeout(fcp_watch, NULL,
fcp_watchdog_tick);
}
mutex_exit(&fcp_global_mutex);
/* Name-server registration failure is remembered and retried later. */
if (fcp_do_ns_registry(pptr, s_id)) {
mutex_enter(&pptr->port_mutex);
pptr->port_state |= FCP_STATE_NS_REG_FAILED;
mutex_exit(&pptr->port_mutex);
} else {
mutex_enter(&pptr->port_mutex);
pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
mutex_exit(&pptr->port_mutex);
}
/* Before root is mounted, capture the OBP boot WWN if present. */
if (modrootloaded != 1) {
if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
ddi_get_parent(pinfo->port_dip),
DDI_PROP_DONTPASS, OBP_BOOT_WWN,
&boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
(nbytes == FC_WWN_SIZE)) {
bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
}
if (boot_wwn) {
ddi_prop_free(boot_wwn);
}
}
/* Initial discovery depends on the port's physical link state. */
switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
case FC_STATE_OFFLINE:
res = DDI_SUCCESS;
pptr->port_state |= FCP_STATE_OFFLINE;
break;
case FC_STATE_ONLINE: {
/* Unknown topology: force a link reset to discover it. */
if (pptr->port_topology == FC_TOP_UNKNOWN) {
(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
res = DDI_SUCCESS;
break;
}
ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
KM_NOSLEEP)) == NULL) {
fcp_log(CE_WARN, pptr->port_dip,
"!fcp%d: failed to allocate portmap",
instance);
goto fail;
}
max_cnt = FCP_MAX_DEVICES;
alloc_cnt = FCP_MAX_DEVICES;
if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
&tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
FC_SUCCESS) {
caddr_t msg;
(void) fc_ulp_error(res, &msg);
fcp_log(CE_WARN, pptr->port_dip,
"!failed to get port map : %s", msg);
res = DDI_SUCCESS;
break;
}
/* getportmap may grow tmp_list; track the real size to free. */
if (max_cnt > alloc_cnt) {
alloc_cnt = max_cnt;
}
/* Feed the map through the normal state-change path. */
fcp_statec_callback(ulph, pptr->port_fp_handle,
pptr->port_phys_state, pptr->port_topology, tmp_list,
max_cnt, pptr->port_id);
res = DDI_SUCCESS;
break;
}
default:
fcp_log(CE_WARN, pptr->port_dip,
"!fcp%d: invalid port state at attach=0x%x",
instance, pptr->port_phys_state);
mutex_enter(&pptr->port_mutex);
/*
 * NOTE(review): FCP_STATE_OFFLINE is an fcp soft-state flag but
 * is stored into port_phys_state, which otherwise holds
 * FC_STATE_* values — looks like it should be FC_STATE_OFFLINE
 * or target port_state instead; confirm before changing.
 */
pptr->port_phys_state = FCP_STATE_OFFLINE;
mutex_exit(&pptr->port_mutex);
res = DDI_SUCCESS;
break;
}
if (tmp_list != NULL) {
kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
}
pptr->port_attach_time = ddi_get_lbolt64();
return (res);
/* Failure: unwind in reverse order of the setup above. */
fail:
fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");
if (soft_state_linked) {
(void) fcp_soft_state_unlink(pptr);
}
if (pptr->port_ndi_event_hdl) {
if (event_bind) {
(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
&pptr->port_ndi_events, NDI_SLEEP);
}
(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
}
if (pptr->port_ndi_event_defs) {
(void) kmem_free(pptr->port_ndi_event_defs,
sizeof (fcp_ndi_event_defs));
}
if (pptr->port_mpxio) {
(void) mdi_phci_unregister(pptr->port_dip, 0);
pptr->port_mpxio--;
}
if (hba_attached) {
(void) scsi_hba_detach(pptr->port_dip);
}
if (pptr->port_tran != NULL) {
scsi_hba_tran_free(pptr->port_tran);
}
/* Drop the watchdog reference taken when the port was linked. */
mutex_enter(&fcp_global_mutex);
if (soft_state_linked) {
if (--fcp_watchdog_init == 0) {
timeout_id_t tid = fcp_watchdog_id;
mutex_exit(&fcp_global_mutex);
(void) untimeout(tid);
} else {
mutex_exit(&fcp_global_mutex);
}
} else {
mutex_exit(&fcp_global_mutex);
}
if (mutex_initted) {
mutex_destroy(&pptr->port_mutex);
mutex_destroy(&pptr->port_pkt_mutex);
}
if (tmp_list != NULL) {
kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
}
ddi_soft_state_free(fcp_softstate, instance);
return (DDI_FAILURE);
}
/*
 * fcp_handle_port_detach
 *	Quiesce a port for detach, suspend, or power-down.
 *
 *	flag is FCP_STATE_DETACHING, FCP_STATE_SUSPENDED, or
 *	FCP_STATE_POWER_DOWN.  For suspend/power-down of a port that is
 *	already suspended or powered down, only the flag is added.  The
 *	function then waits (up to FCP_ICMD_DEADLINE seconds) for internal
 *	packets, temp-state counts and the watchdog to drain; if they do
 *	not, the flag is rolled back and FC_FAILURE returned.  A real
 *	detach additionally unlinks the soft state, marks everything
 *	offline, drops the watchdog reference, and frees the port.
 *
 *	Returns FC_SUCCESS or FC_FAILURE.
 */
static int
fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
{
int count = 0;
mutex_enter(&pptr->port_mutex);
if (flag != FCP_STATE_DETACHING) {
/* Already quiesced by an earlier suspend/power-down: just flag. */
if (pptr->port_state & (FCP_STATE_POWER_DOWN |
FCP_STATE_SUSPENDED)) {
pptr->port_state |= flag;
mutex_exit(&pptr->port_mutex);
return (FC_SUCCESS);
}
}
/* Cannot tear down while MDI is operating on this port. */
if (pptr->port_state & FCP_STATE_IN_MDI) {
mutex_exit(&pptr->port_mutex);
return (FC_FAILURE);
}
FCP_TRACE(fcp_logq, pptr->port_instbuf,
fcp_trace, FCP_BUF_LEVEL_2, 0,
"fcp_handle_port_detach: port is detaching");
pptr->port_state |= flag;
/*
 * Wait, one second at a time, for internal packets and the
 * watchdog to drain, dropping the lock while sleeping.
 */
while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
(pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
if (count++ >= FCP_ICMD_DEADLINE) {
break;
}
mutex_exit(&pptr->port_mutex);
delay(drv_usectohz(1000000));
mutex_enter(&pptr->port_mutex);
}
/* Still busy after the deadline: roll the flag back and fail. */
if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
(pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
pptr->port_state &= ~flag;
mutex_exit(&pptr->port_mutex);
return (FC_FAILURE);
}
if (flag == FCP_STATE_DETACHING) {
pptr = fcp_soft_state_unlink(pptr);
ASSERT(pptr != NULL);
}
/* Invalidate outstanding work and mark everything offline. */
pptr->port_link_cnt++;
pptr->port_state |= FCP_STATE_OFFLINE;
pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
FCP_CAUSE_LINK_DOWN);
mutex_exit(&pptr->port_mutex);
/* Last port out stops the watchdog (untimeout without the lock). */
mutex_enter(&fcp_global_mutex);
if (--fcp_watchdog_init == 0) {
timeout_id_t tid = fcp_watchdog_id;
mutex_exit(&fcp_global_mutex);
(void) untimeout(tid);
} else {
mutex_exit(&fcp_global_mutex);
}
if (flag == FCP_STATE_DETACHING) {
fcp_cleanup_port(pptr, instance);
}
return (FC_SUCCESS);
}
/*
 * fcp_cleanup_port
 *	Final teardown of a detaching port: release the NDI event set and
 *	handle, free all targets/LUNs, unregister the MPxIO pHCI, detach
 *	the SCSA HBA, destroy the mutexes and free the soft state.
 *	Must only run after fcp_handle_port_detach() has quiesced the
 *	port and removed it from the global list.
 */
static void
fcp_cleanup_port(struct fcp_port *pptr, int instance)
{
ASSERT(pptr != NULL);
if (pptr->port_ndi_event_hdl) {
(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
&pptr->port_ndi_events, NDI_SLEEP);
(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
}
if (pptr->port_ndi_event_defs) {
(void) kmem_free(pptr->port_ndi_event_defs,
sizeof (fcp_ndi_event_defs));
}
/* Free all target/LUN structures before detaching the HBA. */
fcp_free_targets(pptr);
if (pptr->port_mpxio) {
(void) mdi_phci_unregister(pptr->port_dip, 0);
pptr->port_mpxio--;
}
(void) scsi_hba_detach(pptr->port_dip);
if (pptr->port_tran != NULL) {
scsi_hba_tran_free(pptr->port_tran);
}
#ifdef KSTATS_CODE
if (pptr->fcp_ksp != NULL) {
kstat_delete(pptr->fcp_ksp);
}
#endif
mutex_destroy(&pptr->port_mutex);
mutex_destroy(&pptr->port_pkt_mutex);
/* The soft state is gone; pptr is invalid after this. */
ddi_soft_state_free(fcp_softstate, instance);
}
/*
 * fcp_kmem_cache_constructor
 *	SCSA packet-cache constructor.  The HBA-private area of each
 *	scsi_pkt is laid out as:
 *
 *		[ struct fcp_pkt | data DMA cookie array | FCA private ]
 *
 *	(tran_hba_len was sized accordingly at attach).  This wires up
 *	the cross links between scsi_pkt, fcp_pkt and the embedded
 *	fc_packet, and allocates cmd/resp DMA resources unless the port
 *	has no DVMA space, in which case the buffers embedded in fcp_pkt
 *	are used directly.
 *
 *	Returns 0 on success, -1 if DMA resource allocation fails.
 */
static int
fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
int kmflags)
{
struct fcp_pkt *cmd;
struct fcp_port *pptr;
fc_packet_t *fpkt;
pptr = (struct fcp_port *)tran->tran_hba_private;
cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
bzero(cmd, tran->tran_hba_len);
cmd->cmd_pkt = pkt;
/* The CDB lives inside the embedded FCP command IU. */
pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
cmd->cmd_fp_pkt = fpkt;
cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
/* FCA private area follows fcp_pkt and the cookie array. */
cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);
/* Data cookie array sits immediately after the fcp_pkt. */
fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
sizeof (struct fcp_pkt));
fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
/* No DVMA: point at the buffers embedded in fcp_pkt. */
fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
fpkt->pkt_resp = cmd->cmd_fcp_rsp;
} else {
if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
return (-1);
}
}
return (0);
}
/*
 * fcp_kmem_cache_destructor
 *	SCSA packet-cache destructor: release the cmd/resp DMA resources
 *	that the constructor allocated for ports with real DVMA space.
 */
static void
fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
{
	struct fcp_port	*pptr = (struct fcp_port *)(tran->tran_hba_private);
	struct fcp_pkt	*cmd = pkt->pkt_ha_private;

	/* No-DVMA ports used embedded buffers; nothing was allocated. */
	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
		fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
	}
}
/*
 * fcp_alloc_cmd_resp
 *	Allocate and bind the DMA resources (handle, memory and cookie
 *	array) for both the FCP command and FCP response payloads of
 *	fpkt, filling in the pkt_cmd and pkt_resp families of fields.
 *
 *	pptr	port supplying the DMA and access attributes.
 *	flags	KM_SLEEP or KM_NOSLEEP; selects the DDI callback policy.
 *
 *	Returns FC_SUCCESS, or FC_FAILURE with everything unwound.
 *
 *	Rewritten around a single goto unwind ladder.  This fixes three
 *	leaks in the previous version: the command cookie array was not
 *	freed when allocation of the response DMA handle failed, and the
 *	response DMA handle was freed without first being unbound on both
 *	the too-many-cookies and the cookie-array-allocation-failure
 *	paths.
 */
static int
fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
{
	int			rval;
	int			cmd_len;
	int			resp_len;
	ulong_t			real_len;
	int			(*cb) (caddr_t);
	ddi_dma_cookie_t	pkt_cookie;
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;

	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	cmd_len = fpkt->pkt_cmdlen;
	resp_len = fpkt->pkt_rsplen;

	/* Command buffer: handle, memory, binding, cookie array. */
	ASSERT(fpkt->pkt_cmd_dma == NULL);
	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
	    cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
		return (FC_FAILURE);
	}

	rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
	    (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
	if (rval != DDI_SUCCESS) {
		goto fail_cmd_handle;
	}
	if (real_len < cmd_len) {
		goto fail_cmd_mem;
	}

	rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
	    fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
	    cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
	if (rval != DDI_DMA_MAPPED) {
		goto fail_cmd_mem;
	}
	if (fpkt->pkt_cmd_cookie_cnt >
	    pptr->port_cmd_dma_attr.dma_attr_sgllen) {
		goto fail_cmd_bind;
	}
	ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);

	cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
	    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie), KM_NOSLEEP);
	if (cp == NULL) {
		goto fail_cmd_bind;
	}

	/* The bind returned the first cookie; fetch the rest. */
	*cp = pkt_cookie;
	cp++;
	for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
		ddi_dma_nextcookie(fpkt->pkt_cmd_dma, &pkt_cookie);
		*cp = pkt_cookie;
	}

	/* Response buffer: same sequence as the command buffer. */
	ASSERT(fpkt->pkt_resp_dma == NULL);
	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
	    cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
		goto fail_cmd_cookie;
	}

	rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
	    (caddr_t *)&fpkt->pkt_resp, &real_len, &fpkt->pkt_resp_acc);
	if (rval != DDI_SUCCESS) {
		goto fail_resp_handle;
	}
	if (real_len < resp_len) {
		goto fail_resp_mem;
	}

	rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
	    fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
	    cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
	if (rval != DDI_DMA_MAPPED) {
		goto fail_resp_mem;
	}
	if (fpkt->pkt_resp_cookie_cnt >
	    pptr->port_resp_dma_attr.dma_attr_sgllen) {
		goto fail_resp_bind;
	}
	ASSERT(fpkt->pkt_resp_cookie_cnt != 0);

	cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
	    fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie), KM_NOSLEEP);
	if (cp == NULL) {
		goto fail_resp_bind;
	}

	*cp = pkt_cookie;
	cp++;
	for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
		ddi_dma_nextcookie(fpkt->pkt_resp_dma, &pkt_cookie);
		*cp = pkt_cookie;
	}

	return (FC_SUCCESS);

	/*
	 * Unwind ladder: each label releases one resource and falls
	 * through to the labels below it.
	 */
fail_resp_bind:
	(void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
fail_resp_mem:
	ddi_dma_mem_free(&fpkt->pkt_resp_acc);
fail_resp_handle:
	ddi_dma_free_handle(&fpkt->pkt_resp_dma);
fail_cmd_cookie:
	kmem_free(fpkt->pkt_cmd_cookie,
	    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
	fpkt->pkt_cmd_cookie = NULL;
fail_cmd_bind:
	(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
fail_cmd_mem:
	ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
fail_cmd_handle:
	ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
	return (FC_FAILURE);
}
/*
 * fcp_free_cmd_resp
 *	Release the DMA resources set up by fcp_alloc_cmd_resp():
 *	unbind and free the response and command DMA memory and handles,
 *	and free both cookie arrays (pointers are NULLed after freeing).
 *
 *	NOTE(review): the ASSERT insists both handles are non-NULL, yet
 *	every release below is individually guarded by an if — the guards
 *	suggest partially allocated packets may reach here in non-DEBUG
 *	builds; confirm which contract is intended.
 */
static void
fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
{
ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
if (fpkt->pkt_resp_dma) {
(void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
ddi_dma_mem_free(&fpkt->pkt_resp_acc);
ddi_dma_free_handle(&fpkt->pkt_resp_dma);
}
if (fpkt->pkt_resp_cookie) {
kmem_free(fpkt->pkt_resp_cookie,
fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
fpkt->pkt_resp_cookie = NULL;
}
if (fpkt->pkt_cmd_dma) {
(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
}
if (fpkt->pkt_cmd_cookie) {
kmem_free(fpkt->pkt_cmd_cookie,
fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
fpkt->pkt_cmd_cookie = NULL;
}
}
/*
 * fcp_phys_tgt_init
 *	tran_tgt_init(9E) path for a physical (non-MPxIO) child: read the
 *	port-WWN and LUN-number properties from the device node, look up
 *	the matching fcp_lun, and bind the scsi_device to it.
 *
 *	Returns DDI_SUCCESS, DDI_NOT_WELL_FORMED when the properties are
 *	missing/malformed, or DDI_FAILURE when no matching LUN exists.
 */
static int
fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
uchar_t *bytes;
uint_t nbytes;
uint16_t lun_num;
struct fcp_tgt *ptgt;
struct fcp_lun *plun;
struct fcp_port *pptr = (struct fcp_port *)
hba_tran->tran_hba_private;
ASSERT(pptr != NULL);
FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
FCP_BUF_LEVEL_8, 0,
"fcp_phys_tgt_init: called for %s (instance %d)",
ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
/* The node must carry a WWN-sized port-wwn property. */
bytes = NULL;
if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
(nbytes != FC_WWN_SIZE)) {
FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
FCP_BUF_LEVEL_8, 0,
"fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
" for %s (instance %d): bytes=%p nbytes=%x",
ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
nbytes);
if (bytes != NULL) {
scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
}
return (DDI_NOT_WELL_FORMED);
}
ASSERT(bytes != NULL);
/* 0xFFFF doubles as the "property missing" sentinel. */
lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
LUN_PROP, 0xFFFF);
if (lun_num == 0xFFFF) {
FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
FCP_BUF_LEVEL_8, 0,
"fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
" for %s (instance %d)", ddi_get_name(tgt_dip),
ddi_get_instance(tgt_dip));
scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
return (DDI_NOT_WELL_FORMED);
}
/* Find the LUN by WWN + number under port_mutex. */
mutex_enter(&pptr->port_mutex);
if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
mutex_exit(&pptr->port_mutex);
FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
FCP_BUF_LEVEL_8, 0,
"fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
" for %s (instance %d)", ddi_get_name(tgt_dip),
ddi_get_instance(tgt_dip));
scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
return (DDI_FAILURE);
}
ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
FC_WWN_SIZE) == 0);
ASSERT(plun->lun_num == lun_num);
scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
/* Bind the scsi_device to the LUN under the target mutex. */
ptgt = plun->lun_tgt;
mutex_enter(&ptgt->tgt_mutex);
plun->lun_tgt_count++;
scsi_device_hba_private_set(sd, plun);
plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
plun->lun_sd = sd;
mutex_exit(&ptgt->tgt_mutex);
mutex_exit(&pptr->port_mutex);
return (DDI_SUCCESS);
}
/*
 * fcp_virt_tgt_init
 *	tran_tgt_init(9E) path for an MPxIO client: same as
 *	fcp_phys_tgt_init() (WWN + LUN property lookup, bind the
 *	scsi_device to the fcp_lun), but additionally requires that the
 *	device already has MDI path information attached.
 *
 *	Returns DDI_SUCCESS, DDI_NOT_WELL_FORMED for missing pathinfo or
 *	malformed properties, or DDI_FAILURE when no matching LUN exists.
 */
static int
fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
uchar_t *bytes;
uint_t nbytes;
uint16_t lun_num;
struct fcp_tgt *ptgt;
struct fcp_lun *plun;
struct fcp_port *pptr = (struct fcp_port *)
hba_tran->tran_hba_private;
child_info_t *cip;
ASSERT(pptr != NULL);
FCP_DTRACE(fcp_logq, pptr->port_instbuf,
fcp_trace, FCP_BUF_LEVEL_8, 0,
"fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
" (tgt_dip %p)", ddi_get_name(tgt_dip),
ddi_get_instance(tgt_dip), hba_dip, tgt_dip);
/* An MPxIO client must already carry MDI path information. */
cip = (child_info_t *)sd->sd_pathinfo;
if (cip == NULL) {
FCP_DTRACE(fcp_logq, pptr->port_instbuf,
fcp_trace, FCP_BUF_LEVEL_8, 0,
"fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
" for %s (instance %d)", ddi_get_name(tgt_dip),
ddi_get_instance(tgt_dip));
return (DDI_NOT_WELL_FORMED);
}
/* The node must carry a WWN-sized port-wwn property. */
bytes = NULL;
if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
(nbytes != FC_WWN_SIZE)) {
if (bytes) {
scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
}
return (DDI_NOT_WELL_FORMED);
}
ASSERT(bytes != NULL);
/* 0xFFFF doubles as the "property missing" sentinel. */
lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
LUN_PROP, 0xFFFF);
if (lun_num == 0xFFFF) {
FCP_DTRACE(fcp_logq, pptr->port_instbuf,
fcp_trace, FCP_BUF_LEVEL_8, 0,
"fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
" for %s (instance %d)", ddi_get_name(tgt_dip),
ddi_get_instance(tgt_dip));
scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
return (DDI_NOT_WELL_FORMED);
}
/* Find the LUN by WWN + number under port_mutex. */
mutex_enter(&pptr->port_mutex);
if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
mutex_exit(&pptr->port_mutex);
FCP_DTRACE(fcp_logq, pptr->port_instbuf,
fcp_trace, FCP_BUF_LEVEL_8, 0,
"fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
" for %s (instance %d)", ddi_get_name(tgt_dip),
ddi_get_instance(tgt_dip));
scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
return (DDI_FAILURE);
}
ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
FC_WWN_SIZE) == 0);
ASSERT(plun->lun_num == lun_num);
scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
/* Bind the scsi_device to the LUN under the target mutex. */
ptgt = plun->lun_tgt;
mutex_enter(&ptgt->tgt_mutex);
plun->lun_tgt_count++;
scsi_device_hba_private_set(sd, plun);
plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
plun->lun_sd = sd;
mutex_exit(&ptgt->tgt_mutex);
mutex_exit(&pptr->port_mutex);
return (DDI_SUCCESS);
}
/*
 * fcp_scsi_tgt_init
 *	tran_tgt_init(9E) entry point: dispatch to the MPxIO (virtual) or
 *	physical initialization routine depending on whether the child is
 *	an MDI client.
 */
static int
fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct fcp_port	*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;

	ASSERT(pptr != NULL);

	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		return (fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd));
	}
	return (fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd));
}
/*
 * fcp_scsi_tgt_free
 *	tran_tgt_free(9E) entry point: undo fcp_scsi_tgt_init() for one
 *	scsi_device, dropping the LUN's tgt-init reference and clearing
 *	FCP_SCSI_LUN_TGT_INIT when the last reference goes away.
 *
 *	Fix: the trace call dereferenced the LUN (through LUN_PORT)
 *	before the plun == NULL guard, so freeing a device that never
 *	completed tgt_init could dereference a NULL pointer.  The guard
 *	now comes first.
 */
static void
fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct fcp_lun	*plun = scsi_device_hba_private_get(sd);
	struct fcp_tgt	*ptgt;

	if (plun == NULL) {
		return;
	}

	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));

	ptgt = plun->lun_tgt;
	ASSERT(ptgt != NULL);

	mutex_enter(&ptgt->tgt_mutex);
	ASSERT(plun->lun_tgt_count > 0);
	/* Last initialized reference clears the tgt-init state bit. */
	if (--plun->lun_tgt_count == 0) {
		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
	}
	plun->lun_sd = NULL;
	mutex_exit(&ptgt->tgt_mutex);
}
/*
 * fcp_scsi_start
 *	tran_start(9E) entry point: validate port/target/LUN state under
 *	both port_mutex and tgt_mutex, then either transport the packet,
 *	poll it (FLAG_NOINTR), queue it for retry, or fail it.
 *
 *	Returns TRAN_ACCEPT, TRAN_BUSY, or TRAN_FATAL_ERROR.
 */
static int
fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
struct fcp_port *pptr = ADDR2FCP(ap);
struct fcp_lun *plun = ADDR2LUN(ap);
struct fcp_pkt *cmd = PKT2CMD(pkt);
struct fcp_tgt *ptgt = plun->lun_tgt;
int rval;
ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
FCP_DTRACE(fcp_logq, pptr->port_instbuf,
fcp_trace, FCP_BUF_LEVEL_9, 0,
"fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);
mutex_enter(&pptr->port_mutex);
mutex_enter(&ptgt->tgt_mutex);
/* An offline LUN that is not coming back is a fatal error. */
if ((plun->lun_state & FCP_LUN_OFFLINE) &&
!(plun->lun_state & FCP_LUN_ONLINING)) {
mutex_exit(&ptgt->tgt_mutex);
mutex_exit(&pptr->port_mutex);
/* No port device: the remote port is truly gone. */
if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
pkt->pkt_reason = CMD_DEV_GONE;
}
return (TRAN_FATAL_ERROR);
}
cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;
/*
 * Transient conditions (LUN busy, port onlining or in a device
 * callback, no pd handle yet): queue the command unless the caller
 * forbids it with FLAG_NOINTR/FLAG_NOQUEUE.
 */
if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
(pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
(ptgt->tgt_pd_handle == NULL) ||
(cmd->cmd_fp_pkt->pkt_pd == NULL)) {
mutex_exit(&ptgt->tgt_mutex);
mutex_exit(&pptr->port_mutex);
if (pkt->pkt_flags & FLAG_NOINTR) {
pkt->pkt_resid = 0;
return (TRAN_BUSY);
}
if (pkt->pkt_flags & FLAG_NOQUEUE) {
FCP_DTRACE(fcp_logq, pptr->port_instbuf,
fcp_trace, FCP_BUF_LEVEL_9, 0,
"fcp_scsi_start: lun busy for pkt %p", pkt);
return (TRAN_BUSY);
}
#ifdef DEBUG
mutex_enter(&pptr->port_pkt_mutex);
pptr->port_npkts++;
mutex_exit(&pptr->port_pkt_mutex);
#endif
fcp_queue_pkt(pptr, cmd);
return (TRAN_ACCEPT);
}
cmd->cmd_state = FCP_PKT_ISSUED;
mutex_exit(&ptgt->tgt_mutex);
mutex_exit(&pptr->port_mutex);
/* Let any in-progress reconfiguration settle before sending. */
fcp_reconfig_wait(pptr);
/* Watchdog deadline; 0 means no timeout requested. */
cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
pkt->pkt_time : 0;
fcp_prepare_pkt(pptr, cmd, plun);
/* Untimed packets still get a long (5 hour) transport timeout. */
if (cmd->cmd_pkt->pkt_time) {
cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
} else {
cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
}
/* FLAG_NOINTR callers are serviced synchronously by polling. */
if (pkt->pkt_flags & FLAG_NOINTR) {
cmd->cmd_state &= ~FCP_PKT_ISSUED;
return (fcp_dopoll(pptr, cmd));
}
#ifdef DEBUG
mutex_enter(&pptr->port_pkt_mutex);
pptr->port_npkts++;
mutex_exit(&pptr->port_pkt_mutex);
#endif
rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
if (rval == FC_SUCCESS) {
FCP_DTRACE(fcp_logq, pptr->port_instbuf,
fcp_trace, FCP_BUF_LEVEL_9, 0,
"fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
return (TRAN_ACCEPT);
}
/* Transport refused the packet; map the failure for SCSA. */
cmd->cmd_state = FCP_PKT_IDLE;
#ifdef DEBUG
mutex_enter(&pptr->port_pkt_mutex);
pptr->port_npkts--;
mutex_exit(&pptr->port_pkt_mutex);
#endif
if (rval == FC_TRAN_BUSY) {
pkt->pkt_resid = 0;
rval = TRAN_BUSY;
} else {
mutex_enter(&ptgt->tgt_mutex);
if (plun->lun_state & FCP_LUN_OFFLINE) {
child_info_t *cip;
mutex_enter(&plun->lun_mutex);
cip = plun->lun_cip;
mutex_exit(&plun->lun_mutex);
FCP_TRACE(fcp_logq, pptr->port_instbuf,
fcp_trace, FCP_BUF_LEVEL_6, 0,
"fcp_transport failed 2 for %x: %x; dip=%p",
plun->lun_tgt->tgt_d_id, rval, cip);
rval = TRAN_FATAL_ERROR;
} else {
if (pkt->pkt_flags & FLAG_NOQUEUE) {
FCP_DTRACE(fcp_logq, pptr->port_instbuf,
fcp_trace, FCP_BUF_LEVEL_9, 0,
"fcp_scsi_start: FC_BUSY for pkt %p",
pkt);
rval = TRAN_BUSY;
} else {
rval = TRAN_ACCEPT;
fcp_queue_pkt(pptr, cmd);
}
}
mutex_exit(&ptgt->tgt_mutex);
}
return (rval);
}
/*
 * tran_abort(9E) entry point.  Aborting a single packet is not
 * supported; only a pkt == NULL "abort everything" request is honored.
 * Returns TRUE when the abort-all was issued, FALSE otherwise.
 */
static int
fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
    struct fcp_port *pptr = ADDR2FCP(ap);
    struct fcp_lun *plun = ADDR2LUN(ap);
    struct fcp_tgt *ptgt = plun->lun_tgt;
    int tgt_cnt;

    /* Per-packet aborts (pkt != NULL) are not implemented. */
    if (pkt != NULL || ptgt == NULL) {
        return (FALSE);
    }

    /* Snapshot the change count so stale aborts can be detected. */
    mutex_enter(&ptgt->tgt_mutex);
    tgt_cnt = ptgt->tgt_change_cnt;
    mutex_exit(&ptgt->tgt_mutex);

    fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
    return (TRUE);
}
/*
 * tran_reset(9E) entry point.  RESET_ALL maps to a link reset;
 * RESET_TARGET/RESET_LUN map to a task-management reset of the target
 * or LUN.  Returns 1 on success (or when the target is already
 * offline/busy), 0 on failure.
 */
int
fcp_scsi_reset(struct scsi_address *ap, int level)
{
    struct fcp_port *pptr = ADDR2FCP(ap);
    struct fcp_lun *plun = ADDR2LUN(ap);
    struct fcp_tgt *ptgt = plun->lun_tgt;
    int ret = 0;

    switch (level) {
    case RESET_ALL:
        if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
            ret = 1;
        }
        break;
    case RESET_TARGET:
    case RESET_LUN:
        /* An offline or busy target is reported as "reset done". */
        mutex_enter(&ptgt->tgt_mutex);
        if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
            mutex_exit(&ptgt->tgt_mutex);
            return (1);
        }
        mutex_exit(&ptgt->tgt_mutex);

        if (fcp_reset_target(ap, level) == FC_SUCCESS) {
            ret = 1;
        }
        break;
    default:
        break;
    }
    return (ret);
}
/*
 * tran_getcap(9E): query a capability.  doset == 0 selects the "get"
 * path in fcp_commoncap(); the value argument is unused for gets.
 */
static int
fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
    return (fcp_commoncap(ap, cap, 0, whom, 0));
}
/*
 * tran_setcap(9E): set a capability.  doset == 1 selects the "set"
 * path in fcp_commoncap().
 */
static int
fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
    return (fcp_commoncap(ap, cap, value, whom, 1));
}
/*
 * tran_setup_pkt(9E): one-time initialization of the fc_packet backing
 * a scsi_pkt, pre-building the FC frame header and inserting the
 * command at the head of the LUN's packet list.
 *
 * Returns 0 on success, -1 when the port is offline or the FCA cannot
 * initialize the packet.
 */
static int
fcp_pkt_setup(struct scsi_pkt *pkt,
    int (*callback)(caddr_t arg),
    caddr_t arg)
{
    struct fcp_pkt *cmd;
    struct fcp_port *pptr;
    struct fcp_lun *plun;
    struct fcp_tgt *ptgt;
    int kf;
    fc_packet_t *fpkt;
    fc_frame_hdr_t *hp;

    pptr = ADDR2FCP(&pkt->pkt_address);
    plun = ADDR2LUN(&pkt->pkt_address);
    ptgt = plun->lun_tgt;

    cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
    fpkt = cmd->cmd_fp_pkt;
    kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;

    cmd->cmd_back = NULL;
    cmd->cmd_next = NULL;

    /* Clear the FCP command block and reset the packet state. */
    bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
    cmd->cmd_state = FCP_PKT_IDLE;
    /*
     * The original code re-fetched cmd->cmd_fp_pkt into fpkt here a
     * second time; the value cannot have changed since the assignment
     * above, so the redundant assignment has been dropped.
     */
    fpkt->pkt_data_acc = NULL;

    /*
     * NOTE(review): port_state is read without port_mutex here, as in
     * the original code - confirm this benign-race assumption holds.
     */
    if (pptr->port_state & FCP_STATE_OFFLINE) {
        return (-1);
    }

    mutex_enter(&ptgt->tgt_mutex);
    fpkt->pkt_pd = ptgt->tgt_pd_handle;

    if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
        != FC_SUCCESS) {
        mutex_exit(&ptgt->tgt_mutex);
        return (-1);
    }
    mutex_exit(&ptgt->tgt_mutex);

    /* Pre-build the FC frame header fields that never change. */
    hp = &fpkt->pkt_cmd_fhdr;
    hp->r_ctl = R_CTL_COMMAND;
    hp->rsvd = 0;
    hp->type = FC_TYPE_SCSI_FCP;
    hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
    hp->seq_id = 0;
    hp->df_ctl = 0;
    hp->seq_cnt = 0;
    hp->ox_id = 0xffff;
    hp->rx_id = 0xffff;
    hp->ro = 0;

    /* Insert the command at the head of the LUN's packet list. */
    mutex_enter(&plun->lun_mutex);
    if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
        plun->lun_pkt_head->cmd_back = cmd;
    } else {
        plun->lun_pkt_tail = cmd;
    }
    plun->lun_pkt_head = cmd;
    mutex_exit(&plun->lun_mutex);

    return (0);
}
/*
 * tran_teardown_pkt(9E): unlink the command from the LUN's
 * doubly-linked packet list and release the FCA resources attached to
 * the underlying fc_packet.
 */
static void
fcp_pkt_teardown(struct scsi_pkt *pkt)
{
    struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
    struct fcp_lun *plun = ADDR2LUN(&pkt->pkt_address);
    struct fcp_pkt *cmd = (struct fcp_pkt *)pkt->pkt_ha_private;

    mutex_enter(&plun->lun_mutex);
    /* Unlink from the predecessor, or advance the list head. */
    if (cmd->cmd_back) {
        ASSERT(cmd != plun->lun_pkt_head);
        cmd->cmd_back->cmd_forw = cmd->cmd_forw;
    } else {
        ASSERT(cmd == plun->lun_pkt_head);
        plun->lun_pkt_head = cmd->cmd_forw;
    }
    /* Unlink from the successor, or retreat the list tail. */
    if (cmd->cmd_forw) {
        cmd->cmd_forw->cmd_back = cmd->cmd_back;
    } else {
        ASSERT(cmd == plun->lun_pkt_tail);
        plun->lun_pkt_tail = cmd->cmd_back;
    }
    mutex_exit(&plun->lun_mutex);
    (void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
}
/*
 * tran_reset_notify(9E): register or cancel a reset notification
 * callback.  List management is delegated to the common SCSA helper,
 * protected by the port mutex.
 */
static int
fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
    struct fcp_port *pptr = ADDR2FCP(ap);
    int status;

    status = scsi_hba_reset_notify_setup(ap, flag, callback, arg,
        &pptr->port_mutex, &pptr->port_reset_notify_listf);
    return (status);
}
/*
 * bus_get_eventcookie(9E): translate an event name into a cookie via
 * the port's NDI event handle.
 */
static int
fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
    struct fcp_port *pptr;

    /* No port soft state means we cannot service the request. */
    if ((pptr = fcp_dip2port(dip)) == NULL) {
        return (DDI_FAILURE);
    }
    return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip,
        name, event_cookiep, NDI_EVENT_NOPASS));
}
/*
 * bus_add_eventcall(9E): register a callback for the given event
 * cookie on the port's NDI event handle.
 */
static int
fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
    ddi_eventcookie_t eventid, void (*callback)(), void *arg,
    ddi_callback_id_t *cb_id)
{
    struct fcp_port *pptr;

    /* No port soft state means we cannot service the request. */
    if ((pptr = fcp_dip2port(dip)) == NULL) {
        return (DDI_FAILURE);
    }
    return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
        eventid, callback, arg, NDI_SLEEP, cb_id));
}
/*
 * bus_remove_eventcall(9E): deregister a previously added event
 * callback.
 */
static int
fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
{
    struct fcp_port *pptr;

    /* No port soft state means we cannot service the request. */
    if ((pptr = fcp_dip2port(dip)) == NULL) {
        return (DDI_FAILURE);
    }
    return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
}
/*
 * bus_post_event(9E): run all registered callbacks for the given event
 * on the port's NDI event handle.
 */
static int
fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
    ddi_eventcookie_t eventid, void *impldata)
{
    struct fcp_port *pptr;

    /* No port soft state means we cannot service the request. */
    if ((pptr = fcp_dip2port(dip)) == NULL) {
        return (DDI_FAILURE);
    }
    return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
        eventid, impldata));
}
static int
fcp_reset_target(struct scsi_address *ap, int level)
{
int rval = FC_FAILURE;
char lun_id[25];
struct fcp_port *pptr = ADDR2FCP(ap);
struct fcp_lun *plun = ADDR2LUN(ap);
struct fcp_tgt *ptgt = plun->lun_tgt;
struct scsi_pkt *pkt;
struct fcp_pkt *cmd;
struct fcp_rsp *rsp;
uint32_t tgt_cnt;
struct fcp_rsp_info *rsp_info;
struct fcp_reset_elem *p;
int bval;
if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
KM_NOSLEEP)) == NULL) {
return (rval);
}
mutex_enter(&ptgt->tgt_mutex);
if (level == RESET_TARGET) {
if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
mutex_exit(&ptgt->tgt_mutex);
kmem_free(p, sizeof (struct fcp_reset_elem));
return (rval);
}
fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
(void) strcpy(lun_id, " ");
} else {
if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
mutex_exit(&ptgt->tgt_mutex);
kmem_free(p, sizeof (struct fcp_reset_elem));
return (rval);
}
fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
(void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
}
tgt_cnt = ptgt->tgt_change_cnt;
mutex_exit(&ptgt->tgt_mutex);
if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
0, 0, NULL, 0)) == NULL) {
kmem_free(p, sizeof (struct fcp_reset_elem));
mutex_enter(&ptgt->tgt_mutex);
fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
mutex_exit(&ptgt->tgt_mutex);
return (rval);
}
pkt->pkt_time = FCP_POLL_TIMEOUT;
cmd = PKT2CMD(pkt);
if (level == RESET_TARGET) {
cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
} else {
cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
}
cmd->cmd_fp_pkt->pkt_comp = NULL;
cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
fcp_prepare_pkt(pptr, cmd, plun);
if (cmd->cmd_pkt->pkt_time) {
cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
} else {
cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
}
(void) fc_ulp_busy_port(pptr->port_fp_handle);
bval = fcp_dopoll(pptr, cmd);
fc_ulp_idle_port(pptr->port_fp_handle);
if (bval == TRAN_ACCEPT) {
int error = 3;
rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
sizeof (struct fcp_rsp));
if (rsp->fcp_u.fcp_status.rsp_len_set) {
if (fcp_validate_fcp_response(rsp, pptr) ==
FC_SUCCESS) {
if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
sizeof (struct fcp_rsp), rsp_info,
cmd->cmd_fp_pkt->pkt_resp_acc,
sizeof (struct fcp_rsp_info));
}
if (rsp_info->rsp_code == FCP_NO_FAILURE) {
rval = FC_SUCCESS;
error = 0;
} else {
error = 1;
}
} else {
error = 2;
}
}
switch (error) {
case 0:
fcp_log(CE_WARN, pptr->port_dip,
"!FCP: WWN 0x%08x%08x %s reset successfully",
*((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
*((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
break;
case 1:
fcp_log(CE_WARN, pptr->port_dip,
"!FCP: Reset to WWN 0x%08x%08x %s failed,"
" response code=%x",
*((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
*((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
rsp_info->rsp_code);
break;
case 2:
fcp_log(CE_WARN, pptr->port_dip,
"!FCP: Reset to WWN 0x%08x%08x %s failed,"
" Bad FCP response values: rsvd1=%x,"
" rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
" rsplen=%x, senselen=%x",
*((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
*((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
rsp->reserved_0, rsp->reserved_1,
rsp->fcp_u.fcp_status.reserved_0,
rsp->fcp_u.fcp_status.reserved_1,
rsp->fcp_response_len, rsp->fcp_sense_len);
break;
default:
fcp_log(CE_WARN, pptr->port_dip,
"!FCP: Reset to WWN 0x%08x%08x %s failed",
*((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
*((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
break;
}
}
scsi_destroy_pkt(pkt);
if (rval == FC_FAILURE) {
mutex_enter(&ptgt->tgt_mutex);
if (level == RESET_TARGET) {
fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
} else {
fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
}
mutex_exit(&ptgt->tgt_mutex);
kmem_free(p, sizeof (struct fcp_reset_elem));
return (rval);
}
mutex_enter(&pptr->port_mutex);
if (level == RESET_TARGET) {
p->tgt = ptgt;
p->lun = NULL;
} else {
p->tgt = NULL;
p->lun = plun;
}
p->tgt = ptgt;
p->tgt_cnt = tgt_cnt;
p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
p->next = pptr->port_reset_list;
pptr->port_reset_list = p;
FCP_TRACE(fcp_logq, pptr->port_instbuf,
fcp_trace, FCP_BUF_LEVEL_3, 0,
"Notify ssd of the reset to reinstate the reservations");
scsi_hba_reset_notify_callback(&pptr->port_mutex,
&pptr->port_reset_notify_listf);
mutex_exit(&pptr->port_mutex);
return (rval);
}
/*
 * Common worker for tran_getcap(9E)/tran_setcap(9E).  doset selects
 * between setting (1) and getting (0) the capability named by cap.
 * Returns the capability value, TRUE/FALSE, or UNDEFINED for unknown
 * capabilities.
 */
static int
fcp_commoncap(struct scsi_address *ap, char *cap,
    int val, int tgtonly, int doset)
{
    struct fcp_port *pptr = ADDR2FCP(ap);
    struct fcp_lun *plun = ADDR2LUN(ap);
    struct fcp_tgt *ptgt = plun->lun_tgt;
    int cap_idx;
    int res = FALSE;

    /* A NULL capability string cannot be looked up. */
    if (cap == (char *)0) {
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_3, 0,
            "fcp_commoncap: invalid arg");
        return (res);
    }

    cap_idx = scsi_hba_lookup_capstr(cap);
    if (cap_idx == -1) {
        return (UNDEFINED);
    }

    if (doset) {
        switch (cap_idx) {
        case SCSI_CAP_ARQ:
            /* ARQ is always enabled; report whether the set "took". */
            res = (val == 0) ? FALSE : TRUE;
            break;
        case SCSI_CAP_LUN_RESET:
            if (val) {
                plun->lun_cap |= FCP_LUN_CAP_RESET;
            } else {
                plun->lun_cap &= ~FCP_LUN_CAP_RESET;
            }
            res = TRUE;
            break;
        case SCSI_CAP_SECTOR_SIZE:
            res = TRUE;
            break;
        default:
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_4, 0,
                "fcp_setcap: unsupported %d", cap_idx);
            res = UNDEFINED;
            break;
        }
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_5, 0,
            "set cap: cap=%s, val/tgtonly/doset/rval = "
            "0x%x/0x%x/0x%x/%d",
            cap, val, tgtonly, doset, res);
    } else {
        switch (cap_idx) {
        case SCSI_CAP_DMA_MAX:
            res = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
            /* Saturate when the attribute does not fit in an int. */
            if (res == -1) {
                res = MAX_INT_DMA;
            }
            break;
        case SCSI_CAP_INITIATOR_ID:
            res = pptr->port_id;
            break;
        case SCSI_CAP_ARQ:
        case SCSI_CAP_RESET_NOTIFICATION:
        case SCSI_CAP_TAGGED_QING:
            res = TRUE;
            break;
        case SCSI_CAP_SCSI_VERSION:
            res = 3;
            break;
        case SCSI_CAP_INTERCONNECT_TYPE:
            if (FC_TOP_EXTERNAL(pptr->port_topology) ||
                (ptgt->tgt_hard_addr == 0)) {
                res = INTERCONNECT_FABRIC;
            } else {
                res = INTERCONNECT_FIBRE;
            }
            break;
        case SCSI_CAP_LUN_RESET:
            res = (plun->lun_cap & FCP_LUN_CAP_RESET) ?
                TRUE : FALSE;
            break;
        default:
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_4, 0,
                "fcp_getcap: unsupported %d", cap_idx);
            res = UNDEFINED;
            break;
        }
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_8, 0,
            "get cap: cap=%s, val/tgtonly/doset/rval = "
            "0x%x/0x%x/0x%x/%d",
            cap, val, tgtonly, doset, res);
    }
    return (res);
}
/*
 * Build the unit address "w<port-wwn>,<lun>" for sd into name (len
 * bytes).  For driver.conf-enumerated nodes the configured WWN is
 * resolved and the node's pwwn/target properties refreshed first.
 * Returns 1 on success, 0 on failure.
 */
static int
fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
{
    int i;
    int *lun;
    int numChars;
    uint_t nlun;
    uint_t count;
    uint_t nbytes;
    uchar_t *bytes;
    uint16_t lun_num;
    uint32_t tgt_id;
    char **conf_wwn;
    char tbuf[(FC_WWN_SIZE << 1) + 1];
    uchar_t barray[FC_WWN_SIZE];
    dev_info_t *tgt_dip;
    struct fcp_tgt *ptgt;
    struct fcp_port *pptr;
    struct fcp_lun *plun;

    ASSERT(sd != NULL);
    ASSERT(name != NULL);

    tgt_dip = sd->sd_dev;
    pptr = ddi_get_soft_state(fcp_softstate,
        ddi_get_instance(ddi_get_parent(tgt_dip)));
    if (pptr == NULL) {
        return (0);
    }
    ASSERT(tgt_dip != NULL);

    /* The LUN number comes from the node's "lun" property. */
    if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
        DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
        LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
        name[0] = '\0';
        return (0);
    }
    if (nlun == 0) {
        ddi_prop_free(lun);
        return (0);
    }
    lun_num = lun[0];
    ddi_prop_free(lun);

    /*
     * driver.conf enumerated node: resolve the configured WWN to a
     * known LUN and refresh the node's pwwn/target properties.
     */
    if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
        DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
        &conf_wwn, &count) == DDI_PROP_SUCCESS) {
        ASSERT(count >= 1);
        fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
        ddi_prop_free(conf_wwn);
        mutex_enter(&pptr->port_mutex);
        if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
            mutex_exit(&pptr->port_mutex);
            return (0);
        }
        ptgt = plun->lun_tgt;
        mutex_exit(&pptr->port_mutex);
        (void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
            tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
        /* Private loop: the target id is derived from the AL_PA. */
        if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
            ptgt->tgt_hard_addr != 0) {
            tgt_id = (uint32_t)fcp_alpa_to_switch[
                ptgt->tgt_hard_addr];
        } else {
            tgt_id = ptgt->tgt_d_id;
        }
        (void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
            TARGET_PROP, tgt_id);
    }

    /* Format the unit address from the node's port WWN property. */
    bytes = NULL;
    if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
        DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
        &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
        if (bytes != NULL) {
            ddi_prop_free(bytes);
        }
        return (0);
    }
    for (i = 0; i < FC_WWN_SIZE; i++) {
        (void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
    }
    numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
    ASSERT(numChars < len);
    if (numChars >= len) {
        fcp_log(CE_WARN, pptr->port_dip,
            "!fcp_scsi_get_name: "
            "name parameter length too small, it needs to be %d",
            numChars+1);
    }
    ddi_prop_free(bytes);
    return (1);
}
/*
 * Format the bus address of sd into name (len bytes).  For FCP the bus
 * address is simply the target's D_ID in hex.  Returns 1 on success,
 * 0 when no LUN or target is attached.
 */
static int
fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
    struct fcp_lun *plun = ADDR2LUN(&sd->sd_address);
    struct fcp_tgt *ptgt;
    int nchars;

    if (plun == NULL) {
        return (0);
    }
    ptgt = plun->lun_tgt;
    if (ptgt == NULL) {
        return (0);
    }

    nchars = snprintf(name, len, "%x", ptgt->tgt_d_id);
    ASSERT(nchars < len);
    if (nchars >= len) {
        /* Truncated: tell the caller how big the buffer must be. */
        fcp_log(CE_WARN, NULL,
            "!fcp_scsi_get_bus_addr: "
            "name parameter length too small, it needs to be %d",
            nchars+1);
    }
    return (1);
}
/*
 * Force a link reset on the port.  On an external (fabric) topology
 * with a specific address, only the target behind ap is reset;
 * otherwise a zero WWN requests a full link reset.  Returns
 * FC_SUCCESS/FC_FAILURE.
 */
static int
fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
{
    la_wwn_t wwn;
    struct fcp_lun *plun;
    struct fcp_tgt *ptgt;

    mutex_enter(&pptr->port_mutex);
    /* Link resets are disabled while suspended or powered down. */
    if (pptr->port_state & (FCP_STATE_SUSPENDED |
        FCP_STATE_POWER_DOWN)) {
        mutex_exit(&pptr->port_mutex);
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_2, 0,
            "fcp_linkreset, fcp%d: link reset "
            "disabled due to DDI_SUSPEND",
            ddi_get_instance(pptr->port_dip));
        return (FC_FAILURE);
    }

    /* Nothing to do on an offline or onlining link. */
    if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
        mutex_exit(&pptr->port_mutex);
        return (FC_SUCCESS);
    }

    FCP_DTRACE(fcp_logq, pptr->port_instbuf,
        fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");

    /*
     * Zero WWN means "reset the whole link"; otherwise reset only the
     * target addressed by ap.
     */
    if (!FC_TOP_EXTERNAL(pptr->port_topology) || ap == NULL) {
        bzero((caddr_t)&wwn, sizeof (wwn));
    } else {
        plun = ADDR2LUN(ap);
        ptgt = plun->lun_tgt;
        bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
    }
    mutex_exit(&pptr->port_mutex);

    return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
}
/*
 * Handle FC_CMD_RESUME / FC_CMD_POWER_UP for a port: restore the port
 * soft state, restart the shared watchdog if this is the first live
 * port, and rediscover devices according to the current link state.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
    uint32_t s_id, fc_attach_cmd_t cmd, int instance)
{
    int res = DDI_FAILURE;
    struct fcp_port *pptr;
    uint32_t alloc_cnt;
    uint32_t max_cnt;
    fc_portmap_t *tmp_list = NULL;

    FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
        FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
        instance);
    if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
        cmn_err(CE_WARN, "fcp: bad soft state");
        return (res);
    }
    mutex_enter(&pptr->port_mutex);
    switch (cmd) {
    case FC_CMD_RESUME:
        ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
        pptr->port_state &= ~FCP_STATE_SUSPENDED;
        break;
    case FC_CMD_POWER_UP:
        /*
         * Still DDI_SUSPENDed: just clear the power-down bit and
         * defer rediscovery until the DDI_RESUME arrives.
         */
        if (pptr->port_state & FCP_STATE_SUSPENDED) {
            pptr->port_state &= ~FCP_STATE_POWER_DOWN;
            mutex_exit(&pptr->port_mutex);
            return (DDI_SUCCESS);
        }
        pptr->port_state &= ~FCP_STATE_POWER_DOWN;
    }
    pptr->port_id = s_id;
    pptr->port_state = FCP_STATE_INIT;
    mutex_exit(&pptr->port_mutex);

    /* Refresh the cached FCA-supplied port parameters. */
    (void) fcp_cp_pinfo(pptr, pinfo);

    /* The first resumed port restarts the shared watchdog timer. */
    mutex_enter(&fcp_global_mutex);
    if (fcp_watchdog_init++ == 0) {
        fcp_watchdog_tick = fcp_watchdog_timeout *
            drv_usectohz(1000000);
        fcp_watchdog_id = timeout(fcp_watch,
            NULL, fcp_watchdog_tick);
    }
    mutex_exit(&fcp_global_mutex);

    switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
    case FC_STATE_OFFLINE:
        /* Nothing to discover on an offline link. */
        res = DDI_SUCCESS;
        break;
    case FC_STATE_ONLINE:
        /* Unknown topology: force a link reset to sort it out. */
        if (pptr->port_topology == FC_TOP_UNKNOWN) {
            (void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
            res = DDI_SUCCESS;
            break;
        }
        if (FC_TOP_EXTERNAL(pptr->port_topology) &&
            !fcp_enable_auto_configuration) {
            /* Fabric: rebuild the map from configured devices. */
            tmp_list = fcp_construct_map(pptr, &alloc_cnt);
            if (tmp_list == NULL) {
                if (!alloc_cnt) {
                    res = DDI_SUCCESS;
                }
                break;
            }
            max_cnt = alloc_cnt;
        } else {
            /* Loop / auto-config: ask the FCA for the port map. */
            ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
            alloc_cnt = FCP_MAX_DEVICES;
            if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
                (sizeof (fc_portmap_t)) * alloc_cnt,
                KM_NOSLEEP)) == NULL) {
                fcp_log(CE_WARN, pptr->port_dip,
                    "!fcp%d: failed to allocate portmap",
                    instance);
                break;
            }
            max_cnt = alloc_cnt;
            if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
                &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
                FC_SUCCESS) {
                caddr_t msg;

                (void) fc_ulp_error(res, &msg);
                FCP_TRACE(fcp_logq, pptr->port_instbuf,
                    fcp_trace, FCP_BUF_LEVEL_2, 0,
                    "resume failed getportmap: reason=0x%x",
                    res);
                fcp_log(CE_WARN, pptr->port_dip,
                    "!failed to get port map : %s", msg);
                break;
            }
            /* The FCA may have grown the list it returned. */
            if (max_cnt > alloc_cnt) {
                alloc_cnt = max_cnt;
            }
        }
        /* Feed the map through the normal state-change path. */
        fcp_statec_callback(ulph, pptr->port_fp_handle,
            pptr->port_phys_state, pptr->port_topology, tmp_list,
            max_cnt, pptr->port_id);
        res = DDI_SUCCESS;
        break;
    default:
        fcp_log(CE_WARN, pptr->port_dip,
            "!fcp%d: invalid port state at attach=0x%x",
            instance, pptr->port_phys_state);
        mutex_enter(&pptr->port_mutex);
        pptr->port_phys_state = FCP_STATE_OFFLINE;
        mutex_exit(&pptr->port_mutex);
        res = DDI_SUCCESS;
        break;
    }
    if (tmp_list != NULL) {
        kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
    }
    return (res);
}
/*
 * Snapshot the FCA-supplied port parameters into the fcp_port soft
 * state.  DDI_DMA_FLAGERR is not supported by fcp and is masked out of
 * every DMA attribute set.
 */
static void
fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
{
    pptr->port_fp_modlinkage = *pinfo->port_linkage;
    pptr->port_dip = pinfo->port_dip;
    pptr->port_fp_handle = pinfo->port_handle;

    /* DMA/access attributes are copied only when supplied. */
    if (pinfo->port_acc_attr != NULL) {
        pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
        pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
        pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
        pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
    }

    pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
    pptr->port_max_exch = pinfo->port_fca_max_exch;
    pptr->port_phys_state = pinfo->port_state;
    pptr->port_topology = pinfo->port_flags;
    pptr->port_reset_action = pinfo->port_reset_action;
    pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
    pptr->port_fcp_dma = pinfo->port_fcp_dma;

    bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
    bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));

    /*
     * Clearing the bit unconditionally is equivalent to the original
     * test-then-clear: a clear bit stays clear.
     */
    pptr->port_cmd_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
    pptr->port_data_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
    pptr->port_resp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
}
/*
 * Record the result of a hotplug element.  If a thread is waiting on
 * the element, wake it (the waiter then owns the element); otherwise
 * nobody will ever look at it again, so destroy and free it here.
 */
static void
fcp_process_elem(struct fcp_hp_elem *elem, int result)
{
    ASSERT(elem != NULL);

    mutex_enter(&elem->mutex);
    elem->result = result;
    if (elem->wait == 0) {
        /* Nobody is waiting: the element is ours to destroy. */
        mutex_exit(&elem->mutex);
        cv_destroy(&elem->cv);
        mutex_destroy(&elem->mutex);
        kmem_free(elem, sizeof (struct fcp_hp_elem));
        return;
    }
    /* Wake the waiter; it owns the element from here on. */
    elem->wait = 0;
    cv_signal(&elem->cv);
    mutex_exit(&elem->mutex);
}
/*
 * Taskq worker for hotplug elements: verify the request is still
 * current and the port still usable, then perform the online/offline
 * (or MPxIO path busy/clear) and report the result through
 * fcp_process_elem().
 */
static void
fcp_hp_task(void *arg)
{
    struct fcp_hp_elem *elem = (struct fcp_hp_elem *)arg;
    struct fcp_lun *plun = elem->lun;
    struct fcp_port *pptr = elem->port;
    int result;

    ASSERT(elem->what == FCP_ONLINE ||
        elem->what == FCP_OFFLINE ||
        elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
        elem->what == FCP_MPXIO_PATH_SET_BUSY);

    /* Lock order: port_mutex before lun_mutex. */
    mutex_enter(&pptr->port_mutex);
    mutex_enter(&plun->lun_mutex);
    /*
     * Drop stale online/offline requests (the LUN has seen another
     * event since this one was queued) and anything racing against a
     * suspending, detaching, or powering-down port.
     */
    if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
        plun->lun_event_count != elem->event_cnt) ||
        pptr->port_state & (FCP_STATE_SUSPENDED |
        FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
        mutex_exit(&plun->lun_mutex);
        mutex_exit(&pptr->port_mutex);
        fcp_process_elem(elem, NDI_FAILURE);
        return;
    }
    mutex_exit(&plun->lun_mutex);
    mutex_exit(&pptr->port_mutex);

    result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
        elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
    fcp_process_elem(elem, result);
}
/*
 * Return the child node for plun, creating one if cip is not already a
 * child of the port.  MPxIO ports with MPxIO-capable LUNs get a
 * pathinfo node; everything else gets a devinfo node.  Called with
 * lun_mutex (and, on the create path, port_mutex) held.
 */
static child_info_t *
fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount)
{
    ASSERT(MUTEX_HELD(&plun->lun_mutex));

    if (fcp_is_child_present(plun, cip) != FC_FAILURE) {
        /* The child already exists; just record it. */
        plun->lun_cip = cip;
    } else {
        struct fcp_port *pptr = plun->lun_tgt->tgt_port;

        ASSERT(MUTEX_HELD(&pptr->port_mutex));
        if (pptr->port_mpxio != 0 && plun->lun_mpxio != 0) {
            plun->lun_cip =
                CIP(fcp_create_pip(plun, lcount, tcount));
            plun->lun_mpxio = 1;
        } else {
            plun->lun_cip =
                CIP(fcp_create_dip(plun, lcount, tcount));
            plun->lun_mpxio = 0;
        }
    }
    return (plun->lun_cip);
}
/*
 * Check whether cdip is (still) a child of the port's devinfo node.
 * Returns FC_SUCCESS when found, FC_FAILURE otherwise (including when
 * the LUN has no child at all).
 */
static int
fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
{
    int found = FC_FAILURE;
    dev_info_t *pdip;
    struct dev_info *child;

    ASSERT(MUTEX_HELD(&plun->lun_mutex));

    pdip = plun->lun_tgt->tgt_port->port_dip;
    if (plun->lun_cip == NULL) {
        FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_3, 0,
            "fcp_is_dip_present: plun->lun_cip is NULL: "
            "plun: %p lun state: %x num: %d target state: %x",
            plun, plun->lun_state, plun->lun_num,
            plun->lun_tgt->tgt_port->port_state);
        return (found);
    }

    /* Walk the parent's child list looking for cdip. */
    ndi_devi_enter(pdip);
    for (child = DEVI(pdip)->devi_child; child != NULL;
        child = child->devi_sibling) {
        if (child == DEVI(cdip)) {
            found = FC_SUCCESS;
            break;
        }
    }
    ndi_devi_exit(pdip);

    return (found);
}
/*
 * Check whether cip is still a child of the port.  The child is a
 * pathinfo node under MPxIO, a devinfo node otherwise.  Returns
 * FC_SUCCESS or FC_FAILURE.
 */
static int
fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
{
    ASSERT(plun != NULL);
    ASSERT(MUTEX_HELD(&plun->lun_mutex));

    if (plun->lun_mpxio != 0) {
        return (fcp_is_pip_present(plun, PIP(cip)));
    }
    return (fcp_is_dip_present(plun, DIP(cip)));
}
/*
 * Create (or reuse) the devinfo node for plun under its port.  Called
 * with lun_mutex and port_mutex held; both are dropped and re-acquired
 * around the offline of an obsolete old node.  Returns the child dip,
 * or NULL on failure.
 */
static dev_info_t *
fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
{
    int failure = 0;
    uint32_t tgt_id;
    uint64_t sam_lun;
    struct fcp_tgt *ptgt = plun->lun_tgt;
    struct fcp_port *pptr = ptgt->tgt_port;
    dev_info_t *pdip = pptr->port_dip;
    dev_info_t *cdip = NULL;
    dev_info_t *old_dip = DIP(plun->lun_cip);
    char *nname = NULL;
    char **compatible = NULL;
    int ncompatible;
    char *scsi_binding_set;
    char t_pwwn[17];

    ASSERT(MUTEX_HELD(&plun->lun_mutex));
    ASSERT(MUTEX_HELD(&pptr->port_mutex));

    /* Derive node name/compatible list from the inquiry data. */
    if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
        DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
        &scsi_binding_set) != DDI_PROP_SUCCESS) {
        scsi_binding_set = NULL;
    }
    scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
        plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
    if (scsi_binding_set) {
        ddi_prop_free(scsi_binding_set);
    }
    if (nname == NULL) {
#ifdef DEBUG
        cmn_err(CE_WARN, "%s%d: no driver for "
            "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
            " compatible: %s",
            ddi_driver_name(pdip), ddi_get_instance(pdip),
            ptgt->tgt_port_wwn.raw_wwn[0],
            ptgt->tgt_port_wwn.raw_wwn[1],
            ptgt->tgt_port_wwn.raw_wwn[2],
            ptgt->tgt_port_wwn.raw_wwn[3],
            ptgt->tgt_port_wwn.raw_wwn[4],
            ptgt->tgt_port_wwn.raw_wwn[5],
            ptgt->tgt_port_wwn.raw_wwn[6],
            ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
            *compatible);
#endif
        failure++;
        goto end_of_fcp_create_dip;
    }
    cdip = fcp_find_existing_dip(plun, pdip, nname);
    /*
     * An existing node that no longer matches (or a LUN flagged
     * FCP_LUN_CHANGED) must be offlined first.  The mutexes are
     * dropped across fcp_pass_to_hp() and re-acquired after.
     */
    if (old_dip && (cdip != old_dip ||
        plun->lun_state & FCP_LUN_CHANGED)) {
        plun->lun_state &= ~(FCP_LUN_INIT);
        mutex_exit(&plun->lun_mutex);
        mutex_exit(&pptr->port_mutex);
        mutex_enter(&ptgt->tgt_mutex);
        (void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
            link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
        mutex_exit(&ptgt->tgt_mutex);
#ifdef DEBUG
        if (cdip != NULL) {
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_2, 0,
                "Old dip=%p; New dip=%p don't match", old_dip,
                cdip);
        } else {
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_2, 0,
                "Old dip=%p; New dip=NULL don't match", old_dip);
        }
#endif
        mutex_enter(&pptr->port_mutex);
        mutex_enter(&plun->lun_mutex);
    }
    /* Allocate a fresh node when none exists or the LUN changed. */
    if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
        plun->lun_state &= ~(FCP_LUN_CHANGED);
        if (ndi_devi_alloc(pptr->port_dip, nname,
            DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
            failure++;
            goto end_of_fcp_create_dip;
        }
    }
    /* Decorate the node with the standard FCP properties. */
    if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
        "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
        failure++;
        goto end_of_fcp_create_dip;
    }
    if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
        ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
        failure++;
        goto end_of_fcp_create_dip;
    }
    if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
        ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
        failure++;
        goto end_of_fcp_create_dip;
    }
    fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
    t_pwwn[16] = '\0';
    if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
        != DDI_PROP_SUCCESS) {
        failure++;
        goto end_of_fcp_create_dip;
    }
    /* Private loop: the target id is derived from the AL_PA. */
    if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
        tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
    } else {
        tgt_id = ptgt->tgt_d_id;
    }
    if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
        tgt_id) != DDI_PROP_SUCCESS) {
        failure++;
        goto end_of_fcp_create_dip;
    }
    if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
        (int)plun->lun_num) != DDI_PROP_SUCCESS) {
        failure++;
        goto end_of_fcp_create_dip;
    }
    bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
    if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
        sam_lun) != DDI_PROP_SUCCESS) {
        failure++;
        goto end_of_fcp_create_dip;
    }
end_of_fcp_create_dip:
    scsi_hba_nodename_compatible_free(nname, compatible);
    /* On any failure, tear down whatever node was created above. */
    if (cdip != NULL && failure) {
        (void) ndi_prop_remove_all(cdip);
        (void) ndi_devi_free(cdip);
        cdip = NULL;
    }
    return (cdip);
}
/*
 * Create (or reuse) the MPxIO pathinfo node for plun under its port.
 * Called with lun_mutex and port_mutex held; both are dropped and
 * re-acquired around mdi calls that may block.  Returns the pathinfo
 * node, or NULL on failure.
 */
static mdi_pathinfo_t *
fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
{
    int i;
    char buf[MAXNAMELEN];
    char uaddr[MAXNAMELEN];
    int failure = 0;
    uint32_t tgt_id;
    uint64_t sam_lun;
    struct fcp_tgt *ptgt = plun->lun_tgt;
    struct fcp_port *pptr = ptgt->tgt_port;
    dev_info_t *pdip = pptr->port_dip;
    mdi_pathinfo_t *pip = NULL;
    mdi_pathinfo_t *old_pip = PIP(plun->lun_cip);
    char *nname = NULL;
    char **compatible = NULL;
    int ncompatible;
    char *scsi_binding_set;
    char t_pwwn[17];

    ASSERT(MUTEX_HELD(&plun->lun_mutex));
    ASSERT(MUTEX_HELD(&pptr->port_mutex));

    /* MPxIO children always bind through the "vhci" binding set. */
    scsi_binding_set = "vhci";
    scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
        plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
    if (nname == NULL) {
#ifdef DEBUG
        cmn_err(CE_WARN, "fcp_create_dip: %s%d: no driver for "
            "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
            " compatible: %s",
            ddi_driver_name(pdip), ddi_get_instance(pdip),
            ptgt->tgt_port_wwn.raw_wwn[0],
            ptgt->tgt_port_wwn.raw_wwn[1],
            ptgt->tgt_port_wwn.raw_wwn[2],
            ptgt->tgt_port_wwn.raw_wwn[3],
            ptgt->tgt_port_wwn.raw_wwn[4],
            ptgt->tgt_port_wwn.raw_wwn[5],
            ptgt->tgt_port_wwn.raw_wwn[6],
            ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
            *compatible);
#endif
        failure++;
        goto end_of_fcp_create_pip;
    }
    pip = fcp_find_existing_pip(plun, pdip);
    /*
     * An existing path that no longer matches (or a LUN flagged
     * FCP_LUN_CHANGED) must be offlined first.  The mutexes are
     * dropped across fcp_pass_to_hp() and re-acquired after.
     */
    if (old_pip && (pip != old_pip ||
        plun->lun_state & FCP_LUN_CHANGED)) {
        plun->lun_state &= ~(FCP_LUN_INIT);
        mutex_exit(&plun->lun_mutex);
        mutex_exit(&pptr->port_mutex);
        mutex_enter(&ptgt->tgt_mutex);
        (void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
            FCP_OFFLINE, lcount, tcount,
            NDI_DEVI_REMOVE, 0);
        mutex_exit(&ptgt->tgt_mutex);
        if (pip != NULL) {
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_2, 0,
                "Old pip=%p; New pip=%p don't match",
                old_pip, pip);
        } else {
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_2, 0,
                "Old pip=%p; New pip=NULL don't match",
                old_pip);
        }
        mutex_enter(&pptr->port_mutex);
        mutex_enter(&plun->lun_mutex);
    }
    /* Build the unit address "w<pwwn>,<lun>" for the path. */
    for (i = 0; i < FC_WWN_SIZE; i++) {
        (void) sprintf(&buf[i << 1], "%02x",
            ptgt->tgt_port_wwn.raw_wwn[i]);
    }
    (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
        buf, plun->lun_num);
    if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
        plun->lun_state &= ~(FCP_LUN_CHANGED);
        /* FCP_STATE_IN_MDI flags the window where mdi may block. */
        pptr->port_state |= FCP_STATE_IN_MDI;
        mutex_exit(&plun->lun_mutex);
        mutex_exit(&pptr->port_mutex);
        if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
            uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
            fcp_log(CE_WARN, pptr->port_dip,
                "!path alloc failed:0x%x", plun);
            mutex_enter(&pptr->port_mutex);
            mutex_enter(&plun->lun_mutex);
            pptr->port_state &= ~FCP_STATE_IN_MDI;
            failure++;
            goto end_of_fcp_create_pip;
        }
        mutex_enter(&pptr->port_mutex);
        mutex_enter(&plun->lun_mutex);
        pptr->port_state &= ~FCP_STATE_IN_MDI;
    } else {
        /* Reusing the path: clear out its old properties. */
        (void) mdi_prop_remove(pip, NULL);
    }
    mdi_pi_set_phci_private(pip, (caddr_t)plun);
    /* Decorate the path with the standard FCP properties. */
    if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
        ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
        != DDI_PROP_SUCCESS) {
        failure++;
        goto end_of_fcp_create_pip;
    }
    if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
        ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
        != DDI_PROP_SUCCESS) {
        failure++;
        goto end_of_fcp_create_pip;
    }
    fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
    t_pwwn[16] = '\0';
    if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
        != DDI_PROP_SUCCESS) {
        failure++;
        goto end_of_fcp_create_pip;
    }
    /* Private loop: the target id is derived from the AL_PA. */
    if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
        ptgt->tgt_hard_addr != 0) {
        tgt_id = (uint32_t)
            fcp_alpa_to_switch[ptgt->tgt_hard_addr];
    } else {
        tgt_id = ptgt->tgt_d_id;
    }
    if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
        != DDI_PROP_SUCCESS) {
        failure++;
        goto end_of_fcp_create_pip;
    }
    if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
        != DDI_PROP_SUCCESS) {
        failure++;
        goto end_of_fcp_create_pip;
    }
    bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
    if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
        != DDI_PROP_SUCCESS) {
        failure++;
        goto end_of_fcp_create_pip;
    }
end_of_fcp_create_pip:
    scsi_hba_nodename_compatible_free(nname, compatible);
    /* On any failure, free the path (mutexes dropped for mdi_pi_free). */
    if (pip != NULL && failure) {
        (void) mdi_prop_remove(pip, NULL);
        mutex_exit(&plun->lun_mutex);
        mutex_exit(&pptr->port_mutex);
        (void) mdi_pi_free(pip, 0);
        mutex_enter(&pptr->port_mutex);
        mutex_enter(&plun->lun_mutex);
        pip = NULL;
    }
    return (pip);
}
/*
 * Search pdip's children for a node with the given node name whose
 * node-wwn, port-wwn, target, and lun properties all match plun.
 * Returns the matching dip, or NULL when no child matches.
 */
static dev_info_t *
fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
{
    uint_t nbytes;
    uchar_t *bytes;
    uint_t nwords;
    uint32_t tgt_id;
    int *words;
    dev_info_t *cdip;
    dev_info_t *ndip;
    struct fcp_tgt *ptgt = plun->lun_tgt;
    struct fcp_port *pptr = ptgt->tgt_port;

    ndi_devi_enter(pdip);
    ndip = (dev_info_t *)DEVI(pdip)->devi_child;
    while ((cdip = ndip) != NULL) {
        ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;
        /* Node name must match first. */
        if (strcmp(DEVI(cdip)->devi_node_name, name)) {
            continue;
        }
        /* Match the node WWN property. */
        if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
            DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
            &nbytes) != DDI_PROP_SUCCESS) {
            continue;
        }
        if (nbytes != FC_WWN_SIZE || bytes == NULL) {
            if (bytes != NULL) {
                ddi_prop_free(bytes);
            }
            continue;
        }
        ASSERT(bytes != NULL);
        if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
            ddi_prop_free(bytes);
            continue;
        }
        ddi_prop_free(bytes);
        /* Match the port WWN property. */
        if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
            DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
            &nbytes) != DDI_PROP_SUCCESS) {
            continue;
        }
        if (nbytes != FC_WWN_SIZE || bytes == NULL) {
            if (bytes != NULL) {
                ddi_prop_free(bytes);
            }
            continue;
        }
        ASSERT(bytes != NULL);
        if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
            ddi_prop_free(bytes);
            continue;
        }
        ddi_prop_free(bytes);
        /* Match the target id (AL_PA-derived in private loop). */
        if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
            DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
            &nwords) != DDI_PROP_SUCCESS) {
            continue;
        }
        if (nwords != 1 || words == NULL) {
            if (words != NULL) {
                ddi_prop_free(words);
            }
            continue;
        }
        ASSERT(words != NULL);
        if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
            ptgt->tgt_hard_addr != 0) {
            tgt_id =
                (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
        } else {
            tgt_id = ptgt->tgt_d_id;
        }
        if (tgt_id != (uint32_t)*words) {
            ddi_prop_free(words);
            continue;
        }
        ddi_prop_free(words);
        /* Match the LUN number; first full match wins. */
        if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
            DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
            &nwords) != DDI_PROP_SUCCESS) {
            continue;
        }
        if (nwords != 1 || words == NULL) {
            if (words != NULL) {
                ddi_prop_free(words);
            }
            continue;
        }
        ASSERT(words != NULL);
        if (plun->lun_num == (uint16_t)*words) {
            ddi_prop_free(words);
            break;
        }
        ddi_prop_free(words);
    }
    ndi_devi_exit(pdip);
    return (cdip);
}
/*
 * Check whether the given path info node still represents this LUN:
 * build the "w<port-wwn>,<lun>" unit address and verify that an MDI
 * lookup under the LUN's GUID (the old GUID if a GUID change is in
 * progress) yields the same pip.
 *
 * Returns FC_SUCCESS when it matches, FC_FAILURE otherwise.
 */
static int
fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
{
    dev_info_t *pdip;
    char pwwn_ascii[MAXNAMELEN];
    char unit_addr[MAXNAMELEN];
    char *guid;

    ASSERT(MUTEX_HELD(&plun->lun_mutex));

    pdip = plun->lun_tgt->tgt_port->port_dip;

    if (pip == NULL) {
        FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_4, 0,
            "fcp_is_pip_present: plun->lun_cip is NULL: "
            "plun: %p lun state: %x num: %d target state: %x",
            plun, plun->lun_state, plun->lun_num,
            plun->lun_tgt->tgt_port->port_state);
        return (FC_FAILURE);
    }

    fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, pwwn_ascii);
    (void) snprintf(unit_addr, MAXNAMELEN, "w%s,%x", pwwn_ascii,
        plun->lun_num);

    /* During a GUID change the pip is still filed under the old GUID. */
    guid = plun->lun_old_guid ? plun->lun_old_guid : plun->lun_guid;

    return ((mdi_pi_find(pdip, guid, unit_addr) == pip) ?
        FC_SUCCESS : FC_FAILURE);
}
/*
 * Look up the MDI path info node for this LUN under pdip, using the
 * LUN's GUID and the "w<port-wwn>,<lun>" unit address.
 * Returns the pip, or NULL if none exists.
 */
static mdi_pathinfo_t *
fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
{
    char pwwn_ascii[MAXNAMELEN];
    char unit_addr[MAXNAMELEN];
    struct fcp_tgt *ptgt = plun->lun_tgt;
    struct fcp_port *pptr = ptgt->tgt_port;

    ASSERT(MUTEX_HELD(&pptr->port_mutex));

    fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, pwwn_ascii);
    (void) snprintf(unit_addr, MAXNAMELEN, "w%s,%x", pwwn_ascii,
        plun->lun_num);

    return (mdi_pi_find(pdip, plun->lun_guid, unit_addr));
}
/*
 * Online a LUN's child node (plain devinfo node or MDI path info).
 *
 * Called with pptr->port_mutex and plun->lun_mutex held; both are
 * dropped around the NDI/MDI online calls and re-taken before return.
 * If MPxIO reports MDI_NOT_SUPPORTED the LUN is converted to a plain
 * devinfo child and the online is retried.
 *
 * Returns NDI_SUCCESS or NDI_FAILURE.
 */
static int
fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags)
{
    int rval;
    struct fcp_port *pptr = plun->lun_tgt->tgt_port;
    struct fcp_tgt *ptgt = plun->lun_tgt;
    dev_info_t *cdip = NULL;

    ASSERT(MUTEX_HELD(&pptr->port_mutex));
    ASSERT(MUTEX_HELD(&plun->lun_mutex));

    if (plun->lun_cip == NULL) {
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_3, 0,
            "fcp_online_child: plun->lun_cip is NULL: "
            "plun: %p state: %x num: %d target state: %x",
            plun, plun->lun_state, plun->lun_num,
            plun->lun_tgt->tgt_port->port_state);
        return (NDI_FAILURE);
    }
again:
    if (plun->lun_mpxio == 0) {
        /* Non-MPxIO: online the plain devinfo child. */
        cdip = DIP(cip);
        mutex_exit(&plun->lun_mutex);
        mutex_exit(&pptr->port_mutex);
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_3, 0,
            "!Invoking ndi_devi_online for %s: target=%x lun=%x",
            ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
        /*
         * When the parent is not yet attached only bind the driver;
         * a full ndi_devi_online is done in the attached case.
         */
        if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
            rval = ndi_devi_bind_driver(cdip, flags);
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_3, 0,
                "!Invoking ndi_devi_bind_driver: rval=%d", rval);
        } else {
            rval = ndi_devi_online(cdip, flags);
        }
        if (rval == NDI_SUCCESS) {
            mutex_enter(&ptgt->tgt_mutex);
            plun->lun_state |= FCP_LUN_INIT;
            mutex_exit(&ptgt->tgt_mutex);
        } else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
            fcp_log(CE_NOTE, pptr->port_dip,
                "!ndi_devi_online:"
                " failed for %s: target=%x lun=%x %x",
                ddi_get_name(cdip), ptgt->tgt_d_id,
                plun->lun_num, rval);
        } else {
            /* "ses" nodes only get a trace entry, not a log. */
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_3, 0,
                " !ndi_devi_online:"
                " failed for %s: target=%x lun=%x %x",
                ddi_get_name(cdip), ptgt->tgt_d_id,
                plun->lun_num, rval);
        }
    } else {
        /* MPxIO: online the path through MDI. */
        cdip = mdi_pi_get_client(PIP(cip));
        mutex_exit(&plun->lun_mutex);
        mutex_exit(&pptr->port_mutex);
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_3, 0,
            "!Invoking mdi_pi_online for %s: target=%x lun=%x",
            ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
        /* Hold the path and leave the pHCI while MDI onlines it. */
        mdi_hold_path(PIP(cip));
        mdi_devi_exit_phci(pptr->port_dip);
        rval = mdi_pi_online(PIP(cip), flags);
        mdi_devi_enter_phci(pptr->port_dip);
        mdi_rele_path(PIP(cip));
        if (rval == MDI_SUCCESS) {
            mutex_enter(&ptgt->tgt_mutex);
            plun->lun_state |= FCP_LUN_INIT;
            mutex_exit(&ptgt->tgt_mutex);
            (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
        } else if (rval == MDI_NOT_SUPPORTED) {
            child_info_t *old_cip = cip;
            /*
             * MPxIO rejected the device: fall back to a plain
             * devinfo node, free the old path info and retry.
             */
            mutex_enter(&pptr->port_mutex);
            mutex_enter(&plun->lun_mutex);
            plun->lun_mpxio = 0;
            plun->lun_cip = NULL;
            cdip = fcp_create_dip(plun, lcount, tcount);
            plun->lun_cip = cip = CIP(cdip);
            if (cip == NULL) {
                fcp_log(CE_WARN, pptr->port_dip,
                    "!fcp_online_child: "
                    "Create devinfo failed for LU=%p", plun);
                mutex_exit(&plun->lun_mutex);
                mutex_enter(&ptgt->tgt_mutex);
                plun->lun_state |= FCP_LUN_OFFLINE;
                mutex_exit(&ptgt->tgt_mutex);
                mutex_exit(&pptr->port_mutex);
                /* free the path info created earlier */
                (void) mdi_pi_free(PIP(old_cip), 0);
            } else {
                FCP_TRACE(fcp_logq, pptr->port_instbuf,
                    fcp_trace, FCP_BUF_LEVEL_3, 0,
                    "fcp_online_child: creating devinfo "
                    "node 0x%p for plun 0x%p",
                    cip, plun);
                mutex_exit(&plun->lun_mutex);
                mutex_exit(&pptr->port_mutex);
                (void) mdi_pi_free(PIP(old_cip), 0);
                mutex_enter(&pptr->port_mutex);
                mutex_enter(&plun->lun_mutex);
                /* retry as a non-MPxIO child */
                goto again;
            }
        } else {
            if (cdip) {
                fcp_log(CE_NOTE, pptr->port_dip,
                    "!fcp_online_child: mdi_pi_online:"
                    " failed for %s: target=%x lun=%x %x",
                    ddi_get_name(cdip), ptgt->tgt_d_id,
                    plun->lun_num, rval);
            }
        }
        rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
    }

    if (rval == NDI_SUCCESS) {
        if (cdip) {
            /* post an insertion event for the new child */
            (void) ndi_event_retrieve_cookie(
                pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
                &fcp_insert_eid, NDI_EVENT_NOPASS);
            (void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
                cdip, fcp_insert_eid, NULL);
        }
    }
    mutex_enter(&pptr->port_mutex);
    mutex_enter(&plun->lun_mutex);
    return (rval);
}
/*
 * Offline a LUN's child node (plain devinfo node or MDI path info).
 *
 * Called with pptr->port_mutex and plun->lun_mutex held; both are
 * dropped around the NDI/MDI offline calls and re-taken before return.
 * With NDI_DEVI_REMOVE the child reference (and any GUID left over
 * from a GUID change) is released as well.
 *
 * Returns NDI_SUCCESS or NDI_FAILURE.
 */
static int
fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags)
{
    int rval;
    int lun_mpxio;
    struct fcp_port *pptr = plun->lun_tgt->tgt_port;
    struct fcp_tgt *ptgt = plun->lun_tgt;
    dev_info_t *cdip;

    ASSERT(MUTEX_HELD(&plun->lun_mutex));
    ASSERT(MUTEX_HELD(&pptr->port_mutex));

    if (plun->lun_cip == NULL) {
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_3, 0,
            "fcp_offline_child: plun->lun_cip is NULL: "
            "plun: %p lun state: %x num: %d target state: %x",
            plun, plun->lun_state, plun->lun_num,
            plun->lun_tgt->tgt_port->port_state);
        return (NDI_FAILURE);
    }

    /* snapshot: lun_mpxio could change once the mutexes are dropped */
    lun_mpxio = plun->lun_mpxio;
    if (lun_mpxio == 0) {
        cdip = DIP(cip);
        mutex_exit(&plun->lun_mutex);
        mutex_exit(&pptr->port_mutex);
        rval = ndi_devi_offline(DIP(cip), flags);
        if (rval != NDI_SUCCESS) {
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_3, 0,
                "fcp_offline_child: ndi_devi_offline failed "
                "rval=%x cip=%p", rval, cip);
        }
    } else {
        cdip = mdi_pi_get_client(PIP(cip));
        mutex_exit(&plun->lun_mutex);
        mutex_exit(&pptr->port_mutex);
        /* hold the path and leave the pHCI for the MDI offline */
        mdi_hold_path(PIP(cip));
        mdi_devi_exit_phci(pptr->port_dip);
        rval = mdi_pi_offline(PIP(cip), flags);
        mdi_devi_enter_phci(pptr->port_dip);
        mdi_rele_path(PIP(cip));
        rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
    }

    mutex_enter(&ptgt->tgt_mutex);
    plun->lun_state &= ~FCP_LUN_INIT;
    mutex_exit(&ptgt->tgt_mutex);

    if (rval == NDI_SUCCESS) {
        /* cdip doubles as the "log a failure" flag further down */
        cdip = NULL;
        if (flags & NDI_DEVI_REMOVE) {
            mutex_enter(&plun->lun_mutex);
            if (plun->lun_cip == cip) {
                plun->lun_cip = NULL;
            }
            /* drop any GUID kept across a GUID change */
            if (plun->lun_old_guid) {
                kmem_free(plun->lun_old_guid,
                    plun->lun_old_guid_size);
                plun->lun_old_guid = NULL;
                plun->lun_old_guid_size = 0;
            }
            mutex_exit(&plun->lun_mutex);
        }
    }

    if (lun_mpxio != 0) {
        if (rval == NDI_SUCCESS) {
            /* clear any driver-level disable left on the path */
            (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
            if (flags & NDI_DEVI_REMOVE) {
                (void) mdi_pi_free(PIP(cip), 0);
            }
        } else {
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_3, 0,
                "fcp_offline_child: mdi_pi_offline failed "
                "rval=%x cip=%p", rval, cip);
        }
    }

    mutex_enter(&pptr->port_mutex);
    mutex_enter(&plun->lun_mutex);

    if (cdip) {
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
            " target=%x lun=%x", "ndi_offline",
            ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
    }

    return (rval);
}
/*
 * Detach and free the child node of a LUN that is going away.
 *
 * Called with plun->lun_mutex held; the port and target mutexes are
 * evidently held as well (they are dropped here around the MDI calls
 * and re-taken afterwards).  On return plun->lun_cip is NULL.
 */
static void
fcp_remove_child(struct fcp_lun *plun)
{
    child_info_t *cip;
    boolean_t enteredv;

    ASSERT(MUTEX_HELD(&plun->lun_mutex));

    if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
        if (plun->lun_mpxio == 0) {
            /* plain devinfo child: strip properties and free it */
            (void) ndi_prop_remove_all(DIP(plun->lun_cip));
            (void) ndi_devi_free(DIP(plun->lun_cip));
            plun->lun_cip = NULL;
        } else {
            /*
             * MDI path: detach it from the LUN first, then
             * offline and free the path info with all the fcp
             * mutexes dropped (MDI may block or call back).
             */
            cip = plun->lun_cip;
            plun->lun_cip = NULL;
            mutex_exit(&plun->lun_mutex);
            mutex_exit(&plun->lun_tgt->tgt_mutex);
            mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);
            mdi_devi_enter(
                plun->lun_tgt->tgt_port->port_dip, &enteredv);
            mdi_hold_path(PIP(cip));
            mdi_devi_exit_phci(
                plun->lun_tgt->tgt_port->port_dip);
            (void) mdi_pi_offline(PIP(cip),
                NDI_DEVI_REMOVE);
            mdi_devi_enter_phci(
                plun->lun_tgt->tgt_port->port_dip);
            mdi_rele_path(PIP(cip));
            mdi_devi_exit(
                plun->lun_tgt->tgt_port->port_dip, enteredv);
            FCP_TRACE(fcp_logq,
                plun->lun_tgt->tgt_port->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_3, 0,
                "lun=%p pip freed %p", plun, cip);
            (void) mdi_prop_remove(PIP(cip), NULL);
            (void) mdi_pi_free(PIP(cip), 0);
            /* reacquire in port -> target -> lun order */
            mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
            mutex_enter(&plun->lun_tgt->tgt_mutex);
            mutex_enter(&plun->lun_mutex);
        }
    } else {
        plun->lun_cip = NULL;
    }
}
/*
 * Watchdog routine, run from a timeout() every fcp_watchdog_tick.
 *
 * For each port it: scans offline targets/LUNs and pending reset
 * delays; walks the queued-packet list, failing or retransporting
 * commands whose deadline has passed; walks the internal-packet list,
 * retrying (PLOGI/PRLI/transport) or failing expired internal
 * commands.  Finally it rearms itself while fcp_watchdog_init is set.
 */
static void
fcp_watch(void *arg)
{
    struct fcp_port *pptr;
    struct fcp_ipkt *icmd;
    struct fcp_ipkt *nicmd;
    struct fcp_pkt *cmd;
    struct fcp_pkt *ncmd;
    struct fcp_pkt *tail;
    struct fcp_pkt *pcmd;
    struct fcp_pkt *save_head;
    struct fcp_port *save_port;

    /* advance the driver's coarse soft clock */
    fcp_watchdog_time += fcp_watchdog_timeout;

    mutex_enter(&fcp_global_mutex);
    for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
        /*
         * Remember the list head so we can detect the port list
         * changing while the global mutex is dropped below.
         */
        save_port = fcp_port_head;
        pptr->port_state |= FCP_STATE_IN_WATCHDOG;
        mutex_exit(&fcp_global_mutex);
        mutex_enter(&pptr->port_mutex);
        /* skip ports going away once their ipkt list is drained */
        if (pptr->port_ipkt_list == NULL &&
            (pptr->port_state & (FCP_STATE_SUSPENDED |
            FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
            pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
            mutex_exit(&pptr->port_mutex);
            mutex_enter(&fcp_global_mutex);
            goto end_of_watchdog;
        }
        if (pptr->port_offline_tgts) {
            fcp_scan_offline_tgts(pptr);
        }
        if (pptr->port_offline_luns) {
            fcp_scan_offline_luns(pptr);
        }
        if (pptr->port_reset_list) {
            fcp_check_reset_delay(pptr);
        }
        mutex_exit(&pptr->port_mutex);

        /*
         * Scan the queued-packet list; "tail" bounds the scan to
         * the entries that existed when the scan started.
         */
        mutex_enter(&pptr->port_pkt_mutex);
        tail = pptr->port_pkt_tail;
        for (pcmd = NULL, cmd = pptr->port_pkt_head;
            cmd != NULL; cmd = ncmd) {
            ncmd = cmd->cmd_next;
            ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
            /* deadline >= now: not yet due, keep queued */
            if (cmd->cmd_timeout >= fcp_watchdog_time) {
                pcmd = cmd;
                goto end_of_loop;
            }
            /* unlink cmd from the queue */
            if (cmd == pptr->port_pkt_head) {
                ASSERT(pcmd == NULL);
                pptr->port_pkt_head = cmd->cmd_next;
            } else {
                ASSERT(pcmd != NULL);
                pcmd->cmd_next = cmd->cmd_next;
            }
            if (cmd == pptr->port_pkt_tail) {
                ASSERT(cmd->cmd_next == NULL);
                pptr->port_pkt_tail = pcmd;
                if (pcmd) {
                    pcmd->cmd_next = NULL;
                }
            }
            cmd->cmd_next = NULL;
            /*
             * Save the head so we can tell whether the queue
             * changed while the mutex was dropped.
             */
            save_head = pptr->port_pkt_head;
            mutex_exit(&pptr->port_pkt_mutex);
            if (cmd->cmd_fp_pkt->pkt_timeout ==
                FCP_INVALID_TIMEOUT) {
                struct scsi_pkt *pkt = cmd->cmd_pkt;
                struct fcp_lun *plun;
                struct fcp_tgt *ptgt;
                plun = ADDR2LUN(&pkt->pkt_address);
                ptgt = plun->lun_tgt;
                FCP_TRACE(fcp_logq, pptr->port_instbuf,
                    fcp_trace, FCP_BUF_LEVEL_2, 0,
                    "SCSI cmd 0x%x to D_ID=%x timed out",
                    pkt->pkt_cdbp[0], ptgt->tgt_d_id);
                /*
                 * Aborting commands complete with reset
                 * status; others with timeout status.
                 */
                cmd->cmd_state == FCP_PKT_ABORTING ?
                    fcp_fail_cmd(cmd, CMD_RESET,
                    STAT_DEV_RESET) : fcp_fail_cmd(cmd,
                    CMD_TIMEOUT, STAT_ABORTED);
            } else {
                fcp_retransport_cmd(pptr, cmd);
            }
            mutex_enter(&pptr->port_pkt_mutex);
            /* queue changed underneath us: stop the scan */
            if (save_head && save_head != pptr->port_pkt_head) {
                break;
            }
end_of_loop:
            if (cmd == tail) {
                break;
            }
        }
        mutex_exit(&pptr->port_pkt_mutex);

        /* now handle expired internal (driver-issued) packets */
        mutex_enter(&pptr->port_mutex);
        for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
            struct fcp_tgt *ptgt = icmd->ipkt_tgt;
            nicmd = icmd->ipkt_next;
            /* restart time in the future: leave it queued */
            if ((icmd->ipkt_restart != 0) &&
                (icmd->ipkt_restart >= fcp_watchdog_time)) {
                continue;
            }
            /* unlink icmd from the doubly linked ipkt list */
            if (icmd == pptr->port_ipkt_list) {
                pptr->port_ipkt_list = icmd->ipkt_next;
                if (pptr->port_ipkt_list) {
                    pptr->port_ipkt_list->ipkt_prev =
                        NULL;
                }
            } else {
                icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
                if (icmd->ipkt_next) {
                    icmd->ipkt_next->ipkt_prev =
                        icmd->ipkt_prev;
                }
            }
            icmd->ipkt_next = NULL;
            icmd->ipkt_prev = NULL;
            mutex_exit(&pptr->port_mutex);
            if (fcp_is_retryable(icmd)) {
                fc_ulp_rscn_info_t *rscnp =
                    (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
                    pkt_ulp_rscn_infop;
                FCP_TRACE(fcp_logq, pptr->port_instbuf,
                    fcp_trace, FCP_BUF_LEVEL_2, 0,
                    "%x to D_ID=%x Retrying..",
                    icmd->ipkt_opcode,
                    icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);
                /*
                 * Refresh the RSCN count so the transport can
                 * reject the retry if the fabric changed.
                 */
                if (rscnp != NULL) {
                    rscnp->ulp_rscn_count =
                        fc_ulp_get_rscn_count(pptr->
                        port_fp_handle);
                }
                mutex_enter(&pptr->port_mutex);
                mutex_enter(&ptgt->tgt_mutex);
                if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
                    mutex_exit(&ptgt->tgt_mutex);
                    mutex_exit(&pptr->port_mutex);
                    switch (icmd->ipkt_opcode) {
                        int rval;
                    case LA_ELS_PLOGI:
                        if ((rval = fc_ulp_login(
                            pptr->port_fp_handle,
                            &icmd->ipkt_fpkt, 1)) ==
                            FC_SUCCESS) {
                            mutex_enter(
                                &pptr->port_mutex);
                            continue;
                        }
                        if (fcp_handle_ipkt_errors(
                            pptr, ptgt, icmd, rval,
                            "PLOGI") == DDI_SUCCESS) {
                            mutex_enter(
                                &pptr->port_mutex);
                            continue;
                        }
                        break;
                    case LA_ELS_PRLI:
                        if ((rval = fc_ulp_issue_els(
                            pptr->port_fp_handle,
                            icmd->ipkt_fpkt)) ==
                            FC_SUCCESS) {
                            mutex_enter(
                                &pptr->port_mutex);
                            continue;
                        }
                        if (fcp_handle_ipkt_errors(
                            pptr, ptgt, icmd, rval,
                            "PRLI") == DDI_SUCCESS) {
                            mutex_enter(
                                &pptr->port_mutex);
                            continue;
                        }
                        break;
                    default:
                        if ((rval = fcp_transport(
                            pptr->port_fp_handle,
                            icmd->ipkt_fpkt, 1)) ==
                            FC_SUCCESS) {
                            mutex_enter(
                                &pptr->port_mutex);
                            continue;
                        }
                        /*
                         * NOTE(review): error label is
                         * "PRLI" in the default branch
                         * too -- looks copy-pasted;
                         * confirm intent.
                         */
                        if (fcp_handle_ipkt_errors(
                            pptr, ptgt, icmd, rval,
                            "PRLI") == DDI_SUCCESS) {
                            mutex_enter(
                                &pptr->port_mutex);
                            continue;
                        }
                        break;
                    }
                } else {
                    mutex_exit(&ptgt->tgt_mutex);
                    mutex_exit(&pptr->port_mutex);
                }
            } else {
                fcp_print_error(icmd->ipkt_fpkt);
            }
            /* retry exhausted or failed: finish and free */
            (void) fcp_call_finish_init(pptr, ptgt,
                icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
                icmd->ipkt_cause);
            fcp_icmd_free(pptr, icmd);
            mutex_enter(&pptr->port_mutex);
        }
        pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
        mutex_exit(&pptr->port_mutex);
        mutex_enter(&fcp_global_mutex);
end_of_watchdog:
        /* the port list changed while unlocked: bail out */
        if (save_port != fcp_port_head) {
            break;
        }
    }
    /* rearm unless the watchdog is being shut down */
    if (fcp_watchdog_init > 0) {
        fcp_watchdog_id =
            timeout(fcp_watch, NULL, fcp_watchdog_tick);
    }
    mutex_exit(&fcp_global_mutex);
}
/*
 * Called from fcp_watch() with pptr->port_mutex held.  Walk the port's
 * reset-delay list; for every element whose delay has expired, unlink
 * and free it, then (if the target generation is unchanged) clear the
 * FCP_LUN_BUSY state set at reset time and abort the commands queued
 * for that target/LUN.
 *
 * Fix: the expiry test was inverted (`cur->timeout < fcp_watchdog_time`
 * skipped *expired* elements and processed fresh ones immediately,
 * defeating the reset delay and leaking expired elements).  It now
 * follows the same convention as the deadline checks in fcp_watch():
 * a deadline >= fcp_watchdog_time is not yet due.
 */
static void
fcp_check_reset_delay(struct fcp_port *pptr)
{
    uint32_t tgt_cnt;
    int level;
    struct fcp_tgt *ptgt;
    struct fcp_lun *plun;
    struct fcp_reset_elem *cur = NULL;
    struct fcp_reset_elem *next = NULL;
    struct fcp_reset_elem *prev = NULL;

    ASSERT(mutex_owned(&pptr->port_mutex));

    next = pptr->port_reset_list;
    while ((cur = next) != NULL) {
        next = cur->next;

        /* delay not yet expired: keep the element on the list */
        if (cur->timeout >= fcp_watchdog_time) {
            prev = cur;
            continue;
        }

        ptgt = cur->tgt;
        plun = cur->lun;
        tgt_cnt = cur->tgt_cnt;

        /* a target element means a target-level reset */
        if (ptgt) {
            level = RESET_TARGET;
        } else {
            ASSERT(plun != NULL);
            level = RESET_LUN;
            ptgt = plun->lun_tgt;
        }

        /* unlink cur from the singly linked list */
        if (prev) {
            prev->next = next;
        } else {
            if (cur == pptr->port_reset_list) {
                pptr->port_reset_list = next;
            } else {
                /* defensive: locate cur's predecessor */
                struct fcp_reset_elem *which;
                which = pptr->port_reset_list;
                while (which && which->next != cur) {
                    which = which->next;
                }
                ASSERT(which != NULL);
                which->next = next;
                prev = which;
            }
        }

        kmem_free(cur, sizeof (*cur));

        /* only act if the target was not rediscovered since */
        if (tgt_cnt == ptgt->tgt_change_cnt) {
            mutex_enter(&ptgt->tgt_mutex);
            if (level == RESET_TARGET) {
                fcp_update_tgt_state(ptgt,
                    FCP_RESET, FCP_LUN_BUSY);
            } else {
                fcp_update_lun_state(plun,
                    FCP_RESET, FCP_LUN_BUSY);
            }
            mutex_exit(&ptgt->tgt_mutex);
            /* fcp_abort_all must run without port_mutex */
            mutex_exit(&pptr->port_mutex);
            fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
            mutex_enter(&pptr->port_mutex);
        }
    }
}
/*
 * Abort/complete all commands queued or outstanding for a target
 * (ttgt) or a single LUN (rlun) after a reset, provided the target
 * generation count still equals tgt_cnt.
 *
 * Queued packets are first moved off the port wait queue and completed
 * with CMD_RESET/STAT_DEV_RESET.  Unless the port reset policy is
 * FC_RESET_RETURN_ALL, packets already issued to the transport are
 * then aborted; packets whose abort fails are requeued with a grace
 * period for the watchdog to fail later.
 */
static void
fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
    struct fcp_lun *rlun, int tgt_cnt)
{
    int rval;
    struct fcp_lun *tlun, *nlun;
    struct fcp_pkt *pcmd = NULL, *ncmd = NULL,
        *cmd = NULL, *head = NULL,
        *tail = NULL;

    mutex_enter(&pptr->port_pkt_mutex);

    /* move the matching queued commands onto a private list */
    for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
        struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
        struct fcp_tgt *ptgt = plun->lun_tgt;
        ncmd = cmd->cmd_next;
        if (ptgt != ttgt && plun != rlun) {
            pcmd = cmd;
            continue;
        }
        if (pcmd != NULL) {
            ASSERT(pptr->port_pkt_head != cmd);
            pcmd->cmd_next = ncmd;
        } else {
            ASSERT(cmd == pptr->port_pkt_head);
            pptr->port_pkt_head = ncmd;
        }
        if (pptr->port_pkt_tail == cmd) {
            ASSERT(cmd->cmd_next == NULL);
            pptr->port_pkt_tail = pcmd;
            if (pcmd != NULL) {
                pcmd->cmd_next = NULL;
            }
        }
        if (head == NULL) {
            head = tail = cmd;
        } else {
            ASSERT(tail != NULL);
            tail->cmd_next = cmd;
            tail = cmd;
        }
        cmd->cmd_next = NULL;
    }
    mutex_exit(&pptr->port_pkt_mutex);

    /* complete the collected commands with reset status */
    for (cmd = head; cmd != NULL; cmd = ncmd) {
        struct scsi_pkt *pkt = cmd->cmd_pkt;
        ncmd = cmd->cmd_next;
        ASSERT(pkt != NULL);
        mutex_enter(&pptr->port_mutex);
        /*
         * NOTE(review): ttgt is dereferenced here before the NULL
         * check below; callers in view always supply a target --
         * confirm no LUN-only caller reaches this point.
         */
        if (ttgt->tgt_change_cnt == tgt_cnt) {
            mutex_exit(&pptr->port_mutex);
            cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
            pkt->pkt_reason = CMD_RESET;
            pkt->pkt_statistics |= STAT_DEV_RESET;
            cmd->cmd_state = FCP_PKT_IDLE;
            fcp_post_callback(cmd);
        } else {
            mutex_exit(&pptr->port_mutex);
        }
    }

    /* RETURN_ALL policy: done once the queued commands are flushed */
    if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
        return;
    }

    if (ttgt == NULL) {
        ASSERT(rlun != NULL);
        ttgt = rlun->lun_tgt;
        ASSERT(ttgt != NULL);
    }

    mutex_enter(&ttgt->tgt_mutex);
    nlun = ttgt->tgt_lun;
    mutex_exit(&ttgt->tgt_mutex);

    /* abort commands already issued to the transport, per LUN */
    while ((tlun = nlun) != NULL) {
        int restart = 0;
        if (rlun && rlun != tlun) {
            mutex_enter(&ttgt->tgt_mutex);
            nlun = tlun->lun_next;
            mutex_exit(&ttgt->tgt_mutex);
            continue;
        }
        mutex_enter(&tlun->lun_mutex);
        cmd = tlun->lun_pkt_head;
        while (cmd != NULL) {
            if (cmd->cmd_state == FCP_PKT_ISSUED) {
                struct scsi_pkt *pkt;
                restart = 1;
                cmd->cmd_state = FCP_PKT_ABORTING;
                mutex_exit(&tlun->lun_mutex);
                rval = fc_ulp_abort(pptr->port_fp_handle,
                    cmd->cmd_fp_pkt, KM_SLEEP);
                if (rval == FC_SUCCESS) {
                    pkt = cmd->cmd_pkt;
                    pkt->pkt_reason = CMD_RESET;
                    pkt->pkt_statistics |= STAT_DEV_RESET;
                    cmd->cmd_state = FCP_PKT_IDLE;
                    fcp_post_callback(cmd);
                } else {
                    caddr_t msg;
                    (void) fc_ulp_error(rval, &msg);
                    fcp_log(CE_WARN, pptr->port_dip,
                        "!Abort failed after reset %s",
                        msg);
                    /*
                     * Requeue with a grace period; the
                     * watchdog fails it if it never
                     * completes.
                     */
                    cmd->cmd_timeout =
                        fcp_watchdog_time +
                        cmd->cmd_pkt->pkt_time +
                        FCP_FAILED_DELAY;
                    cmd->cmd_fp_pkt->pkt_timeout =
                        FCP_INVALID_TIMEOUT;
                    cmd->cmd_flags |= CFLAG_IN_QUEUE;
                    mutex_enter(&pptr->port_pkt_mutex);
                    if (pptr->port_pkt_head) {
                        ASSERT(pptr->port_pkt_tail
                            != NULL);
                        pptr->port_pkt_tail->cmd_next
                            = cmd;
                        pptr->port_pkt_tail = cmd;
                    } else {
                        ASSERT(pptr->port_pkt_tail
                            == NULL);
                        pptr->port_pkt_head =
                            pptr->port_pkt_tail
                            = cmd;
                    }
                    cmd->cmd_next = NULL;
                    mutex_exit(&pptr->port_pkt_mutex);
                }
                /* list may have changed: rescan from head */
                mutex_enter(&tlun->lun_mutex);
                cmd = tlun->lun_pkt_head;
            } else {
                cmd = cmd->cmd_forw;
            }
        }
        mutex_exit(&tlun->lun_mutex);

        mutex_enter(&ttgt->tgt_mutex);
        /* restart the LUN walk when any command was aborted */
        restart == 1 ? (nlun = ttgt->tgt_lun) : (nlun = tlun->lun_next);
        mutex_exit(&ttgt->tgt_mutex);

        mutex_enter(&pptr->port_mutex);
        /* stop if the target generation changed meanwhile */
        if (tgt_cnt != ttgt->tgt_change_cnt) {
            mutex_exit(&pptr->port_mutex);
            return;
        } else {
            mutex_exit(&pptr->port_mutex);
        }
    }
}
/*
 * Remove pptr from the global port list.  When the last port goes
 * away, release the LUN blacklist as well.
 * Returns pptr when it was found and unlinked, NULL otherwise.
 */
struct fcp_port *
fcp_soft_state_unlink(struct fcp_port *pptr)
{
    struct fcp_port *cur;
    struct fcp_port *prev = NULL;

    mutex_enter(&fcp_global_mutex);

    cur = fcp_port_head;
    while (cur != NULL && cur != pptr) {
        prev = cur;
        cur = cur->port_next;
    }

    if (cur != NULL) {
        if (prev == NULL) {
            fcp_port_head = cur->port_next;
        } else {
            prev->port_next = cur->port_next;
        }
    }

    /* last port gone: the blacklist is no longer needed */
    if (fcp_port_head == NULL) {
        fcp_cleanup_blacklist(&fcp_lun_blacklist);
    }

    mutex_exit(&fcp_global_mutex);
    return (cur);
}
/*
 * Find the LUN with number "lun" behind the target whose port WWN is
 * "wwn".  Only the first target in the hash chain with a matching WWN
 * is examined.  Called with pptr->port_mutex held.
 * Returns the fcp_lun, or NULL when no such target/LUN exists.
 */
static struct fcp_lun *
fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
{
    int hash;
    struct fcp_tgt *ptgt;
    struct fcp_lun *plun;
    struct fcp_lun *found;

    ASSERT(mutex_owned(&pptr->port_mutex));

    hash = FCP_HASH(wwn);
    for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
        ptgt = ptgt->tgt_next) {
        if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
            sizeof (ptgt->tgt_port_wwn)) != 0) {
            continue;
        }

        /* matching target: scan its LUN list under tgt_mutex */
        found = NULL;
        mutex_enter(&ptgt->tgt_mutex);
        for (plun = ptgt->tgt_lun; plun != NULL;
            plun = plun->lun_next) {
            if (plun->lun_num == lun) {
                found = plun;
                break;
            }
        }
        mutex_exit(&ptgt->tgt_mutex);
        return (found);
    }

    return (NULL);
}
/*
 * Fill in the FCP command and fc_packet_t fields of "cmd" prior to
 * (re)transport: data direction and DMA cookies, task queueing
 * attribute, LUN entity address, initial SCSI pkt status, completion
 * routine and frame header addressing.
 */
static void
fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
    struct fcp_lun *plun)
{
    fc_packet_t *fpkt = cmd->cmd_fp_pkt;
    struct fcp_tgt *ptgt = plun->lun_tgt;
    struct fcp_cmd *fcmd = &cmd->cmd_fcp_cmd;

    ASSERT(cmd->cmd_pkt->pkt_comp ||
        (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));

    if (cmd->cmd_pkt->pkt_numcookies) {
        /* data transfer: direction comes from the DMA flags */
        if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
            fcmd->fcp_cntl.cntl_read_data = 1;
            fcmd->fcp_cntl.cntl_write_data = 0;
            fpkt->pkt_tran_type = FC_PKT_FCP_READ;
        } else {
            fcmd->fcp_cntl.cntl_read_data = 0;
            fcmd->fcp_cntl.cntl_write_data = 1;
            fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
        }
        fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;
        fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
        ASSERT(fpkt->pkt_data_cookie_cnt <=
            pptr->port_data_dma_attr.dma_attr_sgllen);
        cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;
        fpkt->pkt_datalen = cmd->cmd_dmacount;
        fcmd->fcp_data_len = cmd->cmd_dmacount;
    } else {
        /* no data phase */
        fcmd->fcp_cntl.cntl_read_data = 0;
        fcmd->fcp_cntl.cntl_write_data = 0;
        fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
        fpkt->pkt_datalen = 0;
        fcmd->fcp_data_len = 0;
    }

    /* map SCSI tag flags onto the FCP task attribute */
    if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
        fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
    } else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
        fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
    } else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
        fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
    } else {
        fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
    }

    fcmd->fcp_ent_addr = plun->lun_addr;

    /* copy the FCP_CMND out to DVMA space when the FCA uses it */
    if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
        FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
            fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
    } else {
        ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
    }

    /* reset SCSI packet status for this (re)submission */
    cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
    cmd->cmd_pkt->pkt_state = 0;
    cmd->cmd_pkt->pkt_statistics = 0;
    cmd->cmd_pkt->pkt_resid = 0;

    cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;

    if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
        /* polled mode: no completion callback */
        fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
        fpkt->pkt_comp = NULL;
    } else {
        fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
        if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
            fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
        }
        fpkt->pkt_comp = fcp_cmd_callback;
    }

    mutex_enter(&pptr->port_mutex);
    if (pptr->port_state & FCP_STATE_SUSPENDED) {
        fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
    }
    mutex_exit(&pptr->port_mutex);

    fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
    fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
#ifndef __lock_lint
    fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
#endif
}
/*
 * Deliver the SCSA completion callback for a finished or failed
 * command; scsi_hba_pkt_comp() handles the pkt_comp details.
 */
static void
fcp_post_callback(struct fcp_pkt *cmd)
{
    scsi_hba_pkt_comp(cmd->cmd_pkt);
}
/*
 * Issue a command through the transport in polled (no-interrupt) mode
 * and translate the transport status into a SCSA TRAN_* code.
 */
static int
fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
    int xrval;
    int ret;

#ifdef DEBUG
    mutex_enter(&pptr->port_pkt_mutex);
    pptr->port_npkts++;
    mutex_exit(&pptr->port_pkt_mutex);
#endif

    /* use the caller's timeout when set, else the poll default */
    if (cmd->cmd_fp_pkt->pkt_timeout != 0) {
        cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
    } else {
        cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
    }

    ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);

    cmd->cmd_state = FCP_PKT_ISSUED;
    xrval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);

#ifdef DEBUG
    mutex_enter(&pptr->port_pkt_mutex);
    pptr->port_npkts--;
    mutex_exit(&pptr->port_pkt_mutex);
#endif

    cmd->cmd_state = FCP_PKT_IDLE;

    if (xrval == FC_SUCCESS) {
        if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
            fcp_complete_pkt(cmd->cmd_fp_pkt);
            ret = TRAN_ACCEPT;
        } else {
            ret = TRAN_FATAL_ERROR;
        }
    } else if (xrval == FC_TRAN_BUSY) {
        ret = TRAN_BUSY;
        cmd->cmd_pkt->pkt_resid = 0;
    } else if (xrval == FC_BADPACKET) {
        ret = TRAN_BADPKT;
    } else {
        ret = TRAN_FATAL_ERROR;
    }

    return (ret);
}
/*
 * Map a devinfo node to its fcp_port soft state; the soft state is
 * indexed by devinfo instance number.
 */
static struct fcp_port *
fcp_dip2port(dev_info_t *dip)
{
    return (ddi_get_soft_state(fcp_softstate, ddi_get_instance(dip)));
}
/*
 * Reverse lookup: walk every target hash chain and every LUN to find
 * the fcp_lun whose lun_cip equals "cip".  Called with
 * pptr->port_mutex held.  Returns the LUN, or NULL when none matches.
 */
struct fcp_lun *
fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
{
    int hash;
    struct fcp_tgt *ptgt;
    struct fcp_lun *plun;

    ASSERT(mutex_owned(&pptr->port_mutex));

    for (hash = 0; hash < FCP_NUM_HASH; hash++) {
        ptgt = pptr->port_tgt_hash_table[hash];
        for (; ptgt != NULL; ptgt = ptgt->tgt_next) {
            mutex_enter(&ptgt->tgt_mutex);
            plun = ptgt->tgt_lun;
            while (plun != NULL) {
                int match;

                /* compare lun_cip under the lun mutex */
                mutex_enter(&plun->lun_mutex);
                match = (plun->lun_cip == cip);
                mutex_exit(&plun->lun_mutex);

                if (match) {
                    mutex_exit(&ptgt->tgt_mutex);
                    return (plun);
                }
                plun = plun->lun_next;
            }
            mutex_exit(&ptgt->tgt_mutex);
        }
    }

    return (NULL);
}
/*
 * Dispatch a hotplug request to the hotplug taskq and block until the
 * task signals completion, then tear down the element and return the
 * task's result (NDI_FAILURE when the dispatch itself failed).
 */
static int
fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
    child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
{
    struct fcp_hp_elem *elem;
    int result;

    mutex_enter(&plun->lun_tgt->tgt_mutex);
    elem = fcp_pass_to_hp(pptr, plun, cip, what, link_cnt, tgt_cnt,
        flags, 1);
    mutex_exit(&plun->lun_tgt->tgt_mutex);

    if (elem == NULL) {
        fcp_log(CE_CONT, pptr->port_dip,
            "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
            what, plun->lun_tgt->tgt_d_id, plun->lun_num);
        return (NDI_FAILURE);
    }

    /* wait for fcp_hp_task to clear elem->wait and signal us */
    mutex_enter(&elem->mutex);
    while (elem->wait) {
        cv_wait(&elem->cv, &elem->mutex);
    }
    result = elem->result;
    mutex_exit(&elem->mutex);

    mutex_destroy(&elem->mutex);
    cv_destroy(&elem->cv);
    kmem_free(elem, sizeof (struct fcp_hp_elem));

    return (result);
}
/*
 * Allocate and fill a hotplug element for the given LUN/child and
 * dispatch it to the parent devinfo's taskq for fcp_hp_task().
 *
 * Called with plun->lun_tgt->tgt_mutex held.  Returns the element (to
 * be waited on and freed by the caller when "wait" is set, otherwise
 * freed by the task), or NULL on allocation/dispatch failure.
 *
 * Fix: on taskq_dispatch() failure the element's mutex and condvar
 * were freed without mutex_destroy()/cv_destroy() (required before
 * releasing the memory of initialized synchronization objects), and a
 * plain 0 was returned for a pointer; both are corrected.
 */
static struct fcp_hp_elem *
fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
    child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
{
    struct fcp_hp_elem *elem;
    dev_info_t *pdip;

    ASSERT(pptr != NULL);
    ASSERT(plun != NULL);
    ASSERT(plun->lun_tgt != NULL);
    ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));

    /* KM_NOSLEEP: may run in a context that must not block */
    if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
        == NULL) {
        fcp_log(CE_WARN, NULL,
            "!can't allocate memory for hotplug element");
        return (NULL);
    }

    /* record what the hotplug task has to do and for whom */
    elem->port = pptr;
    elem->lun = plun;
    elem->cip = cip;
    elem->old_lun_mpxio = plun->lun_mpxio;
    elem->what = what;
    elem->flags = flags;
    elem->link_cnt = link_cnt;
    elem->tgt_cnt = tgt_cnt;
    elem->wait = wait;
    mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
    cv_init(&elem->cv, NULL, CV_DRIVER, NULL);

    pdip = pptr->port_dip;

    mutex_enter(&plun->lun_mutex);
    if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
        /* track outstanding online/offline events on the LUN */
        plun->lun_event_count++;
        elem->event_cnt = plun->lun_event_count;
    }
    mutex_exit(&plun->lun_mutex);

    if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
        (void *)elem, KM_NOSLEEP) == TASKQID_INVALID) {
        /* dispatch failed: undo the event count and the element */
        mutex_enter(&plun->lun_mutex);
        if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
            plun->lun_event_count--;
        }
        mutex_exit(&plun->lun_mutex);
        mutex_destroy(&elem->mutex);
        cv_destroy(&elem->cv);
        kmem_free(elem, sizeof (*elem));
        return (NULL);
    }

    return (elem);
}
/*
 * Retry transport of a command that was sitting on the port's wait
 * queue.  If the LUN is usable and the port is not onlining, bind the
 * remote-port handle if needed, re-prepare the packet and hand it to
 * the transport.  On transport failure -- or while the LUN is busy or
 * offline -- the command goes back onto the wait queue.
 */
static void
fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
    int rval;
    struct scsi_address *ap;
    struct fcp_lun *plun;
    struct fcp_tgt *ptgt;
    fc_packet_t *fpkt;

    ap = &cmd->cmd_pkt->pkt_address;
    plun = ADDR2LUN(ap);
    ptgt = plun->lun_tgt;

    ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);

    cmd->cmd_state = FCP_PKT_IDLE;

    mutex_enter(&pptr->port_mutex);
    mutex_enter(&ptgt->tgt_mutex);
    if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
        (!(pptr->port_state & FCP_STATE_ONLINING))) {
        fc_ulp_rscn_info_t *rscnp;

        cmd->cmd_state = FCP_PKT_ISSUED;

        /* bind and hold the remote port device if not done yet */
        fpkt = cmd->cmd_fp_pkt;
        if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
            fpkt->pkt_pd = ptgt->tgt_pd_handle;
            fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
        }
        mutex_exit(&ptgt->tgt_mutex);
        mutex_exit(&pptr->port_mutex);

        ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);
        fcp_prepare_pkt(pptr, cmd, plun);

        rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
            pkt_ulp_rscn_infop;

        /* fresh deadline for the retried command (0 = no timeout) */
        cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
            fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;

        /* refresh the RSCN count seen by the transport */
        if (rscnp != NULL) {
            rscnp->ulp_rscn_count =
                fc_ulp_get_rscn_count(pptr->
                port_fp_handle);
        }

        rval = fcp_transport(pptr->port_fp_handle,
            cmd->cmd_fp_pkt, 0);
        if (rval == FC_SUCCESS) {
            return;
        }
        /* NOTE(review): FCP_PKT_ISSUED cleared as a bit; confirm */
        cmd->cmd_state &= ~FCP_PKT_ISSUED;
    } else {
        mutex_exit(&ptgt->tgt_mutex);
        mutex_exit(&pptr->port_mutex);
    }

    /* could not (re)issue: back onto the wait queue */
    fcp_queue_pkt(pptr, cmd);
}
/*
 * Complete a queued command as failed: take it out of the queue state,
 * stamp the SCSI packet with the given reason/statistics and deliver
 * the completion callback.
 */
static void
fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
{
    struct scsi_pkt *pkt = cmd->cmd_pkt;

    ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);

    cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
    cmd->cmd_state = FCP_PKT_IDLE;

    pkt->pkt_reason = reason;
    pkt->pkt_state = 0;
    pkt->pkt_statistics = statistics;

    fcp_post_callback(cmd);
}
/*
 * Append a command to the port's wait queue for a later retry by the
 * watchdog.  The command's transport timeout budget is charged for the
 * queue delay; when too little remains, the deadline is set so the
 * watchdog fails the command on its next pass.
 */
static void
fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
    ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == 0);

    mutex_enter(&pptr->port_pkt_mutex);
    cmd->cmd_flags |= CFLAG_IN_QUEUE;
    ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
    cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;

    if (cmd->cmd_pkt->pkt_time != 0) {
        if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
            cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
        } else {
            /* budget exhausted: fail on the next watchdog pass */
            cmd->cmd_timeout = fcp_watchdog_time;
            cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
        }
    }

    cmd->cmd_next = NULL;
    if (pptr->port_pkt_head == NULL) {
        ASSERT(pptr->port_pkt_tail == NULL);
        pptr->port_pkt_head = cmd;
        pptr->port_pkt_tail = cmd;
    } else {
        ASSERT(pptr->port_pkt_tail != NULL);
        pptr->port_pkt_tail->cmd_next = cmd;
        pptr->port_pkt_tail = cmd;
    }
    mutex_exit(&pptr->port_pkt_mutex);
}
/*
 * For each port map entry in dev_list that corresponds to a known
 * target, bump the target's generation count, record the state-change
 * cause and apply the given state.  Called with pptr->port_mutex held.
 */
static void
fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
    uint32_t count, uint32_t state, int cause)
{
    fc_portmap_t *entry;
    struct fcp_tgt *ptgt;

    ASSERT(MUTEX_HELD(&pptr->port_mutex));

    /* walk the map entries from last to first */
    for (; count > 0; count--) {
        entry = &dev_list[count - 1];
        ptgt = fcp_lookup_target(pptr,
            (uchar_t *)&(entry->map_pwwn));
        if (ptgt == NULL) {
            continue;
        }

        mutex_enter(&ptgt->tgt_mutex);
        ptgt->tgt_trace = 0;
        ptgt->tgt_change_cnt++;
        ptgt->tgt_statec_cause = cause;
        ptgt->tgt_tmp_cnt = 1;
        fcp_update_tgt_state(ptgt, FCP_SET, state);
        mutex_exit(&ptgt->tgt_mutex);
    }
}
/*
 * Locked wrapper around fcp_call_finish_init_held(): takes the port
 * mutex, delegates, and returns the delegate's result.
 */
static int
fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int lcount, int tcount, int cause)
{
    int ret;

    mutex_enter(&pptr->port_mutex);
    ret = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
    mutex_exit(&pptr->port_mutex);

    return (ret);
}
/*
 * Bookkeeping run as each outstanding discovery step for a target
 * completes.  Decrements the target and port "temporary" counters and,
 * when the relevant generation counts still match, finishes target
 * and/or port initialization.
 *
 * Called with pptr->port_mutex held.  Returns FCP_NO_CHANGE, or
 * FCP_DEV_CHANGE when the target generation moved underneath us.
 */
static int
fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int lcount, int tcount, int cause)
{
    int finish_init = 0;
    int finish_tgt = 0;
    int do_finish_init = 0;
    int rval = FCP_NO_CHANGE;

    /* only link-driven causes may finish port initialization */
    if (cause == FCP_CAUSE_LINK_CHANGE ||
        cause == FCP_CAUSE_LINK_DOWN) {
        do_finish_init = 1;
    }

    if (ptgt != NULL) {
        FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
            FCP_BUF_LEVEL_2, 0,
            "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
            " cause = %d, d_id = 0x%x, tgt_done = %d",
            pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
            pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
            ptgt->tgt_d_id, ptgt->tgt_done);

        mutex_enter(&ptgt->tgt_mutex);
        if (tcount && (ptgt->tgt_change_cnt != tcount)) {
            /* the target was rediscovered since this started */
            rval = FCP_DEV_CHANGE;
            if (do_finish_init && ptgt->tgt_done == 0) {
                ptgt->tgt_done++;
                finish_init = 1;
            }
        } else {
            /* last outstanding step for this target? */
            if (--ptgt->tgt_tmp_cnt <= 0) {
                ptgt->tgt_tmp_cnt = 0;
                finish_tgt = 1;
                if (do_finish_init) {
                    finish_init = 1;
                }
            }
        }
        mutex_exit(&ptgt->tgt_mutex);
    } else {
        FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
            FCP_BUF_LEVEL_2, 0,
            "Call Finish Init for NO target");
        if (do_finish_init) {
            finish_init = 1;
        }
    }

    if (finish_tgt) {
        ASSERT(ptgt != NULL);
        mutex_enter(&ptgt->tgt_mutex);
#ifdef DEBUG
        /* record the stack that zeroed tgt_tmp_cnt */
        bzero(ptgt->tgt_tmp_cnt_stack,
            sizeof (ptgt->tgt_tmp_cnt_stack));
        ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
            FCP_STACK_DEPTH);
#endif
        mutex_exit(&ptgt->tgt_mutex);
        (void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
    }

    if (finish_init && lcount == pptr->port_link_cnt) {
        /* last outstanding step for the whole port? */
        ASSERT(pptr->port_tmp_cnt > 0);
        if (--pptr->port_tmp_cnt == 0) {
            fcp_finish_init(pptr);
        }
    } else if (lcount != pptr->port_link_cnt) {
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_2, 0,
            "fcp_call_finish_init_held,1: state change occured"
            " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
    }

    return (rval);
}
/*
 * fcp_reconfigure_luns
 *
 * timeout(9F) callback used to rediscover a target's LUN inventory.
 * Fabricates a single-entry port map for the target and replays it
 * through fcp_statec_callback() as a PORT_DEVICE_REPORTLUN_CHANGED
 * device-change event.
 *
 * tgt_handle: the struct fcp_tgt * the timeout was scheduled for.
 */
static void
fcp_reconfigure_luns(void * tgt_handle)
{
	uint32_t dev_cnt;
	fc_portmap_t *devlist;
	struct fcp_tgt *ptgt = (struct fcp_tgt *)tgt_handle;
	struct fcp_port *pptr = ptgt->tgt_port;

	/*
	 * tgt_tid is cleared when the timeout is cancelled; bail out
	 * if this callback lost that race.  NOTE(review): the field is
	 * read here without tgt_mutex — presumably safe because
	 * fcp_free_target() untimeout()s before teardown; confirm.
	 */
	if (ptgt->tgt_tid == NULL) {
		return;
	}

	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
	if (devlist == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap",
		    pptr->port_instance);
		return;
	}

	/* Build a one-entry map describing the (logged-in) target. */
	dev_cnt = 1;
	devlist->map_pd = ptgt->tgt_pd_handle;
	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
	devlist->map_did.port_id = ptgt->tgt_d_id;
	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);
	devlist->map_state = PORT_DEVICE_LOGGED_IN;
	devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED;
	devlist->map_flags = 0;

	/* Feed the synthetic device-change event into the state machine. */
	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);

	/* The timeout has fired; forget its id. */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_tid = NULL;
	mutex_exit(&ptgt->tgt_mutex);

	kmem_free(devlist, sizeof (*devlist));
}
/*
 * fcp_free_targets
 *
 * Frees every target in the port's hash table.  The next pointer of
 * each target is captured before the target is freed so the chain can
 * be walked safely.
 */
static void
fcp_free_targets(struct fcp_port *pptr)
{
	int		hash;
	struct fcp_tgt	*tgt;
	struct fcp_tgt	*nxt;

	mutex_enter(&pptr->port_mutex);
	for (hash = 0; hash < FCP_NUM_HASH; hash++) {
		for (tgt = pptr->port_tgt_hash_table[hash]; tgt != NULL;
		    tgt = nxt) {
			nxt = tgt->tgt_next;
			fcp_free_target(tgt);
		}
	}
	mutex_exit(&pptr->port_mutex);
}
/*
 * fcp_free_target
 *
 * Tears down one target: cancels any pending reconfiguration timeout,
 * deallocates every LUN on the target's list, then the target itself.
 */
static void
fcp_free_target(struct fcp_tgt *ptgt)
{
	struct fcp_lun *plun;
	timeout_id_t tid;

	mutex_enter(&ptgt->tgt_mutex);
	tid = ptgt->tgt_tid;

	/*
	 * Cancel any outstanding reconfiguration timeout.  tgt_mutex
	 * is dropped across untimeout(9F) because the callback
	 * (fcp_reconfigure_luns) takes the same mutex; holding it here
	 * could deadlock against a concurrently firing timeout.
	 */
	if (tid != NULL) {
		ptgt->tgt_tid = NULL;
		mutex_exit(&ptgt->tgt_mutex);
		(void) untimeout(tid);
		mutex_enter(&ptgt->tgt_mutex);
	}

	/* Release every LUN hanging off this target. */
	plun = ptgt->tgt_lun;
	while (plun != NULL) {
		struct fcp_lun *next_lun = plun->lun_next;
		fcp_dealloc_lun(plun);
		plun = next_lun;
	}

	mutex_exit(&ptgt->tgt_mutex);
	fcp_dealloc_tgt(ptgt);
}
static int
fcp_is_retryable(struct fcp_ipkt *icmd)
{
if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
return (0);
}
return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
icmd->ipkt_port->port_deadline) ? 1 : 0);
}
/*
 * fcp_create_on_demand
 *
 * Creates (configures) the target with the given remote port WWN at
 * user request (e.g. cfgadm configure), drives discovery for just that
 * device and waits for its LUNs to come online.
 *
 * pptr: port the device hangs off.
 * pwwn: raw 8-byte remote port WWN.
 *
 * Returns 0 on success; EIO, EBUSY, ENOMEM or EINVAL on failure.
 *
 * FIX: devlist, allocated at entry, was leaked on two early-return
 * paths (the fcp_alloc_tgt() ENOMEM path and the
 * fcp_handle_mapflags() == TRUE path) while every other exit freed
 * it; both paths now free it as well.
 */
static int
fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
{
	int wait_ms;
	int tcount;
	int lcount;
	int ret;
	int error;
	int rval = EIO;
	int ntries;
	fc_portmap_t *devlist;
	opaque_t pd;
	struct fcp_lun *plun;
	struct fcp_tgt *ptgt;
	int old_manual = 0;

	/* Map entry used to feed the discovery code; freed on all exits. */
	devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);

	/* Arm the deadline that fcp_is_retryable() honors. */
	mutex_enter(&pptr->port_mutex);
	pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
	mutex_exit(&pptr->port_mutex);

	/* Ask the transport to (re)establish login with the remote port. */
	pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
	    (la_wwn_t *)pwwn, &error, 1);
	if (pd == NULL) {
		kmem_free(devlist, sizeof (*devlist));
		return (rval);
	}

	/* Have the transport fill in the map entry for this WWN. */
	ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
	    (la_wwn_t *)pwwn, devlist);
	if (ret != FC_SUCCESS) {
		kmem_free(devlist, sizeof (*devlist));
		return (rval);
	}
	devlist->map_type = PORT_DEVICE_USER_CREATE;

	mutex_enter(&pptr->port_mutex);
	ptgt = fcp_lookup_target(pptr, pwwn);
	if (ptgt == NULL) {
		lcount = pptr->port_link_cnt;
		mutex_exit(&pptr->port_mutex);
		ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			/* FIX: devlist was leaked on this path. */
			kmem_free(devlist, sizeof (*devlist));
			return (ENOMEM);
		}
		mutex_enter(&pptr->port_mutex);
	}

	/* Mark the target as a user-driven creation in progress. */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
	ptgt->tgt_tmp_cnt = 1;
	ptgt->tgt_device_created = 0;
	/*
	 * Temporarily lift manual-config-only so auto configuration
	 * can run; restored below depending on the outcome.
	 */
	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
	    fcp_enable_auto_configuration &&
	    ptgt->tgt_manual_config_only == 1) {
		old_manual = 1;
		ptgt->tgt_manual_config_only = 0;
	}
	mutex_exit(&ptgt->tgt_mutex);

	fcp_update_targets(pptr, devlist, 1,
	    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);

	lcount = pptr->port_link_cnt;
	tcount = ptgt->tgt_change_cnt;

	if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
	    tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    fcp_enable_auto_configuration && old_manual) {
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_manual_config_only = 1;
			mutex_exit(&ptgt->tgt_mutex);
		}

		if (pptr->port_link_cnt != lcount ||
		    ptgt->tgt_change_cnt != tcount) {
			rval = EBUSY;
		}
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_create_on_demand: mapflags ptgt=%x, "
		    "lcount=%x::port_link_cnt=%x, "
		    "tcount=%x: tgt_change_cnt=%x, rval=%x",
		    ptgt, lcount, pptr->port_link_cnt,
		    tcount, ptgt->tgt_change_cnt, rval);
		/*
		 * FIX: devlist was leaked on this path too;
		 * fcp_handle_mapflags() does not retain the map entry.
		 */
		kmem_free(devlist, sizeof (*devlist));
		return (rval);
	}

	/*
	 * Poll for the device to be created by the discovery work
	 * kicked off above, up to fcp_max_target_retries times with
	 * wait_ms between attempts.
	 */
	wait_ms = 500;
	ntries = fcp_max_target_retries;

	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
	    FCP_BUF_LEVEL_3, 0,
	    "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
	    "lcount=%x::port_link_cnt=%x, "
	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
	    "tgt_tmp_cnt =%x",
	    ntries, ptgt, lcount, pptr->port_link_cnt,
	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
	    ptgt->tgt_tmp_cnt);

	mutex_enter(&ptgt->tgt_mutex);
	while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
	    ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
		/* Drop both locks while sleeping. */
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		delay(drv_usectohz(wait_ms * 1000));

		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
	}

	if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
		/* The link or target changed while we were waiting. */
		rval = EBUSY;
	} else {
		if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
		    FCP_TGT_NODE_PRESENT) {
			rval = 0;
		}
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
	    FCP_BUF_LEVEL_3, 0,
	    "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
	    "lcount=%x::port_link_cnt=%x, "
	    "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
	    "tgt_tmp_cnt =%x",
	    ntries, ptgt, lcount, pptr->port_link_cnt,
	    tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
	    ptgt->tgt_tmp_cnt);

	if (rval) {
		/* Failure: restore manual-config-only if we lifted it. */
		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    fcp_enable_auto_configuration && old_manual) {
			ptgt->tgt_manual_config_only = 1;
		}
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
		kmem_free(devlist, sizeof (*devlist));

		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
		    "lcount=%x::port_link_cnt=%x, "
		    "tcount=%x::tgt_change_cnt=%x, rval=%x, "
		    "tgt_device_created=%x, tgt D_ID=%x",
		    ntries, ptgt, lcount, pptr->port_link_cnt,
		    tcount, ptgt->tgt_change_cnt, rval,
		    ptgt->tgt_device_created, ptgt->tgt_d_id);
		return (rval);
	}

	/* Device created: bring every non-offline LUN online. */
	if ((plun = ptgt->tgt_lun) != NULL) {
		tcount = plun->lun_tgt->tgt_change_cnt;
	} else {
		rval = EINVAL;
	}
	lcount = pptr->port_link_cnt;

	/*
	 * A target with no LUNs must not stay auto-configurable; reset
	 * the node state so it is reconsidered when LUNs show up.
	 */
	if (ptgt->tgt_lun_cnt == 0) {
		ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
	}
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	while (plun) {
		child_info_t *cip;

		mutex_enter(&plun->lun_mutex);
		cip = plun->lun_cip;
		mutex_exit(&plun->lun_mutex);

		mutex_enter(&ptgt->tgt_mutex);
		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
			mutex_exit(&ptgt->tgt_mutex);

			/* Hand the LUN to the hotplug thread and wait. */
			rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
			    FCP_ONLINE, lcount, tcount,
			    NDI_ONLINE_ATTACH);
			if (rval != NDI_SUCCESS) {
				FCP_TRACE(fcp_logq,
				    pptr->port_instbuf, fcp_trace,
				    FCP_BUF_LEVEL_3, 0,
				    "fcp_create_on_demand: "
				    "pass_to_hp_and_wait failed "
				    "rval=%x", rval);
				rval = EIO;
			} else {
				mutex_enter(&LUN_TGT->tgt_mutex);
				plun->lun_state &= ~(FCP_LUN_OFFLINE |
				    FCP_LUN_BUSY);
				mutex_exit(&LUN_TGT->tgt_mutex);
			}
			mutex_enter(&ptgt->tgt_mutex);
		}

		plun = plun->lun_next;
		mutex_exit(&ptgt->tgt_mutex);
	}

	kmem_free(devlist, sizeof (*devlist));

	/* Settle the final manual-config-only state per the outcome. */
	if (FC_TOP_EXTERNAL(pptr->port_topology) &&
	    fcp_enable_auto_configuration && old_manual) {
		mutex_enter(&ptgt->tgt_mutex);
		if (rval == 0) {
			ptgt->tgt_manual_config_only = 0;
		} else {
			ptgt->tgt_manual_config_only = 1;
		}
		mutex_exit(&ptgt->tgt_mutex);
	}

	return (rval);
}
/*
 * fcp_ascii_to_wwn
 *
 * Converts an ASCII hex string into binary WWN bytes: two hex digits
 * are consumed per output byte.  Conversion stops after byte_len
 * bytes or at the end of the string, whichever comes first.
 *
 * string:   NUL-terminated string of hex digits (expected even length).
 * bytes:    output buffer of at least byte_len bytes.
 * byte_len: capacity of the output buffer.
 *
 * FIX: for an odd-length string the original code converted the
 * terminating NUL as a hex digit and then advanced past it, so the
 * loop condition read beyond the end of the string.  An odd trailing
 * digit is now discarded instead.
 */
static void
fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
{
	int count;
	uchar_t byte;

	count = 0;
	while (*string) {
		byte = FCP_ATOB(*string); string++;
		if (*string == '\0') {
			/* Odd-length input; drop the dangling nibble. */
			break;
		}
		byte = byte << 4 | FCP_ATOB(*string); string++;
		bytes[count++] = byte;
		if (count >= byte_len) {
			break;
		}
	}
}
/*
 * fcp_wwn_to_ascii
 *
 * Formats the FC_WWN_SIZE-byte WWN as lowercase hex into string,
 * two characters per byte.  The caller supplies a buffer of at least
 * FC_WWN_SIZE * 2 + 1 characters.
 */
static void
fcp_wwn_to_ascii(uchar_t wwn[], char *string)
{
	int idx;

	for (idx = 0; idx < FC_WWN_SIZE; idx++) {
		(void) sprintf(&string[idx * 2], "%02x", wwn[idx]);
	}
}
/*
 * fcp_print_error
 *
 * Logs a human-readable diagnosis of a failed internal command
 * (REPORT LUN / INQUIRY / INQUIRY-83 / PLOGI / PRLI).  The message is
 * assembled in a scratch buffer that is itself used as the format
 * string of the final fcp_log() call, so conversion specifications
 * destined for fcp_log() are escaped as "%%" while building with
 * sprintf().
 */
static void
fcp_print_error(fc_packet_t *fpkt)
{
	struct fcp_ipkt *icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_port *pptr;
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;
	caddr_t buf;
	int scsi_cmd = 0;

	ptgt = icmd->ipkt_tgt;
	plun = icmd->ipkt_lun;
	pptr = ptgt->tgt_port;

	/* Scratch format buffer; logging is best effort only. */
	buf = kmem_zalloc(256, KM_NOSLEEP);
	if (buf == NULL) {
		return;
	}

	/* Start the message with the operation that failed. */
	switch (icmd->ipkt_opcode) {
	case SCMD_REPORT_LUN:
		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY_PAGE83:
		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY:
		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case LA_ELS_PLOGI:
		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
		break;

	case LA_ELS_PRLI:
		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
		break;
	}

	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
		/*
		 * The transport delivered the command, so the failure
		 * is described by the FCP response payload: decode it.
		 */
		struct fcp_rsp response, *rsp;
		uchar_t asc, ascq;
		caddr_t sense_key = NULL;
		struct fcp_rsp_info fcp_rsp_err, *bep;

		if (icmd->ipkt_nodma) {
			/* Response memory is directly addressable. */
			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
			    sizeof (struct fcp_rsp));
		} else {
			/* Copy the response out of DMA-handle memory. */
			rsp = &response;
			bep = &fcp_rsp_err;
			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));
			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
			    bep, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp_info));
		}

		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			(void) sprintf(buf + strlen(buf),
			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
			    " senselen=%%x. Giving up");
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);
			kmem_free(buf, 256);
			return;
		}

		/* Append the FCP response code, if one was supplied. */
		if (rsp->fcp_u.fcp_status.rsp_len_set &&
		    bep->rsp_code != FCP_NO_FAILURE) {
			(void) sprintf(buf + strlen(buf),
			    " FCP Response code = 0x%x", bep->rsp_code);
		}

		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
			/* CHECK CONDITION: decode the trailing sense data. */
			struct scsi_extended_sense sense_info, *sense_ptr;

			if (icmd->ipkt_nodma) {
				sense_ptr = (struct scsi_extended_sense *)
				    ((caddr_t)fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len);
			} else {
				sense_ptr = &sense_info;
				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len, &sense_info,
				    fpkt->pkt_resp_acc,
				    sizeof (struct scsi_extended_sense));
			}

			/* Map the sense key to its printable name. */
			if (sense_ptr->es_key < NUM_SENSE_KEYS +
			    NUM_IMPL_SENSE_KEYS) {
				sense_key = sense_keys[sense_ptr->es_key];
			} else {
				sense_key = "Undefined";
			}

			asc = sense_ptr->es_add_code;
			ascq = sense_ptr->es_qual_code;

			(void) sprintf(buf + strlen(buf),
			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
			    " Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, sense_key,
			    asc, ascq);
		} else {
			/* Non-check-condition SCSI failure. */
			(void) sprintf(buf + strlen(buf),
			    " : SCSI status=%%x. Giving up");
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->fcp_u.fcp_status.scsi_status);
		}
	} else {
		/* Transport-level failure: report packet state/reason. */
		caddr_t state, reason, action, expln;

		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
		    &action, &expln);

		(void) sprintf(buf + strlen(buf), ": State:%%s,"
		    " Reason:%%s. Giving up");

		if (scsi_cmd) {
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, state, reason);
		} else {
			/* ELS failures carry no LUN number. */
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, state, reason);
		}
	}

	kmem_free(buf, 256);
}
/*
 * fcp_handle_ipkt_errors
 *
 * Decides what to do about a failed internal packet, based on the
 * transport return code rval.  Busy/offline codes requeue the packet
 * for retry (within budget and deadline); FC_LOGINREQ triggers a
 * fresh PLOGI when the failed command was a PRLI; anything else is
 * logged unless a link/target state change made the command moot.
 *
 * Returns DDI_SUCCESS if the packet was requeued or a corrective ELS
 * was sent, DDI_FAILURE otherwise (caller disposes of the packet).
 */
static int
fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, int rval, caddr_t op)
{
	int ret = DDI_FAILURE;
	char *error;

	switch (rval) {
	case FC_DEVICE_BUSY_NEW_RSCN:
		/*
		 * An RSCN arrived while the command was in flight:
		 * reset the retry budget and the port deadline, then
		 * fall into the generic busy handling below.
		 */
		icmd->ipkt_retries = 0;
		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
		    FCP_ICMD_DEADLINE;

		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		/* FALLTHROUGH */

	case FC_STATEC_BUSY:
	case FC_DEVICE_BUSY:
	case FC_PBUSY:
	case FC_FBUSY:
	case FC_TRAN_BUSY:
	case FC_OFFLINE:
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		/* Requeue for retry if budget and deadline allow. */
		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
		    fcp_is_retryable(icmd)) {
			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
			ret = DDI_SUCCESS;
		}
		break;

	case FC_LOGINREQ:
		/*
		 * The remote port wants a new login.  For a failed
		 * PRLI, send a PLOGI first; other commands are simply
		 * retried.
		 */
		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
			    FCP_BUF_LEVEL_3, 0,
			    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
			    rval, ptgt->tgt_d_id);
			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
			    fcp_is_retryable(icmd)) {
				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
				ret = DDI_SUCCESS;
			}
		}
		break;

	default:
		/*
		 * Unexpected error: warn unless a link or target state
		 * change has already obsoleted this command.
		 */
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			(void) fc_ulp_error(rval, &error);
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to send %s to D_ID=%x error=%s",
			    op, ptgt->tgt_d_id, error);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_handle_ipkt_errors,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
		}
		break;
	}

	return (ret);
}
/*
 * fcp_outstanding_lun_cmds
 *
 * Scans every LUN of the target for a packet still in the
 * FCP_PKT_ISSUED state.
 *
 * Returns FC_SUCCESS as soon as one issued command is found,
 * FC_FAILURE if no LUN has an outstanding command.
 */
static int
fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
{
	struct fcp_lun	*lun;
	struct fcp_pkt	*pkt;

	for (lun = ptgt->tgt_lun; lun != NULL; lun = lun->lun_next) {
		mutex_enter(&lun->lun_mutex);
		pkt = lun->lun_pkt_head;
		while (pkt != NULL) {
			if (pkt->cmd_state == FCP_PKT_ISSUED) {
				mutex_exit(&lun->lun_mutex);
				return (FC_SUCCESS);
			}
			pkt = pkt->cmd_forw;
		}
		mutex_exit(&lun->lun_mutex);
	}

	return (FC_FAILURE);
}
/*
 * fcp_construct_map
 *
 * Builds a port map describing every non-orphaned target known to the
 * port.  Entries the transport can still describe are filled by
 * fc_ulp_pwwn_to_portmap(); for the rest a PORT_DEVICE_INVALID /
 * PORT_DEVICE_OLD entry is synthesized from cached target state.
 *
 * pptr:    port to describe.
 * dev_cnt: output; set to the number of entries in the returned map.
 *
 * Returns a kmem-allocated array of *dev_cnt fc_portmap_t entries
 * (caller frees), or NULL on allocation failure.
 */
static fc_portmap_t *
fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
{
	int i;
	fc_portmap_t *devlist;
	fc_portmap_t *devptr = NULL;
	struct fcp_tgt *ptgt;

	mutex_enter(&pptr->port_mutex);

	/* First pass: count the non-orphan targets. */
	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				++*dev_cnt;
			}
		}
	}

	/*
	 * NOTE(review): when *dev_cnt is 0 this requests a zero-byte
	 * allocation — verify kmem_zalloc(0, KM_NOSLEEP) is acceptable
	 * for the callers involved.
	 */
	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
	    KM_NOSLEEP);
	if (devlist == NULL) {
		mutex_exit(&pptr->port_mutex);
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap for construct map",
		    pptr->port_instance);
		return (devptr);
	}

	/* Second pass: fill one map entry per non-orphan target. */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				int ret;

				ret = fc_ulp_pwwn_to_portmap(
				    pptr->port_fp_handle,
				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
				    devlist);

				if (ret == FC_SUCCESS) {
					devlist++;
					continue;
				}

				/*
				 * The transport no longer knows the
				 * device; mark the entry invalid/old
				 * using the cached target data.
				 */
				devlist->map_pd = NULL;
				devlist->map_did.port_id = ptgt->tgt_d_id;
				devlist->map_hard_addr.hard_addr =
				    ptgt->tgt_hard_addr;

				devlist->map_state = PORT_DEVICE_INVALID;
				devlist->map_type = PORT_DEVICE_OLD;

				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
				    &devlist->map_nwwn, FC_WWN_SIZE);

				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
				    &devlist->map_pwwn, FC_WWN_SIZE);

				devlist++;
			}
		}
	}

	mutex_exit(&pptr->port_mutex);
	return (devptr);
}
/*
 * fcp_update_mpxio_path_verifybusy
 *
 * Walks every LUN on every target of the port and, for MPxIO-managed
 * LUNs currently marked FCP_LUN_BUSY, queues an
 * FCP_MPXIO_PATH_SET_BUSY request to the hotplug thread so the
 * corresponding pathinfo node is disabled.  Failures to queue are
 * only traced.
 */
static void
fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
{
	int i;
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;

	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			mutex_enter(&ptgt->tgt_mutex);
			for (plun = ptgt->tgt_lun; plun != NULL;
			    plun = plun->lun_next) {
				if (plun->lun_mpxio &&
				    plun->lun_state & FCP_LUN_BUSY) {
					if (!fcp_pass_to_hp(pptr, plun,
					    plun->lun_cip,
					    FCP_MPXIO_PATH_SET_BUSY,
					    pptr->port_link_cnt,
					    ptgt->tgt_change_cnt, 0, 0)) {
						FCP_TRACE(fcp_logq,
						    pptr->port_instbuf,
						    fcp_trace,
						    FCP_BUF_LEVEL_2, 0,
						    "path_verifybusy: "
						    "disable lun %p failed!",
						    plun);
					}
				}
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
}
/*
 * fcp_update_mpxio_path
 *
 * Enables (FCP_MPXIO_PATH_CLEAR_BUSY) or disables (any other value of
 * what) the MPxIO pathinfo node backing the LUN, after verifying the
 * child is still present.
 *
 * Returns NDI_SUCCESS, or NDI_FAILURE if the child is gone.
 */
static int
fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
{
	dev_info_t	*client_dip;
	dev_info_t	*phci_dip;
	int		present;

	ASSERT(plun);

	mutex_enter(&plun->lun_mutex);
	present = fcp_is_child_present(plun, cip);
	mutex_exit(&plun->lun_mutex);

	if (present == FC_FAILURE) {
		return (NDI_FAILURE);
	}

	client_dip = mdi_pi_get_client(PIP(cip));
	phci_dip = mdi_pi_get_phci(PIP(cip));
	ASSERT(client_dip != NULL);
	ASSERT(phci_dip != NULL);

	if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
		(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
	} else {
		(void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
	}

	return (NDI_SUCCESS);
}
/*
 * fcp_get_lun_path
 *
 * Returns the /devices path of the device node associated with the
 * LUN in a MAXPATHLEN buffer allocated with KM_SLEEP; the caller must
 * kmem_free(path, MAXPATHLEN) it.  Returns NULL if the LUN has no
 * usable child node.  For MPxIO LUNs the client node's path is
 * returned; the pathinfo node is held across the lookup.
 */
static char *
fcp_get_lun_path(struct fcp_lun *plun)
{
	dev_info_t *dip = NULL;
	char *path = NULL;
	mdi_pathinfo_t *pip = NULL;

	if (plun == NULL) {
		return (NULL);
	}

	mutex_enter(&plun->lun_mutex);
	if (plun->lun_mpxio == 0) {
		/* Plain child devinfo node. */
		dip = DIP(plun->lun_cip);
		mutex_exit(&plun->lun_mutex);
	} else {
		/* MPxIO: hold the path, then resolve its client node. */
		pip = PIP(plun->lun_cip);
		mdi_hold_path(pip);
		mutex_exit(&plun->lun_mutex);
		dip = mdi_pi_get_client(pip);
	}
	if (dip == NULL)
		goto out;
	/* A negative instance means the node never fully attached. */
	if (ddi_get_instance(dip) < 0)
		goto out;

	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (path == NULL)	/* defensive: KM_SLEEP does not fail */
		goto out;
	(void) ddi_pathname(dip, path);
out:
	/* Drop the hold taken on the MPxIO pathinfo node, if any. */
	if (pip != NULL)
		mdi_rele_path(pip);
	return (path);
}
/*
 * fcp_scsi_bus_config
 *
 * SCSA bus_config(9E) entry point.  Holds off enumeration until
 * initial discovery has had up to FCP_INIT_WAIT_TIMEOUT microseconds
 * since port attach to settle, then delegates to the NDI.
 *
 * BUS_CONFIG_ONE: the NDI call is retried, each attempt being given
 * the remaining settle time as its own timeout.  BUS_CONFIG_DRIVER /
 * BUS_CONFIG_ALL: we instead wait on port_config_cv until outstanding
 * discovery (port_tmp_cnt) drains or the settle time expires.
 */
static int
fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int64_t reset_delay;
	int rval, retry = 0;
	struct fcp_port *pptr = fcp_dip2port(parent);

	/* Remaining settle time in ticks, clamped at zero. */
	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
	    (ddi_get_lbolt64() - pptr->port_attach_time);
	if (reset_delay < 0) {
		reset_delay = 0;
	}

	if (fcp_bus_config_debug) {
		flag |= NDI_DEVI_DEBUG;
	}

	switch (op) {
	case BUS_CONFIG_ONE:
		/*
		 * Retry; ndi_busop_bus_config() consumes reset_delay
		 * as its per-attempt timeout.
		 */
		while (retry++ < fcp_max_bus_config_retries) {
			rval = (ndi_busop_bus_config(parent,
			    flag | NDI_MDI_FALLBACK, op,
			    arg, childp, (clock_t)reset_delay));
			if (rval == 0) {
				return (rval);
			}
		}
		/*
		 * Drain the parent's taskq before the final,
		 * non-blocking attempt so queued attach work finishes.
		 */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
		    op, arg, childp, 0));

	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL: {
		/* Wait for in-flight discovery to drain or time out. */
		mutex_enter(&pptr->port_mutex);
		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
			(void) cv_timedwait(&pptr->port_config_cv,
			    &pptr->port_mutex,
			    ddi_get_lbolt() + (clock_t)reset_delay);
			/* Recompute what is left of the settle time. */
			reset_delay =
			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
			    (ddi_get_lbolt64() - pptr->port_attach_time);
		}
		mutex_exit(&pptr->port_mutex);
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag, op,
		    arg, childp, 0));
	}

	default:
		return (NDI_FAILURE);
	}
}
/*
 * fcp_scsi_bus_unconfig
 *
 * SCSA bus_unconfig(9E) entry point: forwards to the NDI, adding
 * NDI_DEVI_DEBUG when bus-config debugging is enabled.
 */
static int
fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg)
{
	uint_t	uflag = flag;

	if (fcp_bus_config_debug) {
		uflag |= NDI_DEVI_DEBUG;
	}

	return (ndi_busop_bus_unconfig(parent, uflag, op, arg));
}
/*
 * fcp_copy_guid_2_lun_block
 *
 * Stores a copy of the GUID string in the LUN structure, reallocating
 * plun->lun_guid if a previously stored GUID has a different length.
 *
 * plun:  LUN to store the GUID in.
 * guidp: NUL-terminated GUID string.
 *
 * Returns 0 on success, 1 on invalid arguments or allocation failure.
 *
 * FIX: the original code evaluated strlen(guidp) before checking
 * guidp for NULL, dereferencing a NULL pointer on bad input; the
 * argument check now comes first.  The allocation-failure log message
 * also had two words fused together and used %d for an unsigned
 * value.
 */
static int
fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
{
	int		retval = 0;
	unsigned int	len;

	/* Validate the arguments before dereferencing guidp. */
	if ((guidp == NULL) || (plun == NULL)) {
		return (1);
	}
	len = strlen(guidp) + 1;

	/* A stored GUID of a different length must be reallocated. */
	if (plun->lun_guid) {
		if (plun->lun_guid_size != len) {
			kmem_free(plun->lun_guid, plun->lun_guid_size);
			plun->lun_guid = NULL;
			plun->lun_guid_size = 0;
		}
	}

	if (plun->lun_guid == NULL) {
		plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
		if (plun->lun_guid == NULL) {
			cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block:"
			    "Unable to allocate "
			    "Memory for GUID!!! size %u", len);
			retval = 1;
		} else {
			plun->lun_guid_size = len;
		}
	}

	/* Copy the GUID (including its NUL) into the stored buffer. */
	if (plun->lun_guid) {
		bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
	}

	return (retval);
}
/*
 * fcp_reconfig_wait
 *
 * Blocks the caller until in-flight device discovery on the port
 * (port_tmp_cnt) drains or FCP_INIT_WAIT_TIMEOUT microseconds have
 * elapsed, whichever comes first.  Returns immediately when nothing
 * is in flight.
 */
static void
fcp_reconfig_wait(struct fcp_port *pptr)
{
	clock_t reconfig_start, wait_timeout;

	/* Fast path: no discovery in progress. */
	mutex_enter(&pptr->port_mutex);
	if (pptr->port_tmp_cnt == 0) {
		mutex_exit(&pptr->port_mutex);
		return;
	}
	mutex_exit(&pptr->port_mutex);

	/*
	 * Establish the absolute deadline.  The mutex was dropped
	 * above, so discovery may already have drained; the loop
	 * below re-checks port_tmp_cnt under the lock.
	 */
	reconfig_start = ddi_get_lbolt();
	wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);

	mutex_enter(&pptr->port_mutex);
	while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
	    pptr->port_tmp_cnt) {
		(void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
		    reconfig_start + wait_timeout);
	}
	mutex_exit(&pptr->port_mutex);
}
/*
 * fcp_read_blacklist
 *
 * Parses the LUN_BLACKLIST_PROP driver property into the LUN
 * blacklist.  Each string in the property array has the form
 * "<16-hex-digit-port-WWN><sep><lun-list>" where <sep> is a blank or
 * a comma; malformed entries are logged and skipped.  Called with
 * fcp_global_mutex held.
 */
static void
fcp_read_blacklist(dev_info_t *dip,
    struct fcp_black_list_entry **pplun_blacklist)
{
	char **prop_array = NULL;
	char *curr_pwwn = NULL;
	char *curr_lun = NULL;
	uint32_t prop_item = 0;
	int idx = 0;
	int len = 0;

	ASSERT(mutex_owned(&fcp_global_mutex));

	/* Absent property means no blacklist; nothing to do. */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
		return;
	}

	for (idx = 0; idx < prop_item; idx++) {
		curr_pwwn = prop_array[idx];

		/* Skip leading blanks before the WWN. */
		while (*curr_pwwn == ' ') {
			curr_pwwn++;
		}

		/*
		 * The entry must be longer than the 16-character WWN
		 * plus one separator.
		 */
		if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
			    ", please check.", curr_pwwn);
			continue;
		}

		/* The WWN must be followed by a blank or a comma. */
		if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
		    (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
			fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
			    ", please check.", curr_pwwn);
			continue;
		}

		/* All 16 WWN characters must be hex digits. */
		for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
			if (isxdigit(curr_pwwn[len]) != TRUE) {
				fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
				    "blacklist, please check.", curr_pwwn);
				break;
			}
		}
		if (len != sizeof (la_wwn_t) * 2) {
			continue;
		}

		/*
		 * Split in place: NUL-terminate the WWN, then hand the
		 * LUN list to the LUN parser.
		 */
		curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
		*(curr_lun - 1) = '\0';
		fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
	}

	ddi_prop_free(prop_array);
}
/*
 * fcp_mask_pwwn_lun
 *
 * Parses a list of decimal LUN numbers separated by ',' or ' ' and
 * adds a blacklist entry for each one under the given port WWN.
 * Tokens with non-decimal characters are logged and skipped; a token
 * longer than the scratch buffer aborts parsing of the remainder.
 */
static void
fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
    struct fcp_black_list_entry **pplun_blacklist)
{
	int idx = 0;
	uint32_t offset = 0;
	unsigned long lun_id = 0;
	char lunid_buf[16];
	char *pend = NULL;
	int illegal_digit = 0;

	while (offset < strlen(curr_lun)) {
		/* Scan one token, counting non-decimal characters. */
		while ((curr_lun[offset + idx] != ',') &&
		    (curr_lun[offset + idx] != '\0') &&
		    (curr_lun[offset + idx] != ' ')) {
			if (isdigit(curr_lun[offset + idx]) == 0) {
				illegal_digit++;
			}
			idx++;
		}

		/* Reject tokens containing non-digits and move on. */
		if (illegal_digit > 0) {
			offset += (idx+1);	/* next token */
			idx = 0;
			illegal_digit = 0;
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check digits.",
			    curr_lun, curr_pwwn);
			continue;
		}

		/* Over-long token: give up on the rest of the list. */
		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check the length of LUN#.",
			    curr_lun, curr_pwwn);
			break;
		}

		/* Empty token (consecutive separators): skip it. */
		if (idx == 0) {
			offset++;
			continue;
		}

		/* Convert the decimal token and record the mask. */
		bcopy(curr_lun + offset, lunid_buf, idx);
		lunid_buf[idx] = '\0';
		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
		} else {
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check %s.",
			    curr_lun, curr_pwwn, lunid_buf);
		}

		offset += (idx+1);	/* next token */
		idx = 0;
	}
}
/*
 * fcp_add_one_mask
 *
 * Adds one (port WWN, LUN) pair to the head of the blacklist, unless
 * an identical entry is already present.
 */
static void
fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
    struct fcp_black_list_entry **pplun_blacklist)
{
	struct fcp_black_list_entry	*entry;
	la_wwn_t			wwn;

	fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));

	/* Duplicate check: the pair may already be listed. */
	for (entry = *pplun_blacklist; entry != NULL; entry = entry->next) {
		if ((bcmp(&entry->wwn, &wwn, sizeof (la_wwn_t)) == 0) &&
		    (entry->lun == lun_id)) {
			return;
		}
	}

	/* Prepend a fresh entry. */
	entry = kmem_zalloc(sizeof (struct fcp_black_list_entry), KM_SLEEP);
	bcopy(&wwn, &entry->wwn, sizeof (la_wwn_t));
	entry->lun = lun_id;
	entry->masked = 0;
	entry->next = *pplun_blacklist;
	*pplun_blacklist = entry;
}
/*
 * fcp_should_mask
 *
 * Checks whether the (port WWN, LUN) pair is on the blacklist.  The
 * first time a given entry matches, a notice is logged; the entry's
 * masked counter tracks how often it has been hit.
 *
 * Returns TRUE if the LUN is blacklisted, FALSE otherwise.
 */
static int
fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id)
{
	struct fcp_black_list_entry	*entry;

	for (entry = fcp_lun_blacklist; entry != NULL; entry = entry->next) {
		if (bcmp(wwn, &entry->wwn, sizeof (la_wwn_t)) != 0) {
			continue;
		}
		if (entry->lun != lun_id) {
			continue;
		}

		entry->masked++;
		if (entry->masked == 1) {
			/* Log only on the first hit of this entry. */
			fcp_log(CE_NOTE, NULL, "LUN %d of port "
			    "%02x%02x%02x%02x%02x%02x%02x%02x "
			    "is masked due to black listing.\n",
			    lun_id, wwn->raw_wwn[0],
			    wwn->raw_wwn[1], wwn->raw_wwn[2],
			    wwn->raw_wwn[3], wwn->raw_wwn[4],
			    wwn->raw_wwn[5], wwn->raw_wwn[6],
			    wwn->raw_wwn[7]);
		}
		return (TRUE);
	}

	return (FALSE);
}
/*
 * fcp_cleanup_blacklist
 *
 * Frees every entry on the blacklist and clears the list head.
 * Called with fcp_global_mutex held.
 */
static void
fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist)
{
	struct fcp_black_list_entry	*entry;
	struct fcp_black_list_entry	*next;

	ASSERT(mutex_owned(&fcp_global_mutex));

	for (entry = *pplun_blacklist; entry != NULL; entry = next) {
		next = entry->next;
		kmem_free(entry, sizeof (struct fcp_black_list_entry));
	}
	*pplun_blacklist = NULL;
}
/*
 * fcp_pseudo_init_pkt
 *
 * tran_init_pkt(9E) entry point.  Allocates (or reuses) a scsi_pkt
 * with an embedded fc_packet, initializes the FC frame header for an
 * FCP command, and maps in the data buffer if one was supplied.
 *
 * Returns the initialized scsi_pkt, or NULL on allocation failure.
 */
static struct scsi_pkt *
fcp_pseudo_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	fcp_port_t *pptr = ADDR2FCP(ap);
	fcp_pkt_t *cmd = NULL;
	fc_frame_hdr_t *hp;

	/* Fresh packet: allocate and wire up the embedded structures. */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(pptr->port_dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (fcp_pkt_t) + pptr->port_priv_pkt_len,
		    callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		/* Cross-link scsi_pkt, fcp_pkt and fc_packet. */
		cmd = PKT2CMD(pkt);
		cmd->cmd_pkt = pkt;
		cmd->cmd_fp_pkt = &cmd->cmd_fc_packet;
		cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
		cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
		    sizeof (struct fcp_pkt));
		cmd->cmd_fp_pkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
		cmd->cmd_fp_pkt->pkt_cmdlen = sizeof (struct fcp_cmd);
		cmd->cmd_fp_pkt->pkt_resp = cmd->cmd_fcp_rsp;
		cmd->cmd_fp_pkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;

		/* Initialize the FC frame header for an FCP command. */
		hp = &cmd->cmd_fp_pkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_COMMAND;
		hp->rsvd = 0;
		hp->type = FC_TYPE_SCSI_FCP;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->seq_id = 0;
		hp->df_ctl = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;
		hp->rx_id = 0xffff;
		hp->ro = 0;
	} else {
		/* Packet reuse: drop any stale remote-port handle. */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_6, 0,
		    "reusing pkt, flags %d", flags);
		cmd = PKT2CMD(pkt);
		if (cmd->cmd_fp_pkt->pkt_pd) {
			cmd->cmd_fp_pkt->pkt_pd = NULL;
		}
	}

	/*
	 * Map the data buffer (if any) into kernel address space and
	 * record the transfer direction on the command.
	 */
	if (bp && bp->b_bcount != 0) {
		if (bp->b_flags & B_READ) {
			cmd->cmd_flags |= CFLAG_IS_READ;
		} else {
			cmd->cmd_flags &= ~CFLAG_IS_READ;
		}
		bp_mapin(bp);
		cmd->cmd_fp_pkt->pkt_data = bp->b_un.b_addr;
		cmd->cmd_fp_pkt->pkt_datalen = bp->b_bcount;
		cmd->cmd_fp_pkt->pkt_data_resid = 0;
	} else {
		cmd->cmd_fp_pkt->pkt_data = NULL;
		cmd->cmd_fp_pkt->pkt_datalen = 0;
	}

	return (pkt);
}
static void
fcp_pseudo_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
fcp_port_t *pptr = ADDR2FCP(ap);
(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
PKT2CMD(pkt)->cmd_fp_pkt);
scsi_hba_pkt_free(ap, pkt);
}
/*
 * fcp_pseudo_start
 *
 * tran_start(9E) entry point: translates the scsi_pkt into an FCP
 * command and hands it to the transport.  FLAG_NOINTR packets are run
 * synchronously via fcp_dopoll().
 *
 * Returns TRAN_ACCEPT, TRAN_BUSY or TRAN_FATAL_ERROR (or the
 * fcp_dopoll() result for polled packets).
 */
static int
fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	fcp_port_t *pptr = ADDR2FCP(ap);
	fcp_lun_t *plun = ADDR2LUN(ap);
	fcp_tgt_t *ptgt = plun->lun_tgt;
	fcp_pkt_t *cmd = PKT2CMD(pkt);
	fcp_cmd_t *fcmd = &cmd->cmd_fcp_cmd;
	fc_packet_t *fpkt = cmd->cmd_fp_pkt;
	int rval;

	/* Bind the packet to the target's remote-port handle. */
	fpkt->pkt_pd = ptgt->tgt_pd_handle;
	(void) fc_ulp_init_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt, 1);

	/* Build the FCP_CMND payload from the scsi_pkt. */
	bcopy(pkt->pkt_cdbp, fcmd->fcp_cdb, pkt->pkt_cdblen);
	fcmd->fcp_data_len = fpkt->pkt_datalen;
	fcmd->fcp_ent_addr = plun->lun_addr;

	/* Task attribute from the scsi_pkt queuing flags. */
	if (pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	/* Data direction bits, from the direction recorded at init. */
	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
	} else {
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 1;
	}

	/* Transport timeout: two seconds of slack over the SCSI timeout. */
	fpkt->pkt_timeout = pkt->pkt_time + 2;

	/* Address the frame: destination target, source port. */
	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;

	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
	} else {
		fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
	}

	/* Completion handling: polled vs. interrupt-driven. */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		fpkt->pkt_comp = NULL;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
	} else {
		fpkt->pkt_comp = fcp_cmd_callback;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
	}

	/* Reset the scsi_pkt completion fields before (re)issue. */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;

	if (pkt->pkt_flags & FLAG_NOINTR) {
		return (fcp_dopoll(pptr, cmd));
	}

	cmd->cmd_state = FCP_PKT_ISSUED;
	rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
	if (rval == FC_SUCCESS) {
		return (TRAN_ACCEPT);
	}

	/* Transport rejected the packet; undo the issued state. */
	cmd->cmd_state = FCP_PKT_IDLE;
	if (rval == FC_TRAN_BUSY) {
		return (TRAN_BUSY);
	} else {
		return (TRAN_FATAL_ERROR);
	}
}
/*
 * fcp_pseudo_sync_pkt
 *
 * tran_sync_pkt(9E) entry point.  No per-packet sync work is
 * performed here; the routine only emits a trace record.
 */
static void
fcp_pseudo_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	FCP_TRACE(fcp_logq, "fcp_pseudo_sync_pkt", fcp_trace,
	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
}
/*
 * fcp_pseudo_dmafree
 *
 * tran_dmafree(9E) entry point.  No per-packet DMA resources are
 * released here; the routine only emits a trace record.
 */
static void
fcp_pseudo_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	FCP_TRACE(fcp_logq, "fcp_pseudo_dmafree", fcp_trace,
	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
}