#include <sys/scsi/scsi.h>
#include <sys/sunddi.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/vtrace.h>
#include <sys/efi_partition.h>
#include <sys/cmlb.h>
#include <sys/cmlb_impl.h>
#if defined(__x86)
#include <sys/fs/dv_node.h>
#endif
#include <sys/ddi_impldefs.h>
/*
 * Describes one minor node to create for a disk instance: the node name,
 * the minor number (ORed with the instance shifted by CMLBUNIT_SHIFT or
 * CMLBUNIT_FORCE_P0_SHIFT when created), and its node type.
 */
struct driver_minor_data {
char *name;	/* minor node name, e.g. "a" or "a,raw" */
minor_t minor;	/* partition/slice minor number */
int type;	/* S_IFBLK (block) or S_IFCHR (raw/character) */
};
/*
 * Minor nodes created for a VTOC-labeled disk: block nodes first, then the
 * matching ",raw" character nodes.  Slices 8-15 exist only on
 * _SUNOS_VTOC_16 platforms; fdisk primary-partition nodes q-u (minors
 * 16-20) only where _FIRMWARE_NEEDS_FDISK.  The list is terminated by a
 * zeroed entry (NULL name).
 */
static struct driver_minor_data dk_minor_data[] = {
{"a", 0, S_IFBLK},
{"b", 1, S_IFBLK},
{"c", 2, S_IFBLK},
{"d", 3, S_IFBLK},
{"e", 4, S_IFBLK},
{"f", 5, S_IFBLK},
{"g", 6, S_IFBLK},
{"h", 7, S_IFBLK},
#if defined(_SUNOS_VTOC_16)
{"i", 8, S_IFBLK},
{"j", 9, S_IFBLK},
{"k", 10, S_IFBLK},
{"l", 11, S_IFBLK},
{"m", 12, S_IFBLK},
{"n", 13, S_IFBLK},
{"o", 14, S_IFBLK},
{"p", 15, S_IFBLK},
#endif
#if defined(_FIRMWARE_NEEDS_FDISK)
{"q", 16, S_IFBLK},
{"r", 17, S_IFBLK},
{"s", 18, S_IFBLK},
{"t", 19, S_IFBLK},
{"u", 20, S_IFBLK},
#endif
{"a,raw", 0, S_IFCHR},
{"b,raw", 1, S_IFCHR},
{"c,raw", 2, S_IFCHR},
{"d,raw", 3, S_IFCHR},
{"e,raw", 4, S_IFCHR},
{"f,raw", 5, S_IFCHR},
{"g,raw", 6, S_IFCHR},
{"h,raw", 7, S_IFCHR},
#if defined(_SUNOS_VTOC_16)
{"i,raw", 8, S_IFCHR},
{"j,raw", 9, S_IFCHR},
{"k,raw", 10, S_IFCHR},
{"l,raw", 11, S_IFCHR},
{"m,raw", 12, S_IFCHR},
{"n,raw", 13, S_IFCHR},
{"o,raw", 14, S_IFCHR},
{"p,raw", 15, S_IFCHR},
#endif
#if defined(_FIRMWARE_NEEDS_FDISK)
{"q,raw", 16, S_IFCHR},
{"r,raw", 17, S_IFCHR},
{"s,raw", 18, S_IFCHR},
{"t,raw", 19, S_IFCHR},
{"u,raw", 20, S_IFCHR},
#endif
{0}	/* list terminator */
};
#if defined(__x86)
#if defined(_FIRMWARE_NEEDS_FDISK)
/*
 * Minor nodes for logical drives inside an extended fdisk partition
 * (x86 only): p5-p36, minors 21-52, block nodes followed by the matching
 * ",raw" character nodes.  cmlb_update_ext_minor_nodes() indexes this
 * table, addressing the raw half at &dk_ext_minor_data[MAX_EXT_PARTS].
 */
static struct driver_minor_data dk_ext_minor_data[] = {
{"p5", 21, S_IFBLK},
{"p6", 22, S_IFBLK},
{"p7", 23, S_IFBLK},
{"p8", 24, S_IFBLK},
{"p9", 25, S_IFBLK},
{"p10", 26, S_IFBLK},
{"p11", 27, S_IFBLK},
{"p12", 28, S_IFBLK},
{"p13", 29, S_IFBLK},
{"p14", 30, S_IFBLK},
{"p15", 31, S_IFBLK},
{"p16", 32, S_IFBLK},
{"p17", 33, S_IFBLK},
{"p18", 34, S_IFBLK},
{"p19", 35, S_IFBLK},
{"p20", 36, S_IFBLK},
{"p21", 37, S_IFBLK},
{"p22", 38, S_IFBLK},
{"p23", 39, S_IFBLK},
{"p24", 40, S_IFBLK},
{"p25", 41, S_IFBLK},
{"p26", 42, S_IFBLK},
{"p27", 43, S_IFBLK},
{"p28", 44, S_IFBLK},
{"p29", 45, S_IFBLK},
{"p30", 46, S_IFBLK},
{"p31", 47, S_IFBLK},
{"p32", 48, S_IFBLK},
{"p33", 49, S_IFBLK},
{"p34", 50, S_IFBLK},
{"p35", 51, S_IFBLK},
{"p36", 52, S_IFBLK},
{"p5,raw", 21, S_IFCHR},
{"p6,raw", 22, S_IFCHR},
{"p7,raw", 23, S_IFCHR},
{"p8,raw", 24, S_IFCHR},
{"p9,raw", 25, S_IFCHR},
{"p10,raw", 26, S_IFCHR},
{"p11,raw", 27, S_IFCHR},
{"p12,raw", 28, S_IFCHR},
{"p13,raw", 29, S_IFCHR},
{"p14,raw", 30, S_IFCHR},
{"p15,raw", 31, S_IFCHR},
{"p16,raw", 32, S_IFCHR},
{"p17,raw", 33, S_IFCHR},
{"p18,raw", 34, S_IFCHR},
{"p19,raw", 35, S_IFCHR},
{"p20,raw", 36, S_IFCHR},
{"p21,raw", 37, S_IFCHR},
{"p22,raw", 38, S_IFCHR},
{"p23,raw", 39, S_IFCHR},
{"p24,raw", 40, S_IFCHR},
{"p25,raw", 41, S_IFCHR},
{"p26,raw", 42, S_IFCHR},
{"p27,raw", 43, S_IFCHR},
{"p28,raw", 44, S_IFCHR},
{"p29,raw", 45, S_IFCHR},
{"p30,raw", 46, S_IFCHR},
{"p31,raw", 47, S_IFCHR},
{"p32,raw", 48, S_IFCHR},
{"p33,raw", 49, S_IFCHR},
{"p34,raw", 50, S_IFCHR},
{"p35,raw", 51, S_IFCHR},
{"p36,raw", 52, S_IFCHR},
{0}	/* list terminator */
};
#endif
#endif
/*
 * Minor nodes created for an EFI/GPT-labeled disk.  Identical to
 * dk_minor_data except that minor 7 is named "wd" / "wd,raw" (the EFI
 * reserved partition node, see WD_NODE) instead of "h" / "h,raw".
 */
static struct driver_minor_data dk_minor_data_efi[] = {
{"a", 0, S_IFBLK},
{"b", 1, S_IFBLK},
{"c", 2, S_IFBLK},
{"d", 3, S_IFBLK},
{"e", 4, S_IFBLK},
{"f", 5, S_IFBLK},
{"g", 6, S_IFBLK},
{"wd", 7, S_IFBLK},
#if defined(_SUNOS_VTOC_16)
{"i", 8, S_IFBLK},
{"j", 9, S_IFBLK},
{"k", 10, S_IFBLK},
{"l", 11, S_IFBLK},
{"m", 12, S_IFBLK},
{"n", 13, S_IFBLK},
{"o", 14, S_IFBLK},
{"p", 15, S_IFBLK},
#endif
#if defined(_FIRMWARE_NEEDS_FDISK)
{"q", 16, S_IFBLK},
{"r", 17, S_IFBLK},
{"s", 18, S_IFBLK},
{"t", 19, S_IFBLK},
{"u", 20, S_IFBLK},
#endif
{"a,raw", 0, S_IFCHR},
{"b,raw", 1, S_IFCHR},
{"c,raw", 2, S_IFCHR},
{"d,raw", 3, S_IFCHR},
{"e,raw", 4, S_IFCHR},
{"f,raw", 5, S_IFCHR},
{"g,raw", 6, S_IFCHR},
{"wd,raw", 7, S_IFCHR},
#if defined(_SUNOS_VTOC_16)
{"i,raw", 8, S_IFCHR},
{"j,raw", 9, S_IFCHR},
{"k,raw", 10, S_IFCHR},
{"l,raw", 11, S_IFCHR},
{"m,raw", 12, S_IFCHR},
{"n,raw", 13, S_IFCHR},
{"o,raw", 14, S_IFCHR},
{"p,raw", 15, S_IFCHR},
#endif
#if defined(_FIRMWARE_NEEDS_FDISK)
{"q,raw", 16, S_IFCHR},
{"r,raw", 17, S_IFCHR},
{"s,raw", 18, S_IFCHR},
{"t,raw", 19, S_IFCHR},
{"u,raw", 20, S_IFCHR},
#endif
{0}	/* list terminator */
};
/*
 * Dynamic device properties registered for each attached instance via
 * i_ddi_prop_dyn_driver_set() in cmlb_attach(); the cached values are
 * invalidated after label-changing ioctls succeed (see cmlb_ioctl()).
 */
static i_ddi_prop_dyn_t cmlb_prop_dyn[] = {
{"Nblocks", DDI_PROP_TYPE_INT64, S_IFBLK},
{"Size", DDI_PROP_TYPE_INT64, S_IFCHR},
{"device-nblocks", DDI_PROP_TYPE_INT64},
{"device-blksize", DDI_PROP_TYPE_INT},
{"device-solid-state", DDI_PROP_TYPE_INT},
{"device-rotational", DDI_PROP_TYPE_INT},
{NULL}	/* list terminator */
};
/* Maximum transfer size used when reading/writing EFI label data */
len_t cmlb_tg_max_efi_xfer = 1024 * 1024;
extern struct mod_ops mod_miscops;
extern int ddi_create_internal_pathname(dev_info_t *dip, char *name,
int spec_type, minor_t minor_num);
/* Scratch buffer for cmlb_v_log(); guarded by cmlb_log_mutex */
static char cmlb_log_buffer[1024];
static kmutex_t cmlb_log_mutex;
/* Debug controls: restrict cmlb_dbg() output to one lun and/or levels */
struct cmlb_lun *cmlb_debug_cl = NULL;
uint_t cmlb_level_mask = 0x0;
/* NOTE(review): declared but not referenced in this chunk; presumably a
 * tunable used elsewhere in the file — confirm before removing. */
int cmlb_rot_delay = 4;
/* Loadable misc-module linkage for the common labeling module */
static struct modlmisc modlmisc = {
&mod_miscops,
"Common Labeling module"
};
static struct modlinkage modlinkage = {
MODREV_1, (void *)&modlmisc, NULL
};
/*
 * Prototypes for module-internal functions.
 */
static dev_t cmlb_make_device(struct cmlb_lun *cl);
static int cmlb_validate_geometry(struct cmlb_lun *cl, boolean_t forcerevalid,
int flags, void *tg_cookie);
static void cmlb_resync_geom_caches(struct cmlb_lun *cl, diskaddr_t capacity,
void *tg_cookie);
static int cmlb_read_fdisk(struct cmlb_lun *cl, diskaddr_t capacity,
void *tg_cookie);
static void cmlb_swap_efi_gpt(efi_gpt_t *e);
static void cmlb_swap_efi_gpe(int nparts, efi_gpe_t *p);
static int cmlb_validate_efi(efi_gpt_t *labp);
static int cmlb_use_efi(struct cmlb_lun *cl, diskaddr_t capacity, int flags,
void *tg_cookie);
static void cmlb_build_default_label(struct cmlb_lun *cl, void *tg_cookie);
static int cmlb_uselabel(struct cmlb_lun *cl, struct dk_label *l, int flags);
#if defined(_SUNOS_VTOC_8)
static void cmlb_build_user_vtoc(struct cmlb_lun *cl, struct vtoc *user_vtoc);
#endif
static int cmlb_build_label_vtoc(struct cmlb_lun *cl, struct vtoc *user_vtoc);
static int cmlb_write_label(struct cmlb_lun *cl, void *tg_cookie);
static int cmlb_set_vtoc(struct cmlb_lun *cl, struct dk_label *dkl,
void *tg_cookie);
static void cmlb_clear_efi(struct cmlb_lun *cl, void *tg_cookie);
static void cmlb_clear_vtoc(struct cmlb_lun *cl, void *tg_cookie);
static void cmlb_setup_default_geometry(struct cmlb_lun *cl, void *tg_cookie);
static int cmlb_create_minor_nodes(struct cmlb_lun *cl);
static int cmlb_check_update_blockcount(struct cmlb_lun *cl, void *tg_cookie);
static boolean_t cmlb_check_efi_mbr(uchar_t *buf, boolean_t *is_mbr);
#if defined(__x86)
static int cmlb_update_fdisk_and_vtoc(struct cmlb_lun *cl, void *tg_cookie);
#endif
#if defined(_FIRMWARE_NEEDS_FDISK)
static boolean_t cmlb_has_max_chs_vals(struct ipart *fdp);
#endif
#if defined(_SUNOS_VTOC_16)
static void cmlb_convert_geometry(struct cmlb_lun *cl, diskaddr_t capacity,
struct dk_geom *cl_g, void *tg_cookie);
#endif
/*
 * DKIO ioctl service routines, dispatched from cmlb_ioctl().
 */
static int cmlb_dkio_get_geometry(struct cmlb_lun *cl, caddr_t arg, int flag,
void *tg_cookie);
static int cmlb_dkio_set_geometry(struct cmlb_lun *cl, caddr_t arg, int flag);
static int cmlb_dkio_get_partition(struct cmlb_lun *cl, caddr_t arg, int flag,
void *tg_cookie);
static int cmlb_dkio_set_partition(struct cmlb_lun *cl, caddr_t arg, int flag);
static int cmlb_dkio_get_efi(struct cmlb_lun *cl, caddr_t arg, int flag,
void *tg_cookie);
static int cmlb_dkio_set_efi(struct cmlb_lun *cl, dev_t dev, caddr_t arg,
int flag, void *tg_cookie);
static int cmlb_dkio_get_vtoc(struct cmlb_lun *cl, caddr_t arg, int flag,
void *tg_cookie);
static int cmlb_dkio_get_extvtoc(struct cmlb_lun *cl, caddr_t arg, int flag,
void *tg_cookie);
static int cmlb_dkio_set_vtoc(struct cmlb_lun *cl, dev_t dev, caddr_t arg,
int flag, void *tg_cookie);
static int cmlb_dkio_set_extvtoc(struct cmlb_lun *cl, dev_t dev, caddr_t arg,
int flag, void *tg_cookie);
static int cmlb_dkio_get_mboot(struct cmlb_lun *cl, caddr_t arg, int flag,
void *tg_cookie);
static int cmlb_dkio_set_mboot(struct cmlb_lun *cl, caddr_t arg, int flag,
void *tg_cookie);
static int cmlb_dkio_partition(struct cmlb_lun *cl, caddr_t arg, int flag,
void *tg_cookie);
#if defined(__x86)
static int cmlb_dkio_set_ext_part(struct cmlb_lun *cl, caddr_t arg, int flag,
void *tg_cookie);
static int cmlb_validate_ext_part(struct cmlb_lun *cl, int part, int epart,
uint32_t start, uint32_t size);
static int cmlb_is_linux_swap(struct cmlb_lun *cl, uint32_t part_start,
void *tg_cookie);
static int cmlb_dkio_get_virtgeom(struct cmlb_lun *cl, caddr_t arg, int flag);
static int cmlb_dkio_get_phygeom(struct cmlb_lun *cl, caddr_t arg, int flag,
void *tg_cookie);
static int cmlb_dkio_partinfo(struct cmlb_lun *cl, dev_t dev, caddr_t arg,
int flag);
static int cmlb_dkio_extpartinfo(struct cmlb_lun *cl, dev_t dev, caddr_t arg,
int flag);
#endif
/*
 * Logging/debug helpers.
 */
static void cmlb_dbg(uint_t comp, struct cmlb_lun *cl, const char *fmt, ...);
static void cmlb_v_log(dev_info_t *dev, const char *label, uint_t level,
const char *fmt, va_list ap);
static void cmlb_log(dev_info_t *dev, const char *label, uint_t level,
const char *fmt, ...);
/*
 * _init
 *
 * Loadable module entry point.  Initializes the log mutex and installs
 * the module.  If mod_install() fails, the mutex is destroyed again so a
 * later retry of _init() does not re-initialize a live mutex (and the
 * mutex is not leaked).
 */
int
_init(void)
{
	int err;

	mutex_init(&cmlb_log_mutex, NULL, MUTEX_DRIVER, NULL);

	if ((err = mod_install(&modlinkage)) != 0)
		mutex_destroy(&cmlb_log_mutex);

	return (err);
}
/*
 * _info
 *
 * Loadable module entry point: reports module information via mod_info().
 */
int
_info(struct modinfo *modinfop)
{
return (mod_info(&modlinkage, modinfop));
}
/*
 * _fini
 *
 * Loadable module entry point: removes the module and, on success,
 * tears down the log mutex.  On failure the mutex is left intact since
 * the module remains loaded.
 */
int
_fini(void)
{
	int rc = mod_remove(&modlinkage);

	if (rc == 0)
		mutex_destroy(&cmlb_log_mutex);

	return (rc);
}
/*
 * cmlb_dbg
 *
 * Conditional debug logger.  Maps the component mask (CMLB_TRACE/INFO/
 * ERROR) onto CMLB_LOGMASK_* bits and emits the message via cmlb_v_log()
 * only if the global cmlb_level_mask enables one of those bits and
 * cmlb_debug_cl is either unset or matches this lun.
 */
static void
cmlb_dbg(uint_t comp, struct cmlb_lun *cl, const char *fmt, ...)
{
va_list ap;
dev_info_t *dev;
uint_t level_mask = 0;
ASSERT(cl != NULL);
dev = CMLB_DEVINFO(cl);
ASSERT(dev != NULL);
/* translate component bits to log-level mask bits */
if (comp & CMLB_TRACE)
level_mask |= CMLB_LOGMASK_TRACE;
if (comp & CMLB_INFO)
level_mask |= CMLB_LOGMASK_INFO;
if (comp & CMLB_ERROR)
level_mask |= CMLB_LOGMASK_ERROR;
if ((cmlb_level_mask & level_mask) &&
((cmlb_debug_cl == NULL) || (cmlb_debug_cl == cl))) {
va_start(ap, fmt);
cmlb_v_log(dev, CMLB_LABEL(cl), CE_CONT, fmt, ap);
va_end(ap);
}
}
/*
 * cmlb_log
 *
 * Variadic front end for cmlb_v_log(): formats and logs a message for
 * the given devinfo node at the given cmn_err level.
 */
static void
cmlb_log(dev_info_t *dev, const char *label, uint_t level, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
cmlb_v_log(dev, label, level, fmt, ap);
va_end(ap);
}
/*
 * cmlb_v_log
 *
 * Formats and emits a log message through cmn_err().  Serialized by
 * cmlb_log_mutex, which also protects the shared cmlb_log_buffer and the
 * static "name" prefix buffer.  A leading '!', '?' or '^' in the
 * formatted message selects log-only, boot-only or console-only output
 * respectively (standard cmn_err destination prefixes).
 *
 * NOTE(review): sprintf/vsprintf into fixed 256/1024-byte buffers is
 * unbounded; callers are presumably trusted to keep messages short —
 * consider snprintf/vsnprintf.
 */
static void
cmlb_v_log(dev_info_t *dev, const char *label, uint_t level, const char *fmt,
va_list ap)
{
static char name[256];
int log_only = 0;
int boot_only = 0;
int console_only = 0;
mutex_enter(&cmlb_log_mutex);
if (dev) {
/* include device path and instance for the serious levels */
if (level == CE_PANIC || level == CE_WARN ||
level == CE_NOTE) {
(void) sprintf(name, "%s (%s%d):\n",
ddi_pathname(dev, cmlb_log_buffer),
label, ddi_get_instance(dev));
} else {
name[0] = '\0';
}
} else {
(void) sprintf(name, "%s:", label);
}
(void) vsprintf(cmlb_log_buffer, fmt, ap);
/* honor cmn_err destination prefix supplied in the message itself */
switch (cmlb_log_buffer[0]) {
case '!':
log_only = 1;
break;
case '?':
boot_only = 1;
break;
case '^':
console_only = 1;
break;
}
switch (level) {
case CE_NOTE:
/* CE_NOTE is demoted to CE_CONT to avoid the "NOTICE:" prefix */
level = CE_CONT;
/* FALLTHROUGH */
case CE_CONT:
case CE_WARN:
case CE_PANIC:
if (boot_only) {
cmn_err(level, "?%s\t%s", name, &cmlb_log_buffer[1]);
} else if (console_only) {
cmn_err(level, "^%s\t%s", name, &cmlb_log_buffer[1]);
} else if (log_only) {
cmn_err(level, "!%s\t%s", name, &cmlb_log_buffer[1]);
} else {
cmn_err(level, "%s\t%s", name, cmlb_log_buffer);
}
break;
case CE_IGNORE:
break;
default:
/* unknown level: send to console as a debug continuation */
cmn_err(CE_CONT, "^DEBUG: %s\t%s", name, cmlb_log_buffer);
break;
}
mutex_exit(&cmlb_log_mutex);
}
/*
 * cmlb_alloc_handle
 *
 * Allocates and initializes a cmlb handle (a zeroed struct cmlb_lun in
 * CMLB_INITED state with an undefined default label type and an
 * initialized per-lun mutex) and stores it through *cmlbhandlep.
 * May sleep (KM_SLEEP allocation).
 */
void
cmlb_alloc_handle(cmlb_handle_t *cmlbhandlep)
{
	struct cmlb_lun *lunp;

	lunp = kmem_zalloc(sizeof (*lunp), KM_SLEEP);
	ASSERT(cmlbhandlep != NULL);

	lunp->cl_state = CMLB_INITED;
	lunp->cl_def_labeltype = CMLB_LABEL_UNDEF;
	mutex_init(CMLB_MUTEX(lunp), NULL, MUTEX_DRIVER, NULL);

	*cmlbhandlep = (cmlb_handle_t)lunp;
}
/*
 * cmlb_free_handle
 *
 * Destroys the per-lun mutex and frees the handle allocated by
 * cmlb_alloc_handle().  The caller's handle is reset to NULL after the
 * free so a stale handle cannot be used for a double free or
 * use-after-free; a NULL handle remains a no-op.
 */
void
cmlb_free_handle(cmlb_handle_t *cmlbhandlep)
{
	struct cmlb_lun *cl;

	cl = (struct cmlb_lun *)*cmlbhandlep;
	if (cl != NULL) {
		mutex_destroy(CMLB_MUTEX(cl));
		kmem_free(cl, sizeof (struct cmlb_lun));
		*cmlbhandlep = NULL;	/* invalidate caller's copy */
	}
}
/*
 * cmlb_attach
 *
 * Attaches target-driver information to a cmlb handle.  Validates the
 * tg_ops version, records device type/removability/node type and other
 * attributes, and defaults the label type to VTOC — except for
 * non-removable devices whose capacity exceeds CMLB_EXTVTOC_LIMIT,
 * which default to EFI.  Creates the default minor nodes and registers
 * the dynamic properties.  Returns 0 on success, EINVAL for a bad
 * tg_ops version, or ENXIO if minor-node creation fails.
 */
int
cmlb_attach(dev_info_t *devi, cmlb_tg_ops_t *tgopsp, int device_type,
boolean_t is_removable, boolean_t is_hotpluggable, char *node_type,
int alter_behavior, cmlb_handle_t cmlbhandle, void *tg_cookie)
{
struct cmlb_lun *cl = (struct cmlb_lun *)cmlbhandle;
diskaddr_t cap;
int status;
ASSERT(VALID_BOOLEAN(is_removable));
ASSERT(VALID_BOOLEAN(is_hotpluggable));
if (tgopsp->tg_version < TG_DK_OPS_VERSION_1)
return (EINVAL);
mutex_enter(CMLB_MUTEX(cl));
CMLB_DEVINFO(cl) = devi;
cl->cmlb_tg_ops = tgopsp;
cl->cl_device_type = device_type;
cl->cl_is_removable = is_removable;
cl->cl_is_hotpluggable = is_hotpluggable;
cl->cl_node_type = node_type;
cl->cl_sys_blocksize = DEV_BSIZE;
cl->cl_f_geometry_is_valid = B_FALSE;
cl->cl_def_labeltype = CMLB_LABEL_VTOC;
cl->cl_alter_behavior = alter_behavior;
cl->cl_reserved = -1;
cl->cl_msglog_flag |= CMLB_ALLOW_2TB_WARN;
#if defined(__x86)
cl->cl_logical_drive_count = 0;
#endif
/*
 * For fixed disks, probe capacity now; disks larger than the
 * extended-VTOC limit must default to an EFI label.  The mutex is
 * dropped around the capacity call since DK_TG_GETCAP may block.
 */
if (!is_removable) {
mutex_exit(CMLB_MUTEX(cl));
status = DK_TG_GETCAP(cl, &cap, tg_cookie);
mutex_enter(CMLB_MUTEX(cl));
if (status == 0 && cap > CMLB_EXTVTOC_LIMIT) {
cl->cl_def_labeltype = CMLB_LABEL_EFI;
}
}
cl->cl_last_labeltype = CMLB_LABEL_UNDEF;
cl->cl_cur_labeltype = CMLB_LABEL_UNDEF;
if (cmlb_create_minor_nodes(cl) != 0) {
mutex_exit(CMLB_MUTEX(cl));
return (ENXIO);
}
/* register dynamic Size/Nblocks/device-* properties */
i_ddi_prop_dyn_driver_set(CMLB_DEVINFO(cl), cmlb_prop_dyn);
cl->cl_state = CMLB_ATTACHED;
mutex_exit(CMLB_MUTEX(cl));
return (0);
}
/*
 * cmlb_detach
 *
 * Detaches target-driver information from the handle: invalidates the
 * cached label/geometry, removes all minor nodes and the dynamic
 * properties, and drops the state back to CMLB_INITED.  The handle
 * itself remains valid until cmlb_free_handle().
 */
void
cmlb_detach(cmlb_handle_t cmlbhandle, void *tg_cookie)
{
struct cmlb_lun *cl = (struct cmlb_lun *)cmlbhandle;
mutex_enter(CMLB_MUTEX(cl));
cl->cl_def_labeltype = CMLB_LABEL_UNDEF;
cl->cl_f_geometry_is_valid = B_FALSE;
ddi_remove_minor_node(CMLB_DEVINFO(cl), NULL);
i_ddi_prop_dyn_driver_set(CMLB_DEVINFO(cl), NULL);
cl->cl_state = CMLB_INITED;
mutex_exit(CMLB_MUTEX(cl));
}
/*
 * cmlb_validate
 *
 * Forces revalidation of the on-disk label.  cmlb_validate_geometry()
 * returns ENOTSUP when an EFI label was found; if the geometry is then
 * valid the current label type is recorded as EFI, otherwise EINVAL is
 * returned.  A zero return records a VTOC label.  On success the minor
 * nodes are (re)created to match the label type.  Returns ENXIO for a
 * NULL or unattached handle.
 */
int
cmlb_validate(cmlb_handle_t cmlbhandle, int flags, void *tg_cookie)
{
struct cmlb_lun *cl = (struct cmlb_lun *)cmlbhandle;
int rval;
int ret = 0;
if (cl == NULL)
return (ENXIO);
mutex_enter(CMLB_MUTEX(cl));
if (cl->cl_state < CMLB_ATTACHED) {
mutex_exit(CMLB_MUTEX(cl));
return (ENXIO);
}
rval = cmlb_validate_geometry((struct cmlb_lun *)cmlbhandle, B_TRUE,
flags, tg_cookie);
if (rval == ENOTSUP) {
/* ENOTSUP + valid geometry means an EFI label was found */
if (cl->cl_f_geometry_is_valid) {
cl->cl_cur_labeltype = CMLB_LABEL_EFI;
ret = 0;
} else {
ret = EINVAL;
}
} else {
ret = rval;
if (ret == 0)
cl->cl_cur_labeltype = CMLB_LABEL_VTOC;
}
if (ret == 0)
(void) cmlb_create_minor_nodes(cl);
mutex_exit(CMLB_MUTEX(cl));
return (ret);
}
/*
 * cmlb_invalidate
 *
 * Marks the cached geometry/label information invalid so the next
 * consumer triggers revalidation.  tg_cookie is unused here.
 */
void
cmlb_invalidate(cmlb_handle_t cmlbhandle, void *tg_cookie)
{
struct cmlb_lun *cl = (struct cmlb_lun *)cmlbhandle;
if (cl == NULL)
return;
mutex_enter(CMLB_MUTEX(cl));
cl->cl_f_geometry_is_valid = B_FALSE;
mutex_exit(CMLB_MUTEX(cl));
}
/*
 * cmlb_is_valid
 *
 * Returns whether the cached geometry/label information is currently
 * valid; B_FALSE for a NULL handle.  The flag is read without the lun
 * mutex, as in the original implementation.
 */
boolean_t
cmlb_is_valid(cmlb_handle_t cmlbhandle)
{
	struct cmlb_lun *cl = (struct cmlb_lun *)cmlbhandle;

	return ((cl == NULL) ? B_FALSE : cl->cl_f_geometry_is_valid);
}
/*
 * cmlb_close
 *
 * Last-close handling: invalidates the cached geometry and, for
 * removable media, resets the current label type to undefined and
 * recreates minor nodes so the next insertion can relabel.  Always
 * returns 0.
 */
int
cmlb_close(cmlb_handle_t cmlbhandle, void *tg_cookie)
{
struct cmlb_lun *cl = (struct cmlb_lun *)cmlbhandle;
mutex_enter(CMLB_MUTEX(cl));
cl->cl_f_geometry_is_valid = B_FALSE;
if (ISREMOVABLE(cl)) {
cl->cl_cur_labeltype = CMLB_LABEL_UNDEF;
(void) cmlb_create_minor_nodes(cl);
}
mutex_exit(CMLB_MUTEX(cl));
return (0);
}
/*
 * cmlb_get_devid_block
 *
 * Computes the block address where the device id is stored.  For an EFI
 * label, the address is taken from the reserved partition's entry in
 * cl_map (EINVAL if no reserved partition).  For a VTOC label, it is
 * the second sector of the last cylinder in the alternate-cylinder
 * region, which requires at least two alternate cylinders.  Returns
 * EINVAL if the handle is not attached, geometry is invalid, or the
 * Solaris partition is too small to hold the label.
 */
int
cmlb_get_devid_block(cmlb_handle_t cmlbhandle, diskaddr_t *devidblockp,
void *tg_cookie)
{
daddr_t spc, blk, head, cyl;
struct cmlb_lun *cl = (struct cmlb_lun *)cmlbhandle;
mutex_enter(CMLB_MUTEX(cl));
if (cl->cl_state < CMLB_ATTACHED) {
mutex_exit(CMLB_MUTEX(cl));
return (EINVAL);
}
if ((!cl->cl_f_geometry_is_valid) ||
(cl->cl_solaris_size < DK_LABEL_LOC)) {
mutex_exit(CMLB_MUTEX(cl));
return (EINVAL);
}
if (cl->cl_cur_labeltype == CMLB_LABEL_EFI) {
if (cl->cl_reserved != -1) {
/* devid lives at the start of the reserved partition */
blk = cl->cl_map[cl->cl_reserved].dkl_cylno;
} else {
mutex_exit(CMLB_MUTEX(cl));
return (EINVAL);
}
} else {
/* VTOC path requires a label read from media ... */
if (cl->cl_label_from_media != CMLB_LABEL_VTOC) {
mutex_exit(CMLB_MUTEX(cl));
return (EINVAL);
}
/* ... and at least 2 alternate cylinders to hold the devid */
if (cl->cl_g.dkg_acyl < 2) {
mutex_exit(CMLB_MUTEX(cl));
return (EINVAL);
}
cyl = cl->cl_g.dkg_ncyl + cl->cl_g.dkg_acyl - 2;
spc = cl->cl_g.dkg_nhead * cl->cl_g.dkg_nsect;
head = cl->cl_g.dkg_nhead - 1;
/* 2nd sector (+1) of the last track of the chosen cylinder */
blk = cl->cl_solaris_offset +
(cyl * (spc - cl->cl_g.dkg_apc)) +
(head * cl->cl_g.dkg_nsect) + 1;
}
*devidblockp = blk;
mutex_exit(CMLB_MUTEX(cl));
return (0);
}
/*
 * cmlb_partinfo
 *
 * Reports a partition's start block, block count, name and VTOC tag.
 * Validates geometry on demand.  For P0 (whole disk) the geometry need
 * not be valid.  The tag is V_UNASSIGNED for EFI labels and for
 * partitions beyond NDKMAP.  On x86, names for logical drives beyond
 * the four primary fdisk partitions come from dk_ext_minor_data.
 * Returns EINVAL for a bad partition index or invalid geometry.
 */
int
cmlb_partinfo(cmlb_handle_t cmlbhandle, int part, diskaddr_t *nblocksp,
diskaddr_t *startblockp, char **partnamep, uint16_t *tagp, void *tg_cookie)
{
struct cmlb_lun *cl = (struct cmlb_lun *)cmlbhandle;
int rval;
#if defined(__x86)
int ext_part;
#endif
ASSERT(cl != NULL);
mutex_enter(CMLB_MUTEX(cl));
if (cl->cl_state < CMLB_ATTACHED) {
mutex_exit(CMLB_MUTEX(cl));
return (EINVAL);
}
if (part < 0 || part >= MAXPART) {
rval = EINVAL;
} else {
/* lazily (re)validate the label before answering */
if (!cl->cl_f_geometry_is_valid)
(void) cmlb_validate_geometry((struct cmlb_lun *)cl,
B_FALSE, 0, tg_cookie);
if (((!cl->cl_f_geometry_is_valid) ||
(part < NDKMAP && cl->cl_solaris_size == 0)) &&
(part != P0_RAW_DISK)) {
rval = EINVAL;
} else {
if (startblockp != NULL)
*startblockp = (diskaddr_t)cl->cl_offset[part];
if (nblocksp != NULL)
*nblocksp = (diskaddr_t)
cl->cl_map[part].dkl_nblk;
if (tagp != NULL)
*tagp =
((cl->cl_cur_labeltype == CMLB_LABEL_EFI) ||
(part >= NDKMAP)) ? V_UNASSIGNED :
cl->cl_vtoc.v_part[part].p_tag;
rval = 0;
}
if (partnamep != NULL) {
#if defined(__x86)
#if defined(_FIRMWARE_NEEDS_FDISK)
/* logical drives (p5+) are named from the extended table */
if (part > FDISK_P4) {
ext_part = part-FDISK_P4-1;
*partnamep = dk_ext_minor_data[ext_part].name;
} else
#endif
#endif
*partnamep = dk_minor_data[part].name;
}
}
mutex_exit(CMLB_MUTEX(cl));
return (rval);
}
/*
 * cmlb_efi_label_capacity
 *
 * Returns the capacity recorded in the EFI label (taken from the WD_NODE
 * map entry).  Validates geometry on demand.  EINVAL if the geometry is
 * invalid, capacity is NULL, or the current label is not EFI.
 */
int
cmlb_efi_label_capacity(cmlb_handle_t cmlbhandle, diskaddr_t *capacity,
void *tg_cookie)
{
struct cmlb_lun *cl = (struct cmlb_lun *)cmlbhandle;
int rval;
ASSERT(cl != NULL);
mutex_enter(CMLB_MUTEX(cl));
if (cl->cl_state < CMLB_ATTACHED) {
mutex_exit(CMLB_MUTEX(cl));
return (EINVAL);
}
if (!cl->cl_f_geometry_is_valid)
(void) cmlb_validate_geometry((struct cmlb_lun *)cl, B_FALSE,
0, tg_cookie);
if ((!cl->cl_f_geometry_is_valid) || (capacity == NULL) ||
(cl->cl_cur_labeltype != CMLB_LABEL_EFI)) {
rval = EINVAL;
} else {
*capacity = (diskaddr_t)cl->cl_map[WD_NODE].dkl_nblk;
rval = 0;
}
mutex_exit(CMLB_MUTEX(cl));
return (rval);
}
/*
 * cmlb_ioctl
 *
 * Dispatches disk-label DKIO ioctls on behalf of the target driver.
 * "Set" ioctls are admitted without revalidation; old-style VTOC ioctls
 * are rejected with EOVERFLOW on disks beyond CMLB_OLDVTOC_LIMIT; all
 * other commands first revalidate the label, then VTOC-style "get"
 * commands are rejected with ENOTSUP when an EFI label is present.
 * On success of a label-changing ioctl the cached dynamic properties
 * (Size/Nblocks) are invalidated.
 *
 * Fixes vs. previous revision: the debug trace tags for DKIOCGEXTVTOC,
 * DKIOCSEXTVTOC and DKIOCEXTPARTINFO previously logged the name of the
 * non-extended command; the forcerevalid argument now uses B_TRUE
 * instead of a bare 1, matching the boolean_t convention asserted by
 * cmlb_validate_geometry().
 */
int
cmlb_ioctl(cmlb_handle_t cmlbhandle, dev_t dev, int cmd, intptr_t arg,
    int flag, cred_t *cred_p, int *rval_p, void *tg_cookie)
{
	int err;
	struct cmlb_lun *cl;

	cl = (struct cmlb_lun *)cmlbhandle;
	ASSERT(cl != NULL);

	mutex_enter(CMLB_MUTEX(cl));
	if (cl->cl_state < CMLB_ATTACHED) {
		mutex_exit(CMLB_MUTEX(cl));
		return (EIO);
	}

	switch (cmd) {
	case DKIOCSEXTVTOC:
	case DKIOCSGEOM:
	case DKIOCSETEFI:
	case DKIOCSMBOOT:
#if defined(__x86)
	case DKIOCSETEXTPART:
#endif
		/* label-writing ioctls need no prior revalidation */
		break;
	case DKIOCSVTOC:
#if defined(__x86)
	case DKIOCPARTINFO:
#endif
		/* old-style interfaces cannot represent > 1TB disks */
		if (cl->cl_blockcount > CMLB_OLDVTOC_LIMIT) {
			mutex_exit(CMLB_MUTEX(cl));
			return (EOVERFLOW);
		}
		break;
	default:
		/* refresh the label before servicing query ioctls */
		(void) cmlb_validate_geometry(cl, B_TRUE, CMLB_SILENT,
		    tg_cookie);
		switch (cmd) {
		case DKIOCGVTOC:
		case DKIOCGAPART:
		case DKIOCSAPART:
			if (cl->cl_label_from_media == CMLB_LABEL_EFI) {
				mutex_exit(CMLB_MUTEX(cl));
				return (ENOTSUP);
			} else if (cl->cl_blockcount > CMLB_OLDVTOC_LIMIT) {
				mutex_exit(CMLB_MUTEX(cl));
				return (EOVERFLOW);
			}
			break;
		case DKIOCGGEOM:
			if (cl->cl_label_from_media == CMLB_LABEL_EFI) {
				mutex_exit(CMLB_MUTEX(cl));
				return (ENOTSUP);
			}
			break;
		default:
			break;
		}
	}

	mutex_exit(CMLB_MUTEX(cl));

	switch (cmd) {
	case DKIOCGGEOM:
		cmlb_dbg(CMLB_TRACE, cl, "DKIOCGGEOM\n");
		err = cmlb_dkio_get_geometry(cl, (caddr_t)arg, flag, tg_cookie);
		break;
	case DKIOCSGEOM:
		cmlb_dbg(CMLB_TRACE, cl, "DKIOCSGEOM\n");
		err = cmlb_dkio_set_geometry(cl, (caddr_t)arg, flag);
		break;
	case DKIOCGAPART:
		cmlb_dbg(CMLB_TRACE, cl, "DKIOCGAPART\n");
		err = cmlb_dkio_get_partition(cl, (caddr_t)arg,
		    flag, tg_cookie);
		break;
	case DKIOCSAPART:
		cmlb_dbg(CMLB_TRACE, cl, "DKIOCSAPART\n");
		err = cmlb_dkio_set_partition(cl, (caddr_t)arg, flag);
		break;
	case DKIOCGVTOC:
		cmlb_dbg(CMLB_TRACE, cl, "DKIOCGVTOC\n");
		err = cmlb_dkio_get_vtoc(cl, (caddr_t)arg, flag, tg_cookie);
		break;
	case DKIOCGEXTVTOC:
		/* was mislabeled "DKIOCGVTOC" in the trace output */
		cmlb_dbg(CMLB_TRACE, cl, "DKIOCGEXTVTOC\n");
		err = cmlb_dkio_get_extvtoc(cl, (caddr_t)arg, flag, tg_cookie);
		break;
	case DKIOCGETEFI:
		cmlb_dbg(CMLB_TRACE, cl, "DKIOCGETEFI\n");
		err = cmlb_dkio_get_efi(cl, (caddr_t)arg, flag, tg_cookie);
		break;
	case DKIOCPARTITION:
		cmlb_dbg(CMLB_TRACE, cl, "DKIOCPARTITION\n");
		err = cmlb_dkio_partition(cl, (caddr_t)arg, flag, tg_cookie);
		break;
	case DKIOCSVTOC:
		cmlb_dbg(CMLB_TRACE, cl, "DKIOCSVTOC\n");
		err = cmlb_dkio_set_vtoc(cl, dev, (caddr_t)arg, flag,
		    tg_cookie);
		break;
	case DKIOCSEXTVTOC:
		/* was mislabeled "DKIOCSVTOC" in the trace output */
		cmlb_dbg(CMLB_TRACE, cl, "DKIOCSEXTVTOC\n");
		err = cmlb_dkio_set_extvtoc(cl, dev, (caddr_t)arg, flag,
		    tg_cookie);
		break;
	case DKIOCSETEFI:
		cmlb_dbg(CMLB_TRACE, cl, "DKIOCSETEFI\n");
		err = cmlb_dkio_set_efi(cl, dev, (caddr_t)arg, flag, tg_cookie);
		break;
	case DKIOCGMBOOT:
		cmlb_dbg(CMLB_TRACE, cl, "DKIOCGMBOOT\n");
		err = cmlb_dkio_get_mboot(cl, (caddr_t)arg, flag, tg_cookie);
		break;
	case DKIOCSMBOOT:
		cmlb_dbg(CMLB_TRACE, cl, "DKIOCSMBOOT\n");
		err = cmlb_dkio_set_mboot(cl, (caddr_t)arg, flag, tg_cookie);
		break;
	case DKIOCG_PHYGEOM:
		cmlb_dbg(CMLB_TRACE, cl, "DKIOCG_PHYGEOM\n");
#if defined(__x86)
		err = cmlb_dkio_get_phygeom(cl, (caddr_t)arg, flag, tg_cookie);
#else
		err = ENOTTY;
#endif
		break;
	case DKIOCG_VIRTGEOM:
		cmlb_dbg(CMLB_TRACE, cl, "DKIOCG_VIRTGEOM\n");
#if defined(__x86)
		err = cmlb_dkio_get_virtgeom(cl, (caddr_t)arg, flag);
#else
		err = ENOTTY;
#endif
		break;
	case DKIOCPARTINFO:
		cmlb_dbg(CMLB_TRACE, cl, "DKIOCPARTINFO");
#if defined(__x86)
		err = cmlb_dkio_partinfo(cl, dev, (caddr_t)arg, flag);
#else
		err = ENOTTY;
#endif
		break;
	case DKIOCEXTPARTINFO:
		/* was mislabeled "DKIOCPARTINFO" in the trace output */
		cmlb_dbg(CMLB_TRACE, cl, "DKIOCEXTPARTINFO");
#if defined(__x86)
		err = cmlb_dkio_extpartinfo(cl, dev, (caddr_t)arg, flag);
#else
		err = ENOTTY;
#endif
		break;
#if defined(__x86)
	case DKIOCSETEXTPART:
		cmlb_dbg(CMLB_TRACE, cl, "DKIOCSETEXTPART");
		err = cmlb_dkio_set_ext_part(cl, (caddr_t)arg, flag, tg_cookie);
		break;
#endif
	default:
		err = ENOTTY;
	}

	/*
	 * A successful label-changing ioctl invalidates the cached
	 * dynamic Size/Nblocks properties.
	 */
	if (err == 0) {
		switch (cmd) {
		case DKIOCSGEOM:
		case DKIOCSAPART:
		case DKIOCSVTOC:
		case DKIOCSEXTVTOC:
		case DKIOCSETEFI:
			i_ddi_prop_dyn_cache_invalidate(CMLB_DEVINFO(cl),
			    i_ddi_prop_dyn_driver_get(CMLB_DEVINFO(cl)));
		}
	}

	return (err);
}
dev_t
cmlb_make_device(struct cmlb_lun *cl)
{
if (cl->cl_alter_behavior & CMLB_CREATE_P0_MINOR_NODE) {
return (makedevice(ddi_driver_major(CMLB_DEVINFO(cl)),
ddi_get_instance(
CMLB_DEVINFO(cl)) << CMLBUNIT_FORCE_P0_SHIFT));
} else {
return (makedevice(ddi_driver_major(CMLB_DEVINFO(cl)),
ddi_get_instance(CMLB_DEVINFO(cl)) << CMLBUNIT_SHIFT));
}
}
/*
 * cmlb_check_update_blockcount
 *
 * If the geometry is not currently valid, queries the target driver for
 * capacity and logical block size (dropping the lun mutex around the
 * blocking calls) and caches them in cl_blockcount/cl_tgt_blocksize.
 * For non-removable devices the system block size is also set to the
 * target block size.  Returns 0 on success, EIO if either query fails
 * or returns zero.  Must be called with the lun mutex held.
 */
static int
cmlb_check_update_blockcount(struct cmlb_lun *cl, void *tg_cookie)
{
int status;
diskaddr_t capacity;
uint32_t lbasize;
ASSERT(mutex_owned(CMLB_MUTEX(cl)));
if (cl->cl_f_geometry_is_valid)
return (0);
mutex_exit(CMLB_MUTEX(cl));
status = DK_TG_GETCAP(cl, &capacity, tg_cookie);
if (status != 0) {
mutex_enter(CMLB_MUTEX(cl));
return (EIO);
}
status = DK_TG_GETBLOCKSIZE(cl, &lbasize, tg_cookie);
mutex_enter(CMLB_MUTEX(cl));
if (status != 0)
return (EIO);
if ((capacity != 0) && (lbasize != 0)) {
cl->cl_blockcount = capacity;
cl->cl_tgt_blocksize = lbasize;
if (!cl->cl_is_removable) {
cl->cl_sys_blocksize = lbasize;
}
return (0);
} else {
return (EIO);
}
}
/*
 * cmlb_create_minor
 *
 * Creates a single minor node: an internal pathname (hidden from
 * userland devname lookup) when "internal" is set, otherwise a regular
 * minor node via ddi_create_minor_node().  Returns the DDI status of
 * the underlying call.
 */
static int
cmlb_create_minor(dev_info_t *dip, char *name, int spec_type,
    minor_t minor_num, char *node_type, int flag, boolean_t internal)
{
	ASSERT(VALID_BOOLEAN(internal));

	if (!internal) {
		return (ddi_create_minor_node(dip, name, spec_type, minor_num,
		    node_type, flag));
	}

	return (ddi_create_internal_pathname(dip, name, spec_type, minor_num));
}
/*
 * cmlb_create_minor_nodes
 *
 * Creates or adjusts the instance's minor nodes to match the label type.
 * Three cases:
 *   1. First call (cl_last_labeltype undefined): create the full node
 *      set from dk_minor_data or dk_minor_data_efi according to the
 *      default label type; on _SUNOS_VTOC_8 with the P0 behavior flag,
 *      also create "q"/"q,raw" whole-disk nodes.
 *   2. Label type unchanged (or current matches last): nothing to do.
 *   3. Label type changed: swap only the slot-7 nodes — "h"/"h,raw"
 *      for VTOC vs. "wd"/"wd,raw" for EFI — leaving the rest in place.
 * Returns 0 on success, ENXIO on node-creation failure (after removing
 * all nodes) or if no default label type is set.  Caller holds the lun
 * mutex.
 */
static int
cmlb_create_minor_nodes(struct cmlb_lun *cl)
{
struct driver_minor_data *dmdp;
int instance, shift;
char name[48];
cmlb_label_t newlabeltype;
boolean_t internal;
ASSERT(cl != NULL);
ASSERT(mutex_owned(CMLB_MUTEX(cl)));
internal = VOID2BOOLEAN(
(cl->cl_alter_behavior & (CMLB_INTERNAL_MINOR_NODES)) != 0);
if (cl->cl_alter_behavior & CMLB_CREATE_P0_MINOR_NODE)
shift = CMLBUNIT_FORCE_P0_SHIFT;
else
shift = CMLBUNIT_SHIFT;
/* nothing to do when the current label already has its nodes */
if (cl->cl_cur_labeltype != CMLB_LABEL_UNDEF &&
cl->cl_last_labeltype == cl->cl_cur_labeltype) {
return (0);
}
if (cl->cl_def_labeltype == CMLB_LABEL_UNDEF) {
return (ENXIO);
}
/* case 1: first-time creation of the full node set */
if (cl->cl_last_labeltype == CMLB_LABEL_UNDEF) {
newlabeltype = cl->cl_def_labeltype;
instance = ddi_get_instance(CMLB_DEVINFO(cl));
dmdp = (newlabeltype == CMLB_LABEL_EFI) ? dk_minor_data_efi :
dk_minor_data;
while (dmdp->name != NULL) {
(void) sprintf(name, "%s", dmdp->name);
if (cmlb_create_minor(CMLB_DEVINFO(cl), name,
dmdp->type,
(instance << shift) | dmdp->minor,
cl->cl_node_type, 0, internal) == DDI_FAILURE) {
/* undo everything on failure */
ddi_remove_minor_node(CMLB_DEVINFO(cl), NULL);
return (ENXIO);
}
dmdp++;
}
cl->cl_last_labeltype = newlabeltype;
#if defined(_SUNOS_VTOC_8)
/* optional whole-disk P0 nodes on VTOC-8 platforms */
if (cl->cl_alter_behavior & CMLB_CREATE_P0_MINOR_NODE) {
if (cmlb_create_minor(CMLB_DEVINFO(cl), "q", S_IFBLK,
(instance << CMLBUNIT_FORCE_P0_SHIFT) | P0_RAW_DISK,
cl->cl_node_type, 0, internal) == DDI_FAILURE) {
ddi_remove_minor_node(CMLB_DEVINFO(cl), NULL);
return (ENXIO);
}
if (cmlb_create_minor(CMLB_DEVINFO(cl), "q,raw",
S_IFCHR,
(instance << CMLBUNIT_FORCE_P0_SHIFT) | P0_RAW_DISK,
cl->cl_node_type, 0, internal) == DDI_FAILURE) {
ddi_remove_minor_node(CMLB_DEVINFO(cl), NULL);
return (ENXIO);
}
}
#endif
return (0);
}
/* decide whether the label type actually changed */
if (cl->cl_cur_labeltype == CMLB_LABEL_UNDEF) {
if (cl->cl_last_labeltype != cl->cl_def_labeltype) {
newlabeltype = cl->cl_def_labeltype;
} else {
return (0);
}
} else {
if (cl->cl_cur_labeltype != cl->cl_last_labeltype) {
newlabeltype = cl->cl_cur_labeltype;
} else {
return (0);
}
}
instance = ddi_get_instance(CMLB_DEVINFO(cl));
/* case 3: swap only the slot-7 nodes between "h" and "wd" */
if (newlabeltype == CMLB_LABEL_EFI &&
cl->cl_last_labeltype != CMLB_LABEL_EFI) {
ddi_remove_minor_node(CMLB_DEVINFO(cl), "h");
ddi_remove_minor_node(CMLB_DEVINFO(cl), "h,raw");
(void) cmlb_create_minor(CMLB_DEVINFO(cl), "wd",
S_IFBLK, (instance << shift) | WD_NODE,
cl->cl_node_type, 0, internal);
(void) cmlb_create_minor(CMLB_DEVINFO(cl), "wd,raw",
S_IFCHR, (instance << shift) | WD_NODE,
cl->cl_node_type, 0, internal);
} else {
ddi_remove_minor_node(CMLB_DEVINFO(cl), "wd");
ddi_remove_minor_node(CMLB_DEVINFO(cl), "wd,raw");
(void) cmlb_create_minor(CMLB_DEVINFO(cl), "h",
S_IFBLK, (instance << shift) | WD_NODE,
cl->cl_node_type, 0, internal);
(void) cmlb_create_minor(CMLB_DEVINFO(cl), "h,raw",
S_IFCHR, (instance << shift) | WD_NODE,
cl->cl_node_type, 0, internal);
}
cl->cl_last_labeltype = newlabeltype;
return (0);
}
/*
 * cmlb_validate_geometry
 *
 * (Re)reads and validates the disk label, populating the partition map,
 * offsets and cached geometry.  Order of operations: refresh block
 * count, set up the P0 whole-disk entry, resync geometry caches, then
 * try EFI first — success is reported to the caller as ENOTSUP (the
 * historical "not a VTOC" convention, see cmlb_validate()).  Otherwise
 * a disk over the extended-VTOC limit warns once (ESRCH from the EFI
 * probe) or returns ENOTSUP.  For direct-access/removable devices the
 * fdisk table and VTOC label sector are then read; when no valid label
 * is found a default label is fabricated (platform-dependent).  Finally
 * the per-partition offset table is rebuilt.  Caller holds the lun
 * mutex; it is dropped around blocking target-driver calls.
 */
static int
cmlb_validate_geometry(struct cmlb_lun *cl, boolean_t forcerevalid, int flags,
void *tg_cookie)
{
int label_error = 0;
diskaddr_t capacity;
int count;
ASSERT(mutex_owned(CMLB_MUTEX(cl)));
ASSERT(VALID_BOOLEAN(forcerevalid));
if ((cl->cl_f_geometry_is_valid) && (!forcerevalid)) {
/* already valid: EFI still reports ENOTSUP per convention */
if (cl->cl_cur_labeltype == CMLB_LABEL_EFI)
return (ENOTSUP);
return (0);
}
if (cmlb_check_update_blockcount(cl, tg_cookie) != 0)
return (EIO);
capacity = cl->cl_blockcount;
/* P0 always covers the entire disk */
cl->cl_map[P0_RAW_DISK].dkl_cylno = 0;
cl->cl_offset[P0_RAW_DISK] = 0;
cl->cl_map[P0_RAW_DISK].dkl_nblk = capacity;
cmlb_resync_geom_caches(cl, capacity, tg_cookie);
cl->cl_label_from_media = CMLB_LABEL_UNDEF;
label_error = cmlb_use_efi(cl, capacity, flags, tg_cookie);
if (label_error == 0) {
cmlb_dbg(CMLB_TRACE, cl,
"cmlb_validate_geometry: found EFI label\n");
/* ENOTSUP signals "EFI label present" to callers */
return (ENOTSUP);
}
if (capacity > CMLB_EXTVTOC_LIMIT) {
if (label_error == ESRCH) {
/* VTOC label on a >2TB disk: warn once per attach */
if (!(flags & CMLB_SILENT) &&
(cl->cl_msglog_flag & CMLB_ALLOW_2TB_WARN)) {
cmlb_log(CMLB_DEVINFO(cl), CMLB_LABEL(cl),
CE_NOTE, "!Disk (%s%d) is limited to 2 TB "
"due to VTOC label. To use the full "
"capacity of the disk, use format(8) to "
"relabel the disk with EFI/GPT label.\n",
CMLB_LABEL(cl),
ddi_get_instance(CMLB_DEVINFO(cl)));
cl->cl_msglog_flag &= ~CMLB_ALLOW_2TB_WARN;
}
} else {
return (ENOTSUP);
}
}
label_error = 0;
if (cl->cl_device_type == DTYPE_DIRECT || ISREMOVABLE(cl)) {
struct dk_label *dkl;
offset_t label_addr;
int rval;
size_t buffer_size;
rval = cmlb_read_fdisk(cl, capacity, tg_cookie);
if ((rval != 0) && !ISCD(cl)) {
ASSERT(mutex_owned(CMLB_MUTEX(cl)));
return (rval);
}
/* Solaris partition too small to contain a label sector */
if (cl->cl_solaris_size <= DK_LABEL_LOC) {
label_error = 0;
cl->cl_f_geometry_is_valid = B_TRUE;
goto no_solaris_partition;
}
/* NOTE(review): daddr_t cast of an offset_t value — presumably
 * safe because a VTOC label is below the 2TB limit; confirm. */
label_addr = (daddr_t)(cl->cl_solaris_offset + DK_LABEL_LOC);
buffer_size = cl->cl_sys_blocksize;
cmlb_dbg(CMLB_TRACE, cl, "cmlb_validate_geometry: "
"label_addr: 0x%x allocation size: 0x%x\n",
label_addr, buffer_size);
if ((dkl = kmem_zalloc(buffer_size, KM_NOSLEEP)) == NULL)
return (ENOMEM);
mutex_exit(CMLB_MUTEX(cl));
rval = DK_TG_READ(cl, dkl, label_addr, buffer_size, tg_cookie);
mutex_enter(CMLB_MUTEX(cl));
switch (rval) {
case 0:
if (cmlb_uselabel(cl,
(struct dk_label *)(uintptr_t)dkl, flags) !=
CMLB_LABEL_IS_VALID) {
label_error = EINVAL;
} else
cl->cl_label_from_media = CMLB_LABEL_VTOC;
break;
case EACCES:
/* reservation conflict: propagate distinctly */
label_error = EACCES;
break;
default:
label_error = EINVAL;
break;
}
kmem_free(dkl, buffer_size);
}
/*
 * Fabricate a default label when none was read -- on VTOC-8 only for
 * removable/hotpluggable media, on VTOC-16 for any device -- unless
 * the failure was a reservation conflict.
 */
#if defined(_SUNOS_VTOC_8)
if ((ISREMOVABLE(cl) || ISHOTPLUGGABLE(cl)) &&
(label_error != EACCES)) {
#elif defined(_SUNOS_VTOC_16)
if (label_error != EACCES) {
#endif
if (!cl->cl_f_geometry_is_valid) {
cmlb_build_default_label(cl, tg_cookie);
}
label_error = 0;
}
no_solaris_partition:
#if defined(_SUNOS_VTOC_16)
/* publish the fdisk primary partitions as map entries 16-19 */
for (count = 0; count < FDISK_PARTS; count++) {
cl->cl_map[FDISK_P1 + count].dkl_cylno = UINT16_MAX;
cl->cl_map[FDISK_P1 + count].dkl_nblk =
cl->cl_fmap[count].fmap_nblk;
cl->cl_offset[FDISK_P1 + count] =
cl->cl_fmap[count].fmap_start;
}
#endif
/* rebuild per-slice absolute block offsets */
for (count = 0; count < NDKMAP; count++) {
#if defined(_SUNOS_VTOC_8)
struct dk_map *lp = &cl->cl_map[count];
cl->cl_offset[count] =
cl->cl_g.dkg_nhead * cl->cl_g.dkg_nsect * lp->dkl_cylno;
#elif defined(_SUNOS_VTOC_16)
struct dkl_partition *vp = &cl->cl_vtoc.v_part[count];
cl->cl_offset[count] = vp->p_start + cl->cl_solaris_offset;
#else
#error "No VTOC format defined."
#endif
}
return (label_error);
}
#if defined(_SUNOS_VTOC_16)
/*
 * cmlb_convert_geometry
 *
 * Synthesizes a CHS geometry for a disk of the given capacity (VTOC-16
 * platforms).  Tiny disks get degenerate geometries; mid-size disks use
 * fixed head/sector counts; large disks (> 8GB at 512B blocks) pick
 * head/sector pairs — 224/56 for solid-state devices, 255/63 otherwise —
 * then scale nsect so the cylinder count fits in a 16-bit field.  The
 * mutex is dropped around the blocking attribute query.
 */
static void
cmlb_convert_geometry(struct cmlb_lun *cl, diskaddr_t capacity,
struct dk_geom *cl_g, void *tg_cookie)
{
ASSERT(cl != NULL);
ASSERT(mutex_owned(CMLB_MUTEX(cl)));
if (capacity < 160) {
/* trivially small: one head, one sector per track */
cl_g->dkg_nhead = 1;
cl_g->dkg_ncyl = capacity;
cl_g->dkg_nsect = 1;
return;
} else if (capacity <= 0x1000) {
/* unlabeled SCSI floppy style */
cl_g->dkg_nhead = 2;
cl_g->dkg_ncyl = 80;
cl_g->dkg_nsect = capacity / (cl_g->dkg_nhead * cl_g->dkg_ncyl);
return;
}
if (capacity <= 0x200000) {
cl_g->dkg_nhead = 64;
cl_g->dkg_nsect = 32;
} else if (capacity <= 0x01000000) {
cl_g->dkg_nhead = 128;
cl_g->dkg_nsect = 32;
} else {
tg_attribute_t tgattribute;
int is_solid_state;
unsigned short nhead;
unsigned short nsect;
bzero(&tgattribute, sizeof (tg_attribute_t));
/* query may block: drop the lun mutex around it */
mutex_exit(CMLB_MUTEX(cl));
is_solid_state =
(DK_TG_GETATTRIBUTE(cl, &tgattribute, tg_cookie) == 0) ?
tgattribute.media_is_solid_state : FALSE;
mutex_enter(CMLB_MUTEX(cl));
if (is_solid_state) {
/* multiples of 8 to help SSD alignment */
nhead = 224;
nsect = 56;
} else {
nhead = 255;
nsect = 63;
}
cl_g->dkg_nhead = nhead;
/* scale nsect so ncyl fits in 16 bits (round up) */
cl_g->dkg_nsect = ((capacity +
(UINT16_MAX * nhead * nsect) - 1) /
(UINT16_MAX * nhead * nsect)) * nsect;
if (cl_g->dkg_nsect == 0)
cl_g->dkg_nsect = (UINT16_MAX / nsect) * nsect;
}
}
#endif
/*
 * cmlb_resync_geom_caches
 *
 * Refreshes the cached logical (cl_lgeom) and physical (cl_pgeom)
 * geometries from the target driver.  A failed or degenerate virtual
 * geometry falls back to 255 heads / 63 sectors; CDs are modeled as a
 * single-head device.  A provisional physical geometry is derived from
 * the capacity, then the target driver may overwrite it via
 * DK_TG_GETPHYGEOM (its failure is deliberately ignored, keeping the
 * derived values).  The lun mutex is dropped around the blocking
 * target-driver calls.
 */
static void
cmlb_resync_geom_caches(struct cmlb_lun *cl, diskaddr_t capacity,
void *tg_cookie)
{
struct cmlb_geom pgeom;
struct cmlb_geom lgeom;
struct cmlb_geom *pgeomp = &pgeom;
unsigned short nhead;
unsigned short nsect;
int spc;
int ret;
ASSERT(cl != NULL);
ASSERT(mutex_owned(CMLB_MUTEX(cl)));
mutex_exit(CMLB_MUTEX(cl));
bzero(&lgeom, sizeof (struct cmlb_geom));
ret = DK_TG_GETVIRTGEOM(cl, &lgeom, tg_cookie);
mutex_enter(CMLB_MUTEX(cl));
bcopy(&lgeom, &cl->cl_lgeom, sizeof (cl->cl_lgeom));
if (ret != 0 || cl->cl_lgeom.g_nsect == 0 ||
cl->cl_lgeom.g_nhead == 0) {
/* fall back to the conventional large-disk geometry */
nhead = 255;
nsect = 63;
} else {
nhead = cl->cl_lgeom.g_nhead;
nsect = cl->cl_lgeom.g_nsect;
}
if (ISCD(cl)) {
/* CDs are modeled as one big single-head track */
pgeomp->g_nhead = 1;
pgeomp->g_nsect = nsect * nhead;
} else {
pgeomp->g_nhead = nhead;
pgeomp->g_nsect = nsect;
}
spc = pgeomp->g_nhead * pgeomp->g_nsect;
pgeomp->g_capacity = capacity;
if (spc == 0)
pgeomp->g_ncyl = 0;
else
pgeomp->g_ncyl = pgeomp->g_capacity / spc;
pgeomp->g_acyl = 0;
/* let the target driver override the derived physical geometry */
mutex_exit(CMLB_MUTEX(cl));
(void) DK_TG_GETPHYGEOM(cl, pgeomp, tg_cookie);
mutex_enter(CMLB_MUTEX(cl));
bcopy(pgeomp, &cl->cl_pgeom, sizeof (cl->cl_pgeom));
cmlb_dbg(CMLB_INFO, cl, "cmlb_resync_geom_caches: "
"(cached from lgeom)\n");
cmlb_dbg(CMLB_INFO, cl,
"   ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n",
cl->cl_pgeom.g_ncyl, cl->cl_pgeom.g_acyl,
cl->cl_pgeom.g_nhead, cl->cl_pgeom.g_nsect);
cmlb_dbg(CMLB_INFO, cl, "   lbasize: %d; capacity: %ld; "
"intrlv: %d; rpm: %d\n", cl->cl_pgeom.g_secsize,
cl->cl_pgeom.g_capacity, cl->cl_pgeom.g_intrlv,
cl->cl_pgeom.g_rpm);
}
#if defined(__x86)
/*
 * Re-create the minor nodes for logical (extended fdisk) drives after
 * the extended-partition layout has changed.
 *
 * Removes the minor nodes of the previously known logical drives,
 * flushes the stale /devices entries via devfs_clean(), creates block
 * and raw nodes for the num_parts drives now present, and refreshes
 * the cl_map/cl_offset entries backing them.
 *
 * Returns 0 on success, or ENXIO when a minor node cannot be created
 * (in which case all minor nodes are removed and the logical drive
 * count is reset).  Caller must hold the CMLB mutex and have set
 * cl_update_ext_minor_nodes.
 */
static int
cmlb_update_ext_minor_nodes(struct cmlb_lun *cl, int num_parts)
{
	int				i, count, shift;
	char				name[48];
	int				instance;
	struct driver_minor_data	*demdp, *demdpr;
	char				*devnm;
	dev_info_t			*pdip;
	boolean_t			internal;

	ASSERT(mutex_owned(CMLB_MUTEX(cl)));
	ASSERT(cl->cl_update_ext_minor_nodes == 1);

	internal = VOID2BOOLEAN(
	    (cl->cl_alter_behavior & (CMLB_INTERNAL_MINOR_NODES)) != 0);
	instance = ddi_get_instance(CMLB_DEVINFO(cl));

	/* Block node names in the first half of the table, raw in the second. */
	demdp = dk_ext_minor_data;
	demdpr = &dk_ext_minor_data[MAX_EXT_PARTS];

	/* Minor-number layout depends on whether p0 nodes are created. */
	if (cl->cl_alter_behavior & CMLB_CREATE_P0_MINOR_NODE)
		shift = CMLBUNIT_FORCE_P0_SHIFT;
	else
		shift = CMLBUNIT_SHIFT;

	if (cl->cl_logical_drive_count) {
		/* Tear down the nodes of the previously known drives. */
		for (i = 0; i < cl->cl_logical_drive_count; i++) {
			(void) sprintf(name, "%s", demdp->name);
			ddi_remove_minor_node(CMLB_DEVINFO(cl), name);
			(void) sprintf(name, "%s", demdpr->name);
			ddi_remove_minor_node(CMLB_DEVINFO(cl), name);
			demdp++;
			demdpr++;
		}
		/* Flush the now-stale dv_nodes under /devices. */
		devnm = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
		(void) ddi_deviname(cl->cl_devi, devnm);
		pdip = ddi_get_parent(cl->cl_devi);
		(void) devfs_clean(pdip, devnm + 1, DV_CLEAN_FORCE);
		kmem_free(devnm, MAXNAMELEN + 1);
	}

	demdp = dk_ext_minor_data;
	demdpr = &dk_ext_minor_data[MAX_EXT_PARTS];

	for (i = 0; i < num_parts; i++) {
		/* Block device node for logical drive i. */
		(void) sprintf(name, "%s", demdp->name);
		if (cmlb_create_minor(CMLB_DEVINFO(cl), name,
		    demdp->type,
		    (instance << shift) | demdp->minor,
		    cl->cl_node_type, 0, internal) == DDI_FAILURE) {
			/* Creation failed: remove everything and bail. */
			ddi_remove_minor_node(CMLB_DEVINFO(cl), NULL);
			cl->cl_logical_drive_count = 0;
			return (ENXIO);
		}
		/* Matching raw (character) device node. */
		(void) sprintf(name, "%s", demdpr->name);
		if (ddi_create_minor_node(CMLB_DEVINFO(cl), name,
		    demdpr->type,
		    (instance << shift) | demdpr->minor,
		    cl->cl_node_type, 0) == DDI_FAILURE) {
			ddi_remove_minor_node(CMLB_DEVINFO(cl), NULL);
			cl->cl_logical_drive_count = 0;
			return (ENXIO);
		}
		demdp++;
		demdpr++;
	}

	/* Mirror the extended fmap entries into cl_map/cl_offset. */
	for (count = 0; count < MAX_EXT_PARTS; count++) {
		cl->cl_map[FDISK_P4 + 1 + count].dkl_cylno = UINT32_MAX;
		cl->cl_map[FDISK_P4 + 1 + count].dkl_nblk =
		    cl->cl_fmap[FD_NUMPART + count].fmap_nblk;
		cl->cl_offset[FDISK_P4 + 1 + count] =
		    cl->cl_fmap[FD_NUMPART + count].fmap_start;
	}
	cl->cl_logical_drive_count = i;
	cl->cl_update_ext_minor_nodes = 0;
	return (0);
}
/*
 * Validate a candidate logical drive against the extended partition
 * that contains it and against the logical drives already accepted.
 *
 * part  is the index of the containing extended partition in cl_fmap,
 * epart is the fmap slot the candidate would occupy, start/size give
 * its absolute location.  Returns 0 when the candidate is acceptable
 * and 1 when it is malformed or overlaps existing logical drives.
 */
static int
cmlb_validate_ext_part(struct cmlb_lun *cl, int part, int epart, uint32_t start,
    uint32_t size)
{
	int		idx;
	uint32_t	last = start + size - 1;
	uint32_t	container_start = cl->cl_fmap[part].fmap_start;
	uint32_t	container_end = container_start +
	    cl->cl_fmap[part].fmap_nblk - 1;
	uint32_t	limit = container_end;

	/* Reject zero-length or wrapping extents. */
	if (last <= start) {
		return (1);
	}

	/* The drive must lie strictly inside the extended partition. */
	if (start <= container_start || start > container_end ||
	    last <= container_start || last > container_end) {
		return (1);
	}

	/* First logical drive: nothing earlier to collide with. */
	if (epart == FD_NUMPART) {
		return (0);
	}

	/*
	 * Walk the logical drives already recorded: the new drive may
	 * neither start inside any of them, nor extend past the nearest
	 * recorded drive that begins after it.
	 */
	for (idx = FD_NUMPART; idx < epart; idx++) {
		uint32_t prev_start = cl->cl_fmap[idx].fmap_start;
		uint32_t prev_end = prev_start +
		    cl->cl_fmap[idx].fmap_nblk - 1;

		if (prev_start == 0 || prev_end == 0)
			break;
		if (start >= prev_start && start <= prev_end) {
			return (1);
		}
		if ((prev_start < limit) && (prev_start > start)) {
			limit = prev_start - 1;
		}
	}
	if (last > limit) {
		return (1);
	}
	return (0);
}
/*
 * Heuristically decide whether the partition starting at part_start is
 * really a Linux swap area carrying the Solaris fdisk systid (both use
 * the same id, SUNIXOS).
 *
 * Returns 0 when Linux swap magic is found, -1 when it is not (or when
 * a sane Sun VTOC label is present, meaning a genuine Solaris
 * partition), EIO on read error and ENOMEM if no buffer is available.
 *
 * Caller holds the CMLB mutex; it is dropped around each media read.
 */
static int
cmlb_is_linux_swap(struct cmlb_lun *cl, uint32_t part_start, void *tg_cookie)
{
	int		i;
	int		rval = -1;
	uint32_t	seek_offset;
	uint32_t	linux_pg_size;
	char		*buf, *linux_swap_magic;
	int		sec_sz = cl->cl_sys_blocksize;
	/* Linux page sizes to probe; the magic ends the first swap page. */
	uint32_t	linux_pg_size_arr[] = {4096, };

	ASSERT(cl != NULL);
	ASSERT(mutex_owned(CMLB_MUTEX(cl)));

	if ((buf = kmem_zalloc(sec_sz, KM_NOSLEEP)) == NULL) {
		return (ENOMEM);
	}

	/*
	 * If a sane Sun VTOC label sits at the usual label location,
	 * this is a genuine Solaris partition, not Linux swap.
	 */
	mutex_exit(CMLB_MUTEX(cl));
	rval = DK_TG_READ(cl, buf, part_start + DK_LABEL_LOC,
	    sec_sz, tg_cookie);
	mutex_enter(CMLB_MUTEX(cl));

	if (rval != 0) {
		cmlb_dbg(CMLB_ERROR, cl,
		    "cmlb_is_linux_swap: disk vtoc read err\n");
		rval = EIO;
		goto done;
	}

	if ((((struct dk_label *)buf)->dkl_magic == DKL_MAGIC) &&
	    (((struct dk_label *)buf)->dkl_vtoc.v_sanity == VTOC_SANE)) {
		rval = -1;
		goto done;
	}

	/* The 10-byte swap signature occupies the last bytes of the page. */
	linux_swap_magic = buf + sec_sz - 10;

	for (i = 0; i < sizeof (linux_pg_size_arr)/sizeof (uint32_t); i++) {
		linux_pg_size = linux_pg_size_arr[i];
		/* Last sector of the first swap page, in absolute blocks. */
		seek_offset = linux_pg_size/sec_sz - 1;
		seek_offset += part_start;

		mutex_exit(CMLB_MUTEX(cl));
		rval = DK_TG_READ(cl, buf, seek_offset, sec_sz, tg_cookie);
		mutex_enter(CMLB_MUTEX(cl));

		if (rval != 0) {
			cmlb_dbg(CMLB_ERROR, cl,
			    "cmlb_is_linux_swap: disk read err\n");
			rval = EIO;
			break;
		}

		rval = -1;

		if ((strncmp(linux_swap_magic, "SWAP-SPACE", 10) == 0) ||
		    (strncmp(linux_swap_magic, "SWAPSPACE2", 10) == 0)) {
			/* Linux swap signature found. */
			rval = 0;
			break;
		}
	}

done:
	kmem_free(buf, sec_sz);
	return (rval);
}
#endif
/*
 * Read and parse the fdisk (MBR) partition table, locating the Solaris
 * partition and recording all primary partitions -- and, on x86, the
 * logical drives chained inside an extended partition -- into cl_fmap.
 *
 * On success cl_solaris_offset/cl_solaris_size describe the Solaris
 * partition (or the whole disk when no fdisk table exists).  When the
 * Solaris partition moved or shrank below the label location, all
 * cached label state is invalidated.
 *
 * Returns 0 or an error from the media reads / minor-node update.
 * Caller holds the CMLB mutex; it is dropped around each media read.
 */
static int
cmlb_read_fdisk(struct cmlb_lun *cl, diskaddr_t capacity, void *tg_cookie)
{
#if defined(_NO_FDISK_PRESENT)
	/* No fdisk on this platform: the whole disk is Solaris. */
	cl->cl_solaris_offset = 0;
	cl->cl_solaris_size = capacity;
	bzero(cl->cl_fmap, sizeof (struct fmap) * FD_NUMPART);
	return (0);

#elif defined(_FIRMWARE_NEEDS_FDISK)

	struct ipart	*fdp;
	struct mboot	*mbp;
	struct ipart	fdisk[FD_NUMPART];
	int		i, k;
	char		sigbuf[2];
	caddr_t		bufp;
	int		uidx;
	int		rval;
	int		lba = 0;
	uint_t		solaris_offset;	/* offset to solaris part. */
	daddr_t		solaris_size;	/* size of solaris partition */
	uint32_t	blocksize;
#if defined(__x86)
	struct ipart	eparts[2];
	struct ipart	*efdp1 = &eparts[0];
	struct ipart	*efdp2 = &eparts[1];
	int		ext_part_exists = 0;
	int		ld_count = 0;
#endif

	ASSERT(cl != NULL);
	ASSERT(mutex_owned(CMLB_MUTEX(cl)));

	/* Start out assuming no fdisk table: whole disk is Solaris. */
	solaris_offset = 0;
	solaris_size = capacity;

	blocksize = cl->cl_tgt_blocksize;

	bufp = kmem_zalloc(blocksize, KM_SLEEP);

	/* Read the master boot record (sector 0). */
	mutex_exit(CMLB_MUTEX(cl));
	rval = DK_TG_READ(cl, bufp, 0, blocksize, tg_cookie);
	mutex_enter(CMLB_MUTEX(cl));

	if (rval != 0) {
		cmlb_dbg(CMLB_ERROR, cl,
		    "cmlb_read_fdisk: fdisk read err\n");
		bzero(cl->cl_fmap, sizeof (struct fmap) * FD_NUMPART);
		goto done;
	}

	mbp = (struct mboot *)bufp;

	/*
	 * Copy the partition table out of the MBR into an aligned
	 * local array before accessing the fields.
	 */
	bcopy(&mbp->parts[0], fdisk, sizeof (fdisk));

	/*
	 * Decide whether LBA addressing is in effect: either the root
	 * node carries the "lba-access-ok" property, or some entry
	 * uses the saturated CHS values that imply LBA.
	 */
	if (ddi_getprop(DDI_DEV_T_ANY, ddi_root_node(), 0,
	    "lba-access-ok", 0) != 0) {
		/* BIOS supports LBA on the root node. */
		lba = 1;
	} else {
		for (fdp = fdisk, i = 0; i < FD_NUMPART; i++, fdp++) {
			lba = (lba || cmlb_has_max_chs_vals(fdp));
		}
	}

	if (lba != 0) {
		dev_t dev = cmlb_make_device(cl);

		/* Publish "lba-access-ok" on this node if not yet set. */
		if (ddi_getprop(dev, CMLB_DEVINFO(cl), DDI_PROP_DONTPASS,
		    "lba-access-ok", 0) == 0) {
			if (ddi_prop_create(dev, CMLB_DEVINFO(cl), 0,
			    "lba-access-ok", (caddr_t)NULL, 0) !=
			    DDI_PROP_SUCCESS) {
				cmlb_dbg(CMLB_ERROR, cl,
				    "cmlb_read_fdisk: Can't create lba "
				    "property for instance %d\n",
				    ddi_get_instance(CMLB_DEVINFO(cl)));
			}
		}
	}

	bcopy(&mbp->signature, sigbuf, sizeof (sigbuf));

	/* Without the MBB_MAGIC signature there is no fdisk table. */
	if (((sigbuf[1] & 0xFF) != ((MBB_MAGIC >> 8) & 0xFF)) ||
	    (sigbuf[0] != (MBB_MAGIC & 0xFF))) {
		cmlb_dbg(CMLB_ERROR, cl,
		    "cmlb_read_fdisk: no fdisk\n");
		bzero(cl->cl_fmap, sizeof (struct fmap) * FD_NUMPART);
		goto done;
	}

#ifdef CMLBDEBUG
	if (cmlb_level_mask & CMLB_LOGMASK_INFO) {
		fdp = fdisk;
		cmlb_dbg(CMLB_INFO, cl, "cmlb_read_fdisk:\n");
		cmlb_dbg(CMLB_INFO, cl, " relsect "
		    "numsect sysid bootid\n");
		for (i = 0; i < FD_NUMPART; i++, fdp++) {
			cmlb_dbg(CMLB_INFO, cl,
			    " %d: %8d %8d 0x%08x 0x%08x\n",
			    i, fdp->relsect, fdp->numsect,
			    fdp->systid, fdp->bootid);
		}
	}
#endif

	/*
	 * Scan the four primary entries, caching each into cl_fmap and
	 * looking for the Solaris partition (preferring an active one).
	 */
	uidx = -1;
	solaris_offset = 0;
	solaris_size = 0;
	for (fdp = fdisk, i = 0; i < FD_NUMPART; i++, fdp++) {
		uint32_t relsect;
		uint32_t numsect;
		uchar_t systid;
#if defined(__x86)
		int ext_relsect = 0;
#endif

		if (fdp->numsect == 0) {
			cl->cl_fmap[i].fmap_start = 0;
			cl->cl_fmap[i].fmap_nblk = 0;
			continue;
		}

		/* fdisk fields are little-endian on media. */
		relsect = LE_32(fdp->relsect);
		numsect = LE_32(fdp->numsect);

		cl->cl_fmap[i].fmap_start = relsect;
		cl->cl_fmap[i].fmap_nblk = numsect;
		cl->cl_fmap[i].fmap_systid = LE_8(fdp->systid);

#if defined(__x86)
		/*
		 * Walk the chain of EBRs inside the (first) extended
		 * partition, recording each logical drive into the
		 * fmap slots past FD_NUMPART.
		 */
		if ((fdp->systid == EXTDOS || fdp->systid == FDISK_EXTLBA) &&
		    (ext_part_exists == 0)) {
			int j;
			uint32_t logdrive_offset;
			uint32_t ext_numsect;
			uint32_t abs_secnum;

			ext_part_exists = 1;

			for (j = FD_NUMPART; j < FDISK_PARTS; j++) {
				mutex_exit(CMLB_MUTEX(cl));
				rval = DK_TG_READ(cl, bufp,
				    (relsect + ext_relsect), blocksize,
				    tg_cookie);
				mutex_enter(CMLB_MUTEX(cl));

				if (rval != 0) {
					cmlb_dbg(CMLB_ERROR, cl,
					    "cmlb_read_fdisk: Extended "
					    "partition read err\n");
					goto done;
				}

				/*
				 * First EBR entry describes the logical
				 * drive, second links to the next EBR.
				 */
				bcopy(&bufp[FDISK_PART_TABLE_START], eparts,
				    sizeof (eparts));

				logdrive_offset = LE_32(efdp1->relsect);
				ext_numsect = LE_32(efdp1->numsect);
				systid = LE_8(efdp1->systid);

				/* An empty entry terminates the chain. */
				if (logdrive_offset <= 0 || ext_numsect <= 0)
					break;

				abs_secnum = relsect + ext_relsect +
				    logdrive_offset;

				/* Stop on an inconsistent logical drive. */
				if (cmlb_validate_ext_part(cl, i, j, abs_secnum,
				    ext_numsect)) {
					break;
				}

				/* Layout changed: minor nodes need rebuild. */
				if ((cl->cl_fmap[j].fmap_start != abs_secnum) ||
				    (cl->cl_fmap[j].fmap_nblk != ext_numsect) ||
				    (cl->cl_fmap[j].fmap_systid != systid)) {
					cl->cl_update_ext_minor_nodes = 1;
				}

				cl->cl_fmap[j].fmap_start = abs_secnum;
				cl->cl_fmap[j].fmap_nblk = ext_numsect;
				cl->cl_fmap[j].fmap_systid = systid;
				ld_count++;

				/*
				 * A SUNIXOS logical drive may really be
				 * Linux swap (same systid); check before
				 * accepting it as the Solaris partition.
				 */
				if ((efdp1->systid == SUNIXOS &&
				    (cmlb_is_linux_swap(cl, abs_secnum,
				    tg_cookie) != 0)) ||
				    efdp1->systid == SUNIXOS2) {
					if (uidx == -1) {
						uidx = 0;
						solaris_offset = abs_secnum;
						solaris_size = ext_numsect;
					}
				}

				/* Follow the link to the next EBR. */
				if ((ext_relsect = LE_32(efdp2->relsect)) == 0)
					break;
			}
		}
#endif

		if (fdp->systid != SUNIXOS &&
		    fdp->systid != SUNIXOS2 &&
		    fdp->systid != EFI_PMBR) {
			continue;
		}

		/* Prefer an ACTIVE Solaris partition over the first seen. */
		if ((uidx == -1) || (fdp->bootid == ACTIVE)) {
#if defined(__x86)
			/* Again guard against Linux swap with SUNIXOS id. */
			if (fdp->systid != SUNIXOS ||
			    (fdp->systid == SUNIXOS &&
			    (cmlb_is_linux_swap(cl, relsect,
			    tg_cookie) != 0))) {
#endif
				uidx = i;
				solaris_offset = relsect;
				solaris_size = numsect;
#if defined(__x86)
			}
#endif
		}
	}

#if defined(__x86)
	/* Clear the fmap slots of logical drives that disappeared. */
	if (ld_count < cl->cl_logical_drive_count) {
		for (k = ld_count + FD_NUMPART;
		    k < cl->cl_logical_drive_count + FD_NUMPART; k++) {
			cl->cl_fmap[k].fmap_start = 0;
			cl->cl_fmap[k].fmap_nblk = 0;
			cl->cl_fmap[k].fmap_systid = 0;
		}
		cl->cl_update_ext_minor_nodes = 1;
	}
	if (cl->cl_update_ext_minor_nodes) {
		rval = cmlb_update_ext_minor_nodes(cl, ld_count);
		if (rval != 0) {
			goto done;
		}
	}
#endif

	cmlb_dbg(CMLB_INFO, cl, "fdisk 0x%x 0x%lx",
	    cl->cl_solaris_offset, cl->cl_solaris_size);
done:

	/*
	 * If the Solaris partition moved, or is too small to hold the
	 * label, all cached label/geometry state is stale.
	 */
	if ((cl->cl_solaris_offset != solaris_offset) ||
	    (cl->cl_solaris_size != solaris_size) ||
	    solaris_size <= DK_LABEL_LOC) {
		cmlb_dbg(CMLB_INFO, cl, "fdisk moved 0x%x 0x%lx",
		    solaris_offset, solaris_size);
		bzero(&cl->cl_g, sizeof (struct dk_geom));
		bzero(&cl->cl_vtoc, sizeof (struct dk_vtoc));
		bzero(&cl->cl_map, NDKMAP * (sizeof (struct dk_map)));
		cl->cl_f_geometry_is_valid = B_FALSE;
	}
	cl->cl_solaris_offset = solaris_offset;
	cl->cl_solaris_size = solaris_size;
	kmem_free(bufp, blocksize);
	return (rval);

#else
#error "fdisk table presence undetermined for this platform."
#endif
}
/*
 * Convert an EFI GPT header from its on-disk little-endian layout to
 * host byte order, in place.
 */
static void
cmlb_swap_efi_gpt(efi_gpt_t *e)
{
	_NOTE(ASSUMING_PROTECTED(*e))

	/* Identification and integrity fields. */
	e->efi_gpt_Signature = LE_64(e->efi_gpt_Signature);
	e->efi_gpt_Revision = LE_32(e->efi_gpt_Revision);
	e->efi_gpt_HeaderSize = LE_32(e->efi_gpt_HeaderSize);
	e->efi_gpt_HeaderCRC32 = LE_32(e->efi_gpt_HeaderCRC32);

	/* Header and usable-area LBAs. */
	e->efi_gpt_MyLBA = LE_64(e->efi_gpt_MyLBA);
	e->efi_gpt_AlternateLBA = LE_64(e->efi_gpt_AlternateLBA);
	e->efi_gpt_FirstUsableLBA = LE_64(e->efi_gpt_FirstUsableLBA);
	e->efi_gpt_LastUsableLBA = LE_64(e->efi_gpt_LastUsableLBA);

	/* Disk GUID and partition-entry array description. */
	UUID_LE_CONVERT(e->efi_gpt_DiskGUID, e->efi_gpt_DiskGUID);
	e->efi_gpt_PartitionEntryLBA = LE_64(e->efi_gpt_PartitionEntryLBA);
	e->efi_gpt_NumberOfPartitionEntries =
	    LE_32(e->efi_gpt_NumberOfPartitionEntries);
	e->efi_gpt_SizeOfPartitionEntry =
	    LE_32(e->efi_gpt_SizeOfPartitionEntry);
	e->efi_gpt_PartitionEntryArrayCRC32 =
	    LE_32(e->efi_gpt_PartitionEntryArrayCRC32);
}
/*
 * Convert the first nparts GPT partition entries at p from their
 * on-disk little-endian layout to host byte order, in place.
 */
static void
cmlb_swap_efi_gpe(int nparts, efi_gpe_t *p)
{
	efi_gpe_t *gpe;

	_NOTE(ASSUMING_PROTECTED(*p))

	for (gpe = p; gpe < &p[nparts]; gpe++) {
		UUID_LE_CONVERT(gpe->efi_gpe_PartitionTypeGUID,
		    gpe->efi_gpe_PartitionTypeGUID);
		gpe->efi_gpe_StartingLBA = LE_64(gpe->efi_gpe_StartingLBA);
		gpe->efi_gpe_EndingLBA = LE_64(gpe->efi_gpe_EndingLBA);
	}
}
/*
 * Sanity-check a (byte-swapped) GPT header: the EFI signature must be
 * present, the advertised header size must be at least the structure
 * size minus the trailing reserved area, and each partition entry
 * must be exactly sizeof (efi_gpe_t).  Returns 0 if valid, EINVAL
 * otherwise.
 */
static int
cmlb_validate_efi(efi_gpt_t *labp)
{
	if ((labp->efi_gpt_Signature != EFI_SIGNATURE) ||
	    (labp->efi_gpt_HeaderSize <
	    sizeof (efi_gpt_t) - sizeof (labp->efi_gpt_Reserved2)) ||
	    (labp->efi_gpt_SizeOfPartitionEntry != sizeof (efi_gpe_t)))
		return (EINVAL);

	return (0);
}
/*
 * Inspect the boot sector in buf for EFI compatibility.  Returns
 * B_TRUE when the sector carries no MBR signature at all, or when its
 * fdisk table contains an EFI protective (EFI_PMBR) entry; B_FALSE for
 * an ordinary MBR.  When is_mbr is non-NULL it is set to whether an
 * MBR signature was found.
 */
static boolean_t
cmlb_check_efi_mbr(uchar_t *buf, boolean_t *is_mbr)
{
	struct mboot	*mbp = (struct mboot *)buf;
	struct ipart	fdisk[FD_NUMPART];
	int		i;

	if (is_mbr != NULL)
		*is_mbr = B_TRUE;

	/* No MBR signature: nothing contradicts an EFI label. */
	if (LE_16(mbp->signature) != MBB_MAGIC) {
		if (is_mbr != NULL)
			*is_mbr = B_FALSE;
		return (B_TRUE);
	}

	/* Copy out the (possibly unaligned) partition table and scan it. */
	bcopy(&mbp->parts[0], fdisk, sizeof (fdisk));
	for (i = 0; i < FD_NUMPART; i++) {
		if (fdisk[i].systid == EFI_PMBR)
			return (B_TRUE);
	}

	return (B_FALSE);
}
/*
 * Attempt to read an EFI (GPT) label and install it into the lun's
 * cached partition state.  Tries the primary header at LBA 1 and
 * falls back to the backup header in the last sector (with an extra
 * attempt for devices whose reported capacity is off by one).
 *
 * Returns 0 when an EFI label was installed, ESRCH when the media
 * carries a VTOC label or a plain MBR (i.e. "not ours"), EINVAL on a
 * malformed label, or the error from a failed media access.
 *
 * Caller holds the CMLB mutex; it is dropped across all media reads
 * and re-acquired before cached state is touched.
 */
static int
cmlb_use_efi(struct cmlb_lun *cl, diskaddr_t capacity, int flags,
    void *tg_cookie)
{
	int		i;
	int		rval = 0;
	efi_gpe_t	*partitions;
	uchar_t		*buf;
	uint_t		lbasize;
	diskaddr_t	cap = 0;
	uint_t		nparts;
	diskaddr_t	gpe_lba;
	diskaddr_t	alternate_lba;
	int		iofailed = 0;
	struct uuid	uuid_type_reserved = EFI_RESERVED;
#if defined(_FIRMWARE_NEEDS_FDISK)
	boolean_t	is_mbr;
#endif

	ASSERT(mutex_owned(CMLB_MUTEX(cl)));

	lbasize = cl->cl_sys_blocksize;

	cl->cl_reserved = -1;
	mutex_exit(CMLB_MUTEX(cl));

	buf = kmem_zalloc(EFI_MIN_ARRAY_SIZE, KM_SLEEP);

	/* Sector 0: reject media carrying a VTOC label or a non-EFI MBR. */
	rval = DK_TG_READ(cl, buf, 0, lbasize, tg_cookie);
	if (rval) {
		iofailed = 1;
		goto done_err;
	}
	if (((struct dk_label *)buf)->dkl_magic == DKL_MAGIC) {
		/* A VTOC label is present: not an EFI disk. */
		rval = ESRCH;
		goto done_err;
	}

#if defined(_FIRMWARE_NEEDS_FDISK)
	if (!cmlb_check_efi_mbr(buf, &is_mbr)) {
		if (is_mbr)
			rval = ESRCH;	/* plain MBR, no protective entry */
		else
			rval = EINVAL;
		goto done_err;
	}
#else
	if (!cmlb_check_efi_mbr(buf, NULL)) {
		rval = EINVAL;
		goto done_err;
	}
#endif

	/* Sector 1: the primary GPT header. */
	rval = DK_TG_READ(cl, buf, 1, lbasize, tg_cookie);
	if (rval) {
		iofailed = 1;
		goto done_err;
	}
	cmlb_swap_efi_gpt((efi_gpt_t *)buf);

	if ((rval = cmlb_validate_efi((efi_gpt_t *)buf)) != 0) {
		/*
		 * Primary header invalid: try the backup header in the
		 * last sector, accounting for CMLB_OFF_BY_ONE devices.
		 */
		rval = DK_TG_GETCAP(cl, &cap, tg_cookie);
		if (rval) {
			iofailed = 1;
			goto done_err;
		}
		if ((rval = DK_TG_READ(cl, buf,
		    cap - ((cl->cl_alter_behavior & CMLB_OFF_BY_ONE) ? 2 : 1),
		    lbasize, tg_cookie))
		    != 0) {
			iofailed = 1;
			goto done_err;
		}
		cmlb_swap_efi_gpt((efi_gpt_t *)buf);
		if ((rval = cmlb_validate_efi((efi_gpt_t *)buf)) != 0) {

			/* One last try at the unadjusted final sector. */
			if (!(cl->cl_alter_behavior & CMLB_OFF_BY_ONE))
				goto done_err;
			if ((rval = DK_TG_READ(cl, buf, cap - 1, lbasize,
			    tg_cookie)) != 0)
				goto done_err;
			cmlb_swap_efi_gpt((efi_gpt_t *)buf);
			if ((rval = cmlb_validate_efi((efi_gpt_t *)buf)) != 0)
				goto done_err;
		}
		if (!(flags & CMLB_SILENT))
			cmlb_log(CMLB_DEVINFO(cl), CMLB_LABEL(cl), CE_WARN,
			    "primary label corrupt; using backup\n");
	}

	nparts = ((efi_gpt_t *)buf)->efi_gpt_NumberOfPartitionEntries;
	gpe_lba = ((efi_gpt_t *)buf)->efi_gpt_PartitionEntryLBA;
	alternate_lba = ((efi_gpt_t *)buf)->efi_gpt_AlternateLBA;

	/* Read the partition entry array (reuses the same buffer). */
	rval = DK_TG_READ(cl, buf, gpe_lba, EFI_MIN_ARRAY_SIZE, tg_cookie);
	if (rval) {
		iofailed = 1;
		goto done_err;
	}
	partitions = (efi_gpe_t *)buf;

	/* Only the first MAXPART entries are representable here. */
	if (nparts > MAXPART) {
		nparts = MAXPART;
	}
	cmlb_swap_efi_gpe(nparts, partitions);

	mutex_enter(CMLB_MUTEX(cl));

	/* Install each partition entry into the slice tables. */
	for (i = 0; i < nparts; i++) {
		if (partitions->efi_gpe_StartingLBA != 0 ||
		    partitions->efi_gpe_EndingLBA != 0) {
			cl->cl_map[i].dkl_cylno =
			    partitions->efi_gpe_StartingLBA;
			cl->cl_map[i].dkl_nblk =
			    partitions->efi_gpe_EndingLBA -
			    partitions->efi_gpe_StartingLBA + 1;
			cl->cl_offset[i] =
			    partitions->efi_gpe_StartingLBA;
		}

		/* Remember the first EFI_RESERVED partition. */
		if (cl->cl_reserved == -1) {
			if (bcmp(&partitions->efi_gpe_PartitionTypeGUID,
			    &uuid_type_reserved, sizeof (struct uuid)) == 0) {
				cl->cl_reserved = i;
			}
		}
		if (i == WD_NODE) {
			/* Whole-disk node spans up to the backup GPT. */
			cl->cl_map[i].dkl_cylno = 0;
			if (alternate_lba == 1) {
				cl->cl_map[i].dkl_nblk = capacity;
			} else {
				cl->cl_map[i].dkl_nblk = alternate_lba + 1;
			}
			cl->cl_offset[i] = 0;
		}
		partitions++;
	}
	cl->cl_solaris_offset = 0;
	cl->cl_solaris_size = capacity;
	cl->cl_label_from_media = CMLB_LABEL_EFI;
	cl->cl_f_geometry_is_valid = B_TRUE;

	/* EFI labels carry no VTOC; clear any stale cached one. */
	bzero(&cl->cl_vtoc, sizeof (struct dk_vtoc));

	kmem_free(buf, EFI_MIN_ARRAY_SIZE);
	return (0);

done_err:
	kmem_free(buf, EFI_MIN_ARRAY_SIZE);
	mutex_enter(CMLB_MUTEX(cl));
	/*
	 * A disk past the extended-VTOC limit that genuinely failed EFI
	 * validation (not an I/O failure, not a VTOC/MBR disk) has no
	 * usable geometry.
	 */
	if ((capacity > CMLB_EXTVTOC_LIMIT) && (rval != ESRCH) && !iofailed) {
		cl->cl_f_geometry_is_valid = B_FALSE;
	}
	return (rval);
}
/*
 * Check a VTOC disk label for validity (magic number, XOR checksum,
 * non-zero capacity) and, when valid, install its geometry and slice
 * map into the lun's cached state.
 *
 * Returns CMLB_LABEL_IS_VALID or CMLB_LABEL_IS_INVALID.  Caller must
 * hold the CMLB mutex throughout.
 */
static int
cmlb_uselabel(struct cmlb_lun *cl, struct dk_label *labp, int flags)
{
	short		*sp;
	short		sum;
	short		count;
	int		label_error = CMLB_LABEL_IS_VALID;
	int		i;
	diskaddr_t	label_capacity;
	uint32_t	part_end;
	diskaddr_t	track_capacity;
#if defined(_SUNOS_VTOC_16)
	struct dkl_partition	*vpartp;
#endif

	ASSERT(cl != NULL);
	ASSERT(mutex_owned(CMLB_MUTEX(cl)));

	/* Validate the magic number of the label. */
	if (labp->dkl_magic != DKL_MAGIC) {
#if defined(__sparc)
		if (!ISREMOVABLE(cl) && !ISHOTPLUGGABLE(cl)) {
			if (!(flags & CMLB_SILENT))
				cmlb_log(CMLB_DEVINFO(cl), CMLB_LABEL(cl),
				    CE_WARN,
				    "Corrupt label; wrong magic number\n");
		}
#endif
		return (CMLB_LABEL_IS_INVALID);
	}

	/* The label checksum XORs every short in the label to zero. */
	sp = (short *)labp;
	sum = 0;
	count = sizeof (struct dk_label) / sizeof (short);

	while (count--) {
		sum ^= *sp++;
	}

	if (sum != 0) {
#if defined(_SUNOS_VTOC_16)
		if (!ISCD(cl)) {
#elif defined(_SUNOS_VTOC_8)
		if (!ISREMOVABLE(cl) && !ISHOTPLUGGABLE(cl)) {
#endif
			if (!(flags & CMLB_SILENT))
				cmlb_log(CMLB_DEVINFO(cl), CMLB_LABEL(cl),
				    CE_WARN,
				    "Corrupt label - label checksum failed\n");
		}
		return (CMLB_LABEL_IS_INVALID);
	}

	/* Fill in the disk geometry from the label. */
	bzero(&cl->cl_g, sizeof (struct dk_geom));
	cl->cl_g.dkg_ncyl = labp->dkl_ncyl;
	cl->cl_g.dkg_acyl = labp->dkl_acyl;
	cl->cl_g.dkg_bcyl = 0;
	cl->cl_g.dkg_nhead = labp->dkl_nhead;
	cl->cl_g.dkg_nsect = labp->dkl_nsect;
	cl->cl_g.dkg_intrlv = labp->dkl_intrlv;

#if defined(_SUNOS_VTOC_8)
	cl->cl_g.dkg_gap1 = labp->dkl_gap1;
	cl->cl_g.dkg_gap2 = labp->dkl_gap2;
	cl->cl_g.dkg_bhead = labp->dkl_bhead;
#endif
#if defined(_SUNOS_VTOC_16)
	cl->cl_dkg_skew = labp->dkl_skew;
#endif

#if defined(__x86)
	cl->cl_g.dkg_apc = labp->dkl_apc;
#endif

	/* Substitute sensible defaults for zero rpm/pcyl in old labels. */
	cl->cl_g.dkg_rpm = (labp->dkl_rpm != 0) ? labp->dkl_rpm : 3600;
	cl->cl_g.dkg_pcyl = (labp->dkl_pcyl != 0) ? labp->dkl_pcyl :
	    (cl->cl_g.dkg_ncyl + cl->cl_g.dkg_acyl);

	cl->cl_g.dkg_read_reinstruct = labp->dkl_read_reinstruct;
	cl->cl_g.dkg_write_reinstruct = labp->dkl_write_reinstruct;

#if defined(_SUNOS_VTOC_8)
	/* VTOC-8 keeps the slice map directly in the label. */
	for (i = 0; i < NDKMAP; i++) {
		cl->cl_map[i].dkl_cylno = labp->dkl_map[i].dkl_cylno;
		cl->cl_map[i].dkl_nblk = labp->dkl_map[i].dkl_nblk;
	}
#endif

#if defined(_SUNOS_VTOC_16)
	/* VTOC-16 stores block offsets; derive cylinder/block form. */
	vpartp = labp->dkl_vtoc.v_part;
	track_capacity = labp->dkl_nhead * labp->dkl_nsect;

	if (track_capacity == 0) {
		/* Division by zero below would be fatal; reject. */
		if (!(flags & CMLB_SILENT))
			cmlb_log(CMLB_DEVINFO(cl), CMLB_LABEL(cl), CE_WARN,
			    "Corrupt label - zero nhead or nsect value\n");
		return (CMLB_LABEL_IS_INVALID);
	}

	for (i = 0; i < NDKMAP; i++, vpartp++) {
		cl->cl_map[i].dkl_cylno = vpartp->p_start / track_capacity;
		cl->cl_map[i].dkl_nblk = vpartp->p_size;
	}
#endif

	bcopy(&labp->dkl_vtoc, &cl->cl_vtoc, sizeof (struct dk_vtoc));
#if defined(_SUNOS_VTOC_8)
	bcopy(labp->dkl_asciilabel, cl->cl_asciilabel, LEN_DKL_ASCII);
#endif

	/* Compare the label's capacity against the actual device size. */
	track_capacity = (cl->cl_g.dkg_nhead * cl->cl_g.dkg_nsect);
	label_capacity = (cl->cl_g.dkg_ncyl * track_capacity);
	if (cl->cl_g.dkg_acyl) {
#if defined(__x86)
		/* x86 counts all alternate cylinders in the capacity. */
		label_capacity += (track_capacity * cl->cl_g.dkg_acyl);
#else
		label_capacity += track_capacity;
#endif
	}

	if (label_capacity == 0) {
		if (!(flags & CMLB_SILENT))
			cmlb_log(CMLB_DEVINFO(cl), CMLB_LABEL(cl), CE_WARN,
			    "Corrupt label - no valid capacity could be "
			    "retrieved\n");
		return (CMLB_LABEL_IS_INVALID);
	}

	cl->cl_f_geometry_is_valid = B_TRUE;

	if (label_capacity <= cl->cl_blockcount) {
#if defined(_SUNOS_VTOC_8)
		cmlb_dbg(CMLB_ERROR, cl,
		    "cmlb_uselabel: Label %d blocks; Drive %d blocks\n",
		    label_capacity, cl->cl_blockcount);
		cl->cl_solaris_size = label_capacity;
#endif
		goto done;
	}

	/* Label claims more blocks than the drive actually has. */
	if (ISCD(cl)) {
		/* For CDs only reject when a slice exceeds the media. */
#if defined(_SUNOS_VTOC_8)
		for (i = 0; i < NDKMAP; i++) {
			part_end = labp->dkl_nhead * labp->dkl_nsect *
			    labp->dkl_map[i].dkl_cylno +
			    labp->dkl_map[i].dkl_nblk - 1;

			if ((labp->dkl_map[i].dkl_nblk) &&
			    (part_end > cl->cl_blockcount)) {
				cl->cl_f_geometry_is_valid = B_FALSE;
				break;
			}
		}
#endif
#if defined(_SUNOS_VTOC_16)
		vpartp = &(labp->dkl_vtoc.v_part[0]);
		for (i = 0; i < NDKMAP; i++, vpartp++) {
			part_end = vpartp->p_start + vpartp->p_size;

			if ((vpartp->p_size > 0) &&
			    (part_end > cl->cl_blockcount)) {
				cl->cl_f_geometry_is_valid = B_FALSE;
				break;
			}
		}
#endif
	} else {
		if (!(flags & CMLB_SILENT)) {
			cmlb_log(CMLB_DEVINFO(cl), CMLB_LABEL(cl), CE_WARN,
			    "Corrupt label - bad geometry\n");
			cmlb_log(CMLB_DEVINFO(cl), CMLB_LABEL(cl), CE_CONT,
			    "Label says %llu blocks; Drive says %llu blocks\n",
			    label_capacity, cl->cl_blockcount);
		}
		cl->cl_f_geometry_is_valid = B_FALSE;
		label_error = CMLB_LABEL_IS_INVALID;
	}

done:

	cmlb_dbg(CMLB_INFO, cl, "cmlb_uselabel: (label geometry)\n");
	cmlb_dbg(CMLB_INFO, cl,
	    " ncyl: %d; acyl: %d; nhead: %d; nsect: %d\n",
	    cl->cl_g.dkg_ncyl, cl->cl_g.dkg_acyl,
	    cl->cl_g.dkg_nhead, cl->cl_g.dkg_nsect);
	cmlb_dbg(CMLB_INFO, cl,
	    " label_capacity: %d; intrlv: %d; rpm: %d\n",
	    cl->cl_blockcount, cl->cl_g.dkg_intrlv, cl->cl_g.dkg_rpm);
	cmlb_dbg(CMLB_INFO, cl, " wrt_reinstr: %d; rd_reinstr: %d\n",
	    cl->cl_g.dkg_write_reinstruct, cl->cl_g.dkg_read_reinstruct);

	ASSERT(mutex_owned(CMLB_MUTEX(cl)));
	return (label_error);
}
/*
 * Fabricate a default label (geometry, vtoc and slice map) for
 * unlabeled media so the device is usable before a real label exists.
 * The shape of the fabricated label differs per VTOC flavor and per
 * media type (CD vs. disk).  Caller must hold the CMLB mutex.
 */
static void
cmlb_build_default_label(struct cmlb_lun *cl, void *tg_cookie)
{
#if defined(_SUNOS_VTOC_16)
	uint_t		phys_spc;
	uint_t		disksize;
	struct dk_geom	cl_g;
	diskaddr_t	capacity;
#endif

	ASSERT(cl != NULL);
	ASSERT(mutex_owned(CMLB_MUTEX(cl)));

#if defined(_SUNOS_VTOC_8)
	/*
	 * On VTOC-8 platforms a default label is only fabricated for
	 * removable or hotpluggable media; fixed disks are expected to
	 * carry a real label.
	 */
	if (!ISREMOVABLE(cl) && !ISHOTPLUGGABLE(cl)) {
		return;
	}
#endif

	bzero(&cl->cl_g, sizeof (struct dk_geom));
	bzero(&cl->cl_vtoc, sizeof (struct dk_vtoc));
	bzero(&cl->cl_map, NDKMAP * (sizeof (struct dk_map)));

#if defined(_SUNOS_VTOC_8)

	cl->cl_solaris_size = cl->cl_blockcount;

	if (ISCD(cl)) {
		tg_attribute_t tgattribute;
		int is_writable;

		bzero(&tgattribute, sizeof (tg_attribute_t));

		/* Writability decides the fake geometry; default writable. */
		mutex_exit(CMLB_MUTEX(cl));
		is_writable =
		    (DK_TG_GETATTRIBUTE(cl, &tgattribute, tg_cookie) == 0) ?
		    tgattribute.media_is_writable : 1;
		mutex_enter(CMLB_MUTEX(cl));

		if (is_writable) {
			cl->cl_g.dkg_nhead = 64;
			cl->cl_g.dkg_nsect = 32;
			cl->cl_g.dkg_ncyl = cl->cl_blockcount / (64 * 32);
			cl->cl_solaris_size = (diskaddr_t)cl->cl_g.dkg_ncyl *
			    cl->cl_g.dkg_nhead * cl->cl_g.dkg_nsect;
		} else {
			/* Read-only CD: one cylinder, one head. */
			cl->cl_g.dkg_ncyl = 1;
			cl->cl_g.dkg_nhead = 1;
			cl->cl_g.dkg_nsect = cl->cl_blockcount;
		}
	} else {
		/* Pick a fake geometry scaled to the media size. */
		if (cl->cl_blockcount < 160) {
			cl->cl_g.dkg_nhead = 1;
			cl->cl_g.dkg_ncyl = cl->cl_blockcount;
			cl->cl_g.dkg_nsect = 1;
		} else if (cl->cl_blockcount <= 0x1000) {
			cl->cl_g.dkg_nhead = 2;
			cl->cl_g.dkg_ncyl = 80;
			cl->cl_g.dkg_nsect = cl->cl_blockcount / (2 * 80);
		} else if (cl->cl_blockcount <= 0x200000) {
			cl->cl_g.dkg_nhead = 64;
			cl->cl_g.dkg_nsect = 32;
			cl->cl_g.dkg_ncyl = cl->cl_blockcount / (64 * 32);
		} else {
			/*
			 * Large media: 255 heads; nsect is chosen (in
			 * multiples of 63) so ncyl fits in 16 bits.
			 */
			cl->cl_g.dkg_nhead = 255;
			cl->cl_g.dkg_nsect = ((cl->cl_blockcount +
			    (UINT16_MAX * 255 * 63) - 1) /
			    (UINT16_MAX * 255 * 63)) * 63;

			if (cl->cl_g.dkg_nsect == 0)
				cl->cl_g.dkg_nsect = (UINT16_MAX / 63) * 63;

			cl->cl_g.dkg_ncyl = cl->cl_blockcount /
			    (255 * cl->cl_g.dkg_nsect);
		}

		cl->cl_solaris_size =
		    (diskaddr_t)cl->cl_g.dkg_ncyl * cl->cl_g.dkg_nhead *
		    cl->cl_g.dkg_nsect;
	}

	cl->cl_g.dkg_acyl = 0;
	cl->cl_g.dkg_bcyl = 0;
	cl->cl_g.dkg_rpm = 200;
	cl->cl_asciilabel[0] = '\0';
	cl->cl_g.dkg_pcyl = cl->cl_g.dkg_ncyl;

	/* Slices 0 and 2 (backup) both span the usable space. */
	cl->cl_map[0].dkl_cylno = 0;
	cl->cl_map[0].dkl_nblk = cl->cl_solaris_size;

	cl->cl_map[2].dkl_cylno = 0;
	cl->cl_map[2].dkl_nblk = cl->cl_solaris_size;

#elif defined(_SUNOS_VTOC_16)

	if (cl->cl_solaris_size == 0) {
		/*
		 * No Solaris fdisk partition: nothing to label, but mark
		 * the geometry valid so callers stop retrying.
		 */
		cl->cl_f_geometry_is_valid = B_TRUE;
		return;
	}

	/* CDs reuse the cached physical geometry; disks derive one. */
	if (ISCD(cl)) {
		phys_spc = cl->cl_pgeom.g_nhead * cl->cl_pgeom.g_nsect;
	} else {
		bzero(&cl_g, sizeof (struct dk_geom));

		if (cl->cl_alter_behavior & CMLB_OFF_BY_ONE)
			capacity = cl->cl_blockcount - 1;
		else
			capacity = cl->cl_blockcount;

		cmlb_convert_geometry(cl, capacity, &cl_g, tg_cookie);
		bcopy(&cl_g, &cl->cl_g, sizeof (cl->cl_g));
		phys_spc = cl->cl_g.dkg_nhead * cl->cl_g.dkg_nsect;
	}

	if (phys_spc == 0)
		return;

	cl->cl_g.dkg_pcyl = cl->cl_solaris_size / phys_spc;
	if (cl->cl_alter_behavior & CMLB_FAKE_LABEL_ONE_PARTITION) {
		/* One big partition: no alternate cylinders reserved. */
		cl->cl_g.dkg_ncyl = cl->cl_g.dkg_pcyl;
		disksize = cl->cl_solaris_size;
	} else {
		/* Reserve DK_ACYL alternate cylinders. */
		cl->cl_g.dkg_acyl = DK_ACYL;
		cl->cl_g.dkg_ncyl = cl->cl_g.dkg_pcyl - DK_ACYL;
		disksize = cl->cl_g.dkg_ncyl * phys_spc;
	}

	if (ISCD(cl)) {
		/* CD: one slice covering everything, 1x1 geometry. */
		disksize = cl->cl_solaris_size;
		cl->cl_g.dkg_nhead = 1;
		cl->cl_g.dkg_nsect = 1;
		cl->cl_g.dkg_rpm =
		    (cl->cl_pgeom.g_rpm == 0) ? 200 : cl->cl_pgeom.g_rpm;

		cl->cl_vtoc.v_part[0].p_start = 0;
		cl->cl_vtoc.v_part[0].p_size = disksize;
		cl->cl_vtoc.v_part[0].p_tag = V_BACKUP;
		cl->cl_vtoc.v_part[0].p_flag = V_UNMNT;

		cl->cl_map[0].dkl_cylno = 0;
		cl->cl_map[0].dkl_nblk = disksize;
		cl->cl_offset[0] = 0;
	} else {
		/* Hard disk: boot slice 8 occupies cylinder 0. */
		cl->cl_g.dkg_rpm =
		    (cl->cl_pgeom.g_rpm == 0) ? 3600: cl->cl_pgeom.g_rpm;
		cl->cl_vtoc.v_sectorsz = cl->cl_sys_blocksize;

		cl->cl_vtoc.v_part[8].p_start = 0;
		cl->cl_vtoc.v_part[8].p_size = phys_spc;
		cl->cl_vtoc.v_part[8].p_tag = V_BOOT;
		cl->cl_vtoc.v_part[8].p_flag = V_UNMNT;

		cl->cl_map[8].dkl_cylno = 0;
		cl->cl_map[8].dkl_nblk = phys_spc;
		cl->cl_offset[8] = 0;

		/* Optional alternate-sector slice 9 for direct devices. */
		if ((cl->cl_alter_behavior &
		    CMLB_CREATE_ALTSLICE_VTOC_16_DTYPE_DIRECT) &&
		    cl->cl_device_type == DTYPE_DIRECT) {
			cl->cl_vtoc.v_part[9].p_start = phys_spc;
			cl->cl_vtoc.v_part[9].p_size = 2 * phys_spc;
			cl->cl_vtoc.v_part[9].p_tag = V_ALTSCTR;
			cl->cl_vtoc.v_part[9].p_flag = 0;

			cl->cl_map[9].dkl_cylno = 1;
			cl->cl_map[9].dkl_nblk = 2 * phys_spc;

			cl->cl_offset[9] = phys_spc;
		}
	}

	cl->cl_g.dkg_apc = 0;

	/* Backup slice 2 covers the whole usable space. */
	cl->cl_vtoc.v_part[2].p_start = 0;
	cl->cl_vtoc.v_part[2].p_size = disksize;
	cl->cl_vtoc.v_part[2].p_tag = V_BACKUP;
	cl->cl_vtoc.v_part[2].p_flag = V_UNMNT;

	cl->cl_map[2].dkl_cylno = 0;
	cl->cl_map[2].dkl_nblk = disksize;
	cl->cl_offset[2] = 0;

	/* Single unassigned slice 0 spanning the whole disk. */
	if (cl->cl_alter_behavior & CMLB_FAKE_LABEL_ONE_PARTITION) {
		cl->cl_vtoc.v_part[0].p_start = 0;
		cl->cl_vtoc.v_part[0].p_tag = V_UNASSIGNED;
		cl->cl_vtoc.v_part[0].p_flag = 0;
		cl->cl_vtoc.v_part[0].p_size = disksize;
		cl->cl_map[0].dkl_cylno = 0;
		cl->cl_map[0].dkl_nblk = disksize;
		cl->cl_offset[0] = 0;
	}

	(void) sprintf(cl->cl_vtoc.v_asciilabel, "DEFAULT cyl %d alt %d"
	    " hd %d sec %d", cl->cl_g.dkg_ncyl, cl->cl_g.dkg_acyl,
	    cl->cl_g.dkg_nhead, cl->cl_g.dkg_nsect);

#else
#error "No VTOC format defined."
#endif

	cl->cl_g.dkg_read_reinstruct = 0;
	cl->cl_g.dkg_write_reinstruct = 0;
	cl->cl_g.dkg_intrlv = 1;

	cl->cl_vtoc.v_sanity = VTOC_SANE;
	cl->cl_vtoc.v_nparts = V_NUMPAR;
	cl->cl_vtoc.v_version = V_VERSION;

	cl->cl_f_geometry_is_valid = B_TRUE;
	cl->cl_label_from_media = CMLB_LABEL_UNDEF;

	cmlb_dbg(CMLB_INFO, cl,
	    "cmlb_build_default_label: Default label created: "
	    "cyl: %d\tacyl: %d\tnhead: %d\tnsect: %d\tcap: %d\n",
	    cl->cl_g.dkg_ncyl, cl->cl_g.dkg_acyl, cl->cl_g.dkg_nhead,
	    cl->cl_g.dkg_nsect, cl->cl_blockcount);
}
#if defined(_FIRMWARE_NEEDS_FDISK)

/* Maximum encodable CHS values, used by firmware to flag LBA entries. */
#define	LBA_MAX_SECT	(63 | ((1022 & 0x300) >> 2))
#define	LBA_MAX_CYL	(1022 & 0xFF)
#define	LBA_MAX_HEAD	(254)

/*
 * Return B_TRUE when both the start and the end CHS tuples of an
 * fdisk entry are pegged at their maximum encodable values, the
 * convention for partitions addressed via LBA.
 */
static boolean_t
cmlb_has_max_chs_vals(struct ipart *fdp)
{
	if (fdp->begcyl != LBA_MAX_CYL || fdp->beghead != LBA_MAX_HEAD ||
	    fdp->begsect != LBA_MAX_SECT)
		return (B_FALSE);
	if (fdp->endcyl != LBA_MAX_CYL || fdp->endhead != LBA_MAX_HEAD ||
	    fdp->endsect != LBA_MAX_SECT)
		return (B_FALSE);
	return (B_TRUE);
}
#endif
/*
 * Handler for DKIOCGGEOM: validate the label (faking a default one on
 * small VTOC-8 media when so configured) and copy the cached dk_geom
 * out to userland.  A zero dkg_write_reinstruct is replaced by a value
 * derived from nsect, rpm and cmlb_rot_delay.
 *
 * Returns 0, the geometry-validation error, EIO (x86, no Solaris
 * partition) or EFAULT on copyout failure.
 */
static int
cmlb_dkio_get_geometry(struct cmlb_lun *cl, caddr_t arg, int flag,
    void *tg_cookie)
{
	struct dk_geom	*tmp_geom = NULL;
	int		rval = 0;

	mutex_enter(CMLB_MUTEX(cl));
	rval = cmlb_validate_geometry(cl, B_TRUE, 0, tg_cookie);

#if defined(_SUNOS_VTOC_8)
	if (rval == EINVAL &&
	    cl->cl_alter_behavior & CMLB_FAKE_GEOM_LABEL_IOCTLS_VTOC8) {
		/* Fake a default geometry for small unlabeled media. */
		if (cl->cl_blockcount <= CMLB_OLDVTOC_LIMIT) {
			cmlb_setup_default_geometry(cl, tg_cookie);
			rval = 0;
		}
	}
#endif
	if (rval) {
		mutex_exit(CMLB_MUTEX(cl));
		return (rval);
	}

#if defined(__x86)
	if (cl->cl_solaris_size == 0) {
		mutex_exit(CMLB_MUTEX(cl));
		return (EIO);
	}
#endif

	/* Snapshot the geometry so the copyout runs without the mutex. */
	tmp_geom = kmem_zalloc(sizeof (struct dk_geom), KM_SLEEP);
	bcopy(&cl->cl_g, tmp_geom, sizeof (struct dk_geom));

	if (tmp_geom->dkg_write_reinstruct == 0) {
		tmp_geom->dkg_write_reinstruct =
		    (int)((int)(tmp_geom->dkg_nsect * tmp_geom->dkg_rpm *
		    cmlb_rot_delay) / (int)60000);
	}
	mutex_exit(CMLB_MUTEX(cl));

	rval = ddi_copyout(tmp_geom, (void *)arg, sizeof (struct dk_geom),
	    flag);
	if (rval != 0) {
		rval = EFAULT;
	}

	kmem_free(tmp_geom, sizeof (struct dk_geom));
	return (rval);
}
/*
 * Handler for DKIOCSGEOM: copy a new dk_geom in from userland, install
 * it, and recompute each slice's absolute starting block (cl_offset[])
 * from the new cylinder geometry.  The cached label state is marked
 * invalid so the next access revalidates against the new geometry.
 *
 * Returns 0 on success, EFAULT on copyin failure, or EIO when there is
 * no Solaris partition (x86 only).
 */
static int
cmlb_dkio_set_geometry(struct cmlb_lun *cl, caddr_t arg, int flag)
{
	struct dk_geom	*tmp_geom;
	struct dk_map	*lp;
	int		rval = 0;
	int		i;

#if defined(__x86)
	if (cl->cl_solaris_size == 0) {
		return (EIO);
	}
#endif

	/*
	 * kmem_zalloc() needs no cast in C; written without one for
	 * consistency with cmlb_dkio_get_geometry().
	 */
	tmp_geom = kmem_zalloc(sizeof (struct dk_geom), KM_SLEEP);
	rval = ddi_copyin(arg, tmp_geom, sizeof (struct dk_geom), flag);
	if (rval != 0) {
		kmem_free(tmp_geom, sizeof (struct dk_geom));
		return (EFAULT);
	}

	mutex_enter(CMLB_MUTEX(cl));
	bcopy(tmp_geom, &cl->cl_g, sizeof (struct dk_geom));
	for (i = 0; i < NDKMAP; i++) {
		lp = &cl->cl_map[i];
		cl->cl_offset[i] =
		    cl->cl_g.dkg_nhead * cl->cl_g.dkg_nsect * lp->dkl_cylno;
#if defined(__x86)
		/* x86 slice offsets are absolute (include fdisk offset). */
		cl->cl_offset[i] += cl->cl_solaris_offset;
#endif
	}
	/* Force revalidation of the label against the new geometry. */
	cl->cl_f_geometry_is_valid = B_FALSE;
	mutex_exit(CMLB_MUTEX(cl));
	kmem_free(tmp_geom, sizeof (struct dk_geom));

	return (rval);
}
/*
 * Handler for DKIOCGAPART: validate the label and copy the dk_map
 * slice table out to userland, narrowing to dk_map32 for ILP32
 * callers.
 *
 * NOTE(review): cl_map is read after the mutex has been dropped --
 * presumably a concurrent relabel is tolerated here; confirm.
 */
static int
cmlb_dkio_get_partition(struct cmlb_lun *cl, caddr_t arg, int flag,
    void *tg_cookie)
{
	int	rval = 0;
	int	size;

	mutex_enter(CMLB_MUTEX(cl));

	if ((rval = cmlb_validate_geometry(cl, B_TRUE, 0, tg_cookie)) != 0) {
		mutex_exit(CMLB_MUTEX(cl));
		return (rval);
	}
	mutex_exit(CMLB_MUTEX(cl));

#if defined(__x86)
	if (cl->cl_solaris_size == 0) {
		return (EIO);
	}
#endif

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct dk_map32 dk_map32[NDKMAP];
		int i;

		/* Narrow each entry to the 32-bit layout. */
		for (i = 0; i < NDKMAP; i++) {
			dk_map32[i].dkl_cylno = cl->cl_map[i].dkl_cylno;
			dk_map32[i].dkl_nblk = cl->cl_map[i].dkl_nblk;
		}
		size = NDKMAP * sizeof (struct dk_map32);
		rval = ddi_copyout(dk_map32, (void *)arg, size, flag);
		if (rval != 0) {
			rval = EFAULT;
		}
		break;
	}
	case DDI_MODEL_NONE:
		size = NDKMAP * sizeof (struct dk_map);
		rval = ddi_copyout(cl->cl_map, (void *)arg, size, flag);
		if (rval != 0) {
			rval = EFAULT;
		}
		break;
	}
#else
	size = NDKMAP * sizeof (struct dk_map);
	rval = ddi_copyout(cl->cl_map, (void *)arg, size, flag);
	if (rval != 0) {
		rval = EFAULT;
	}
#endif
	return (rval);
}
/*
 * Handler for DKIOCSAPART: copy a dk_map slice table in from userland
 * (widening from dk_map32 for ILP32 callers), install it, and
 * recompute the slice offsets; on VTOC-16 the in-core vtoc partition
 * entries are kept in sync as well.
 *
 * Rejected with ENOTSUP on disks beyond the old-VTOC size limit, and
 * with EIO when there is no Solaris partition.
 */
static int
cmlb_dkio_set_partition(struct cmlb_lun *cl, caddr_t arg, int flag)
{
	struct dk_map	dk_map[NDKMAP];
	struct dk_map	*lp;
	int		rval = 0;
	int		size;
	int		i;
#if defined(_SUNOS_VTOC_16)
	struct dkl_partition	*vp;
#endif

	_NOTE(DATA_READABLE_WITHOUT_LOCK(cmlb_lun::cl_solaris_size))

	mutex_enter(CMLB_MUTEX(cl));
	if (cl->cl_blockcount > CMLB_OLDVTOC_LIMIT) {
		mutex_exit(CMLB_MUTEX(cl));
		return (ENOTSUP);
	}
	mutex_exit(CMLB_MUTEX(cl));
	if (cl->cl_solaris_size == 0) {
		return (EIO);
	}

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct dk_map32 dk_map32[NDKMAP];

		size = NDKMAP * sizeof (struct dk_map32);
		rval = ddi_copyin((void *)arg, dk_map32, size, flag);
		if (rval != 0) {
			return (EFAULT);
		}
		/* Widen the 32-bit entries to the native dk_map. */
		for (i = 0; i < NDKMAP; i++) {
			dk_map[i].dkl_cylno = dk_map32[i].dkl_cylno;
			dk_map[i].dkl_nblk = dk_map32[i].dkl_nblk;
		}
		break;
	}
	case DDI_MODEL_NONE:
		size = NDKMAP * sizeof (struct dk_map);
		rval = ddi_copyin((void *)arg, dk_map, size, flag);
		if (rval != 0) {
			return (EFAULT);
		}
		break;
	}
#else
	size = NDKMAP * sizeof (struct dk_map);
	rval = ddi_copyin((void *)arg, dk_map, size, flag);
	if (rval != 0) {
		return (EFAULT);
	}
#endif

	mutex_enter(CMLB_MUTEX(cl));
	/*
	 * NOTE(review): "size" here reflects the caller's data model; for
	 * ILP32 callers this copies fewer bytes than a full native dk_map
	 * array -- confirm dk_map layouts make this intentional.
	 */
	bcopy(dk_map, cl->cl_map, size);
#if defined(_SUNOS_VTOC_16)
	vp = (struct dkl_partition *)&(cl->cl_vtoc);
#endif
	for (i = 0; i < NDKMAP; i++) {
		lp = &cl->cl_map[i];
		cl->cl_offset[i] =
		    cl->cl_g.dkg_nhead * cl->cl_g.dkg_nsect * lp->dkl_cylno;
#if defined(_SUNOS_VTOC_16)
		/* Keep the in-core vtoc in sync with the new map. */
		vp->p_start = cl->cl_offset[i];
		vp->p_size = lp->dkl_nblk;
		vp++;
#endif
#if defined(__x86)
		/* x86 slice offsets are absolute (include fdisk offset). */
		cl->cl_offset[i] += cl->cl_solaris_offset;
#endif
	}
	mutex_exit(CMLB_MUTEX(cl));
	return (rval);
}
/*
 * Handler for DKIOCGVTOC: validate the label and copy the vtoc out to
 * userland in the caller's data model.  Disks larger than the old-VTOC
 * limit are rejected with EOVERFLOW (callers must use DKIOCGEXTVTOC).
 */
static int
cmlb_dkio_get_vtoc(struct cmlb_lun *cl, caddr_t arg, int flag, void *tg_cookie)
{
#if defined(_SUNOS_VTOC_8)
	struct vtoc	user_vtoc;
#endif
	int		rval = 0;

	mutex_enter(CMLB_MUTEX(cl));

	if (cl->cl_blockcount > CMLB_OLDVTOC_LIMIT) {
		mutex_exit(CMLB_MUTEX(cl));
		return (EOVERFLOW);
	}

	rval = cmlb_validate_geometry(cl, B_TRUE, 0, tg_cookie);

#if defined(_SUNOS_VTOC_8)
	if (rval == EINVAL &&
	    (cl->cl_alter_behavior & CMLB_FAKE_GEOM_LABEL_IOCTLS_VTOC8)) {
		/* Fake a default geometry for small unlabeled media. */
		if (cl->cl_blockcount <= CMLB_OLDVTOC_LIMIT) {
			cmlb_setup_default_geometry(cl, tg_cookie);
			rval = 0;
		}
	}
#endif

	if (rval) {
		mutex_exit(CMLB_MUTEX(cl));
		return (rval);
	}

#if defined(_SUNOS_VTOC_8)
	/* VTOC-8: assemble a user vtoc from the cached label state. */
	cmlb_build_user_vtoc(cl, &user_vtoc);
	mutex_exit(CMLB_MUTEX(cl));

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct vtoc32 user_vtoc32;

		/* Narrow to the 32-bit vtoc for ILP32 callers. */
		vtoctovtoc32(user_vtoc, user_vtoc32);
		if (ddi_copyout(&user_vtoc32, (void *)arg,
		    sizeof (struct vtoc32), flag)) {
			return (EFAULT);
		}
		break;
	}

	case DDI_MODEL_NONE:
		if (ddi_copyout(&user_vtoc, (void *)arg,
		    sizeof (struct vtoc), flag)) {
			return (EFAULT);
		}
		break;
	}
#else
	if (ddi_copyout(&user_vtoc, (void *)arg, sizeof (struct vtoc), flag)) {
		return (EFAULT);
	}
#endif

#elif defined(_SUNOS_VTOC_16)
	/* VTOC-16: cl_vtoc is kept in (32-bit) vtoc32 layout already. */
	mutex_exit(CMLB_MUTEX(cl));

#ifdef _MULTI_DATAMODEL
	ASSERT(sizeof (cl->cl_vtoc) == sizeof (struct vtoc32));

	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32:
		if (ddi_copyout(&(cl->cl_vtoc), (void *)arg,
		    sizeof (cl->cl_vtoc), flag)) {
			return (EFAULT);
		}
		break;

	case DDI_MODEL_NONE: {
		struct vtoc user_vtoc;

		/* Widen to the native vtoc for 64-bit callers. */
		vtoc32tovtoc(cl->cl_vtoc, user_vtoc);
		if (ddi_copyout(&user_vtoc, (void *)arg,
		    sizeof (struct vtoc), flag)) {
			return (EFAULT);
		}
		break;
	}
	}
#else
	if (ddi_copyout(&(cl->cl_vtoc), (void *)arg, sizeof (cl->cl_vtoc),
	    flag)) {
		return (EFAULT);
	}
#endif
#else
#error "No VTOC format defined."
#endif
	return (rval);
}
/*
 * cmlb_dkio_get_extvtoc:
 *	Handler for DKIOCGEXTVTOC: copy an extended (64-bit capable) VTOC
 *	out to the caller.  Unlike the old DKIOCGVTOC path there is no
 *	CMLB_OLDVTOC_LIMIT restriction.
 */
static int
cmlb_dkio_get_extvtoc(struct cmlb_lun *cl, caddr_t arg, int flag,
    void *tg_cookie)
{
	struct extvtoc ext_vtoc;
#if defined(_SUNOS_VTOC_8)
	struct vtoc user_vtoc;
#endif
	int rval = 0;
	bzero(&ext_vtoc, sizeof (struct extvtoc));
	mutex_enter(CMLB_MUTEX(cl));
	rval = cmlb_validate_geometry(cl, B_TRUE, 0, tg_cookie);
#if defined(_SUNOS_VTOC_8)
	/* Optionally fabricate a default geometry for unlabeled media. */
	if (rval == EINVAL &&
	    (cl->cl_alter_behavior & CMLB_FAKE_GEOM_LABEL_IOCTLS_VTOC8)) {
		if (cl->cl_blockcount <= CMLB_OLDVTOC_LIMIT) {
			cmlb_setup_default_geometry(cl, tg_cookie);
			rval = 0;
		}
	}
#endif
	if (rval) {
		mutex_exit(CMLB_MUTEX(cl));
		return (rval);
	}
#if defined(_SUNOS_VTOC_8)
	cmlb_build_user_vtoc(cl, &user_vtoc);
	mutex_exit(CMLB_MUTEX(cl));
#ifdef _LP64
	/*
	 * In an LP64 kernel struct vtoc and struct extvtoc share a layout,
	 * so the vtoc can be copied out directly — assumed from this use;
	 * TODO confirm against sys/vtoc.h.
	 */
	if (ddi_copyout(&user_vtoc, (void *)arg,
	    sizeof (struct extvtoc), flag)) {
		return (EFAULT);
	}
#else
	/* ILP32: field-wise convert into the extvtoc before copyout. */
	vtoc32tovtoc(user_vtoc, ext_vtoc);
	if (ddi_copyout(&ext_vtoc, (void *)arg,
	    sizeof (struct extvtoc), flag)) {
		return (EFAULT);
	}
#endif
#elif defined(_SUNOS_VTOC_16)
	/* Cached label is vtoc32 layout on VTOC_16 platforms; widen it. */
	vtoc32tovtoc(cl->cl_vtoc, ext_vtoc);
	mutex_exit(CMLB_MUTEX(cl));
	if (ddi_copyout(&ext_vtoc, (void *)arg, sizeof (struct extvtoc), flag))
		return (EFAULT);
#else
#error "No VTOC format defined."
#endif
	return (rval);
}
/*
 * cmlb_dkio_get_efi:
 *	Handler for DKIOCGETEFI: read raw EFI label data from the media at
 *	the caller-specified LBA and copy it out to the caller's buffer.
 */
static int
cmlb_dkio_get_efi(struct cmlb_lun *cl, caddr_t arg, int flag, void *tg_cookie)
{
	dk_efi_t	efi;
	diskaddr_t	lba;
	void		*xfer_buf;
	int		rval;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag) != 0)
		return (EFAULT);

	efi.dki_data = (void *)(uintptr_t)efi.dki_data_64;

	/* Reject empty and oversized transfer requests up front. */
	if (efi.dki_length == 0 || efi.dki_length > cmlb_tg_max_efi_xfer)
		return (EINVAL);

	lba = efi.dki_lba;

	mutex_enter(CMLB_MUTEX(cl));
	if ((cmlb_check_update_blockcount(cl, tg_cookie) != 0) ||
	    (cl->cl_tgt_blocksize == 0) ||
	    (efi.dki_length % cl->cl_sys_blocksize)) {
		mutex_exit(CMLB_MUTEX(cl));
		return (EINVAL);
	}
	/* Rescale the LBA when target and system block sizes differ. */
	if (cl->cl_tgt_blocksize != cl->cl_sys_blocksize)
		lba = lba * cl->cl_tgt_blocksize / cl->cl_sys_blocksize;
	mutex_exit(CMLB_MUTEX(cl));

	xfer_buf = kmem_alloc(efi.dki_length, KM_SLEEP);
	rval = DK_TG_READ(cl, xfer_buf, lba, efi.dki_length, tg_cookie);
	if (rval == 0 &&
	    ddi_copyout(xfer_buf, efi.dki_data, efi.dki_length, flag) != 0)
		rval = EFAULT;
	kmem_free(xfer_buf, efi.dki_length);
	return (rval);
}
#if defined(_SUNOS_VTOC_8)
/*
 * cmlb_build_user_vtoc:
 *	Assemble a userland struct vtoc from the cached label state
 *	(cl_vtoc, cl_map, geometry).  Caller must hold the cmlb mutex.
 */
static void
cmlb_build_user_vtoc(struct cmlb_lun *cl, struct vtoc *user_vtoc)
{
	struct dk_map	*mp;
	struct dk_map2	*pp;
	struct partition *up;
	uint32_t	cylsize;
	int		idx;

	ASSERT(mutex_owned(CMLB_MUTEX(cl)));

	/* Start from a clean slate, then fill each field in turn. */
	bzero(user_vtoc, sizeof (struct vtoc));
	user_vtoc->v_bootinfo[0] = cl->cl_vtoc.v_bootinfo[0];
	user_vtoc->v_bootinfo[1] = cl->cl_vtoc.v_bootinfo[1];
	user_vtoc->v_bootinfo[2] = cl->cl_vtoc.v_bootinfo[2];
	user_vtoc->v_sanity = VTOC_SANE;
	user_vtoc->v_version = cl->cl_vtoc.v_version;
	bcopy(cl->cl_vtoc.v_volume, user_vtoc->v_volume, LEN_DKL_VVOL);
	user_vtoc->v_sectorsz = cl->cl_sys_blocksize;
	user_vtoc->v_nparts = cl->cl_vtoc.v_nparts;

	for (idx = 0; idx < 10; idx++)
		user_vtoc->v_reserved[idx] = cl->cl_vtoc.v_reserved[idx];

	/*
	 * Convert each partition's cylinder-based description into the
	 * absolute-sector form userland expects (blocks per cylinder =
	 * nsect * nhead).
	 */
	cylsize = cl->cl_g.dkg_nsect * cl->cl_g.dkg_nhead;
	mp = cl->cl_map;
	pp = (struct dk_map2 *)cl->cl_vtoc.v_part;
	up = user_vtoc->v_part;
	for (idx = 0; idx < V_NUMPAR; idx++) {
		up->p_tag = pp->p_tag;
		up->p_flag = pp->p_flag;
		up->p_start = mp->dkl_cylno * cylsize;
		up->p_size = mp->dkl_nblk;
		mp++;
		pp++;
		up++;
		user_vtoc->timestamp[idx] =
		    (time_t)cl->cl_vtoc.v_timestamp[idx];
	}
	bcopy(cl->cl_asciilabel, user_vtoc->v_asciilabel, LEN_DKL_ASCII);
}
#endif
/*
 * cmlb_dkio_partition:
 *	Handler for DKIOCPARTITION: look up one EFI (GPT) partition entry
 *	by number and return its type GUID, start LBA and size.
 *
 * Returns:
 *	ESRCH	requested partition number beyond the GPT entry count
 *	EFAULT	copyin/copyout failed
 *	other	media read or GPT validation error
 */
static int
cmlb_dkio_partition(struct cmlb_lun *cl, caddr_t arg, int flag,
    void *tg_cookie)
{
	struct partition64 p64;
	int rval = 0;
	uint_t nparts;
	efi_gpe_t *partitions;
	efi_gpt_t *buffer;
	diskaddr_t gpe_lba;
	int n_gpe_per_blk = 0;
	if (ddi_copyin((const void *)arg, &p64,
	    sizeof (struct partition64), flag)) {
		return (EFAULT);
	}
	/* One block buffer is reused for the GPT header and the GPE block. */
	buffer = kmem_alloc(cl->cl_sys_blocksize, KM_SLEEP);
	/* The primary GPT header lives at LBA 1. */
	rval = DK_TG_READ(cl, buffer, 1, cl->cl_sys_blocksize, tg_cookie);
	if (rval != 0)
		goto done_error;
	/* On-disk GPT is little-endian; swap to host order, then validate. */
	cmlb_swap_efi_gpt(buffer);
	if ((rval = cmlb_validate_efi(buffer)) != 0)
		goto done_error;
	nparts = buffer->efi_gpt_NumberOfPartitionEntries;
	gpe_lba = buffer->efi_gpt_PartitionEntryLBA;
	if (p64.p_partno >= nparts) {
		rval = ESRCH;
		goto done_error;
	}
	/* Read just the entry-array block that holds the wanted entry. */
	n_gpe_per_blk = cl->cl_sys_blocksize / sizeof (efi_gpe_t);
	gpe_lba += p64.p_partno / n_gpe_per_blk;
	rval = DK_TG_READ(cl, buffer, gpe_lba, cl->cl_sys_blocksize, tg_cookie);
	if (rval) {
		goto done_error;
	}
	/* Index to the requested entry within the block and swap it. */
	partitions = (efi_gpe_t *)buffer;
	partitions += p64.p_partno % n_gpe_per_blk;
	cmlb_swap_efi_gpe(1, partitions);
	bcopy(&partitions->efi_gpe_PartitionTypeGUID, &p64.p_type,
	    sizeof (struct uuid));
	p64.p_start = partitions->efi_gpe_StartingLBA;
	p64.p_size = partitions->efi_gpe_EndingLBA -
	    p64.p_start + 1;
	if (ddi_copyout(&p64, (void *)arg, sizeof (struct partition64), flag))
		rval = EFAULT;
done_error:
	kmem_free(buffer, cl->cl_sys_blocksize);
	return (rval);
}
/*
 * cmlb_dkio_set_vtoc:
 *	Handler for DKIOCSVTOC: copy in a (32-bit-limited) VTOC from the
 *	caller, write it to the media, and switch the whole-disk minor
 *	nodes from the EFI names ("wd"/"wd,raw") back to the VTOC names
 *	("h"/"h,raw").  Any existing EFI label is cleared first.
 */
static int
cmlb_dkio_set_vtoc(struct cmlb_lun *cl, dev_t dev, caddr_t arg, int flag,
    void *tg_cookie)
{
	struct vtoc user_vtoc;
	int shift, rval = 0;
	boolean_t internal;
	internal = VOID2BOOLEAN(
	    (cl->cl_alter_behavior & (CMLB_INTERNAL_MINOR_NODES)) != 0);
	/* Minor number layout differs when a p0 node is forced. */
	if (cl->cl_alter_behavior & CMLB_CREATE_P0_MINOR_NODE)
		shift = CMLBUNIT_FORCE_P0_SHIFT;
	else
		shift = CMLBUNIT_SHIFT;
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(flag & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct vtoc32 user_vtoc32;
		if (ddi_copyin((const void *)arg, &user_vtoc32,
		    sizeof (struct vtoc32), flag)) {
			return (EFAULT);
		}
		vtoc32tovtoc(user_vtoc32, user_vtoc);
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin((const void *)arg, &user_vtoc,
		    sizeof (struct vtoc), flag)) {
			return (EFAULT);
		}
		break;
	}
#else
	if (ddi_copyin((const void *)arg, &user_vtoc,
	    sizeof (struct vtoc), flag)) {
		return (EFAULT);
	}
#endif
	mutex_enter(CMLB_MUTEX(cl));
	/* The old VTOC ioctls cannot label devices beyond this limit. */
	if (cl->cl_blockcount > CMLB_OLDVTOC_LIMIT) {
		mutex_exit(CMLB_MUTEX(cl));
		return (EOVERFLOW);
	}
#if defined(__x86)
	if (cl->cl_tgt_blocksize != cl->cl_sys_blocksize) {
		mutex_exit(CMLB_MUTEX(cl));
		return (EINVAL);
	}
#endif
	/* A zero cylinder count means there is no usable geometry. */
	if (cl->cl_g.dkg_ncyl == 0) {
		mutex_exit(CMLB_MUTEX(cl));
		return (EINVAL);
	}
	mutex_exit(CMLB_MUTEX(cl));
	cmlb_clear_efi(cl, tg_cookie);
	ddi_remove_minor_node(CMLB_DEVINFO(cl), "wd");
	ddi_remove_minor_node(CMLB_DEVINFO(cl), "wd,raw");
	ddi_remove_minor_node(CMLB_DEVINFO(cl), "h");
	ddi_remove_minor_node(CMLB_DEVINFO(cl), "h,raw");
	(void) cmlb_create_minor(CMLB_DEVINFO(cl), "h",
	    S_IFBLK, (CMLBUNIT(dev, shift) << shift) | WD_NODE,
	    cl->cl_node_type, 0, internal);
	(void) cmlb_create_minor(CMLB_DEVINFO(cl), "h,raw",
	    S_IFCHR, (CMLBUNIT(dev, shift) << shift) | WD_NODE,
	    cl->cl_node_type, 0, internal);
	mutex_enter(CMLB_MUTEX(cl));
	if ((rval = cmlb_build_label_vtoc(cl, &user_vtoc)) == 0) {
		if ((rval = cmlb_write_label(cl, tg_cookie)) == 0) {
			/* Re-validate; failure here is logged, not fatal. */
			if (cmlb_validate_geometry(cl,
			    B_TRUE, 0, tg_cookie) != 0) {
				cmlb_dbg(CMLB_ERROR, cl,
				    "cmlb_dkio_set_vtoc: "
				    "Failed validate geometry\n");
			}
			cl->cl_msglog_flag |= CMLB_ALLOW_2TB_WARN;
		}
	}
	mutex_exit(CMLB_MUTEX(cl));
	return (rval);
}
/*
 * cmlb_dkio_set_extvtoc:
 *	Handler for DKIOCSEXTVTOC: copy in an extended VTOC, write it to
 *	the media, and switch the whole-disk minor nodes from the EFI
 *	names ("wd"/"wd,raw") back to the VTOC names ("h"/"h,raw").  Any
 *	existing EFI label is cleared first.
 */
static int
cmlb_dkio_set_extvtoc(struct cmlb_lun *cl, dev_t dev, caddr_t arg, int flag,
    void *tg_cookie)
{
	int shift, rval = 0;
	struct vtoc user_vtoc;
	boolean_t internal;
	/* Minor number layout differs when a p0 node is forced. */
	if (cl->cl_alter_behavior & CMLB_CREATE_P0_MINOR_NODE)
		shift = CMLBUNIT_FORCE_P0_SHIFT;
	else
		shift = CMLBUNIT_SHIFT;
#ifdef _LP64
	/*
	 * LP64: struct vtoc and struct extvtoc share a layout, so the
	 * extvtoc can be copied straight into user_vtoc — assumed from
	 * this use; TODO confirm against sys/vtoc.h.
	 */
	if (ddi_copyin((const void *)arg, &user_vtoc,
	    sizeof (struct extvtoc), flag)) {
		return (EFAULT);
	}
#else
	/* ILP32: narrow the 64-bit extvtoc into the native vtoc. */
	struct extvtoc user_extvtoc;
	if (ddi_copyin((const void *)arg, &user_extvtoc,
	    sizeof (struct extvtoc), flag)) {
		return (EFAULT);
	}
	vtoctovtoc32(user_extvtoc, user_vtoc);
#endif
	internal = VOID2BOOLEAN(
	    (cl->cl_alter_behavior & (CMLB_INTERNAL_MINOR_NODES)) != 0);
	mutex_enter(CMLB_MUTEX(cl));
#if defined(__x86)
	if (cl->cl_tgt_blocksize != cl->cl_sys_blocksize) {
		mutex_exit(CMLB_MUTEX(cl));
		return (EINVAL);
	}
#endif
	/* A zero cylinder count means there is no usable geometry. */
	if (cl->cl_g.dkg_ncyl == 0) {
		mutex_exit(CMLB_MUTEX(cl));
		return (EINVAL);
	}
	mutex_exit(CMLB_MUTEX(cl));
	cmlb_clear_efi(cl, tg_cookie);
	ddi_remove_minor_node(CMLB_DEVINFO(cl), "wd");
	ddi_remove_minor_node(CMLB_DEVINFO(cl), "wd,raw");
	ddi_remove_minor_node(CMLB_DEVINFO(cl), "h");
	ddi_remove_minor_node(CMLB_DEVINFO(cl), "h,raw");
	(void) cmlb_create_minor(CMLB_DEVINFO(cl), "h",
	    S_IFBLK, (CMLBUNIT(dev, shift) << shift) | WD_NODE,
	    cl->cl_node_type, 0, internal);
	(void) cmlb_create_minor(CMLB_DEVINFO(cl), "h,raw",
	    S_IFCHR, (CMLBUNIT(dev, shift) << shift) | WD_NODE,
	    cl->cl_node_type, 0, internal);
	mutex_enter(CMLB_MUTEX(cl));
	if ((rval = cmlb_build_label_vtoc(cl, &user_vtoc)) == 0) {
		if ((rval = cmlb_write_label(cl, tg_cookie)) == 0) {
			/* Re-validate; failure here is logged, not fatal. */
			if (cmlb_validate_geometry(cl,
			    B_TRUE, 0, tg_cookie) != 0) {
				/*
				 * Fixed copy-paste: this message used to
				 * claim it came from cmlb_dkio_set_vtoc.
				 */
				cmlb_dbg(CMLB_ERROR, cl,
				    "cmlb_dkio_set_extvtoc: "
				    "Failed validate geometry\n");
			}
		}
	}
	mutex_exit(CMLB_MUTEX(cl));
	return (rval);
}
/*
 * cmlb_build_label_vtoc:
 *	Validate a userland-supplied VTOC and fold it into the cached
 *	label state (cl_vtoc / cl_map).  Caller must hold the cmlb mutex.
 *
 * Returns:
 *	0	on success
 *	EINVAL	bad sanity/sectorsize/nparts, zero geometry, or (VTOC_8)
 *		a partition that is not cylinder-aligned or runs past the
 *		last data cylinder
 */
static int
cmlb_build_label_vtoc(struct cmlb_lun *cl, struct vtoc *user_vtoc)
{
	struct dk_map *lmap;
	struct partition *vpart;
	uint_t nblks;
#if defined(_SUNOS_VTOC_8)
	int ncyl;
	struct dk_map2 *lpart;
#endif
	int i;
	ASSERT(mutex_owned(CMLB_MUTEX(cl)));
	/* Basic sanity on the incoming VTOC before touching any state. */
	if (user_vtoc->v_sanity != VTOC_SANE ||
	    user_vtoc->v_sectorsz != cl->cl_sys_blocksize ||
	    user_vtoc->v_nparts != V_NUMPAR) {
		cmlb_dbg(CMLB_INFO, cl,
		    "cmlb_build_label_vtoc: vtoc not valid\n");
		return (EINVAL);
	}
	/* Blocks per cylinder; zero means the geometry is unusable. */
	nblks = cl->cl_g.dkg_nsect * cl->cl_g.dkg_nhead;
	if (nblks == 0) {
		cmlb_dbg(CMLB_INFO, cl,
		    "cmlb_build_label_vtoc: geom nblks is 0\n");
		return (EINVAL);
	}
#if defined(_SUNOS_VTOC_8)
	/*
	 * VTOC_8 stores partitions as cylinder ranges, so every partition
	 * must start on a cylinder boundary and fit within dkg_ncyl
	 * (partial trailing cylinders are rounded up before the check).
	 */
	vpart = user_vtoc->v_part;
	for (i = 0; i < V_NUMPAR; i++) {
		if (((unsigned)vpart->p_start % nblks) != 0) {
			cmlb_dbg(CMLB_INFO, cl,
			    "cmlb_build_label_vtoc: p_start not multiply of"
			    "nblks part %d p_start %d nblks %d\n", i,
			    vpart->p_start, nblks);
			return (EINVAL);
		}
		ncyl = (unsigned)vpart->p_start / nblks;
		ncyl += (unsigned)vpart->p_size / nblks;
		if (((unsigned)vpart->p_size % nblks) != 0) {
			ncyl++;
		}
		if (ncyl > (int)cl->cl_g.dkg_ncyl) {
			cmlb_dbg(CMLB_INFO, cl,
			    "cmlb_build_label_vtoc: ncyl %d > dkg_ncyl %d"
			    "p_size %ld p_start %ld nblks %d part number %d"
			    "tag %d\n",
			    ncyl, cl->cl_g.dkg_ncyl, vpart->p_size,
			    vpart->p_start, nblks,
			    i, vpart->p_tag);
			return (EINVAL);
		}
		vpart++;
	}
#endif
#if defined(_SUNOS_VTOC_16)
	/* Cache the label in vtoc32 layout, then rebuild the dk_map. */
	vtoctovtoc32((*user_vtoc), (*((struct vtoc32 *)&(cl->cl_vtoc))));
	lmap = cl->cl_map;
	vpart = user_vtoc->v_part;
	for (i = 0; i < (int)user_vtoc->v_nparts; i++, lmap++, vpart++) {
		lmap->dkl_cylno = (unsigned)vpart->p_start / nblks;
		lmap->dkl_nblk = (unsigned)vpart->p_size;
	}
#elif defined(_SUNOS_VTOC_8)
	/* Copy the VTOC field-by-field into the cached 32-bit label. */
	cl->cl_vtoc.v_bootinfo[0] = (uint32_t)user_vtoc->v_bootinfo[0];
	cl->cl_vtoc.v_bootinfo[1] = (uint32_t)user_vtoc->v_bootinfo[1];
	cl->cl_vtoc.v_bootinfo[2] = (uint32_t)user_vtoc->v_bootinfo[2];
	cl->cl_vtoc.v_sanity = (uint32_t)user_vtoc->v_sanity;
	cl->cl_vtoc.v_version = (uint32_t)user_vtoc->v_version;
	bcopy(user_vtoc->v_volume, cl->cl_vtoc.v_volume, LEN_DKL_VVOL);
	cl->cl_vtoc.v_nparts = user_vtoc->v_nparts;
	for (i = 0; i < 10; i++)
		cl->cl_vtoc.v_reserved[i] = user_vtoc->v_reserved[i];
	lmap = cl->cl_map;
	lpart = cl->cl_vtoc.v_part;
	vpart = user_vtoc->v_part;
	for (i = 0; i < (int)user_vtoc->v_nparts; i++) {
		lpart->p_tag = vpart->p_tag;
		lpart->p_flag = vpart->p_flag;
		lmap->dkl_cylno = (unsigned)vpart->p_start / nblks;
		lmap->dkl_nblk = (unsigned)vpart->p_size;
		lmap++;
		lpart++;
		vpart++;
#ifdef _LP64
		/* Clamp 64-bit timestamps to what the label can hold. */
		if (user_vtoc->timestamp[i] > TIME32_MAX) {
			cl->cl_vtoc.v_timestamp[i] = TIME32_MAX;
		} else {
			cl->cl_vtoc.v_timestamp[i] = user_vtoc->timestamp[i];
		}
#else
		cl->cl_vtoc.v_timestamp[i] = user_vtoc->timestamp[i];
#endif
	}
	bcopy(user_vtoc->v_asciilabel, cl->cl_asciilabel, LEN_DKL_ASCII);
#else
#error "No VTOC format defined."
#endif
	return (0);
}
/*
 * cmlb_clear_efi:
 *	Zero out any EFI (GPT) labels on the media: the primary header at
 *	LBA 1, the standard backup header at capacity - 1, and, failing
 *	that, a legacy backup header at capacity - 2.  Each candidate
 *	block is read and validated before being overwritten so unrelated
 *	data is never clobbered.  Errors are logged and otherwise ignored.
 */
static void
cmlb_clear_efi(struct cmlb_lun *cl, void *tg_cookie)
{
	efi_gpt_t *gpt;
	diskaddr_t cap;
	int rval;
	ASSERT(!mutex_owned(CMLB_MUTEX(cl)));
	mutex_enter(CMLB_MUTEX(cl));
	cl->cl_reserved = -1;
	mutex_exit(CMLB_MUTEX(cl));
	gpt = kmem_alloc(cl->cl_sys_blocksize, KM_SLEEP);
	if (DK_TG_READ(cl, gpt, 1, cl->cl_sys_blocksize, tg_cookie) != 0) {
		goto done;
	}
	/* On-disk GPT is little-endian; swap to host order for validation. */
	cmlb_swap_efi_gpt(gpt);
	rval = cmlb_validate_efi(gpt);
	if (rval == 0) {
		/* Valid primary label found; zero the header block. */
		bzero(gpt, sizeof (efi_gpt_t));
		/*
		 * Parenthesized and compared explicitly; this was a bare
		 * assignment-in-condition, unlike the other call sites.
		 */
		if ((rval = DK_TG_WRITE(cl, gpt, 1, cl->cl_sys_blocksize,
		    tg_cookie)) != 0) {
			cmlb_dbg(CMLB_INFO, cl,
			    "cmlb_clear_efi: clear primary label failed\n");
		}
	}
	rval = DK_TG_GETCAP(cl, &cap, tg_cookie);
	if (rval) {
		goto done;
	}
	/* The standard backup GPT header lives in the last block. */
	if ((rval = DK_TG_READ(cl, gpt, cap - 1, cl->cl_sys_blocksize,
	    tg_cookie)) != 0) {
		goto done;
	}
	cmlb_swap_efi_gpt(gpt);
	rval = cmlb_validate_efi(gpt);
	if (rval == 0) {
		cmlb_dbg(CMLB_TRACE, cl,
		    "cmlb_clear_efi clear backup@%lu\n", cap - 1);
		bzero(gpt, sizeof (efi_gpt_t));
		if ((rval = DK_TG_WRITE(cl, gpt, cap - 1, cl->cl_sys_blocksize,
		    tg_cookie))) {
			cmlb_dbg(CMLB_INFO, cl,
			    "cmlb_clear_efi: clear backup label failed\n");
		}
	} else {
		/* No backup at cap - 1; try the legacy location, cap - 2. */
		if ((rval = DK_TG_READ(cl, gpt, cap - 2,
		    cl->cl_sys_blocksize, tg_cookie)) != 0) {
			goto done;
		}
		cmlb_swap_efi_gpt(gpt);
		rval = cmlb_validate_efi(gpt);
		if (rval == 0) {
			cmlb_dbg(CMLB_TRACE, cl,
			    "cmlb_clear_efi clear legacy backup@%lu\n",
			    cap - 2);
			bzero(gpt, sizeof (efi_gpt_t));
			if ((rval = DK_TG_WRITE(cl, gpt, cap - 2,
			    cl->cl_sys_blocksize, tg_cookie))) {
				cmlb_dbg(CMLB_INFO, cl,
				    "cmlb_clear_efi: clear legacy backup label "
				    "failed\n");
			}
		}
	}
done:
	kmem_free(gpt, cl->cl_sys_blocksize);
}
/*
 * cmlb_set_vtoc:
 *	Write the label block to the primary label location, then to the
 *	backup label sectors (every other odd sector, up to five copies)
 *	on the last head of the last data cylinder.
 */
static int
cmlb_set_vtoc(struct cmlb_lun *cl, struct dk_label *dkl, void *tg_cookie)
{
	uint_t		label_addr;
	diskaddr_t	backup_blk;
	int		sec;
	int		last_head;
	int		last_cyl;
	int		rval;

#if defined(__x86)
	/* On x86 the label sits inside the Solaris fdisk partition. */
	label_addr = cl->cl_solaris_offset + DK_LABEL_LOC;
#else
	label_addr = 0;
#endif

	rval = DK_TG_WRITE(cl, dkl, label_addr, cl->cl_sys_blocksize,
	    tg_cookie);
	if (rval != 0)
		return (rval);

	/* Backup copies go on the last head of the last usable cylinder. */
	last_cyl = dkl->dkl_ncyl + dkl->dkl_acyl - 1;
	last_head = dkl->dkl_nhead - 1;

	for (sec = 1; (sec < 5 * 2 + 1) && (sec < dkl->dkl_nsect); sec += 2) {
		backup_blk = (diskaddr_t)(
		    (last_cyl *
		    ((dkl->dkl_nhead * dkl->dkl_nsect) - dkl->dkl_apc)) +
		    (last_head * dkl->dkl_nsect) + sec);
#if defined(__x86)
		backup_blk += cl->cl_solaris_offset;
#endif
		rval = DK_TG_WRITE(cl, dkl, backup_blk, cl->cl_sys_blocksize,
		    tg_cookie);
		cmlb_dbg(CMLB_INFO, cl,
		    "cmlb_set_vtoc: wrote backup label %llx\n", backup_blk);
		if (rval != 0)
			break;
	}
	return (rval);
}
/*
 * cmlb_clear_vtoc:
 *	Overwrite the on-disk VTOC label (primary and backups) with a
 *	zeroed label that carries only the current geometry.  Called and
 *	returns with the cmlb mutex held; the mutex is dropped around the
 *	blocking allocation and the media writes.
 */
static void
cmlb_clear_vtoc(struct cmlb_lun *cl, void *tg_cookie)
{
	struct dk_label	*blank;

	mutex_exit(CMLB_MUTEX(cl));
	blank = kmem_zalloc(cl->cl_sys_blocksize, KM_SLEEP);
	mutex_enter(CMLB_MUTEX(cl));

	/* Only the geometry fields are kept; everything else stays zero. */
	blank->dkl_apc = cl->cl_g.dkg_apc;
	blank->dkl_ncyl = cl->cl_g.dkg_ncyl;
	blank->dkl_acyl = cl->cl_g.dkg_acyl;
	blank->dkl_nhead = cl->cl_g.dkg_nhead;
	blank->dkl_nsect = cl->cl_g.dkg_nsect;

	mutex_exit(CMLB_MUTEX(cl));
	(void) cmlb_set_vtoc(cl, blank, tg_cookie);
	kmem_free(blank, cl->cl_sys_blocksize);
	mutex_enter(CMLB_MUTEX(cl));
}
/*
 * cmlb_write_label:
 *	Build a struct dk_label from the cached label state, compute its
 *	XOR checksum, and write it (with backups) to the media.  Called
 *	and returns with the cmlb mutex held; the mutex is dropped around
 *	the blocking allocation and the media writes.
 */
static int
cmlb_write_label(struct cmlb_lun *cl, void *tg_cookie)
{
	struct dk_label	*dkl;
	short		*wp;
	short		cksum;
	int		idx;
	int		nshorts;
	int		rval;

	ASSERT(mutex_owned(CMLB_MUTEX(cl)));

	mutex_exit(CMLB_MUTEX(cl));
	dkl = kmem_zalloc(cl->cl_sys_blocksize, KM_SLEEP);
	mutex_enter(CMLB_MUTEX(cl));

	bcopy(&cl->cl_vtoc, &dkl->dkl_vtoc, sizeof (struct dk_vtoc));
	dkl->dkl_rpm = cl->cl_g.dkg_rpm;
	dkl->dkl_pcyl = cl->cl_g.dkg_pcyl;
	dkl->dkl_apc = cl->cl_g.dkg_apc;
	dkl->dkl_intrlv = cl->cl_g.dkg_intrlv;
	dkl->dkl_ncyl = cl->cl_g.dkg_ncyl;
	dkl->dkl_acyl = cl->cl_g.dkg_acyl;
	dkl->dkl_nhead = cl->cl_g.dkg_nhead;
	dkl->dkl_nsect = cl->cl_g.dkg_nsect;

#if defined(_SUNOS_VTOC_8)
	dkl->dkl_obs1 = cl->cl_g.dkg_obs1;
	dkl->dkl_obs2 = cl->cl_g.dkg_obs2;
	dkl->dkl_obs3 = cl->cl_g.dkg_obs3;
	/* VTOC_8 carries the partition map and ascii label in the label. */
	for (idx = 0; idx < NDKMAP; idx++) {
		dkl->dkl_map[idx].dkl_cylno = cl->cl_map[idx].dkl_cylno;
		dkl->dkl_map[idx].dkl_nblk = cl->cl_map[idx].dkl_nblk;
	}
	bcopy(cl->cl_asciilabel, dkl->dkl_asciilabel, LEN_DKL_ASCII);
#elif defined(_SUNOS_VTOC_16)
	dkl->dkl_skew = cl->cl_dkg_skew;
#else
#error "No VTOC format defined."
#endif

	dkl->dkl_magic = DKL_MAGIC;
	dkl->dkl_write_reinstruct = cl->cl_g.dkg_write_reinstruct;
	dkl->dkl_read_reinstruct = cl->cl_g.dkg_read_reinstruct;

	/* XOR checksum over the whole label, viewed as an array of shorts. */
	cksum = 0;
	wp = (short *)dkl;
	nshorts = sizeof (struct dk_label) / sizeof (short);
	for (idx = 0; idx < nshorts; idx++)
		cksum ^= wp[idx];
	dkl->dkl_cksum = cksum;

	mutex_exit(CMLB_MUTEX(cl));
	rval = cmlb_set_vtoc(cl, dkl, tg_cookie);
	kmem_free(dkl, cl->cl_sys_blocksize);
	mutex_enter(CMLB_MUTEX(cl));
	return (rval);
}
/*
 * cmlb_dkio_set_efi:
 *	Handler for DKIOCSETEFI: copy in raw EFI label data and write it
 *	to the media.  If a VTOC label was present it is cleared first and
 *	the whole-disk minor nodes are switched from the VTOC names
 *	("h"/"h,raw") to the EFI names ("wd"/"wd,raw").
 */
static int
cmlb_dkio_set_efi(struct cmlb_lun *cl, dev_t dev, caddr_t arg, int flag,
    void *tg_cookie)
{
	dk_efi_t user_efi;
	int shift, rval = 0;
	void *buffer;
	diskaddr_t tgt_lba;
	boolean_t internal;
	if (ddi_copyin(arg, &user_efi, sizeof (dk_efi_t), flag))
		return (EFAULT);
	internal = VOID2BOOLEAN(
	    (cl->cl_alter_behavior & (CMLB_INTERNAL_MINOR_NODES)) != 0);
	/* Minor number layout differs when a p0 node is forced. */
	if (cl->cl_alter_behavior & CMLB_CREATE_P0_MINOR_NODE)
		shift = CMLBUNIT_FORCE_P0_SHIFT;
	else
		shift = CMLBUNIT_SHIFT;
	user_efi.dki_data = (void *)(uintptr_t)user_efi.dki_data_64;
	/* Reject empty and oversized transfer requests up front. */
	if (user_efi.dki_length == 0 ||
	    user_efi.dki_length > cmlb_tg_max_efi_xfer)
		return (EINVAL);
	tgt_lba = user_efi.dki_lba;
	mutex_enter(CMLB_MUTEX(cl));
	if ((cmlb_check_update_blockcount(cl, tg_cookie) != 0) ||
	    (cl->cl_tgt_blocksize == 0) ||
	    (user_efi.dki_length % cl->cl_sys_blocksize)) {
		mutex_exit(CMLB_MUTEX(cl));
		return (EINVAL);
	}
	/* Rescale the LBA when target and system block sizes differ. */
	if (cl->cl_tgt_blocksize != cl->cl_sys_blocksize)
		tgt_lba = tgt_lba *
		    cl->cl_tgt_blocksize / cl->cl_sys_blocksize;
	mutex_exit(CMLB_MUTEX(cl));
	buffer = kmem_alloc(user_efi.dki_length, KM_SLEEP);
	if (ddi_copyin(user_efi.dki_data, buffer, user_efi.dki_length, flag)) {
		rval = EFAULT;
	} else {
		mutex_enter(CMLB_MUTEX(cl));
		if (cl->cl_vtoc.v_sanity == VTOC_SANE) {
			/*
			 * A VTOC label is present: clear it on media (if it
			 * is the label actually in use) and in the cache,
			 * then swap in the EFI-style minor nodes.
			 */
			cmlb_dbg(CMLB_TRACE, cl,
			    "cmlb_dkio_set_efi: CLEAR VTOC\n");
			if (cl->cl_label_from_media == CMLB_LABEL_VTOC)
				cmlb_clear_vtoc(cl, tg_cookie);
			bzero(&cl->cl_vtoc, sizeof (struct dk_vtoc));
			mutex_exit(CMLB_MUTEX(cl));
			ddi_remove_minor_node(CMLB_DEVINFO(cl), "h");
			ddi_remove_minor_node(CMLB_DEVINFO(cl), "h,raw");
			(void) cmlb_create_minor(CMLB_DEVINFO(cl), "wd",
			    S_IFBLK,
			    (CMLBUNIT(dev, shift) << shift) | WD_NODE,
			    cl->cl_node_type, 0, internal);
			(void) cmlb_create_minor(CMLB_DEVINFO(cl), "wd,raw",
			    S_IFCHR,
			    (CMLBUNIT(dev, shift) << shift) | WD_NODE,
			    cl->cl_node_type, 0, internal);
		} else
			mutex_exit(CMLB_MUTEX(cl));
		rval = DK_TG_WRITE(cl, buffer, tgt_lba, user_efi.dki_length,
		    tg_cookie);
		if (rval == 0) {
			/* Force the label to be re-read on next access. */
			mutex_enter(CMLB_MUTEX(cl));
			cl->cl_f_geometry_is_valid = B_FALSE;
			mutex_exit(CMLB_MUTEX(cl));
		}
	}
	kmem_free(buffer, user_efi.dki_length);
	return (rval);
}
/*
 * cmlb_dkio_get_mboot:
 *	Handler for DKIOCGMBOOT: read the master boot record (block 0)
 *	and copy it out to the caller.  On VTOC_8 platforms only
 *	removable/hotpluggable devices carry an mboot.
 *
 * Note: the opening "if" below is deliberately split across the two
 * VTOC #if arms; both arms share the same closing brace and body.
 */
static int
cmlb_dkio_get_mboot(struct cmlb_lun *cl, caddr_t arg, int flag, void *tg_cookie)
{
	struct mboot *mboot;
	int rval;
	size_t buffer_size;
#if defined(_SUNOS_VTOC_8)
	if ((!ISREMOVABLE(cl) && !ISHOTPLUGGABLE(cl)) || (arg == NULL)) {
#elif defined(_SUNOS_VTOC_16)
	if (arg == NULL) {
#endif
		return (EINVAL);
	}
	/* Read a full device block even though only the mboot is copied. */
	buffer_size = cl->cl_sys_blocksize;
	/*
	 * NOTE(review): buffer_size is a size_t printed with 0x%x here —
	 * confirm cmlb_dbg()'s format handling tolerates this.
	 */
	cmlb_dbg(CMLB_TRACE, cl,
	    "cmlb_dkio_get_mboot: allocation size: 0x%x\n", buffer_size);
	mboot = kmem_zalloc(buffer_size, KM_SLEEP);
	if ((rval = DK_TG_READ(cl, mboot, 0, buffer_size, tg_cookie)) == 0) {
		if (ddi_copyout(mboot, (void *)arg,
		    sizeof (struct mboot), flag) != 0) {
			rval = EFAULT;
		}
	}
	kmem_free(mboot, buffer_size);
	return (rval);
}
/*
 * cmlb_dkio_set_mboot:
 *	Handler for DKIOCSMBOOT: copy in a master boot record, verify its
 *	signature, write it to block 0, and refresh the cached fdisk/
 *	geometry state to match the new partitioning.
 */
static int
cmlb_dkio_set_mboot(struct cmlb_lun *cl, caddr_t arg, int flag, void *tg_cookie)
{
	struct mboot *mboot = NULL;
	int rval;
	ushort_t magic;
	ASSERT(!mutex_owned(CMLB_MUTEX(cl)));
#if defined(_SUNOS_VTOC_8)
	/* On VTOC_8 platforms only removable/hotpluggable media use mboot. */
	if (!ISREMOVABLE(cl) && !ISHOTPLUGGABLE(cl)) {
		return (EINVAL);
	}
#endif
	if (arg == NULL) {
		return (EINVAL);
	}
	mboot = kmem_zalloc(cl->cl_sys_blocksize, KM_SLEEP);
	if (ddi_copyin((const void *)arg, mboot,
	    cl->cl_sys_blocksize, flag) != 0) {
		kmem_free(mboot, cl->cl_sys_blocksize);
		return (EFAULT);
	}
	/* The on-disk signature is little-endian; verify 0xAA55. */
	magic = LE_16(mboot->signature);
	if (magic != MBB_MAGIC) {
		kmem_free(mboot, cl->cl_sys_blocksize);
		return (EINVAL);
	}
	rval = DK_TG_WRITE(cl, mboot, 0, cl->cl_sys_blocksize, tg_cookie);
	mutex_enter(CMLB_MUTEX(cl));
#if defined(__x86)
	/* x86: re-read fdisk and rebuild the VTOC-derived state. */
	if (rval == 0) {
		rval = cmlb_update_fdisk_and_vtoc(cl, tg_cookie);
		if ((!cl->cl_f_geometry_is_valid) || (rval != 0)) {
			mutex_exit(CMLB_MUTEX(cl));
			kmem_free(mboot, cl->cl_sys_blocksize);
			return (rval);
		}
	}
#ifdef __lock_lint
	cmlb_setup_default_geometry(cl, tg_cookie);
#endif
#else
	/* Non-x86: fall back to a default geometry for small devices. */
	if (rval == 0) {
		if (cl->cl_blockcount <= CMLB_EXTVTOC_LIMIT)
			cmlb_setup_default_geometry(cl, tg_cookie);
	}
#endif
	cl->cl_msglog_flag |= CMLB_ALLOW_2TB_WARN;
	mutex_exit(CMLB_MUTEX(cl));
	kmem_free(mboot, cl->cl_sys_blocksize);
	return (rval);
}
#if defined(__x86)
/*
 * cmlb_dkio_set_ext_part:
 *	Refresh the cached fdisk state after userland has rewritten the
 *	extended partition table; simply re-reads the fdisk table under
 *	the cmlb mutex and propagates any error.
 */
static int
cmlb_dkio_set_ext_part(struct cmlb_lun *cl, caddr_t arg, int flag,
    void *tg_cookie)
{
	diskaddr_t	cap;
	int		rc;

	ASSERT(!mutex_owned(CMLB_MUTEX(cl)));

	mutex_enter(CMLB_MUTEX(cl));
	cap = cl->cl_blockcount;
	rc = cmlb_read_fdisk(cl, cap, tg_cookie);
	mutex_exit(CMLB_MUTEX(cl));
	return (rc);
}
#endif
/*
 * cmlb_setup_default_geometry:
 *	Fabricate a geometry and single-slice label for unlabeled media.
 *	Prefers the target driver's physical geometry when the
 *	CMLB_FAKE_GEOM_LABEL_IOCTLS_VTOC8 behavior flag is set and the
 *	query succeeds; otherwise derives a geometry from the capacity.
 *	Caller must hold the cmlb mutex (dropped around the target query).
 */
static void
cmlb_setup_default_geometry(struct cmlb_lun *cl, void *tg_cookie)
{
	struct cmlb_geom pgeom;
	struct cmlb_geom *pgeomp = &pgeom;
	int ret;
	int geom_base_cap = 1;	/* 1 => derive geometry from capacity */
	ASSERT(mutex_owned(CMLB_MUTEX(cl)));
	/* Discard any previously cached geometry/label state. */
	bzero(&cl->cl_g, sizeof (struct dk_geom));
	bzero(&cl->cl_vtoc, sizeof (struct dk_vtoc));
	bzero(cl->cl_map, NDKMAP * (sizeof (struct dk_map)));
	if (cl->cl_alter_behavior & CMLB_FAKE_GEOM_LABEL_IOCTLS_VTOC8) {
		/* Ask the target driver for its physical geometry. */
		mutex_exit(CMLB_MUTEX(cl));
		ret = DK_TG_GETPHYGEOM(cl, pgeomp, tg_cookie);
		mutex_enter(CMLB_MUTEX(cl));
		if (ret == 0) {
			geom_base_cap = 0;
		} else {
			cmlb_dbg(CMLB_ERROR, cl,
			    "cmlb_setup_default_geometry: "
			    "tg_getphygeom failed %d\n", ret);
		}
	}
	if (geom_base_cap) {
		/*
		 * Capacity-based heuristics, smallest to largest:
		 * CDs get one cylinder/head; tiny devices one block per
		 * cylinder; floppy-sized get 80 cyl x 2 head; mid-size
		 * get 64 head x 32 sect; large devices get 255 heads with
		 * a sector count chosen so ncyl fits in 16 bits.
		 */
		if (ISCD(cl)) {
			cl->cl_g.dkg_ncyl = 1;
			cl->cl_g.dkg_nhead = 1;
			cl->cl_g.dkg_nsect = cl->cl_blockcount;
		} else if (cl->cl_blockcount < 160) {
			cl->cl_g.dkg_nhead = 1;
			cl->cl_g.dkg_ncyl = cl->cl_blockcount;
			cl->cl_g.dkg_nsect = 1;
		} else if (cl->cl_blockcount <= 0x1000) {
			cl->cl_g.dkg_nhead = 2;
			cl->cl_g.dkg_ncyl = 80;
			cl->cl_g.dkg_pcyl = 80;
			cl->cl_g.dkg_nsect = cl->cl_blockcount / (2 * 80);
		} else if (cl->cl_blockcount <= 0x200000) {
			cl->cl_g.dkg_nhead = 64;
			cl->cl_g.dkg_nsect = 32;
			cl->cl_g.dkg_ncyl = cl->cl_blockcount / (64 * 32);
		} else {
			cl->cl_g.dkg_nhead = 255;
			cl->cl_g.dkg_nsect = ((cl->cl_blockcount +
			    (UINT16_MAX * 255 * 63) - 1) /
			    (UINT16_MAX * 255 * 63)) * 63;
			if (cl->cl_g.dkg_nsect == 0)
				cl->cl_g.dkg_nsect = (UINT16_MAX / 63) * 63;
			cl->cl_g.dkg_ncyl = cl->cl_blockcount /
			    (255 * cl->cl_g.dkg_nsect);
		}
		cl->cl_g.dkg_acyl = 0;
		cl->cl_g.dkg_bcyl = 0;
		cl->cl_g.dkg_intrlv = 1;
		cl->cl_g.dkg_rpm = 200;
		if (cl->cl_g.dkg_pcyl == 0)
			cl->cl_g.dkg_pcyl = cl->cl_g.dkg_ncyl +
			    cl->cl_g.dkg_acyl;
	} else {
		/* Copy the geometry the target driver reported. */
		cl->cl_g.dkg_ncyl = (short)pgeomp->g_ncyl;
		cl->cl_g.dkg_acyl = pgeomp->g_acyl;
		cl->cl_g.dkg_nhead = pgeomp->g_nhead;
		cl->cl_g.dkg_nsect = pgeomp->g_nsect;
		cl->cl_g.dkg_intrlv = pgeomp->g_intrlv;
		cl->cl_g.dkg_rpm = pgeomp->g_rpm;
		cl->cl_g.dkg_pcyl = cl->cl_g.dkg_ncyl + cl->cl_g.dkg_acyl;
	}
	cl->cl_g.dkg_read_reinstruct = 0;
	cl->cl_g.dkg_write_reinstruct = 0;
	cl->cl_solaris_size = cl->cl_g.dkg_ncyl *
	    cl->cl_g.dkg_nhead * cl->cl_g.dkg_nsect;
	/* Slices 'a' and 'c' both cover the whole Solaris area. */
	cl->cl_map['a'-'a'].dkl_cylno = 0;
	cl->cl_map['a'-'a'].dkl_nblk = cl->cl_solaris_size;
	cl->cl_map['c'-'a'].dkl_cylno = 0;
	cl->cl_map['c'-'a'].dkl_nblk = cl->cl_solaris_size;
	cl->cl_vtoc.v_part[2].p_tag = V_BACKUP;
	cl->cl_vtoc.v_part[2].p_flag = V_UNMNT;
	cl->cl_vtoc.v_nparts = V_NUMPAR;
	cl->cl_vtoc.v_version = V_VERSION;
	(void) sprintf((char *)cl->cl_asciilabel, "DEFAULT cyl %d alt %d"
	    " hd %d sec %d", cl->cl_g.dkg_ncyl, cl->cl_g.dkg_acyl,
	    cl->cl_g.dkg_nhead, cl->cl_g.dkg_nsect);
	/* This is a fabricated label, not one read from the media. */
	cl->cl_f_geometry_is_valid = B_FALSE;
}
#if defined(__x86)
/*
 * cmlb_update_fdisk_and_vtoc:
 *	Re-read the fdisk table and rebuild the cached partition maps and
 *	per-partition offsets after the MBR has changed.  Caller must hold
 *	the cmlb mutex.
 *
 * Returns 0 on success, EINVAL if the block count cannot be refreshed,
 * or the error from cmlb_read_fdisk().
 */
static int
cmlb_update_fdisk_and_vtoc(struct cmlb_lun *cl, void *tg_cookie)
{
	int count;
	int label_rc = 0;
	int fdisk_rval;
	diskaddr_t capacity;
	ASSERT(mutex_owned(CMLB_MUTEX(cl)));
	if (cmlb_check_update_blockcount(cl, tg_cookie) != 0)
		return (EINVAL);
	/* p0 always spans the entire device. */
	cl->cl_map[P0_RAW_DISK].dkl_cylno = 0;
	cl->cl_map[P0_RAW_DISK].dkl_nblk = cl->cl_blockcount;
	capacity = cl->cl_blockcount;
	cmlb_resync_geom_caches(cl, capacity, tg_cookie);
	if (cl->cl_device_type == DTYPE_DIRECT || ISREMOVABLE(cl)) {
		fdisk_rval = cmlb_read_fdisk(cl, capacity, tg_cookie);
		if (fdisk_rval != 0) {
			ASSERT(mutex_owned(CMLB_MUTEX(cl)));
			return (fdisk_rval);
		}
		/*
		 * No (usable) Solaris partition: skip straight to the
		 * fdisk-partition bookkeeping below.
		 */
		if (cl->cl_solaris_size <= DK_LABEL_LOC) {
			label_rc = 0;
			cl->cl_f_geometry_is_valid = B_TRUE;
			goto no_solaris_partition;
		}
	}
	if (!cl->cl_f_geometry_is_valid) {
		cmlb_build_default_label(cl, tg_cookie);
		label_rc = 0;
	}
no_solaris_partition:
#if defined(_SUNOS_VTOC_16)
	/* Publish the fdisk partitions as the p1..pN minor slots. */
	for (count = 0; count < FDISK_PARTS; count++) {
		cl->cl_map[FDISK_P1 + count].dkl_cylno = UINT32_MAX;
		cl->cl_map[FDISK_P1 + count].dkl_nblk =
		    cl->cl_fmap[count].fmap_nblk;
		cl->cl_offset[FDISK_P1 + count] =
		    cl->cl_fmap[count].fmap_start;
	}
#endif
	/* Recompute absolute block offsets for the VTOC slices. */
	for (count = 0; count < NDKMAP; count++) {
#if defined(_SUNOS_VTOC_8)
		struct dk_map *lp = &cl->cl_map[count];
		cl->cl_offset[count] =
		    cl->cl_g.dkg_nhead * cl->cl_g.dkg_nsect * lp->dkl_cylno;
#elif defined(_SUNOS_VTOC_16)
		struct dkl_partition *vp = &cl->cl_vtoc.v_part[count];
		cl->cl_offset[count] = vp->p_start + cl->cl_solaris_offset;
#else
#error "No VTOC format defined."
#endif
	}
	ASSERT(mutex_owned(CMLB_MUTEX(cl)));
	return (label_rc);
}
#endif
#if defined(__x86)
/*
 * cmlb_dkio_get_virtgeom:
 *	Handler for DKIOCG_VIRTGEOM: report the HBA (logical) geometry.
 *	Fails with EINVAL unless the logical geometry is populated and the
 *	cylinder count is within the BIOS limit (<= 1024).
 */
static int
cmlb_dkio_get_virtgeom(struct cmlb_lun *cl, caddr_t arg, int flag)
{
	int err = 0;
	struct dk_geom disk_geom;
	struct dk_geom *dkgp = &disk_geom;

	/*
	 * Zero the whole structure before filling selected fields so no
	 * uninitialized kernel stack bytes are copied out to userland.
	 */
	bzero(dkgp, sizeof (struct dk_geom));
	mutex_enter(CMLB_MUTEX(cl));
	if (cl->cl_lgeom.g_nhead == 0 ||
	    cl->cl_lgeom.g_nsect == 0 ||
	    cl->cl_lgeom.g_ncyl > 1024) {
		mutex_exit(CMLB_MUTEX(cl));
		err = EINVAL;
	} else {
		dkgp->dkg_ncyl = cl->cl_lgeom.g_ncyl;
		dkgp->dkg_acyl = cl->cl_lgeom.g_acyl;
		dkgp->dkg_pcyl = dkgp->dkg_ncyl + dkgp->dkg_acyl;
		dkgp->dkg_nhead = cl->cl_lgeom.g_nhead;
		dkgp->dkg_nsect = cl->cl_lgeom.g_nsect;
		mutex_exit(CMLB_MUTEX(cl));
		if (ddi_copyout(dkgp, (void *)arg,
		    sizeof (struct dk_geom), flag)) {
			err = EFAULT;
		} else {
			err = 0;
		}
	}
	return (err);
}
#endif
#if defined(__x86)
/*
 * cmlb_dkio_get_phygeom:
 *	Handler for DKIOCG_PHYGEOM: report a physical geometry.  Uses the
 *	label geometry when available; otherwise falls back to the cached
 *	physical geometry (CDs) or a geometry derived from capacity.
 */
static int
cmlb_dkio_get_phygeom(struct cmlb_lun *cl, caddr_t arg, int flag,
    void *tg_cookie)
{
	int err = 0;
	diskaddr_t capacity;
	struct dk_geom disk_geom;
	struct dk_geom *dkgp = &disk_geom;
	mutex_enter(CMLB_MUTEX(cl));
	if (cl->cl_g.dkg_nhead != 0 &&
	    cl->cl_g.dkg_nsect != 0) {
		/* Label geometry exists; recompute ncyl from capacity. */
		bcopy(&cl->cl_g, dkgp, sizeof (*dkgp));
		dkgp->dkg_acyl = 0;
		dkgp->dkg_ncyl = cl->cl_blockcount /
		    (dkgp->dkg_nhead * dkgp->dkg_nsect);
	} else {
		bzero(dkgp, sizeof (struct dk_geom));
		if (ISCD(cl)) {
			/* CDs: use the cached physical geometry as-is. */
			dkgp->dkg_nhead = cl->cl_pgeom.g_nhead;
			dkgp->dkg_nsect = cl->cl_pgeom.g_nsect;
			dkgp->dkg_ncyl = cl->cl_pgeom.g_ncyl;
			dkgp->dkg_acyl = cl->cl_pgeom.g_acyl;
		} else {
			if (cl->cl_blockcount == 0) {
				mutex_exit(CMLB_MUTEX(cl));
				err = EIO;
				return (err);
			}
			/* Some devices report one block too many. */
			if (cl->cl_alter_behavior & CMLB_OFF_BY_ONE)
				capacity = cl->cl_blockcount - 1;
			else
				capacity = cl->cl_blockcount;
			cmlb_convert_geometry(cl, capacity, dkgp, tg_cookie);
			dkgp->dkg_acyl = 0;
			dkgp->dkg_ncyl = capacity /
			    (dkgp->dkg_nhead * dkgp->dkg_nsect);
		}
	}
	dkgp->dkg_pcyl = dkgp->dkg_ncyl + dkgp->dkg_acyl;
	mutex_exit(CMLB_MUTEX(cl));
	if (ddi_copyout(dkgp, (void *)arg, sizeof (struct dk_geom), flag))
		err = EFAULT;
	return (err);
}
#endif
#if defined(__x86)
/*
 * cmlb_dkio_partinfo:
 *	Handler for DKIOCPARTINFO: return the start block and length of
 *	the partition identified by the minor number, converting between
 *	data models as needed.
 */
static int
cmlb_dkio_partinfo(struct cmlb_lun *cl, dev_t dev, caddr_t arg, int flag)
{
	int err = 0;
	int part;
	/* Extract the partition index from the minor number. */
	if (cl->cl_alter_behavior & CMLB_CREATE_P0_MINOR_NODE)
		part = getminor(dev) & ((1 << CMLBUNIT_FORCE_P0_SHIFT) - 1);
	else
		part = CMLBPART(dev);
	mutex_enter(CMLB_MUTEX(cl));
	/* Slice minors are meaningless without a Solaris partition. */
	if (part < P0_RAW_DISK && cl->cl_solaris_size == 0) {
		err = EIO;
		mutex_exit(CMLB_MUTEX(cl));
	} else {
		struct part_info p;
		p.p_start = (daddr_t)cl->cl_offset[part];
		p.p_length = (int)cl->cl_map[part].dkl_nblk;
		mutex_exit(CMLB_MUTEX(cl));
#ifdef _MULTI_DATAMODEL
		switch (ddi_model_convert_from(flag & FMODELS)) {
		case DDI_MODEL_ILP32:
		{
			struct part_info32 p32;
			/* Narrow to the 32-bit layout for ILP32 callers. */
			p32.p_start = (daddr32_t)p.p_start;
			p32.p_length = p.p_length;
			if (ddi_copyout(&p32, (void *)arg,
			    sizeof (p32), flag))
				err = EFAULT;
			break;
		}
		case DDI_MODEL_NONE:
		{
			if (ddi_copyout(&p, (void *)arg, sizeof (p),
			    flag))
				err = EFAULT;
			break;
		}
		}
#else
		if (ddi_copyout(&p, (void *)arg, sizeof (p), flag))
			err = EFAULT;
#endif
	}
	return (err);
}
/*
 * cmlb_dkio_extpartinfo:
 *	Handler for DKIOCEXTPARTINFO: return the start block and length of
 *	the partition identified by the minor number, using 64-bit
 *	(diskaddr_t) fields.
 */
static int
cmlb_dkio_extpartinfo(struct cmlb_lun *cl, dev_t dev, caddr_t arg, int flag)
{
	struct extpart_info einfo;
	int	part;

	/* Extract the partition index from the minor number. */
	if (cl->cl_alter_behavior & CMLB_CREATE_P0_MINOR_NODE)
		part = getminor(dev) & ((1 << CMLBUNIT_FORCE_P0_SHIFT) - 1);
	else
		part = CMLBPART(dev);

	mutex_enter(CMLB_MUTEX(cl));
	/* Slice minors are meaningless without a Solaris partition. */
	if (part < P0_RAW_DISK && cl->cl_solaris_size == 0) {
		mutex_exit(CMLB_MUTEX(cl));
		return (EIO);
	}
	einfo.p_start = (diskaddr_t)cl->cl_offset[part];
	einfo.p_length = (diskaddr_t)cl->cl_map[part].dkl_nblk;
	mutex_exit(CMLB_MUTEX(cl));

	if (ddi_copyout(&einfo, (void *)arg, sizeof (einfo), flag) != 0)
		return (EFAULT);
	return (0);
}
#endif
/*
 * cmlb_prop_op:
 *	prop_op(9E) support for disk drivers using cmlb.  Serves the
 *	dynamic properties "device-nblocks", "device-blksize",
 *	"device-solid-state" and "device-rotational" for DDI_DEV_T_ANY
 *	queries, per-partition "nblocks"/"blksize" otherwise, and falls
 *	back to ddi_prop_op() for everything else.
 */
int
cmlb_prop_op(cmlb_handle_t cmlbhandle,
    dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
    char *name, caddr_t valuep, int *lengthp, int part, void *tg_cookie)
{
	struct cmlb_lun	*cl;
	diskaddr_t	capacity;
	uint32_t	lbasize;
	enum dp { DP_NBLOCKS, DP_BLKSIZE, DP_SSD, DP_ROT } dp;
	int		callers_length;
	caddr_t		buffer;
	uint64_t	nblocks64;
	uint_t		dblk;
	tg_attribute_t	tgattr;
	cl = (struct cmlb_lun *)cmlbhandle;
	if (cl == NULL) {
fallback:	return (ddi_prop_op(dev, dip, prop_op, mod_flags,
		    name, valuep, lengthp));
	}
	/* With no known capacity we cannot answer; use the generic path. */
	capacity = cl->cl_blockcount;
	if (capacity == 0)
		goto fallback;
	lbasize = cl->cl_tgt_blocksize;
	if (lbasize == 0)
		lbasize = DEV_BSIZE;
	if (dev == DDI_DEV_T_ANY) {
		/* Device-wide dynamic properties. */
		if (strcmp(name, "device-nblocks") == 0)
			dp = DP_NBLOCKS;
		else if (strcmp(name, "device-blksize") == 0)
			dp = DP_BLKSIZE;
		else if (strcmp(name, "device-solid-state") == 0)
			dp = DP_SSD;
		else if (strcmp(name, "device-rotational") == 0)
			dp = DP_ROT;
		else
			goto fallback;
		/* Establish the length of our dynamic property. */
		callers_length = *lengthp;
		if (dp == DP_NBLOCKS)
			*lengthp = sizeof (uint64_t);
		else if ((dp == DP_BLKSIZE) || (dp == DP_SSD) ||
		    (dp == DP_ROT))
			/*
			 * Fix: DP_ROT also reports a uint32_t but
			 * previously fell through here, leaving *lengthp
			 * at the caller's stale value for PROP_LEN and
			 * the alloc/buf paths below.
			 */
			*lengthp = sizeof (uint32_t);
		if (prop_op == PROP_LEN)
			return (DDI_PROP_SUCCESS);
		switch (prop_op) {
		case PROP_LEN_AND_VAL_ALLOC:
			if ((buffer = kmem_alloc(*lengthp,
			    (mod_flags & DDI_PROP_CANSLEEP) ?
			    KM_SLEEP : KM_NOSLEEP)) == NULL)
				return (DDI_PROP_NO_MEMORY);
			*(caddr_t *)valuep = buffer;
			break;
		case PROP_LEN_AND_VAL_BUF:
			if (callers_length != *lengthp)
				return (DDI_PROP_INVAL_ARG);
			buffer = valuep;
			break;
		default:
			return (DDI_PROP_INVAL_ARG);
		}
		switch (dp) {
		case DP_NBLOCKS:
			*((uint64_t *)buffer) = capacity;
			break;
		case DP_BLKSIZE:
			*((uint32_t *)buffer) = lbasize;
			break;
		case DP_SSD:
			/* On query failure, report "not solid state". */
			if (DK_TG_GETATTRIBUTE(cl, &tgattr, tg_cookie) != 0)
				tgattr.media_is_solid_state = B_FALSE;
			*((uint32_t *)buffer) =
			    tgattr.media_is_solid_state ? 1 : 0;
			break;
		case DP_ROT:
			/* On query failure, report "rotational". */
			if (DK_TG_GETATTRIBUTE(cl, &tgattr, tg_cookie) != 0)
				tgattr.media_is_rotational = B_TRUE;
			*((uint32_t *)buffer) =
			    tgattr.media_is_rotational ? 1 : 0;
			break;
		}
		return (DDI_PROP_SUCCESS);
	}
	/* Per-partition "nblocks"/"blksize" via the generic helper. */
	if (!cmlb_is_valid(cmlbhandle))
		goto fallback;
	(void) cmlb_partinfo(cmlbhandle, part,
	    (diskaddr_t *)&nblocks64, NULL, NULL, NULL, tg_cookie);
	/* Report nblocks in lbasize units. */
	dblk = lbasize / cl->cl_sys_blocksize;
	return (ddi_prop_op_nblocks_blksize(dev, dip, prop_op, mod_flags,
	    name, valuep, lengthp, nblocks64 / dblk, lbasize));
}