#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include "ufshci_private.h"
#include "ufshci_reg.h"
/*
 * Mark the controller as failed and fail all requests pending on both
 * request queues.  Called when initialization or reset cannot proceed.
 */
static void
ufshci_ctrlr_fail(struct ufshci_controller *ctrlr)
{
	ctrlr->is_failed = true;
	/* Fail the task management queue first, then the transfer queue. */
	ufshci_req_queue_fail(ctrlr, &ctrlr->task_mgmt_req_queue);
	ufshci_req_queue_fail(ctrlr, &ctrlr->transfer_req_queue);
}
/*
 * Re-initialize the controller and device after switching to the maximum
 * gear: quiesce both request queues, bounce the host controller, then
 * redo the bring-up sequence (queues, NOP, device init, reference clock,
 * UniPro).  Returns 0 on success or the first non-zero error.
 */
static int
ufshci_ctrlr_reinit_after_max_gear_switch(struct ufshci_controller *ctrlr)
{
	int error;

	/* Quiesce both request queues before resetting the controller. */
	ufshci_utmr_req_queue_disable(ctrlr);
	ufshci_utr_req_queue_disable(ctrlr);

	if ((error = ufshci_ctrlr_disable(ctrlr)) != 0)
		return (error);
	if ((error = ufshci_ctrlr_enable(ctrlr)) != 0)
		return (error);
	if ((error = ufshci_utmr_req_queue_enable(ctrlr)) != 0)
		return (error);
	if ((error = ufshci_utr_req_queue_enable(ctrlr)) != 0)
		return (error);
	if ((error = ufshci_ctrlr_send_nop(ctrlr)) != 0)
		return (error);
	if ((error = ufshci_dev_init(ctrlr)) != 0)
		return (error);
	if ((error = ufshci_dev_init_reference_clock(ctrlr)) != 0)
		return (error);

	return (ufshci_dev_init_unipro(ctrlr));
}
/*
 * Bring up the UFS device behind an already-enabled host controller:
 * NOP probe, device init, reference clock, UniPro, power mode and link
 * state, descriptors, WriteBooster, auto-hibernate, and (on first
 * attach only) the CAM SIM.  Any failure marks the controller failed.
 *
 * 'resetting' is true when called from the reset task, in which case
 * the request queues must be re-enabled here; on the initial attach the
 * config hook has already enabled them.
 */
static void
ufshci_ctrlr_start(struct ufshci_controller *ctrlr, bool resetting)
{
	TSENTER();

	/*
	 * The config hook enables the queues on the first attach; only a
	 * reset needs to re-enable them here.
	 */
	if (resetting) {
		if (ufshci_utmr_req_queue_enable(ctrlr) != 0)
			goto fail;
		if (ufshci_utr_req_queue_enable(ctrlr) != 0)
			goto fail;
	}

	/* Verify the transfer path with a NOP before real traffic. */
	if (ufshci_ctrlr_send_nop(ctrlr) != 0)
		goto fail;

	if (ufshci_dev_init(ctrlr) != 0)
		goto fail;

	if (ufshci_dev_init_reference_clock(ctrlr) != 0)
		goto fail;

	if (ufshci_dev_init_unipro(ctrlr) != 0)
		goto fail;

	/* Some controllers cannot change the UIC power mode. */
	if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) &&
	    ufshci_dev_init_uic_power_mode(ctrlr) != 0)
		goto fail;

	ufshci_dev_init_uic_link_state(ctrlr);

	/* Quirky hardware needs a full re-init after the max gear switch. */
	if ((ctrlr->quirks & UFSHCI_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) &&
	    ufshci_ctrlr_reinit_after_max_gear_switch(ctrlr) != 0)
		goto fail;

	if (ufshci_dev_get_descriptor(ctrlr) != 0)
		goto fail;

	if (ufshci_dev_config_write_booster(ctrlr) != 0)
		goto fail;

	ufshci_dev_init_auto_hibernate(ctrlr);

	/* The SIM is attached once, on the initial bring-up only. */
	if (!resetting && ufshci_sim_attach(ctrlr) != 0)
		goto fail;

	if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0)
		goto fail;

	TSEXIT();
	return;

fail:
	ufshci_ctrlr_fail(ctrlr);
}
static int
ufshci_ctrlr_disable_host_ctrlr(struct ufshci_controller *ctrlr)
{
int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
sbintime_t delta_t = SBT_1US;
uint32_t hce;
hce = ufshci_mmio_read_4(ctrlr, hce);
if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce)) {
hce &= ~UFSHCIM(UFSHCI_HCE_REG_HCE);
ufshci_mmio_write_4(ctrlr, hce, hce);
}
while (1) {
hce = ufshci_mmio_read_4(ctrlr, hce);
if (!UFSHCIV(UFSHCI_HCE_REG_HCE, hce))
break;
if (timeout - ticks < 0) {
ufshci_printf(ctrlr,
"host controller failed to disable "
"within %d ms\n",
ctrlr->device_init_timeout_in_ms);
return (ENXIO);
}
pause_sbt("ufshci_disable_hce", delta_t, 0, C_PREL(1));
delta_t = min(SBT_1MS, delta_t * 3 / 2);
}
return (0);
}
static int
ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
{
int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
sbintime_t delta_t = SBT_1US;
uint32_t hce;
hce = ufshci_mmio_read_4(ctrlr, hce);
hce |= UFSHCIM(UFSHCI_HCE_REG_HCE);
ufshci_mmio_write_4(ctrlr, hce, hce);
pause_sbt("ufshci_enable_hce", ustosbt(100), 0, C_PREL(1));
while (1) {
hce = ufshci_mmio_read_4(ctrlr, hce);
if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce))
break;
if (timeout - ticks < 0) {
ufshci_printf(ctrlr,
"host controller failed to enable "
"within %d ms\n",
ctrlr->device_init_timeout_in_ms);
return (ENXIO);
}
pause_sbt("ufshci_enable_hce", delta_t, 0, C_PREL(1));
delta_t = min(SBT_1MS, delta_t * 3 / 2);
}
return (0);
}
/*
 * Disable the controller: mask all interrupt enables, then bring the
 * host controller down.  Returns 0 or an errno from the HCE poll.
 */
int
ufshci_ctrlr_disable(struct ufshci_controller *ctrlr)
{
	/* Mask every interrupt source before stopping the controller. */
	ufshci_mmio_write_4(ctrlr, ie, 0);
	return (ufshci_ctrlr_disable_host_ctrlr(ctrlr));
}
/*
 * Enable the controller: raise HCE, perform DME link startup, verify a
 * device is present (HCS.DP), and unmask the interrupt sources this
 * driver services.  Returns 0, ENXIO when no device is found, or the
 * error from the enable/link-startup steps.
 */
int
ufshci_ctrlr_enable(struct ufshci_controller *ctrlr)
{
	uint32_t ie, hcs;
	int error;

	error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
	if (error != 0)
		return (error);

	error = ufshci_uic_send_dme_link_startup(ctrlr);
	if (error != 0)
		return (error);

	/* HCS.DP indicates whether a device is attached to the link. */
	hcs = ufshci_mmio_read_4(ctrlr, hcs);
	if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) {
		ufshci_printf(ctrlr, "UFS device not found\n");
		return (ENXIO);
	}

	/* Unmask completion and fatal/error interrupts in one shot. */
	ie = ufshci_mmio_read_4(ctrlr, ie);
	ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE) | UFSHCIM(UFSHCI_IE_REG_UEE) |
	    UFSHCIM(UFSHCI_IE_REG_UTMRCE) | UFSHCIM(UFSHCI_IE_REG_DFEE) |
	    UFSHCIM(UFSHCI_IE_REG_UTPEE) | UFSHCIM(UFSHCI_IE_REG_HCFEE) |
	    UFSHCIM(UFSHCI_IE_REG_SBFEE) | UFSHCIM(UFSHCI_IE_REG_CEFEE);
	ufshci_mmio_write_4(ctrlr, ie, ie);

	return (0);
}
/*
 * Hard-reset the host controller by disabling and re-enabling it.
 * Returns 0 or the first error encountered.
 */
static int
ufshci_ctrlr_hw_reset(struct ufshci_controller *ctrlr)
{
	int error;

	error = ufshci_ctrlr_disable(ctrlr);
	if (error != 0)
		return (error);
	return (ufshci_ctrlr_enable(ctrlr));
}
/*
 * Taskqueue handler for deferred controller reset: quiesce both request
 * queues, hard-reset the controller, and re-run the start sequence.  On
 * reset failure the controller is marked failed.
 */
static void
ufshci_ctrlr_reset_task(void *arg, int pending)
{
	struct ufshci_controller *ctrlr = arg;
	int error;

	ufshci_utmr_req_queue_disable(ctrlr);
	ufshci_utr_req_queue_disable(ctrlr);

	error = ufshci_ctrlr_hw_reset(ctrlr);
	if (error != 0) {
		/*
		 * Call and return separately: `return (void-expr);` in a
		 * void function is a C11 constraint violation (6.8.6.4),
		 * accepted only as a compiler extension.
		 */
		ufshci_ctrlr_fail(ctrlr);
		return;
	}

	ufshci_ctrlr_start(ctrlr, true);
}
/*
 * One-time controller construction at attach: record identity, create
 * the locks, read the VER/CAP registers, apply tunables, reset the host
 * controller, and build the UTMR/UTR request queues plus the reset
 * taskqueue.  Returns 0 on success or an errno value.
 */
int
ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
{
	uint32_t ver, cap, ahit;
	uint32_t timeout_period, retry_count;
	int error;

	/* Default timeouts; used by the HCE polling and UIC command paths. */
	ctrlr->device_init_timeout_in_ms = UFSHCI_DEVICE_INIT_TIMEOUT_MS;
	ctrlr->uic_cmd_timeout_in_ms = UFSHCI_UIC_CMD_TIMEOUT_MS;
	ctrlr->dev = dev;
	ctrlr->sc_unit = device_get_unit(dev);
	snprintf(ctrlr->sc_name, sizeof(ctrlr->sc_name), "%s",
	    device_get_nameunit(dev));
	mtx_init(&ctrlr->sc_mtx, device_get_nameunit(dev), NULL,
	    MTX_DEF | MTX_RECURSE);
	mtx_init(&ctrlr->uic_cmd_lock, "ufshci ctrlr uic cmd lock", NULL,
	    MTX_DEF);
	ver = ufshci_mmio_read_4(ctrlr, ver);
	ctrlr->major_version = UFSHCIV(UFSHCI_VER_REG_MJR, ver);
	ctrlr->minor_version = UFSHCIV(UFSHCI_VER_REG_MNR, ver);
	ufshci_printf(ctrlr, "UFSHCI Version: %d.%d\n", ctrlr->major_version,
	    ctrlr->minor_version);
	/* Cache CAP and derive which queueing modes the hardware offers. */
	ctrlr->cap = cap = ufshci_mmio_read_4(ctrlr, cap);
	if (ctrlr->quirks & UFSHCI_QUIRK_BROKEN_LSDBS_MCQS_CAP) {
		/* Quirky hardware misreports LSDBS/MCQS; assume both work. */
		ctrlr->is_single_db_supported = true;
		ctrlr->is_mcq_supported = true;
	} else {
		/* LSDBS == 0 means the legacy single doorbell is present. */
		ctrlr->is_single_db_supported = (UFSHCIV(UFSHCI_CAP_REG_LSDBS,
		    cap) == 0);
		ctrlr->is_mcq_supported = (UFSHCIV(UFSHCI_CAP_REG_MCQS, cap) ==
		    1);
	}
	/* At least one of the two queueing modes must be available. */
	if (!(ctrlr->is_single_db_supported || ctrlr->is_mcq_supported))
		return (ENXIO);
	ctrlr->page_size = PAGE_SIZE;
	ctrlr->max_xfer_size = ctrlr->page_size * UFSHCI_MAX_PRDT_ENTRY_COUNT;
	/* Clamp the I/O timeout tunable into the supported range. */
	timeout_period = UFSHCI_DEFAULT_TIMEOUT_PERIOD;
	TUNABLE_INT_FETCH("hw.ufshci.timeout_period", &timeout_period);
	timeout_period = min(timeout_period, UFSHCI_MAX_TIMEOUT_PERIOD);
	timeout_period = max(timeout_period, UFSHCI_MIN_TIMEOUT_PERIOD);
	ctrlr->timeout_period = timeout_period;
	retry_count = UFSHCI_DEFAULT_RETRY_COUNT;
	TUNABLE_INT_FETCH("hw.ufshci.retry_count", &retry_count);
	ctrlr->retry_count = retry_count;
	/* Aborts default to on; the quirk overrides the tunable entirely. */
	ctrlr->enable_aborts = 1;
	if (ctrlr->quirks & UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK)
		ctrlr->enable_aborts = 0;
	else
		TUNABLE_INT_FETCH("hw.ufshci.enable_aborts",
		    &ctrlr->enable_aborts);
	error = ufshci_ctrlr_hw_reset(ctrlr);
	if (error)
		return (error);
	/*
	 * NOTE(review): the discarded UECPA read appears intended to clear
	 * stale PHY-adapter error status left by link startup — confirm.
	 */
	ufshci_mmio_read_4(ctrlr, uecpa);
	/* AHIT = 0: auto-hibernate stays off until explicitly configured. */
	ahit = 0;
	ufshci_mmio_write_4(ctrlr, ahit, ahit);
	error = ufshci_utmr_req_queue_construct(ctrlr);
	if (error)
		return (error);
	error = ufshci_utr_req_queue_construct(ctrlr);
	if (error)
		return (error);
	/* One slot is held back from the transfer queue depth. */
	ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries - 1;
	/* Single-threaded taskqueue used to run deferred resets. */
	ctrlr->taskqueue = taskqueue_create("ufshci_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ctrlr->taskqueue);
	taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "ufshci taskq");
	TASK_INIT(&ctrlr->reset_task, 0, ufshci_ctrlr_reset_task, ctrlr);
	return (0);
}
/*
 * Tear down the controller in reverse order of construction: request
 * queues, interrupt handler and IRQ resource, CAM SIM, register BAR,
 * then the mutexes.  If the register BAR was never mapped, skip straight
 * to destroying the mutexes.
 */
void
ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev)
{
	/* No hardware resources to release if attach never mapped the BAR. */
	if (ctrlr->resource == NULL)
		goto nores;
	ufshci_utmr_req_queue_destroy(ctrlr);
	ufshci_utr_req_queue_destroy(ctrlr);
	/* Tear the interrupt down before releasing its IRQ resource. */
	if (ctrlr->tag)
		bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
	if (ctrlr->res)
		bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
		    rman_get_rid(ctrlr->res), ctrlr->res);
	/* SIM detach is done under sc_mtx. */
	mtx_lock(&ctrlr->sc_mtx);
	ufshci_sim_detach(ctrlr);
	mtx_unlock(&ctrlr->sc_mtx);
	bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id,
	    ctrlr->resource);
nores:
	KASSERT(!mtx_owned(&ctrlr->uic_cmd_lock),
	    ("destroying uic_cmd_lock while still owned"));
	mtx_destroy(&ctrlr->uic_cmd_lock);
	KASSERT(!mtx_owned(&ctrlr->sc_mtx),
	    ("destroying sc_mtx while still owned"));
	mtx_destroy(&ctrlr->sc_mtx);
	return;
}
/*
 * Request a controller reset.  The actual reset runs later from the
 * controller's taskqueue (ufshci_ctrlr_reset_task), so this is safe to
 * call from contexts that cannot sleep.
 */
void
ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
{
	taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
}
/*
 * Submit a request on the task management (UTMR) queue.  Returns the
 * queue submission status.
 */
int
ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req)
{
	return (
	    ufshci_req_queue_submit_request(&ctrlr->task_mgmt_req_queue, req));
}
/*
 * Submit a request on the transfer (UTR) queue.  Returns the queue
 * submission status.
 */
int
ufshci_ctrlr_submit_transfer_request(struct ufshci_controller *ctrlr,
    struct ufshci_request *req)
{
	return (
	    ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req));
}
int
ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr)
{
struct ufshci_completion_poll_status status;
status.done = 0;
ufshci_ctrlr_cmd_send_nop(ctrlr, ufshci_completion_poll_cb, &status);
ufshci_completion_poll(&status);
if (status.error) {
ufshci_printf(ctrlr, "ufshci_ctrlr_send_nop failed!\n");
return (ENXIO);
}
return (0);
}
/*
 * Config-intrhook callback: enable both request queues and run the
 * device start sequence (failing the controller if any step fails),
 * then register sysctls and release the hook.
 */
void
ufshci_ctrlr_start_config_hook(void *arg)
{
	struct ufshci_controller *ctrlr = arg;

	TSENTER();

	/* A failure to enable either queue dooms the controller. */
	if (ufshci_utmr_req_queue_enable(ctrlr) != 0 ||
	    ufshci_utr_req_queue_enable(ctrlr) != 0)
		ufshci_ctrlr_fail(ctrlr);
	else
		ufshci_ctrlr_start(ctrlr, false);

	ufshci_sysctl_initialize_ctrlr(ctrlr);
	config_intrhook_disestablish(&ctrlr->config_hook);

	TSEXIT();
}
/*
 * Service the interrupt status (IS) register: report UIC/fatal errors,
 * acknowledge each handled condition by writing its bit back to IS, and
 * drain the completion queues.  Called from the interrupt handler; also
 * usable for polled operation.
 */
void
ufshci_ctrlr_poll(struct ufshci_controller *ctrlr)
{
	uint32_t is;
	is = ufshci_mmio_read_4(ctrlr, is);
	/* UIC error: dump every per-layer error-code register that is set. */
	if (is & UFSHCIM(UFSHCI_IS_REG_UE)) {
		uint32_t uecpa, uecdl, uecn, uect, uecdme;
		uecpa = ufshci_mmio_read_4(ctrlr, uecpa);
		if (uecpa & UFSHCIM(UFSHCI_UECPA_REG_ERR)) {
			ufshci_printf(ctrlr, "UECPA error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECPA_REG_EC, uecpa));
		}
		uecdl = ufshci_mmio_read_4(ctrlr, uecdl);
		if (uecdl & UFSHCIM(UFSHCI_UECDL_REG_ERR)) {
			ufshci_printf(ctrlr, "UECDL error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECDL_REG_EC, uecdl));
		}
		uecn = ufshci_mmio_read_4(ctrlr, uecn);
		if (uecn & UFSHCIM(UFSHCI_UECN_REG_ERR)) {
			ufshci_printf(ctrlr, "UECN error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECN_REG_EC, uecn));
		}
		uect = ufshci_mmio_read_4(ctrlr, uect);
		if (uect & UFSHCIM(UFSHCI_UECT_REG_ERR)) {
			ufshci_printf(ctrlr, "UECT error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECT_REG_EC, uect));
		}
		uecdme = ufshci_mmio_read_4(ctrlr, uecdme);
		if (uecdme & UFSHCIM(UFSHCI_UECDME_REG_ERR)) {
			ufshci_printf(ctrlr, "UECDME error code: 0x%x\n",
			    UFSHCIV(UFSHCI_UECDME_REG_EC, uecdme));
		}
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UE));
	}
	/* Fatal error conditions: log and acknowledge only. */
	if (is & UFSHCIM(UFSHCI_IS_REG_DFES)) {
		ufshci_printf(ctrlr, "Device fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_DFES));
	}
	if (is & UFSHCIM(UFSHCI_IS_REG_UTPES)) {
		ufshci_printf(ctrlr, "UTP error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTPES));
	}
	if (is & UFSHCIM(UFSHCI_IS_REG_HCFES)) {
		ufshci_printf(ctrlr, "Host controller fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_HCFES));
	}
	if (is & UFSHCIM(UFSHCI_IS_REG_SBFES)) {
		ufshci_printf(ctrlr, "System bus fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_SBFES));
	}
	if (is & UFSHCIM(UFSHCI_IS_REG_CEFES)) {
		ufshci_printf(ctrlr, "Crypto engine fatal error on ISR\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CEFES));
	}
	/*
	 * Completions: the status bit is acknowledged before draining the
	 * queue — NOTE(review): presumably so a completion that lands
	 * during processing re-asserts the interrupt; confirm.
	 */
	if (is & UFSHCIM(UFSHCI_IS_REG_UTMRCS)) {
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTMRCS));
		ufshci_req_queue_process_completions(
		    &ctrlr->task_mgmt_req_queue);
	}
	if (is & UFSHCIM(UFSHCI_IS_REG_UTRCS)) {
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTRCS));
		ufshci_req_queue_process_completions(
		    &ctrlr->transfer_req_queue);
	}
	/* MCQ completion queues are not handled yet; ack and warn. */
	if (is & UFSHCIM(UFSHCI_IS_REG_CQES)) {
		ufshci_printf(ctrlr, "MCQ completion not yet implemented\n");
		ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CQES));
	}
}
/*
 * Interrupt handler entry point (shared-IRQ capable): simply polls the
 * controller's interrupt status.
 */
void
ufshci_ctrlr_shared_handler(void *arg)
{
	struct ufshci_controller *ctrlr = arg;
	ufshci_ctrlr_poll(ctrlr);
}
/*
 * Debug aid: print the controller's main UFSHCI registers (capability,
 * version, identity, interrupt, status, and UIC error-code registers).
 */
void
ufshci_reg_dump(struct ufshci_controller *ctrlr)
{
	ufshci_printf(ctrlr, "========= UFSHCI Register Dump =========\n");
	UFSHCI_DUMP_REG(ctrlr, cap);
	UFSHCI_DUMP_REG(ctrlr, mcqcap);
	UFSHCI_DUMP_REG(ctrlr, ver);
	UFSHCI_DUMP_REG(ctrlr, ext_cap);
	UFSHCI_DUMP_REG(ctrlr, hcpid);
	UFSHCI_DUMP_REG(ctrlr, hcmid);
	UFSHCI_DUMP_REG(ctrlr, ahit);
	UFSHCI_DUMP_REG(ctrlr, is);
	UFSHCI_DUMP_REG(ctrlr, ie);
	UFSHCI_DUMP_REG(ctrlr, hcsext);
	UFSHCI_DUMP_REG(ctrlr, hcs);
	UFSHCI_DUMP_REG(ctrlr, hce);
	UFSHCI_DUMP_REG(ctrlr, uecpa);
	UFSHCI_DUMP_REG(ctrlr, uecdl);
	UFSHCI_DUMP_REG(ctrlr, uecn);
	UFSHCI_DUMP_REG(ctrlr, uect);
	UFSHCI_DUMP_REG(ctrlr, uecdme);
	ufshci_printf(ctrlr, "========================================\n");
}
/*
 * Suspend handler for the given power suspend type.  If the device's
 * power modes are usable, first send START STOP UNIT (when the
 * well-known LUN periph exists) to set the device power mode mapped for
 * this suspend type, then move the link to the mapped state.  A no-op
 * (returns 0) when power modes are unsupported.
 */
int
ufshci_ctrlr_suspend(struct ufshci_controller *ctrlr, enum power_stype stype)
{
	int error;
	if (!ctrlr->ufs_dev.power_mode_supported)
		return (0);
	/* Device power mode first (via SSU), then the link state. */
	if (ctrlr->ufs_device_wlun_periph) {
		ctrlr->ufs_dev.power_mode = power_map[stype].dev_pwr;
		error = ufshci_sim_send_ssu(ctrlr, false,
		    power_map[stype].ssu_pc, false);
		if (error) {
			ufshci_printf(ctrlr,
			    "Failed to send SSU in suspend handler\n");
			return (error);
		}
	}
	error = ufshci_dev_link_state_transition(ctrlr,
	    power_map[stype].link_state);
	if (error) {
		ufshci_printf(ctrlr,
		    "Failed to transition link state in suspend handler\n");
		return (error);
	}
	return (0);
}
/*
 * Resume handler for the given power suspend type — the mirror of
 * ufshci_ctrlr_suspend: restore the link state first, then (when the
 * well-known LUN periph exists) send START STOP UNIT to restore the
 * device power mode, and finally re-enable auto-hibernate.  A no-op
 * (returns 0) when power modes are unsupported.
 */
int
ufshci_ctrlr_resume(struct ufshci_controller *ctrlr, enum power_stype stype)
{
	int error;
	if (!ctrlr->ufs_dev.power_mode_supported)
		return (0);
	/* Link state first on resume (reverse of the suspend order). */
	error = ufshci_dev_link_state_transition(ctrlr,
	    power_map[stype].link_state);
	if (error) {
		ufshci_printf(ctrlr,
		    "Failed to transition link state in resume handler\n");
		return (error);
	}
	if (ctrlr->ufs_device_wlun_periph) {
		ctrlr->ufs_dev.power_mode = power_map[stype].dev_pwr;
		error = ufshci_sim_send_ssu(ctrlr, false,
		    power_map[stype].ssu_pc, false);
		if (error) {
			ufshci_printf(ctrlr,
			    "Failed to send SSU in resume handler\n");
			return (error);
		}
	}
	ufshci_dev_enable_auto_hibernate(ctrlr);
	return (0);
}