#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/module.h>
#include <cam/scsi/scsi_all.h>
#include "sys/kassert.h"
#include "ufshci_private.h"
static void ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
struct ufshci_tracker *tr, enum ufshci_data_direction data_direction);
/*
 * Queue operations for the single-doorbell (SDB) UTP task management
 * request queue.  The doorbell ring/clear helpers are UTMR-specific;
 * everything else is shared with the transfer queue.
 */
static const struct ufshci_qops sdb_utmr_qops = {
	/* Lifecycle */
	.construct = ufshci_req_sdb_construct,
	.destroy = ufshci_req_sdb_destroy,
	.enable = ufshci_req_sdb_enable,
	.disable = ufshci_req_sdb_disable,
	/* Slot management */
	.get_hw_queue = ufshci_req_sdb_get_hw_queue,
	.reserve_slot = ufshci_req_sdb_reserve_slot,
	.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
	/* Doorbell and completion handling (UTMR-specific) */
	.ring_doorbell = ufshci_req_sdb_utmr_ring_doorbell,
	.is_doorbell_cleared = ufshci_req_sdb_utmr_is_doorbell_cleared,
	.clear_cpl_ntf = ufshci_req_sdb_utmr_clear_cpl_ntf,
	.process_cpl = ufshci_req_sdb_process_cpl,
	.get_inflight_io = ufshci_req_sdb_get_inflight_io,
};
/*
 * Queue operations for the single-doorbell (SDB) UTP transfer request
 * queue.  Only the doorbell ring/clear helpers differ from the task
 * management queue's operations.
 */
static const struct ufshci_qops sdb_utr_qops = {
	/* Lifecycle */
	.construct = ufshci_req_sdb_construct,
	.destroy = ufshci_req_sdb_destroy,
	.enable = ufshci_req_sdb_enable,
	.disable = ufshci_req_sdb_disable,
	/* Slot management */
	.get_hw_queue = ufshci_req_sdb_get_hw_queue,
	.reserve_slot = ufshci_req_sdb_reserve_slot,
	.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
	/* Doorbell and completion handling (UTR-specific) */
	.ring_doorbell = ufshci_req_sdb_utr_ring_doorbell,
	.is_doorbell_cleared = ufshci_req_sdb_utr_is_doorbell_cleared,
	.clear_cpl_ntf = ufshci_req_sdb_utr_clear_cpl_ntf,
	.process_cpl = ufshci_req_sdb_process_cpl,
	.get_inflight_io = ufshci_req_sdb_get_inflight_io,
};
/*
 * Construct the UTP task management request queue in single doorbell
 * mode.  Returns 0 on success or an errno from the construct op.
 */
int
ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr)
{
	struct ufshci_req_queue *q = &ctrlr->task_mgmt_req_queue;

	q->queue_mode = UFSHCI_Q_MODE_SDB;
	q->qops = sdb_utmr_qops;

	/* true: this is the task management queue. */
	return (q->qops.construct(ctrlr, q, UFSHCI_UTRM_ENTRIES, true));
}
void
ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr)
{
ctrlr->task_mgmt_req_queue.qops.destroy(ctrlr,
&ctrlr->task_mgmt_req_queue);
}
void
ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr)
{
ctrlr->task_mgmt_req_queue.qops.disable(ctrlr,
&ctrlr->task_mgmt_req_queue);
}
int
ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr)
{
return (ctrlr->task_mgmt_req_queue.qops.enable(ctrlr,
&ctrlr->task_mgmt_req_queue));
}
/*
 * Construct the UTP transfer request queue in single doorbell mode.
 * Returns 0 on success or an errno from the construct op.
 */
int
ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr)
{
	struct ufshci_req_queue *q = &ctrlr->transfer_req_queue;

	q->queue_mode = UFSHCI_Q_MODE_SDB;
	q->qops = sdb_utr_qops;

	/* false: this is not the task management queue. */
	return (q->qops.construct(ctrlr, q, UFSHCI_UTR_ENTRIES, false));
}
void
ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr)
{
ctrlr->transfer_req_queue.qops.destroy(ctrlr,
&ctrlr->transfer_req_queue);
}
void
ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr)
{
ctrlr->transfer_req_queue.qops.disable(ctrlr,
&ctrlr->transfer_req_queue);
}
int
ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr)
{
return (ctrlr->transfer_req_queue.qops.enable(ctrlr,
&ctrlr->transfer_req_queue));
}
/*
 * Validate a completed command: both the Overall Command Status (OCS)
 * written by the host controller and the response code in the UPIU
 * written by the device must indicate success.  Returns true when
 * either check fails (both failures are logged).
 */
static bool
ufshci_req_queue_response_is_error(struct ufshci_req_queue *req_queue,
    uint8_t ocs, union ufshci_reponse_upiu *response)
{
	bool failed = false;

	/* Host controller's view of the command. */
	if (ocs != UFSHCI_DESC_SUCCESS) {
		ufshci_printf(req_queue->ctrlr, "Invalid OCS = 0x%x\n", ocs);
		failed = true;
	}

	/* Device's view, from the response UPIU header. */
	if (response->header.response != UFSHCI_RESPONSE_CODE_TARGET_SUCCESS) {
		ufshci_printf(req_queue->ctrlr,
		    "Function(0x%x) Invalid response code = 0x%x\n",
		    response->header.ext_iid_or_function,
		    response->header.response);
		failed = true;
	}

	return (failed);
}
/*
 * Complete a tracker by hand, as if the controller had finished it:
 * patch the response code into the response UPIU and the OCS into the
 * transfer descriptor, then run the normal completion path.  Must be
 * called without the queue lock held.
 */
static void
ufshci_req_queue_manual_complete_tracker(struct ufshci_tracker *tr, uint8_t ocs,
    uint8_t rc)
{
	struct ufshci_upiu_header *hdr;
	struct ufshci_utp_xfer_req_desc *utrd;

	mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);

	hdr = (struct ufshci_upiu_header *)tr->ucd->response_upiu;
	hdr->response = rc;

	utrd = &tr->hwq->utrd[tr->slot_num];
	utrd->overall_command_status = ocs;

	ufshci_req_queue_complete_tracker(tr);
}
/*
 * Complete a request that was never handed to the hardware (it has no
 * tracker): synthesize a completion with the given response code, run
 * the error check, invoke the callback and free the request.
 */
static void
ufshci_req_queue_manual_complete_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req, uint8_t ocs, uint8_t rc)
{
	struct ufshci_completion cpl;
	bool error;

	memset(&cpl, 0, sizeof(cpl));
	cpl.response_upiu.header.response = rc;
	error = ufshci_req_queue_response_is_error(req_queue, ocs,
	    &cpl.response_upiu);

	if (error) {
		/*
		 * Fix: terminate the log message with a newline, matching
		 * every other ufshci_printf() call in this file.
		 */
		ufshci_printf(req_queue->ctrlr,
		    "Manual complete request error:0x%x\n", error);
	}

	if (req->cb_fn)
		req->cb_fn(req->cb_arg, &cpl, error);

	ufshci_free_request(req);
}
/*
 * Fail every outstanding command on a request queue (e.g. when the
 * controller itself has failed).  Reserved-but-unsubmitted trackers are
 * completed through the request path; trackers already submitted to the
 * hardware are completed manually with an ABORTED status.
 */
void
ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
    struct ufshci_req_queue *req_queue)
{
	struct ufshci_hw_queue *hwq = req_queue->qops.get_hw_queue(req_queue);
	struct ufshci_tracker *tr;
	int i;

	/* The queue may never have been constructed (early attach failure). */
	if (!mtx_initialized(&hwq->qlock))
		return;

	mtx_lock(&hwq->qlock);

	for (i = 0; i < req_queue->num_trackers; i++) {
		tr = hwq->act_tr[i];

		if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED) {
			/*
			 * The completion helpers must run without the queue
			 * lock, so drop it around each call and retake it to
			 * continue the scan.
			 */
			mtx_unlock(&hwq->qlock);
			ufshci_req_queue_manual_complete_request(req_queue,
			    tr->req, UFSHCI_DESC_ABORTED,
			    UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
			mtx_lock(&hwq->qlock);
		} else if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED) {
			/* Already at the hardware: fake an aborted completion. */
			mtx_unlock(&hwq->qlock);
			ufshci_req_queue_manual_complete_tracker(tr,
			    UFSHCI_DESC_ABORTED,
			    UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
			mtx_lock(&hwq->qlock);
		}
	}

	mtx_unlock(&hwq->qlock);
}
/*
 * Complete an I/O tracker: copy out the response UPIU and OCS, check
 * them for errors, invoke the request callback, and either resubmit the
 * command (admin retry) or release the tracker to the free list.
 * Must be called without the queue lock held.
 */
void
ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
{
	struct ufshci_req_queue *req_queue = tr->req_queue;
	struct ufshci_hw_queue *hwq = tr->hwq;
	struct ufshci_request *req = tr->req;
	struct ufshci_completion cpl;
	uint8_t ocs;
	bool retry, error, retriable;

	mtx_assert(&hwq->qlock, MA_NOTOWNED);

	cpl.size = tr->response_size;
	if (req_queue->is_task_mgmt) {
		/* Task management responses live in the UTMR descriptor. */
		memcpy(&cpl.response_upiu,
		    (void *)hwq->utmrd[tr->slot_num].response_upiu, cpl.size);
		ocs = hwq->utmrd[tr->slot_num].overall_command_status;
	} else {
		/* Transfer responses live in the UTP command descriptor. */
		bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu,
		    cpl.size);
		ocs = hwq->utrd[tr->slot_num].overall_command_status;
	}

	error = ufshci_req_queue_response_is_error(req_queue, ocs,
	    &cpl.response_upiu);

	/* Only admin requests are retried; track retry/failure statistics. */
	retriable = req->is_admin;
	retry = error && retriable &&
	    req->retries < req_queue->ctrlr->retry_count;
	if (retry)
		hwq->num_retries++;
	if (error && req->retries >= req_queue->ctrlr->retry_count && retriable)
		hwq->num_failures++;

	KASSERT(tr->req, ("there is no request assigned to the tracker\n"));
	KASSERT(cpl.response_upiu.header.task_tag ==
		req->request_upiu.header.task_tag,
	    ("response task_tag does not match request task_tag\n"));

	if (!retry) {
		/* Make payload data visible to the CPU before the callback. */
		if (req->payload_valid) {
			bus_dmamap_sync(req_queue->dma_tag_payload,
			    tr->payload_dma_map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		}
		/* The callback runs without the queue lock held. */
		if (req->cb_fn)
			req->cb_fn(req->cb_arg, &cpl, error);
	}

	mtx_lock(&hwq->qlock);

	/* Acknowledge the completion notification for this slot. */
	req_queue->qops.clear_cpl_ntf(req_queue->ctrlr, tr);

	if (retry) {
		/* Resubmit the same tracker with the same request. */
		req->retries++;
		ufshci_req_queue_submit_tracker(req_queue, tr,
		    req->data_direction);
	} else {
		if (req->payload_valid) {
			bus_dmamap_unload(req_queue->dma_tag_payload,
			    tr->payload_dma_map);
		}

		/* Release the request and return the tracker to free_tr. */
		ufshci_free_request(req);
		tr->req = NULL;
		tr->slot_state = UFSHCI_SLOT_STATE_FREE;
		TAILQ_REMOVE(&hwq->outstanding_tr, tr, tailq);
		TAILQ_INSERT_HEAD(&hwq->free_tr, tr, tailq);
	}

	mtx_unlock(&tr->hwq->qlock);
}
/*
 * Drain completions from a request queue under the recovery lock.
 * Returns whatever the queue's process_cpl op reports.
 */
bool
ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue)
{
	struct ufshci_hw_queue *hwq = req_queue->qops.get_hw_queue(req_queue);
	bool done;

	mtx_lock(&hwq->recovery_lock);
	done = req_queue->qops.process_cpl(req_queue);
	mtx_unlock(&hwq->recovery_lock);

	return (done);
}
/*
 * bus_dma load callback: populate the PRDT with the DMA segments of the
 * request payload.
 *
 * NOTE(review): applying htole64() to the address *before* masking and
 * shifting extracts the wrong 32-bit halves on big-endian hosts; the
 * usual idiom is htole32(addr & 0xffffffff) / htole32(addr >> 32).
 * Harmless on little-endian machines -- confirm before relying on BE.
 */
static void
ufshci_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ufshci_tracker *tr = arg;
	struct ufshci_prdt_entry *prdt_entry;
	int i;

	/*
	 * NOTE(review): on mapping failure we only log and return; the
	 * tracker is not completed here -- presumably the timeout path
	 * recovers it.  Verify against the submit path.
	 */
	if (error != 0) {
		ufshci_printf(tr->req_queue->ctrlr,
		    "Failed to map payload %d\n", error);
		return;
	}

	prdt_entry = (struct ufshci_prdt_entry *)tr->ucd->prd_table;

	tr->prdt_entry_cnt = nseg;

	for (i = 0; i < nseg; i++) {
		/* Split each segment address into low/high 32-bit words. */
		prdt_entry->data_base_address = htole64(seg[i].ds_addr) &
		    0xffffffff;
		prdt_entry->data_base_address_upper = htole64(seg[i].ds_addr) >>
		    32;
		/* The byte-count field is zero-based (length - 1). */
		prdt_entry->data_byte_count = htole32(seg[i].ds_len - 1);

		++prdt_entry;
	}

	bus_dmamap_sync(tr->req_queue->dma_tag_payload, tr->payload_dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * DMA-load the request payload and build the PRDT inside the UTP
 * command descriptor.  Called with the queue lock held.
 *
 * NOTE(review): this function cannot report failure to its caller.
 * When bus_dmamap_load_mem() fails, the tracker is manually completed
 * (freeing the request), yet ufshci_req_queue_submit_tracker() will
 * still fill the UTRD and ring the doorbell afterwards -- verify this
 * error path.
 */
static void
ufshci_req_queue_prepare_prdt(struct ufshci_tracker *tr)
{
	struct ufshci_request *req = tr->req;
	struct ufshci_utp_cmd_desc *cmd_desc = tr->ucd;
	int error;

	/* The PRDT follows the request and response UPIUs in the UCD. */
	tr->prdt_off = UFSHCI_UTP_XFER_REQ_SIZE + UFSHCI_UTP_XFER_RESP_SIZE;

	memset(cmd_desc->prd_table, 0, sizeof(cmd_desc->prd_table));

	error = bus_dmamap_load_mem(tr->req_queue->dma_tag_payload,
	    tr->payload_dma_map, &req->payload, ufshci_payload_map, tr,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		ufshci_printf(tr->req_queue->ctrlr,
		    "bus_dmamap_load_mem returned with error:0x%x!\n", error);

		/* Manual completion must run without the queue lock. */
		mtx_unlock(&tr->hwq->qlock);
		ufshci_req_queue_manual_complete_tracker(tr,
		    UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES,
		    UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
		mtx_lock(&tr->hwq->qlock);
	}
}
/*
 * Build a UTP task management request descriptor from a request,
 * asking for an interrupt on completion.
 */
static void
ufshci_req_queue_fill_utmr_descriptor(
    struct ufshci_utp_task_mgmt_req_desc *desc, struct ufshci_request *req)
{
	memset(desc, 0, sizeof(*desc));

	desc->interrupt = true;
	/* The controller overwrites OCS on completion; start as invalid. */
	desc->overall_command_status = UFSHCI_UTMR_OCS_INVALID;

	memcpy(desc->request_upiu, &req->request_upiu, req->request_size);
}
/*
 * Build a UTP transfer request descriptor: command type, data
 * direction, the physical address of the command descriptor, and the
 * dword-granular offsets/lengths of the response UPIU and PRDT.
 */
static void
ufshci_req_queue_fill_utr_descriptor(struct ufshci_utp_xfer_req_desc *desc,
    uint8_t data_direction, const uint64_t paddr, const uint16_t response_off,
    const uint16_t response_len, const uint16_t prdt_off,
    const uint16_t prdt_entry_cnt)
{
	/* Offsets and lengths in the descriptor are expressed in dwords. */
	const uint16_t dword_size = 4;

	memset(desc, 0, sizeof(*desc));

	desc->command_type = UFSHCI_COMMAND_TYPE_UFS_STORAGE;
	desc->data_direction = data_direction;
	desc->interrupt = true;
	/* The controller overwrites OCS on completion; start as invalid. */
	desc->overall_command_status = UFSHCI_UTR_OCS_INVALID;

	/* Physical address of the UTP command descriptor, split in halves. */
	desc->utp_command_descriptor_base_address = (uint32_t)(paddr &
	    0xffffffff);
	desc->utp_command_descriptor_base_address_upper = (uint32_t)(paddr >>
	    32);

	desc->response_upiu_offset = response_off / dword_size;
	desc->response_upiu_length = response_len / dword_size;
	desc->prdt_offset = prdt_off / dword_size;
	desc->prdt_length = prdt_entry_cnt;
}
/*
 * Last-resort recovery: reset the whole controller.  Marks the queue
 * as waiting so the watchdog only polls until the reset completes.
 */
static void
ufshci_req_queue_timeout_recovery(struct ufshci_controller *ctrlr,
    struct ufshci_hw_queue *hwq)
{
	ufshci_printf(ctrlr,
	    "Recovery step 5: Resetting controller due to a timeout.\n");
	hwq->recovery_state = RECOVERY_WAITING;

	ufshci_ctrlr_reset(ctrlr);
}
/*
 * Completion callback for an ABORT TASK task management request sent
 * by the timeout path.  If the original command is still not free,
 * complete it manually; if even that abort reported success while the
 * task stayed incomplete, or the abort failed, escalate accordingly.
 */
static void
ufshci_abort_complete(void *arg, const struct ufshci_completion *status,
    bool error)
{
	struct ufshci_tracker *tr = arg;

	mtx_lock(&tr->hwq->qlock);
	if (tr->slot_state != UFSHCI_SLOT_STATE_FREE) {
		mtx_unlock(&tr->hwq->qlock);
		ufshci_printf(tr->hwq->ctrlr,
		    "abort task request failed, aborting task manually\n");
		ufshci_req_queue_manual_complete_tracker(tr,
		    UFSHCI_DESC_ABORTED, UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);

		if ((status->response_upiu.task_mgmt_response_upiu
			 .output_param1 ==
			UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_COMPLETE) ||
		    (status->response_upiu.task_mgmt_response_upiu
			    .output_param1 ==
			UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_SUCCEEDED)) {
			/*
			 * Fix: the original message used a backslash line
			 * continuation, which embedded the source file's
			 * indentation whitespace into the logged text and
			 * lacked a terminating newline.  Use adjacent string
			 * literals instead.
			 */
			ufshci_printf(tr->hwq->ctrlr,
			    "Warning: the abort task request completed "
			    "successfully, but the original task is still "
			    "incomplete.\n");
			return;
		}

		/* The abort itself failed; fall back to a controller reset. */
		ufshci_req_queue_timeout_recovery(tr->hwq->ctrlr, tr->hwq);
	} else {
		mtx_unlock(&tr->hwq->qlock);
	}
}
/*
 * Per-hardware-queue watchdog, rearmed every half second while work is
 * outstanding.  When the oldest tracker passes its deadline the routine
 * escalates: first it reaps any raced completions, then sends an ABORT
 * TASK request per expired tracker (when enabled), and finally resets
 * the controller.  Runs with the recovery lock held.
 */
static void
ufshci_req_queue_timeout(void *arg)
{
	struct ufshci_hw_queue *hwq = arg;
	struct ufshci_controller *ctrlr = hwq->ctrlr;
	struct ufshci_tracker *tr;
	sbintime_t now;
	bool idle = true;
	bool fast;

	mtx_assert(&hwq->recovery_lock, MA_OWNED);

	/* A failed controller makes no progress; stop the watchdog. */
	if (ctrlr->is_failed) {
		ufshci_printf(ctrlr,
		    "Failed controller, stopping watchdog timeout.\n");
		hwq->timer_armed = false;
		return;
	}

	/* The queue is being destroyed; do not rearm. */
	if (!hwq->timer_armed) {
		ufshci_printf(ctrlr,
		    "Timeout fired during ufshci_utr_req_queue_destroy\n");
		return;
	}

	switch (hwq->recovery_state) {
	case RECOVERY_NONE:
		/*
		 * Only the list head (the oldest tracker) is examined: if
		 * it has not expired yet, nothing newer can have either.
		 */
		fast = false;
		mtx_lock(&hwq->qlock);
		now = getsbinuptime();
		TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
			idle = false;
			if (now <= tr->deadline)
				fast = true;
			break;
		}
		mtx_unlock(&hwq->qlock);
		if (idle || fast)
			break;

		/*
		 * At least one tracker has expired.  Reap completions that
		 * may have raced with the timer before deciding to recover.
		 */
		hwq->req_queue->qops.process_cpl(hwq->req_queue);

		mtx_lock(&hwq->qlock);
		idle = true;
		TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
			idle = false;
			/* Remaining trackers are newer; stop at the first live one. */
			if (now <= tr->deadline)
				break;

			/*
			 * Prefer an ABORT TASK request; reset instead for
			 * task management commands, when aborts are disabled,
			 * or when the expired tracker is itself an abort.
			 */
			if (ctrlr->enable_aborts &&
			    !hwq->req_queue->is_task_mgmt &&
			    tr->req->cb_fn != ufshci_abort_complete) {
				ufshci_printf(ctrlr,
				    "Recovery step 1: Timeout occurred. aborting the task(%d).\n",
				    tr->req->request_upiu.header.task_tag);
				ufshci_ctrlr_cmd_send_task_mgmt_request(ctrlr,
				    ufshci_abort_complete, tr,
				    UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK,
				    tr->req->request_upiu.header.lun,
				    tr->req->request_upiu.header.task_tag, 0);
			} else {
				ufshci_req_queue_timeout_recovery(ctrlr, hwq);
				idle = false;
				break;
			}
		}
		mtx_unlock(&hwq->qlock);
		break;

	case RECOVERY_WAITING:
		/* A reset is in flight; keep polling until it finishes. */
		if (!device_is_suspended(ctrlr->dev))
			ufshci_printf(ctrlr, "Waiting for reset to complete\n");
		idle = false;
		break;
	}

	/* Rearm while any work remains outstanding. */
	if (!idle) {
		callout_schedule_sbt(&hwq->timer, SBT_1S / 2, SBT_1S / 2, 0);
	} else {
		hwq->timer_armed = false;
	}
}
/*
 * Fill the hardware descriptor for a tracker, sync the DMA memory and
 * ring the doorbell.  Also arms the per-queue watchdog if it is not
 * already running.  Called with the queue lock held.
 */
static void
ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
    struct ufshci_tracker *tr, enum ufshci_data_direction data_direction)
{
	struct ufshci_controller *ctrlr = req_queue->ctrlr;
	struct ufshci_request *req = tr->req;
	struct ufshci_hw_queue *hwq;
	uint64_t ucd_paddr;
	uint16_t request_len, response_off, response_len;
	uint8_t slot_num = tr->slot_num;
	int timeout;

	hwq = req_queue->qops.get_hw_queue(req_queue);

	mtx_assert(&hwq->qlock, MA_OWNED);

	/* Polled requests get a 1s deadline; others use the configured one. */
	if (req->cb_fn == ufshci_completion_poll_cb)
		timeout = 1;
	else
		timeout = ctrlr->timeout_period;
	tr->deadline = getsbinuptime() + timeout * SBT_1S;
	if (!hwq->timer_armed) {
		hwq->timer_armed = true;
		/* Watchdog checks outstanding trackers every half second. */
		callout_reset_sbt_on(&hwq->timer, SBT_1S / 2, SBT_1S / 2,
		    ufshci_req_queue_timeout, hwq, hwq->cpu, 0);
	}

	if (req_queue->is_task_mgmt) {
		/* Task management uses the UTMR descriptor directly. */
		ufshci_req_queue_fill_utmr_descriptor(&tr->hwq->utmrd[slot_num],
		    req);
	} else {
		request_len = req->request_size;
		response_off = UFSHCI_UTP_XFER_REQ_SIZE;
		response_len = req->response_size;

		/* Copy the request UPIU into the UCD, clear the response area. */
		memcpy(tr->ucd, &req->request_upiu, request_len);
		memset((uint8_t *)tr->ucd + response_off, 0, response_len);

		/*
		 * NOTE(review): if prepare_prdt's DMA load fails it manually
		 * completes (and frees) the tracker, yet we fall through and
		 * still ring the doorbell below -- verify this error path.
		 */
		if (req->payload_valid)
			ufshci_req_queue_prepare_prdt(tr);

		ucd_paddr = tr->ucd_bus_addr;
		ufshci_req_queue_fill_utr_descriptor(&tr->hwq->utrd[slot_num],
		    data_direction, ucd_paddr, response_off, response_len,
		    tr->prdt_off, tr->prdt_entry_cnt);

		/* Flush the UCD before the device reads it. */
		bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Flush the descriptor ring before handing the slot to hardware. */
	bus_dmamap_sync(tr->hwq->dma_tag_queue, tr->hwq->queuemem_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	tr->slot_state = UFSHCI_SLOT_STATE_SCHEDULED;

	req_queue->qops.ring_doorbell(ctrlr, tr);
}
/*
 * Reserve a tracker, attach the request to it and submit it to the
 * hardware.  Called with the queue lock held.  Returns 0 on success,
 * an errno when no slot could be reserved, or EBUSY when the reserved
 * slot is unexpectedly still in use.
 */
static int
_ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req)
{
	struct ufshci_tracker *tr = NULL;
	int error;

	mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED);

	error = req_queue->qops.reserve_slot(req_queue, &tr);
	if (error != 0) {
		/*
		 * Fix: terminate the log message with a newline, matching
		 * the other ufshci_printf() calls in this file.
		 */
		ufshci_printf(req_queue->ctrlr, "Failed to get tracker\n");
		return (error);
	}
	KASSERT(tr, ("There is no tracker allocated."));

	if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED ||
	    tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED)
		return (EBUSY);

	/* The slot number doubles as the UPIU task tag. */
	req->request_upiu.header.task_tag = tr->slot_num;

	tr->slot_state = UFSHCI_SLOT_STATE_RESERVED;
	tr->response_size = req->response_size;
	tr->deadline = SBT_MAX;
	tr->req = req;

	/* Move the tracker from the free list to the outstanding list. */
	TAILQ_REMOVE(&tr->hwq->free_tr, tr, tailq);
	TAILQ_INSERT_TAIL(&tr->hwq->outstanding_tr, tr, tailq);

	ufshci_req_queue_submit_tracker(req_queue, tr, req->data_direction);

	return (0);
}
/*
 * Submit a request to a request queue, serialized by the queue lock.
 * Returns 0 on success or an errno from the internal submit path.
 */
int
ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
    struct ufshci_request *req)
{
	struct ufshci_hw_queue *hwq;
	/* Fix: errno values are int, not uint32_t (matches the return type). */
	int error;

	hwq = req_queue->qops.get_hw_queue(req_queue);
	KASSERT(hwq, ("There is no HW queue allocated."));

	mtx_lock(&hwq->qlock);
	error = _ufshci_req_queue_submit_request(req_queue, req);
	mtx_unlock(&hwq->qlock);

	return (error);
}