TRBS_PER_SEGMENT
if (trb_per_sector > TRBS_PER_SEGMENT) {
TRBS_PER_SEGMENT > 2)
if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
TRBS_PER_SEGMENT == 2)
#define TRB_RING_SIZE (TRB_SIZE * TRBS_PER_SEGMENT)
#if TRBS_PER_SEGMENT < 2
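
The two macros above size one ring segment, and the preprocessor guard enforces the floor: a segment must hold at least one data TRB plus the trailing link TRB. A minimal sketch of the pair, with the TRB size and segment length assumed for illustration (each TRB is 16 bytes, consistent with the "* 16" segment-size macros further down):

/* Illustrative values; the real drivers define their own. */
#define TRB_SIZE	 16	/* one TRB is 16 bytes */
#define TRBS_PER_SEGMENT 256	/* assumed segment length */
#define TRB_RING_SIZE	 (TRB_SIZE * TRBS_PER_SEGMENT)

#if TRBS_PER_SEGMENT < 2
#error "a segment needs at least one data TRB plus the link TRB"
#endif
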
TRBS_PER_ISOC_SEGMENT : TRBS_PER_SEGMENT)
GET_TRBS_PER_SEGMENT(priv_ep->type) > TRBS_PER_SEGMENT ?
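
The fragment above it ends a ternary, and this call compares its result against the default, which suggests a selector that gives isochronous endpoints a larger segment. A hedged reconstruction; the macro body is inferred from these fragments and the lengths are assumed:

#define USB_ENDPOINT_XFER_ISOC	1	/* USB transfer-type code for isochronous */
#define TRBS_PER_SEGMENT	256	/* assumed */
#define TRBS_PER_ISOC_SEGMENT	(2 * TRBS_PER_SEGMENT)	/* assumed ratio */

/* Inferred shape: ISOC endpoints get the larger segment length. */
#define GET_TRBS_PER_SEGMENT(ep_type)			\
	((ep_type) == USB_ENDPOINT_XFER_ISOC ?		\
	 TRBS_PER_ISOC_SEGMENT : TRBS_PER_SEGMENT)
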
sizeof(union cdnsp_trb) * (TRBS_PER_SEGMENT - 1));
#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT * 16)
entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
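
Stores like entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT) fill an event ring segment table entry; in the xHCI layout each entry is the segment's bus address plus its length in TRBs. A sketch of that shape (plain integer fields here; on the bus the fields are little-endian, hence the cpu_to_le32 above):

#include <stdint.h>

/* xHCI Event Ring Segment Table entry: segment base address, segment
 * length in TRBs (hardware reads the low 16 bits), reserved tail. */
struct erst_entry {
	uint64_t seg_addr;
	uint32_t seg_size;	/* receives TRBS_PER_SEGMENT */
	uint32_t rsvd;
};
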
link = &prev->trbs[TRBS_PER_SEGMENT - 1].link;
ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
last->trbs[TRBS_PER_SEGMENT - 1].link.control |=
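
The link.control manipulations here (and the LINK_TOGGLE lines further down) all touch the same slot: the last TRB of a segment is a link TRB pointing at the next segment, and only the link that closes the loop carries the toggle flag. A simplified sketch, with types and values assumed and the little-endian conversions omitted:

#include <stdbool.h>
#include <stdint.h>

#define TRBS_PER_SEGMENT 256		/* assumed */
#define TRB_TYPE_LINK	 (6u << 10)	/* link TRB type, control bits 15:10 */
#define LINK_TOGGLE	 (1u << 1)	/* toggle-cycle flag in a link TRB */

struct trb { uint64_t ptr; uint32_t status; uint32_t control; };
struct segment { struct trb trbs[TRBS_PER_SEGMENT]; uint64_t dma; };

/* Chain prev's last slot to next. Only the link TRB that closes the
 * ring sets LINK_TOGGLE, so hardware flips its cycle state exactly once
 * per full pass around the ring. */
static void link_segments(struct segment *prev, struct segment *next,
			  bool closes_ring)
{
	struct trb *link = &prev->trbs[TRBS_PER_SEGMENT - 1];

	link->ptr = next->dma;
	link->control = TRB_TYPE_LINK;
	if (closes_ring)
		link->control |= LINK_TOGGLE;
}
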
ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
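
This initialization counts the free slots of an empty ring: every segment loses its last TRB to the link, and one further slot is held back, presumably so a completely full ring never becomes pointer-identical to an empty one. The arithmetic, restated:

#define TRBS_PER_SEGMENT 256	/* assumed */

/* Free data TRBs on a freshly initialized ring: one link TRB per
 * segment, plus one extra slot kept in reserve by software. */
static unsigned int initial_trbs_free(unsigned int num_segs)
{
	return num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
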
ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
(TRBS_PER_SEGMENT - 1);
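
The two lines above are one statement: a ceiling division over the usable slots per segment, where TRBS_PER_SEGMENT - 1 is both the divisor and, minus one more, the rounding addend. Restated, with a worked case: at 256 TRBs per segment, 255 are usable, so 255 TRBs need one segment and 256 already need two.

#define TRBS_PER_SEGMENT 256	/* assumed */

/* ceil(num_trbs / usable) with usable = TRBS_PER_SEGMENT - 1, because
 * the last slot of every segment is the link TRB. */
static unsigned int segs_needed(unsigned int num_trbs)
{
	return (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) / (TRBS_PER_SEGMENT - 1);
}
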
for (i = 0; i < TRBS_PER_SEGMENT; i++)
temp_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1];
if (trb < seg->trbs || segment_offset >= TRBS_PER_SEGMENT)
return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
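
The bounds check and the return above are the two basic segment predicates: does a TRB pointer fall inside a segment, and does it sit on the link slot. A self-contained restatement, with the types assumed:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define TRBS_PER_SEGMENT 256	/* assumed */

struct trb { uint64_t ptr; uint32_t status; uint32_t control; };
struct segment { struct trb trbs[TRBS_PER_SEGMENT]; };

/* A TRB belongs to a segment iff its offset from the first TRB lies in
 * [0, TRBS_PER_SEGMENT). */
static bool trb_in_segment(const struct segment *seg, const struct trb *trb)
{
	ptrdiff_t off = trb - seg->trbs;

	return trb >= seg->trbs && off < TRBS_PER_SEGMENT;
}

/* The last slot of every segment is the link TRB. */
static bool trb_is_link_slot(const struct segment *seg, const struct trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}
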
if (ring->enqueue >= &ring->segment->trbs[TRBS_PER_SEGMENT - 1]) {
if (xdbc.evt_ring.dequeue == &xdbc.evt_seg.trbs[TRBS_PER_SEGMENT]) {
if (TRBS_PER_SEGMENT > 40) {
"\t\tTransfer ring %d too big\n", TRBS_PER_SEGMENT);
for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
TRBS_PER_SEGMENT == 2)
link_trb = (ring->trbs + (TRBS_PER_SEGMENT - 1));
pep->ring.free_trbs = TRBS_PER_SEGMENT - 1;
cdns2_ep_inc_trb(&ring->enqueue, &ring->pcs, TRBS_PER_SEGMENT);
cdns2_ep_inc_trb(&ring->dequeue, &ring->ccs, TRBS_PER_SEGMENT);
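
These two calls advance the producer (enqueue, pcs) and consumer (dequeue, ccs) sides of the same ring. A hedged reconstruction of what such an increment must do, not the driver's exact helper: step the index, and on reaching the link slot wrap to zero and flip the cycle state, so TRBs left over from the previous lap are not mistaken for fresh ones.

#include <stdint.h>

/* Illustrative: advance a ring index; the last slot is the link TRB,
 * so wrap there and toggle the cycle-state bit. */
static void ep_inc_trb(int *index, uint8_t *cycle_state, int trbs_per_seg)
{
	(*index)++;
	if (*index == trbs_per_seg - 1) {	/* landed on the link slot */
		*index = 0;
		*cycle_state ^= 1;
	}
}
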
if (trb == (pep->ring.trbs + (TRBS_PER_SEGMENT - 1)))
if ((ring->enqueue + num_trbs) >= (TRBS_PER_SEGMENT - 1)) {
if (doorbell && dma_index == TRBS_PER_SEGMENT - 1) {
link_trb = ring->trbs + (TRBS_PER_SEGMENT - 1);
if (pep->type == USB_ENDPOINT_XFER_ISOC || TRBS_PER_SEGMENT > 2)
struct cdns2_trb *link_trb = pep->ring.trbs + (TRBS_PER_SEGMENT - 1);
trb = &pep->ring.trbs[TRBS_PER_SEGMENT];
writel(pep->ring.dma + (TRBS_PER_SEGMENT * TRB_SIZE),
if (current_index >= TRBS_PER_SEGMENT)
if (TRBS_PER_SEGMENT == 2 && pep->type != USB_ENDPOINT_XFER_ISOC) {
if ((current_index == (TRBS_PER_SEGMENT - 1)) &&
#if TRBS_PER_SEGMENT < 2
#define TR_SEG_SIZE (TRB_SIZE * (TRBS_PER_SEGMENT + TRB_ISO_RESERVED))
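
Several cdns2 hits fit together here: this macro sizes the allocation with TRB_ISO_RESERVED extra TRBs after the normal segment, &pep->ring.trbs[TRBS_PER_SEGMENT] above addresses that tail on the CPU side, and the writel() of ring.dma + TRBS_PER_SEGMENT * TRB_SIZE hands the same spot to the hardware. The address arithmetic, with the sizes assumed:

#include <stdint.h>

#define TRB_SIZE	 16	/* one TRB is 16 bytes */
#define TRBS_PER_SEGMENT 256	/* assumed */
#define TRB_ISO_RESERVED 8	/* assumed count of extra ISO TRBs */
#define TR_SEG_SIZE	 (TRB_SIZE * (TRBS_PER_SEGMENT + TRB_ISO_RESERVED))

/* Bus address of the reserved ISO tail: one full segment past the
 * ring's base, right after the last regular TRB. */
static uint64_t iso_region_dma(uint64_t ring_dma)
{
	return ring_dma + (uint64_t)TRBS_PER_SEGMENT * TRB_SIZE;
}
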
(TRBS_PER_SEGMENT * 65) + CDNS2_MSG_MAX)
(TRBS_PER_SEGMENT * 65) + CDNS2_MSG_MAX))
erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
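
The wrap check above is the consumer-side counterpart of the link TRB: a link slot is never consumed as data, so the dequeue pointer hops to the next segment's first TRB instead of merely incrementing. A self-contained restatement, with the types assumed and segments chained through a next pointer:

#include <stdint.h>

#define TRBS_PER_SEGMENT 256	/* assumed */

struct trb { uint64_t ptr; uint32_t status; uint32_t control; };
struct segment { struct trb trbs[TRBS_PER_SEGMENT]; struct segment *next; };

/* Step the dequeue pointer, skipping over each segment's link slot. */
static void inc_deq(struct segment **deq_seg, struct trb **dequeue)
{
	if (*dequeue == &(*deq_seg)->trbs[TRBS_PER_SEGMENT - 1]) {
		*deq_seg = (*deq_seg)->next;
		*dequeue = (*deq_seg)->trbs;
	} else {
		(*dequeue)++;
	}
}
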
for (i = 0; i < TRBS_PER_SEGMENT; i++) {
trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |= cpu_to_le32(LINK_TOGGLE);
for (int i = 0; i < TRBS_PER_SEGMENT; i++)
dst->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control
src->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= ~cpu_to_le32(LINK_TOGGLE);
entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
return seg->num * TRBS_PER_SEGMENT + (trb - seg->trbs);
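
The return above converts a TRB pointer into a ring-wide position: whole segments before this one, plus the pointer offset inside it. Restated, with the segment's ordinal assumed to live in a num field, as the expression implies:

#include <stdint.h>

#define TRBS_PER_SEGMENT 256	/* assumed */

struct trb { uint64_t ptr; uint32_t status; uint32_t control; };
struct segment { struct trb trbs[TRBS_PER_SEGMENT]; unsigned int num; };

/* Ring-wide index of a TRB: full segments before this one, plus the
 * offset within it. */
static unsigned int trb_index(const struct segment *seg, const struct trb *trb)
{
	return seg->num * TRBS_PER_SEGMENT + (unsigned int)(trb - seg->trbs);
}
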
if (event_loop++ > TRBS_PER_SEGMENT / 2) {
return ring->num_segs * (TRBS_PER_SEGMENT - 1);
last_on_seg = &enq_seg->trbs[TRBS_PER_SEGMENT - 1];
trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1);
new_segs = 1 + (trbs_past_seg / (TRBS_PER_SEGMENT - 1));
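
The three lines above size a ring expansion, assuming the request does spill past the current enqueue segment: enq_used slots are already taken there, everything beyond its TRBS_PER_SEGMENT - 1 usable slots overflows, and each added segment absorbs another TRBS_PER_SEGMENT - 1 data TRBs. Restated:

#define TRBS_PER_SEGMENT 256	/* assumed */

/* Segments to add so that num_trbs more TRBs fit: one unconditionally,
 * plus enough to absorb the remaining spill. */
static unsigned int new_segs_needed(unsigned int enq_used, unsigned int num_trbs)
{
	unsigned int usable = TRBS_PER_SEGMENT - 1;
	unsigned int trbs_past_seg = enq_used + num_trbs - usable;

	return 1 + trbs_past_seg / usable;
}
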
if (segment_offset >= TRBS_PER_SEGMENT)
memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT * 16)