CAKE_QUEUES

Uses of the CAKE_QUEUES macro in net/sched/sch_cake.c, where it is defined as 1024: the number of flow queues (and per-host slots) in each tin.
/* cake_heapify() / cake_heapify_up(): the sift loops are bounded by the total heap size */
static const u32 a = CAKE_MAX_TINS * CAKE_QUEUES;
while (i > 0 && i < CAKE_MAX_TINS * CAKE_QUEUES) {
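Both loops belong to the overflow-heap helpers (cake_heapify() sifts an entry down, cake_heapify_up() sifts it up), and both bound the index by CAKE_MAX_TINS * CAKE_QUEUES, the total number of (tin, flow) slots. Below is a minimal user-space sketch of the same 0-based binary-heap index arithmetic; the plain u32 keys, the helper names and the demo values are stand-ins, not the kernel's cake_heap_entry handling.

#include <stdint.h>
#include <stdio.h>

#define CAKE_MAX_TINS 8
#define CAKE_QUEUES 1024
#define HEAP_SIZE (CAKE_MAX_TINS * CAKE_QUEUES)

static uint32_t heap[HEAP_SIZE]; /* stand-in for the kernel's cake_heap_entry array */

/* Sift entry i down until both children are no larger (max-heap). */
static void heapify_down(uint32_t *h, uint32_t n, uint32_t i)
{
	while (i < n / 2) {                 /* only non-leaf nodes need sifting */
		uint32_t l = 2 * i + 1;     /* left child  */
		uint32_t r = 2 * i + 2;     /* right child */
		uint32_t largest = i;
		uint32_t tmp;

		if (l < n && h[l] > h[largest])
			largest = l;
		if (r < n && h[r] > h[largest])
			largest = r;
		if (largest == i)
			break;

		tmp = h[i];
		h[i] = h[largest];
		h[largest] = tmp;
		i = largest;
	}
}

/* Sift entry i up while it is larger than its parent. */
static void heapify_up(uint32_t *h, uint32_t n, uint32_t i)
{
	while (i > 0 && i < n) {
		uint32_t parent = (i - 1) / 2;
		uint32_t tmp;

		if (h[parent] >= h[i])
			break;

		tmp = h[i];
		h[i] = h[parent];
		h[parent] = tmp;
		i = parent;
	}
}

int main(void)
{
	uint32_t n = HEAP_SIZE, i;

	for (i = 0; i < n; i++)
		heap[i] = (i * 2654435761u) >> 16;  /* arbitrary demo keys */

	/* Build a max-heap from the last non-leaf slot down to the root,
	 * analogous to the rebuild loop in cake_drop(). */
	for (i = n / 2; i-- > 0; )
		heapify_down(heap, n, i);
	printf("largest key: %u\n", heap[0]);

	/* Raise one leaf's key and restore the heap property upward. */
	heap[n - 1] = heap[0] + 1;
	heapify_up(heap, n, n - 1);
	printf("new largest key: %u\n", heap[0]);
	return 0;
}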
/* struct cake_tin_data: per-tin arrays, one entry per flow queue */
struct cake_flow flows[CAKE_QUEUES];
u32 backlogs[CAKE_QUEUES];
u32 tags[CAKE_QUEUES]; /* for set association */
u16 overflow_idx[CAKE_QUEUES];
struct cake_host hosts[CAKE_QUEUES]; /* for triple isolation */
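These five arrays are fields of struct cake_tin_data: each tin carries CAKE_QUEUES flow queues plus parallel per-queue state. The following is a cut-down user-space model with placeholder element types (the real cake_flow and cake_host structs hold far more state), just to show the parallel-array layout and its rough memory cost.

#include <stdint.h>
#include <stdio.h>

#define CAKE_QUEUES 1024

/* Heavily simplified stand-ins for the kernel structs. */
struct cake_flow { uint32_t deficit; };     /* placeholder for the real per-flow state */
struct cake_host { uint16_t bulk_count; };  /* placeholder for the real per-host state */

struct tin_model {
	struct cake_flow flows[CAKE_QUEUES];   /* one queue per bucket           */
	uint32_t backlogs[CAKE_QUEUES];        /* bytes queued in each bucket    */
	uint32_t tags[CAKE_QUEUES];            /* full hash, for set association */
	uint16_t overflow_idx[CAKE_QUEUES];    /* position in the overflow heap  */
	struct cake_host hosts[CAKE_QUEUES];   /* per-host state, for triple isolation */
};

int main(void)
{
	printf("per-tin model size: %zu bytes (%u buckets)\n",
	       sizeof(struct tin_model), CAKE_QUEUES);
	return 0;
}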
/* cake_drop(): rebuild the overflow heap from the last non-leaf entry down */
for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2 - 1; i >= 0; i--)
/* cake_classify(): a tc filter's classid can override the flow and host index */
if (TC_H_MIN(res.classid) <= CAKE_QUEUES)
if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16))
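Both tests sit in cake_classify(): when an attached tc filter returns a classid, the minor number may override the flow index and the major number the host index, each bounded by the CAKE_QUEUES-sized tables. A small user-space sketch of that decode, with TC_H_MAJ/TC_H_MIN re-declared from their uapi definitions and a made-up classid:

#include <stdint.h>
#include <stdio.h>

#define CAKE_QUEUES 1024

/* Same definitions as include/uapi/linux/pkt_sched.h */
#define TC_H_MAJ_MASK (0xFFFF0000U)
#define TC_H_MIN_MASK (0x0000FFFFU)
#define TC_H_MAJ(h) ((h) & TC_H_MAJ_MASK)
#define TC_H_MIN(h) ((h) & TC_H_MIN_MASK)

int main(void)
{
	uint32_t classid = (3u << 16) | 42u;   /* hypothetical filter result: major 3, minor 42 */
	unsigned int flow = 0, host = 0;

	/* Minor number selects a flow queue, major number a host slot,
	 * but only if they fall within the CAKE_QUEUES-sized tables. */
	if (TC_H_MIN(classid) <= CAKE_QUEUES)
		flow = TC_H_MIN(classid);
	if (TC_H_MAJ(classid) <= (CAKE_QUEUES << 16))
		host = TC_H_MAJ(classid) >> 16;

	printf("flow override %u, host override %u\n", flow, host);
	return 0;
}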
/* cake_clear_tin(): drain every flow queue of one tin */
for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++)
/* struct cake_sched_data: one overflow-heap entry per (tin, flow) pair */
struct cake_heap_entry overflow_heap[CAKE_QUEUES * CAKE_MAX_TINS];
/* cake_init(): fill the reciprocal table quantum_div[i] = 65535 / i */
for (i = 1; i <= CAKE_QUEUES; i++)
/* cake_init(): per-flow initialisation loop inside each tin */
for (j = 0; j < CAKE_QUEUES; j++) {
/* file scope: reciprocal table used to divide the quantum by a flow count */
static u16 quantum_div[CAKE_QUEUES + 1] = {0};
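quantum_div[] is a file-scope table filled in cake_init() (the `for (i = 1; i <= CAKE_QUEUES; i++)` loop above) with 65535 / i, so the scheduler can later divide the per-flow quantum by a host's bulk-flow count with a multiply and a 16-bit shift instead of a division. A rough user-space sketch of building and using the table; the kernel's exact rounding differs and is omitted here, and the quantum and host_load values are made up.

#include <stdint.h>
#include <stdio.h>

#define CAKE_QUEUES 1024

static uint16_t quantum_div[CAKE_QUEUES + 1];

int main(void)
{
	uint32_t quantum = 1514;   /* example per-flow quantum, in bytes   */
	uint32_t host_load = 3;    /* pretend this host has 3 bulk flows   */
	uint32_t share, i;

	quantum_div[0] = ~0;       /* never used as a divisor; guard value */
	for (i = 1; i <= CAKE_QUEUES; i++)
		quantum_div[i] = 65535 / i;

	/* Approximate quantum / host_load without a division on the fast path. */
	share = (quantum * quantum_div[host_load]) >> 16;

	printf("exact %u, via table %u\n", quantum / host_load, share);
	return 0;
}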
/* cake_dump_class_stats(): map a class number to its tin and flow queue */
if (idx < CAKE_QUEUES * q->tin_cnt) {
&q->tins[q->tin_order[idx / CAKE_QUEUES]];
flow = &b->flows[idx % CAKE_QUEUES];
qs.backlog = b->backlogs[idx % CAKE_QUEUES];
/* cake_walk(): enumerate class numbers as i * CAKE_QUEUES + j + 1 */
for (j = 0; j < CAKE_QUEUES; j++) {
if (!tc_qdisc_stats_dump(sch, i * CAKE_QUEUES + j + 1,
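These fragments come from the class and statistics interface: cake_dump_class_stats() recovers a tin and a flow from a class number with idx / CAKE_QUEUES and idx % CAKE_QUEUES, while cake_walk() hands out class numbers as i * CAKE_QUEUES + j + 1, starting at 1. A sketch of that round trip, ignoring the q->tin_order indirection used in the real code and using arbitrary example values:

#include <stdint.h>
#include <stdio.h>

#define CAKE_QUEUES 1024

int main(void)
{
	uint32_t tin = 2, flow = 77;                     /* arbitrary example pair */
	uint32_t classid = tin * CAKE_QUEUES + flow + 1; /* as generated by the walker */

	/* Dumping stats converts the 1-based class number back to (tin, flow). */
	uint32_t idx = classid - 1;

	printf("class %u -> tin %u, flow %u\n",
	       classid, idx / CAKE_QUEUES, idx % CAKE_QUEUES);
	return 0;
}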
/* cake_hash(): bounds checks before bumping the per-host bulk-flow counters */
q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES))
q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES))
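These two conditions are in cake_hash(): the per-host bulk-flow counters are only incremented while they are still below CAKE_QUEUES, keeping them within the number of queues that can reference a host. A minimal sketch of the same saturating increment, with a simplified, hypothetical host struct:

#include <stdint.h>
#include <stdio.h>

#define CAKE_QUEUES 1024

struct host_model {
	uint16_t srchost_bulk_flow_count;
	uint16_t dsthost_bulk_flow_count;
};

static void count_bulk_flow(struct host_model *src, struct host_model *dst)
{
	/* Saturate at CAKE_QUEUES instead of letting the 16-bit counters run away. */
	if (src->srchost_bulk_flow_count < CAKE_QUEUES)
		src->srchost_bulk_flow_count++;
	if (dst->dsthost_bulk_flow_count < CAKE_QUEUES)
		dst->dsthost_bulk_flow_count++;
}

int main(void)
{
	struct host_model a = {0}, b = {0};
	int i;

	for (i = 0; i < 3000; i++)   /* more flows than queues exist */
		count_bulk_flow(&a, &b);

	printf("src %u, dst %u\n", a.srchost_bulk_flow_count,
	       b.dsthost_bulk_flow_count);
	return 0;
}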
/* cake_hash(): fold the flow and host hashes into CAKE_QUEUES-sized tables */
reduced_hash = flow_hash % CAKE_QUEUES;
srchost_idx = srchost_hash % CAKE_QUEUES;
dsthost_idx = dsthost_hash % CAKE_QUEUES;
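The last three assignments are the heart of cake_hash(): the 32-bit flow hash and the two host hashes are folded into table indices modulo CAKE_QUEUES. A user-space sketch with a stand-in mixing function (mix32() and the input values are illustrative, not the kernel's flow-dissector hashes):

#include <stdint.h>
#include <stdio.h>

#define CAKE_QUEUES 1024

/* Stand-in for the kernel's flow-dissector hashes; any decent 32-bit mix works. */
static uint32_t mix32(uint32_t x)
{
	x ^= x >> 16;
	x *= 0x7feb352du;
	x ^= x >> 15;
	x *= 0x846ca68bu;
	x ^= x >> 16;
	return x;
}

int main(void)
{
	uint32_t flow_hash = mix32(0xc0a80001u ^ 443u); /* pretend 5-tuple hash  */
	uint32_t srchost_hash = mix32(0xc0a80001u);     /* pretend src-host hash */
	uint32_t dsthost_hash = mix32(0x08080808u);     /* pretend dst-host hash */

	/* Fold each 32-bit hash into one of the CAKE_QUEUES buckets. */
	uint32_t reduced_hash = flow_hash % CAKE_QUEUES;
	uint32_t srchost_idx = srchost_hash % CAKE_QUEUES;
	uint32_t dsthost_idx = dsthost_hash % CAKE_QUEUES;

	printf("flow bucket %u, src host slot %u, dst host slot %u\n",
	       reduced_hash, srchost_idx, dsthost_idx);
	return 0;
}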