ml_len: the cached count of mbufs on an mbuf_list (usage sites, then the field and accessor macros)
if (ml_len(&sc->sc_sendq) > 0) {
nofree_cnt = ml_len(&sc->sc_sendq) + sc->sc_hard_done_cnt;
if (ml_len(&sc->sc_sendq) > sc->sc_soft_req_thresh)
if (fillok && ml_len(&sc->tulip_rxq) < TULIP_RXQ_TARGET)
if (ml_len(&sc->tulip_rxq) >= TULIP_RXQ_TARGET)
ml->ml_len = 0;
ml->ml_len++;
mla->ml_len += mlb->ml_len;
ml->ml_len--;
len = ml->ml_len;
dropped = ml_len(ml);
return (ml_len(&cd->q));
KASSERT(fqc->qlength >= ml_len(&ml));
fqc->qlength -= ml_len(&ml);
return ml_len(&cq->q);
if (ml_len(&cq->q) >= cq->qlimit)
enqueue_randomness(ml_len(ml) ^ (uintptr_t)MBUF_LIST_FIRST(ml));
len = ml_len(&ml);
if (ml_len(&p->p_rxm_ml) < AGGR_MAX_SLOW_PKTS)
if (serial->q_list.ml_len < MAX_QUEUED_PKT) {
if (serial->q_list.ml_len < MAX_QUEUED_PKT) {
KASSERT(rv == ml_len(&ml));
ifq->ifq_len -= ml_len(ml);
ifq->ifq_qdrops += ml_len(ml);
kstat_kv_u32(&kd->kd_qlen) = ml_len(&ifiq->ifiq_ml);
packets = ml_len(ml);
len = ml_len(&ifiq->ifiq_ml);
ifiq->ifiq_qdrops += ml_len(ml);
if (qlim && ((len = ml_len(&ifiq->ifiq_ml)) >= qlim)) {
if (ml_len(pl) > 0) {
#define ifiq_len(_ifiq) READ_ONCE(ml_len(&(_ifiq)->ifiq_ml))
ipstat_add(ips_ofragments, ml_len(ml));
tcpstat_add(tcps_outpkttso, ml_len(ml));
ip6stat_add(ip6s_ofragments, ml_len(ml));
len = ml_len(&ml);
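
A pattern recurs through the usage sites above: the cached count from ml_len() is compared against a limit before queueing (cq->qlimit, MAX_QUEUED_PKT, qlim), and when a whole list is discarded its length is charged to a drop counter in one step (ifq_qdrops += ml_len(ml)). A minimal userland sketch of that idiom follows; the struct, QLIMIT, and the counter names are illustrative stand-ins, not kernel API:

#include <stdio.h>

/*
 * Illustrative stand-ins only: a queue with a cached length and a drop
 * counter, mirroring the ml_len()-versus-limit checks above. QLIMIT and
 * the field names are hypothetical, not kernel constants.
 */
struct demo_q {
	unsigned int len;	/* plays the role of ml_len */
	unsigned int qdrops;	/* plays the role of ifq_qdrops */
};

#define QLIMIT	256

static int
demo_enqueue(struct demo_q *q)
{
	/*
	 * Same shape as "if (ml_len(&cq->q) >= cq->qlimit)": check the
	 * cached count and account a drop instead of queueing when full.
	 */
	if (q->len >= QLIMIT) {
		q->qdrops++;
		return (-1);
	}
	q->len++;
	return (0);
}

int
main(void)
{
	struct demo_q q = { 0, 0 };
	int i;

	for (i = 0; i < 300; i++)
		demo_enqueue(&q);
	printf("queued %u, dropped %u\n", q.len, q.qdrops);	/* 256, 44 */
	return (0);
}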
u_int ml_len;
#define ml_len(_ml) ((_ml)->ml_len)
#define ml_empty(_ml) ((_ml)->ml_len == 0)
#define mq_len(_mq) READ_ONCE((_mq)->mq_list.ml_len)
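
Taken together, the definitions show the idiom behind every site above: struct mbuf_list carries a running ml_len counter that each list operation keeps in step (ml->ml_len = 0 on init, ++ on enqueue, -- on dequeue), so ml_len() and ml_empty() are O(1) reads of a cached field rather than list walks, and mq_len() additionally wraps the read in READ_ONCE() so a queue's depth can be sampled without holding its mutex. A self-contained, single-threaded sketch of that counter maintenance follows; the pointer handling in ml_enqueue()/ml_dequeue() is reconstructed for illustration, not copied from the kernel, and the READ_ONCE() part is omitted:

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-in for struct mbuf: only the packet-queue link is needed. */
struct mbuf {
	struct mbuf *m_nextpkt;
};

struct mbuf_list {
	struct mbuf *ml_head;
	struct mbuf *ml_tail;
	unsigned int ml_len;		/* running count, as in the field above */
};

/* Accessors as defined above: the length is a cached counter. */
#define ml_len(_ml)	((_ml)->ml_len)
#define ml_empty(_ml)	((_ml)->ml_len == 0)

static void
ml_init(struct mbuf_list *ml)
{
	ml->ml_head = ml->ml_tail = NULL;
	ml->ml_len = 0;
}

static void
ml_enqueue(struct mbuf_list *ml, struct mbuf *m)
{
	m->m_nextpkt = NULL;
	if (ml->ml_tail == NULL)
		ml->ml_head = m;
	else
		ml->ml_tail->m_nextpkt = m;
	ml->ml_tail = m;
	ml->ml_len++;			/* every list op keeps the count exact */
}

static struct mbuf *
ml_dequeue(struct mbuf_list *ml)
{
	struct mbuf *m = ml->ml_head;

	if (m != NULL) {
		ml->ml_head = m->m_nextpkt;
		if (ml->ml_head == NULL)
			ml->ml_tail = NULL;
		m->m_nextpkt = NULL;
		ml->ml_len--;
	}
	return (m);
}

int
main(void)
{
	struct mbuf a, b;
	struct mbuf_list ml;

	ml_init(&ml);
	ml_enqueue(&ml, &a);
	ml_enqueue(&ml, &b);
	printf("len %u, empty %d\n", ml_len(&ml), ml_empty(&ml));
	while (!ml_empty(&ml))
		(void)ml_dequeue(&ml);
	printf("len %u, empty %d\n", ml_len(&ml), ml_empty(&ml));
	return (0);
}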