MSIZE
roundup2(MSIZE - MHLEN, 16) - (MSIZE - MHLEN);
/*
 * Bytes of an mbuf left for payload once IXGBE_RX_COPY_HDR_PADDED is
 * reserved at the front.  NOTE(review): presumably the threshold below
 * which a received frame is copied into a fresh mbuf instead of handing
 * up the RX buffer — confirm against the ixgbe RX path.
 */
#define IXGBE_RX_COPY_LEN (MSIZE - IXGBE_RX_COPY_HDR_PADDED)
tval *= MSIZE + MCLBYTES;
tval *= (MSIZE + MCLBYTES); /* Brackets for readability. */
nmbufs = lmax(maxmbufmem / MSIZE / 5,
/*
 * Compile-time check that MSIZE is a power of two: for m == 2^k,
 * (m - 1) ^ m == 2m - 1, so (((m - 1) ^ m) + 1) >> 1 == m; for any
 * non-power-of-two the identity fails and the build breaks.
 */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
_Static_assert(sizeof(struct mbuf) <= MSIZE,
zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
MSIZE - 1, UMA_ZONE_CONTIG | UMA_ZONE_MAXBUCKET);
trash_init(m, q == &dn_mbufq ? MSIZE : dn_clsize, flags);
MSIZE, mb_ctor_mbuf, mb_dtor_mbuf, NULL, NULL,
mlen += MSIZE;
/*
 * Compile-time checks that the MLEN/MHLEN constants agree with the real
 * struct mbuf layout: the usable data area is exactly what remains of
 * MSIZE after the header fields preceding m_dat / m_pktdat.
 */
CTASSERT(MSIZE - offsetof(struct mbuf, m_dat) == MLEN);
CTASSERT(MSIZE - offsetof(struct mbuf, m_pktdat) == MHLEN);
m->m_pkthdr.memlen = MSIZE;
mbcnt += MSIZE;
mbcnt += MSIZE;
#if MSIZE <= 256
sb->sb_mbcnt -= MSIZE;
sb->sb_mbcnt += MSIZE;
sb->sb_mbcnt -= MSIZE;
sb->sb_mbcnt += MSIZE;
sb->sb_mbcnt -= MSIZE;
if (tmp_sb_max < MSIZE + MCLBYTES)
/*
 * Scale a requested buffer size by MCLBYTES / (MSIZE + MCLBYTES),
 * i.e. derate it by the per-cluster mbuf bookkeeping overhead, so the
 * accounted total (clusters plus their mbuf headers) stays within _sz.
 * Widened to u_quad_t first to avoid overflow in the multiply.
 */
#define BUF_MAX_ADJ(_sz) (((u_quad_t)(_sz)) * MCLBYTES / (MSIZE + MCLBYTES))
dmbcnt += MSIZE;
mbcnt += MSIZE;
mbcnt += MSIZE;
sb->sb_mbcnt += MSIZE;
mbcnt = MSIZE + m->m_pkthdr.memlen;
dmbcnt += MSIZE;
#if MSIZE <= 256
asoc->cnt_on_reasm_queue * MSIZE));
asoc->cnt_on_all_streams * MSIZE));
sb_max_adj = (u_long)((u_quad_t)(SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_mbcnt, MSIZE); \
SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
atomic_add_int(&(sb)->sb_mbcnt, MSIZE); \
atomic_add_int(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
freed_so_far += MSIZE;
freed_so_far += MSIZE;
freed_so_far += MSIZE;
int len = MSIZE;
len += MSIZE;
(*tlenp < MSIZE)) {
if (tmp_maxsockbuf < MSIZE + MCLBYTES)
mc->mc_mlen += MSIZE;
MPASS(mc->mc_mlen >= MSIZE);
mc->mc_mlen -= MSIZE;
case MSIZE:
/*
 * Usable payload bytes in an ordinary mbuf: total size minus the fixed
 * header.  NOTE(review): assumes MHSIZE == offsetof(struct mbuf, m_dat)
 * — confirm against the struct definition / the CTASSERTs elsewhere.
 */
#define MLEN ((int)(MSIZE - MHSIZE))
/*
 * Usable payload bytes in a packet-header (M_PKTHDR) mbuf, which also
 * carries the pkthdr before its data area (MPKTHSIZE > MHSIZE).
 */
#define MHLEN ((int)(MSIZE - MPKTHSIZE))
#ifndef MSIZE
maxsbsz = maxsbsz * MCLBYTES / (MSIZE + MCLBYTES);