LONG_BIT
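LONG_BIT is the number of bits in a long: 32 on ILP32 platforms, 64 on LP64. On the BSDs it comes from <limits.h> (it is an XSI constant), and because the fragments below test it in #if directives, any fallback definition has to be an integer literal the preprocessor can evaluate, not a sizeof expression. A minimal sketch of such a fallback, assuming long is either 32 or 64 bits wide:

#include <limits.h>

#ifndef LONG_BIT
#if ULONG_MAX == 0xffffffffUL
#define LONG_BIT        32
#else
#define LONG_BIT        64      /* assumption: the only other case is LP64 */
#endif
#endif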
#define IDX(c) ((u_char)(c) / LONG_BIT)
#define BIT(c) ((u_long)1 << ((u_char)(c) % LONG_BIT))
u_long tbl[(UCHAR_MAX + 1) / LONG_BIT];
#if LONG_BIT == 64 /* always better to unroll on 64-bit architectures */
#if (LONG_BIT >= 64)
#if LONG_BIT == 32
#elif LONG_BIT == 64
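The IDX()/BIT() macros above split a byte value into a word index and a bit mask, so a full 256-entry character set fits in (UCHAR_MAX + 1) / LONG_BIT longs. A minimal sketch of how such a table is built and queried, in the style of strspn(); only the macros and tbl[] come from the fragments above, span() and its loop structure are illustrative.

#include <limits.h>     /* UCHAR_MAX, LONG_BIT (XSI) */
#include <stddef.h>
#include <sys/types.h>  /* u_char, u_long on the BSDs */

#define IDX(c)  ((u_char)(c) / LONG_BIT)
#define BIT(c)  ((u_long)1 << ((u_char)(c) % LONG_BIT))

size_t
span(const char *s, const char *charset)
{
        u_long tbl[(UCHAR_MAX + 1) / LONG_BIT] = { 0 };
        const char *p;

        /* Mark every byte of charset in the bitmap: one bit per value. */
        for (; *charset != '\0'; charset++)
                tbl[IDX(*charset)] |= BIT(*charset);

        /* Count leading bytes of s whose bits are set in the table. */
        for (p = s; *p != '\0' && (tbl[IDX(*p)] & BIT(*p)) != 0; p++)
                continue;
        return (p - s);
}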
cl_t i = cl / LONG_BIT;                         /* word that holds cluster cl's bit */
unsigned long usedbit = 1UL << (cl % LONG_BIT); /* mask for cl's bit within that word */
cl_t i = cl / LONG_BIT;
size_t bitmap_size = roundup2(bits, LONG_BIT) / (LONG_BIT / 8);
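roundup2() here rounds the bit count up to a whole number of LONG_BIT-bit words before converting to a storage size. A sketch of allocating such a bitmap, assuming the BSD roundup2() macro from <sys/param.h>; bitmap_alloc() is an illustrative name, and the sketch sizes in words rather than bytes.

#include <limits.h>
#include <stdlib.h>
#include <sys/param.h>          /* roundup2() on the BSDs */

static unsigned long *
bitmap_alloc(size_t bits)
{
        /* One bit per item, rounded up to a whole number of longs. */
        size_t nwords = roundup2(bits, LONG_BIT) / LONG_BIT;

        return (calloc(nwords, sizeof(unsigned long)));
}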
if (head % LONG_BIT == 0 &&
head += LONG_BIT;
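The head % LONG_BIT == 0 test above lets a scanner skip a whole word at a time instead of probing LONG_BIT bits individually. A sketch under the assumption that all-zero words are being skipped while hunting for a set bit (the same trick works with ~0UL words when hunting for a clear bit); find_next_set(), map and nbits are illustrative names.

#include <limits.h>
#include <stddef.h>

/* Return the index of the first set bit at or after 'head', or 'nbits'. */
static size_t
find_next_set(const unsigned long *map, size_t nbits, size_t head)
{
        while (head < nbits) {
                /* Word-aligned and the whole word is clear: advance by a
                 * full word rather than testing LONG_BIT bits one by one. */
                if (head % LONG_BIT == 0 && map[head / LONG_BIT] == 0) {
                        head += LONG_BIT;
                        continue;
                }
                if (map[head / LONG_BIT] & (1UL << (head % LONG_BIT)))
                        return (head);
                head++;
        }
        return (nbits);
}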
cl_t i = cl / LONG_BIT;                                 /* word that holds cluster cl's bit */
unsigned long clearmask = ~(1UL << (cl % LONG_BIT));    /* every bit set except cl's */
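The index/mask pairs above (i = cl / LONG_BIT together with usedbit or clearmask) are the standard set, test and clear operations on a bitmap stored as an array of longs. A compact sketch; cl_t and the bitmap_* names are illustrative, not taken from the fragments.

#include <limits.h>

typedef unsigned int cl_t;      /* illustrative: a cluster number */

static void
bitmap_set(unsigned long *map, cl_t cl)
{
        map[cl / LONG_BIT] |= 1UL << (cl % LONG_BIT);
}

static void
bitmap_clear(unsigned long *map, cl_t cl)
{
        map[cl / LONG_BIT] &= ~(1UL << (cl % LONG_BIT));
}

static int
bitmap_isset(const unsigned long *map, cl_t cl)
{
        return ((map[cl / LONG_BIT] & (1UL << (cl % LONG_BIT))) != 0);
}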
#if LONG_BIT == 64
#elif LONG_BIT == 32
if (sc->hn_chim_cnt % LONG_BIT != 0) {
sc->hn_chim_cnt % LONG_BIT);
sc->hn_chim_bmap_cnt = sc->hn_chim_cnt / LONG_BIT;
KASSERT(i * LONG_BIT + idx < sc->hn_chim_cnt,
ret = i * LONG_BIT + idx;
idx = chim_idx / LONG_BIT;              /* word index within the bitmap */
mask = 1UL << (chim_idx % LONG_BIT);    /* bit for chim_idx within that word */
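The fragments above size the chimney-buffer bitmap at one bit per slot (hn_chim_cnt / LONG_BIT words) and turn a slot back into a word index plus bit mask. A single-threaded sketch of that allocate/free pattern, using ffsl() (<strings.h> on the BSDs) to find the first clear bit; bmap_alloc()/bmap_free() and their arguments are illustrative names, and a driver would normally perform the bitmap update atomically rather than with a plain OR.

#include <limits.h>
#include <stddef.h>
#include <strings.h>    /* ffsl() */

/* Claim the first clear bit and return its index, or -1 if all are set. */
static long
bmap_alloc(unsigned long *bmap, size_t bmap_cnt)
{
        size_t i;
        int idx;

        for (i = 0; i < bmap_cnt; i++) {
                idx = ffsl(~bmap[i]);           /* first zero bit, 1-based */
                if (idx == 0)
                        continue;               /* this word is full */
                idx--;
                bmap[i] |= 1UL << idx;
                return ((long)(i * LONG_BIT + idx));
        }
        return (-1);
}

/* Release a previously allocated bit. */
static void
bmap_free(unsigned long *bmap, long slot)
{
        bmap[slot / LONG_BIT] &= ~(1UL << (slot % LONG_BIT));
}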
#if LONG_BIT == 64
#if LONG_BIT >= 64
#if LONG_BIT >= 64
l1i = (l1i + 1) % LONG_BIT;
l1i = LONG_BIT - 1;
l2i = LONG_BIT - 1;
l2i = (l2i + 1) % LONG_BIT;
l2i = LONG_BIT - 1;
port = (l1i * LONG_BIT) + l2i;  /* flatten first- and second-level indices into a port number */
} while (l2i != LONG_BIT - 1);
.last_processed_l1i = LONG_BIT - 1,
.last_processed_l2i = LONG_BIT - 1
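The l1i/l2i arithmetic above walks a two-level pending bitmap: a first-level word says which second-level words may contain set bits, the (x + 1) % LONG_BIT wraparound and the last_processed_* fields give round-robin fairness across ports, and port = l1i * LONG_BIT + l2i flattens the pair back into a single port number. A simplified sketch that scans from index 0 instead of resuming at the last processed position; l1, l2 and scan_pending() are illustrative names, not the driver's own.

#include <limits.h>

/* One summary bit in l1 per l2 word; a set l1 bit means the matching
 * l2 word may hold pending ports. Covers LONG_BIT * LONG_BIT ports. */
static unsigned long l1;
static unsigned long l2[LONG_BIT];

/* Visit every pending port once, lowest-numbered first. */
static void
scan_pending(void (*handle)(unsigned int port))
{
        unsigned int l1i, l2i;

        for (l1i = 0; l1i < LONG_BIT; l1i++) {
                if ((l1 & (1UL << l1i)) == 0)
                        continue;
                for (l2i = 0; l2i < LONG_BIT; l2i++) {
                        if (l2[l1i] & (1UL << l2i))
                                handle(l1i * LONG_BIT + l2i);
                }
        }
}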
#if LONG_BIT != 32 && LONG_BIT != 64
#if LONG_BIT >= 64
#if LONG_BIT >= 64
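Guards like these select word-size-specific code paths at compile time, and the LONG_BIT != 32 && LONG_BIT != 64 form rejects unsupported sizes outright. A sketch of the pattern with a population count as an illustrative body; popcount_long() is not taken from the fragments above.

#include <limits.h>

static unsigned int
popcount_long(unsigned long x)
{
#if LONG_BIT == 64
        /* 64-bit longs: classic parallel bit-count over eight bytes. */
        x = x - ((x >> 1) & 0x5555555555555555UL);
        x = (x & 0x3333333333333333UL) + ((x >> 2) & 0x3333333333333333UL);
        x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0fUL;
        return ((x * 0x0101010101010101UL) >> 56);
#elif LONG_BIT == 32
        /* 32-bit longs: same folding over four bytes. */
        x = x - ((x >> 1) & 0x55555555UL);
        x = (x & 0x33333333UL) + ((x >> 2) & 0x33333333UL);
        x = (x + (x >> 4)) & 0x0f0f0f0fUL;
        return ((x * 0x01010101UL) >> 24);
#else
#error "LONG_BIT must be 32 or 64"
#endif
}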
#if BYTE_ORDER == BIG_ENDIAN && LONG_BIT == 32
#if BYTE_ORDER == LITTLE_ENDIAN && LONG_BIT == 32
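Guards that combine BYTE_ORDER with LONG_BIT == 32 usually pick the in-memory order of the two 32-bit halves of a 64-bit quantity. A sketch under that assumption; split64, HI and LO are illustrative names, and BYTE_ORDER/BIG_ENDIAN/LITTLE_ENDIAN are taken from <sys/endian.h> on the BSDs.

#include <stdint.h>
#include <limits.h>
#include <sys/endian.h>         /* BYTE_ORDER, BIG_ENDIAN, LITTLE_ENDIAN */

#if LONG_BIT == 32
union split64 {
        uint64_t        u64;
        unsigned long   half[2];        /* two 32-bit halves */
};

#if BYTE_ORDER == BIG_ENDIAN
#define HI      0                       /* high-order half stored first */
#define LO      1
#else
#define HI      1                       /* low-order half stored first */
#define LO      0
#endif
#endif  /* LONG_BIT == 32 */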