CHAR_BIT
/*
 * NDIGITS(x): worst-case buffer length to print the integer object x in
 * decimal.  3/10 approximates log10(2) ~= 0.30103 (slightly low), so one
 * "+ 1" absorbs the rounding error; the remaining two presumably cover a
 * sign character and the terminating NUL -- confirm against callers.
 */
#define NDIGITS(x) (3 * (sizeof(x) * CHAR_BIT) / 10 + 1 + 1 + 1)
/*
 * STRBUF_SIZEOF(t): buffer length for a decimal rendering of type t.
 * bits/3 over-estimates the digit count (log10(2) < 1/3, i.e. every 3
 * bits yield at most one decimal digit); the two "+ 1"s presumably
 * cover a sign character and the terminating NUL -- verify at call sites.
 */
#define STRBUF_SIZEOF(t) (1 + CHAR_BIT * sizeof(t) / 3 + 1)
/*
 * DIGITS(var): decimal-digit bound for the integer object var.
 * (2 + bits) / 3 is ceil(bits / 3), an over-estimate of the digit count;
 * the leading "+ 3" is extra slack (sign/NUL/rounding, presumably --
 * NOTE(review): confirm intent against the original file's usage).
 */
#define DIGITS(var) (3 + (2 + CHAR_BIT * sizeof((var))) / 3)
return (uintmax_t)a << (b & (sizeof(uintmax_t) * CHAR_BIT - 1));
return a >> (b & (sizeof(uintmax_t) * CHAR_BIT - 1));
#define NBBY CHAR_BIT
for (b = sizeof (v) * CHAR_BIT - 1; b >= 0; b--) {
params->len_1 = (CHAR_BIT * n) / params->log_w;
#if CHAR_BIT > 8
/*
 * __MAXINT__ / __MAXUINT__: compute the maximum value of integer type T
 * from its bit width.  The lines below are alternative definitions that
 * appear to come from different configurations of the same header (the
 * first two are byte-identical duplicates); they are not meant to be
 * active simultaneously -- NOTE(review): confirm the surrounding #if
 * structure in the original file.
 *
 * Form 1: signed max = all-ones (__MAXUINT__) with the sign bit cleared
 * via XOR.  NOTE(review): ((T)1) << (bits - 1) shifts into the sign bit
 * of a signed T, which is undefined behavior in strict ISO C; relies on
 * the implementation's two's-complement behavior.
 */
#define __MAXINT__(T) ((T)((((T)1) << ((sizeof(T) * CHAR_BIT) - 1)) ^ __MAXUINT__(T)))
#define __MAXINT__(T) ((T)((((T)1) << ((sizeof(T) * CHAR_BIT) - 1)) ^ __MAXUINT__(T)))
/*
 * Form 2: builds 2^(bits-1) - 1 as (2^(bits-2) - 1) | 2^(bits-2), never
 * shifting into the sign bit -- the strictly-conforming variant.
 */
#define __MAXINT__(T) ((T)(((((T)1) << ((sizeof(T) * CHAR_BIT) - 2)) - 1) | (((T)1) << ((sizeof(T) * CHAR_BIT) - 2))))
/* Unsigned max: signed max with the top bit set on again. */
#define __MAXUINT__(T) ((T)(__MAXINT__(T) | (((T)1) << ((sizeof(T) * CHAR_BIT) - 1))))
json->stack_end_bit = (json->stack_end_bit + 1) % CHAR_BIT;
json->stack_end_bit = CHAR_BIT - 1;
obit = CHAR_BIT - 1;
if (sizeof(time_t) * CHAR_BIT >= 64) {
#if defined(CHAR_BIT) && CHAR_BIT != 8
if (sizeof(long) * CHAR_BIT >= 64)
if (sizeof(long) * CHAR_BIT == 32 &&
if (sizeof(long) * CHAR_BIT >= 64 &&
sizeof(void *) * CHAR_BIT >= 64 &&
sizeof(int) * CHAR_BIT >= 32 &&
if (sizeof(int) * CHAR_BIT == 32 &&
if (sizeof(int) * CHAR_BIT == 32 &&
sizeof(off_t) * CHAR_BIT >= 64)
if (sizeof(int) * CHAR_BIT == 32 &&
sizeof(long) * CHAR_BIT == 64 &&
if (sizeof(int) * CHAR_BIT >= 32 &&
sizeof(long) * CHAR_BIT >= 64 &&
sizeof(void *) * CHAR_BIT >= 64 &&
sizeof(off_t) * CHAR_BIT >= 64)
/*
 * Bit widths used by the quad-word (64-bit-on-32-bit) arithmetic
 * helpers: a quad_t is manipulated as two longs, each long as two
 * half-long "digits" of HALF_BITS bits.
 */
#define QUAD_BITS (sizeof(quad_t) * CHAR_BIT)
#define LONG_BITS (sizeof(long) * CHAR_BIT)
#define HALF_BITS (sizeof(long) * CHAR_BIT / 2)
/*
 * BACKSL: flag bit ORed onto a character to mark it as backslash-escaped.
 * 1 << CHAR_BIT (256 on 8-bit-byte systems) is above every valid byte
 * value, so the flag cannot collide with character data.  The indented
 * "# define" suggests this sits inside a conditional block not visible
 * in this excerpt.
 */
# define BACKSL (1<<CHAR_BIT)
else if (g->nstates <= CHAR_BIT*sizeof(states1) && !(eflags&REG_LARGE))
m = sizeof(struct in6_addr) * CHAR_BIT;
m -= CHAR_BIT;
/* TYPE_BIT(type): number of value+sign bits in an object of the given type. */
#define TYPE_BIT(type) (sizeof (type) * CHAR_BIT)
/*
 * BUF: conversion-buffer size for integer formatting -- one slot per bit
 * of uintmax_t, enough for the longest possible base-2 rendering.
 * (The two identical lines are duplicate occurrences from two source
 * files in this aggregate, e.g. vfprintf/vfwprintf.)
 */
#define BUF (sizeof(uintmax_t) * CHAR_BIT)
#define BUF (sizeof(uintmax_t) * CHAR_BIT)
if (sizeof(size_t) * CHAR_BIT <= 32)
/* STEPS_BIT: number of bits in a uintptr_t. */
#define STEPS_BIT (sizeof(uintptr_t) * CHAR_BIT)
return (mask == 0 ? 0 : CHAR_BIT * sizeof(mask) - __builtin_clz(mask));
return (mask == 0 ? 0 : CHAR_BIT * sizeof(mask) - __builtin_clzl(mask));
return (mask == 0 ? 0 : CHAR_BIT * sizeof(mask) - __builtin_clzll(mask));
int lt = (p1[i] - p2[i]) >> CHAR_BIT;
int gt = (p2[i] - p1[i]) >> CHAR_BIT;
static_assert(sizeof(type) * CHAR_BIT == TYPE ## _WIDTH, \
static_assert(sizeof(type) * CHAR_BIT == TYPE ## _WIDTH, \
const int n = sizeof(TYPE) * CHAR_BIT;
const int n = sizeof(TYPE) * CHAR_BIT;
const int n = sizeof(TYPE) * CHAR_BIT;
const int n = sizeof(TYPE) * CHAR_BIT;
u_long first_invalid = bm->size * CHAR_BIT;
unsigned int mask = 1U << (*idx % CHAR_BIT);
if ((bm->map[*idx / CHAR_BIT] & mask) != 0)
/* LUA_UNSIGNEDBITS: bit width of Lua's unsigned integer type. */
#define LUA_UNSIGNEDBITS (sizeof(LUA_UNSIGNED) * CHAR_BIT)
char *cur, type[sizeof(rule->mbr_object.mbo_type) * CHAR_BIT + 1];
where += CHAR_BIT * sizeof(Elf_Relr) - 1;
/*
 * TIME_MAX: largest value of a signed time_t, computed without
 * overflowing: (2^(bits-2) - 1) * 2 + 1 == 2^(bits-1) - 1, so the sign
 * bit is never shifted into.  Assumes time_t is a signed integer type
 * with no padding bits.
 */
#define TIME_MAX ((((time_t) 1 << (sizeof(time_t) * CHAR_BIT - 2)) - 1) * 2 + 1)
map[(u_int)((ino) - 1) / CHAR_BIT] |= \
1 << ((u_int)((ino) - 1) % CHAR_BIT)
map[(u_int)((ino) - 1) / CHAR_BIT] &= \
~(1 << ((u_int)((ino) - 1) % CHAR_BIT))
(map[(u_int)((ino) - 1) / CHAR_BIT] & \
(1 << ((u_int)((ino) - 1) % CHAR_BIT)))
mapsize = roundup(howmany(maxino, CHAR_BIT), TP_BSIZE);
if (((ino - 1) % CHAR_BIT) == 0) /* map is offset by 1 */
if (((ino - 1) % CHAR_BIT) == 0) /* map is offset by 1 */
cp = &cg_inosused(cgp)[(inosused - 1) / CHAR_BIT];
for ( ; inosused > 0; inosused -= CHAR_BIT, cp--) {
for (i = 1 << (CHAR_BIT - 1); i > 0; i >>= 1) {
if (((ino - 1) % CHAR_BIT) == 0) /* map is offset by 1 */
cgp->cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT), "%jd");
cgp->cg_freeoff + howmany(sblock.fs_fpg, CHAR_BIT), "%jd");
roundup(cgp->cg_freeoff + howmany(sblock.fs_fpg, CHAR_BIT),
howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT),
cgp->cg_freeoff = cgp->cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT);
cgp->cg_nextfreeoff = cgp->cg_freeoff + howmany(sblock.fs_fpg,CHAR_BIT);
howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT);
bzero(cg_clustersfree(cgp), howmany(maxbno, CHAR_BIT));
cp = &cg_inosused(cgp)[(inosused - 1) / CHAR_BIT];
if (inosused > CHAR_BIT)
inosused -= CHAR_BIT;
for (i = 1 << (CHAR_BIT - 1); i > 0; i >>= 1) {
inomapsize = howmany(fs->fs_ipg, CHAR_BIT);
blkmapsize = howmany(fs->fs_fpg, CHAR_BIT);
howmany(fragstoblks(fs, fs->fs_fpg), CHAR_BIT);
if ((i & (CHAR_BIT - 1)) != (CHAR_BIT - 1)) {
inomapsize = howmany(fs->fs_ipg, CHAR_BIT);
howmany(fs->fs_fpg, CHAR_BIT),
for (m = 0, l = 1; m < CHAR_BIT; m++, l <<= 1) {
n = startvalue + i * CHAR_BIT + m;
bmapsize = roundup(howmany(maxfsblock, CHAR_BIT), sizeof(short));
CHAR_BIT);
e = howmany(sb->fs_old_cpg * sb->fs_old_spc / (sb->fs_old_nspf << sb->fs_fragshift), CHAR_BIT);
acg.cg_freeoff = acg.cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT);
acg.cg_nextfreeoff = acg.cg_freeoff + howmany(sblock.fs_fpg, CHAR_BIT);
howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT);
if ((i & (CHAR_BIT - 1)) != CHAR_BIT - 1)
for (n = 0; n < sizeof(n) * CHAR_BIT; n++)
acg.cg_freeoff = acg.cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT);
acg.cg_nextfreeoff = acg.cg_freeoff + howmany(sblock.fs_fpg, CHAR_BIT);
howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT);
if ((i & (CHAR_BIT - 1)) != CHAR_BIT - 1)
cp = &cg_inosused(&cgblk)[(inosused - 1) / CHAR_BIT];
for ( ; inosused > 0; inosused -= CHAR_BIT, cp--) {
for (i = 1 << (CHAR_BIT - 1); i > 0; i >>= 1) {
(map[(u_int)((ino) - 1) / CHAR_BIT] & \
(1 << ((u_int)((ino) - 1) % CHAR_BIT)))
map[(u_int)((ino) - 1) / CHAR_BIT] |= \
1 << ((u_int)((ino) - 1) % CHAR_BIT)
hexlen = 2 + (intlen * CHAR_BIT + 3) / 4;
#define LUA_UNSIGNEDBITS (sizeof(LUA_UNSIGNED) * CHAR_BIT)
/*
 * MAXNBUF: conversion-buffer size for printing an intmax_t in any base;
 * the worst case is base 2 (one character per bit) plus a terminating NUL.
 */
#define MAXNBUF (sizeof(intmax_t) * CHAR_BIT + 1)
<< ((id_size - i - 1) * CHAR_BIT);
/* BITS_PER_LONG: bit width of long (Linux-style spelling). */
#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
/*
 * NV_NUMSTR_MAX: buffer size for a textual rendering of a uint64_t --
 * one character per bit (worst case, base 2) plus a terminating NUL,
 * presumably; confirm the radix used by the original file's callers.
 */
#define NV_NUMSTR_MAX ((sizeof(uint64_t) * CHAR_BIT) + 1)
tag = sizeof(w) * CHAR_BIT * i + bit;
hexlen = 2 + (intlen * CHAR_BIT + 3) / 4;
ASSERT((sizeof(*pNumber) * CHAR_BIT < WIDTH_64) || (zDeserializeTypeWidth(pType) >= WIDTH_64));
if (sizeof(ulNumber) * CHAR_BIT < WIDTH_64) {
if (sizeof(ulNumber) * CHAR_BIT < WIDTH_64) {
if (sizeof(ulNumber) * CHAR_BIT < WIDTH_64) {
/*
 * Bit widths for quad-word arithmetic (duplicate occurrence of the same
 * trio defined earlier in this aggregate): a quad_t is split into longs,
 * each long into two HALF_BITS-wide "digits".
 */
#define QUAD_BITS (sizeof(quad_t) * CHAR_BIT)
#define LONG_BITS (sizeof(long) * CHAR_BIT)
#define HALF_BITS (sizeof(long) * CHAR_BIT / 2)
#ifndef CHAR_BIT
for (size_t i = 0; i <= sizeof(short) * CHAR_BIT - 1; i++) {
histcounter_size = abs(histcounter_type) / CHAR_BIT;
maxi = setlen * CHAR_BIT;
for (i = 1; i <= sz * CHAR_BIT; i++) {
((sizeof (int) * CHAR_BIT - 1) * 302 / 1000 + 2)
/* INT_BIT: bit width of int. */
#define INT_BIT (sizeof(int)*CHAR_BIT)
acg.cg_freeoff = acg.cg_iusedoff + howmany(sblock.fs_ipg, CHAR_BIT);
howmany(sblock.fs_fpg, CHAR_BIT);
howmany(sblock.fs_fpg, CHAR_BIT) - sizeof(int32_t);
howmany(fragstoblks(&sblock, sblock.fs_fpg), CHAR_BIT);
if ((i & (CHAR_BIT - 1)) != (CHAR_BIT - 1)) {
for (n = 0; n < sizeof(n) * CHAR_BIT; n++)
if (bitlen > len * CHAR_BIT)
bits = MIN(CHAR_BIT, bitlen);
*p++ = (u_char)~0 << (CHAR_BIT - bits);