#ifndef _LINUX_BITFIELD_H
#define _LINUX_BITFIELD_H
#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/typecheck.h>
#include <asm/byteorder.h>
/* Shift amount of @x's lowest set bit; @x must be non-zero (ffsll of 0 is 0). */
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
/*
 * Helper for __unsigned_scalar_typeof(): expands to the two _Generic()
 * cases mapping both the signed and unsigned variants of @type to an
 * (unsigned type) value.
 */
#define __scalar_type_to_unsigned_cases(type)		\
		unsigned type:	(unsigned type)0,	\
		signed type:	(unsigned type)0

/*
 * __unsigned_scalar_typeof(x) - the unsigned integer type with the same
 * width as x.  Plain "char" needs its own case because it is a distinct
 * type from both "signed char" and "unsigned char".  Non-scalar types
 * fall through to "default" and keep their own type.
 */
#define __unsigned_scalar_typeof(x) typeof(				\
		_Generic((x),						\
			char:	(unsigned char)0,			\
			__scalar_type_to_unsigned_cases(char),		\
			__scalar_type_to_unsigned_cases(short),		\
			__scalar_type_to_unsigned_cases(int),		\
			__scalar_type_to_unsigned_cases(long),		\
			__scalar_type_to_unsigned_cases(long long),	\
			default: (x)))
/* Cast @x to the unsigned counterpart of @type's scalar type. */
#define __bf_cast_unsigned(type, x)	((__unsigned_scalar_typeof(type))(x))
/*
 * __BF_FIELD_CHECK_MASK() - compile-time sanity checks on a bitfield
 * mask and, when it is a compile-time constant, on the value destined
 * for it:
 *  - @_mask must be a compile-time constant and must not be zero;
 *  - a constant @_val must have no bits above the field's width
 *    (the "0 +" promotes _val so the complement test is well-defined);
 *  - @_mask must be one contiguous run of set bits: adding the mask's
 *    lowest set bit to the mask must produce a power of two.
 * @_pfx is prepended to the error strings so the report names the
 * calling macro.
 */
#define __BF_FIELD_CHECK_MASK(_mask, _val, _pfx)			\
	({								\
		BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask),		\
				 _pfx "mask is not constant");		\
		BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero");	\
		BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ?		\
				 ~((_mask) >> __bf_shf(_mask)) &	\
				 (0 + (_val)) : 0,			\
				 _pfx "value too large for the field"); \
		__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) +			\
					      (1ULL << __bf_shf(_mask))); \
	})
/*
 * __BF_FIELD_CHECK_REG() - fail the build when @mask cannot fit in a
 * register of @reg's type: compare the mask against the all-ones value
 * truncated to @reg's width, with both cast to unsigned so the
 * comparison is not affected by sign.
 */
#define __BF_FIELD_CHECK_REG(mask, reg, pfx)			\
	BUILD_BUG_ON_MSG(__bf_cast_unsigned(mask, mask) >	\
			 __bf_cast_unsigned(reg, ~0ull),	\
			 pfx "type of reg too small for mask")

/* All mask/value/register compile-time checks combined. */
#define __BF_FIELD_CHECK(mask, reg, val, pfx)		\
	({						\
		__BF_FIELD_CHECK_MASK(mask, val, pfx);	\
		__BF_FIELD_CHECK_REG(mask, reg, pfx);	\
	})
/*
 * __FIELD_PREP() - checked shift of @val into the field described by
 * @mask.  Mask-only checks here; register-width checks are added by the
 * public wrappers that know the destination type.
 */
#define __FIELD_PREP(mask, val, pfx)					\
	({								\
		__BF_FIELD_CHECK_MASK(mask, val, pfx);			\
		((typeof(mask))(val) << __bf_shf(mask)) & (mask);	\
	})

/*
 * __FIELD_GET() - checked extraction of the field described by @mask
 * from @reg; the result is shifted down so the field's LSB is bit 0.
 * No value is being stored, hence the dummy 0U for the value check.
 */
#define __FIELD_GET(mask, reg, pfx)					\
	({								\
		__BF_FIELD_CHECK_MASK(mask, 0U, pfx);			\
		(typeof(mask))(((reg) & (mask)) >> __bf_shf(mask));	\
	})
/**
 * FIELD_MAX() - produce the maximum value representable by a field
 * @_mask: shifted mask defining the field's length and position
 *
 * FIELD_MAX() returns the maximum value that can be held in the field
 * specified by @_mask (the mask shifted down to bit 0).
 */
#define FIELD_MAX(_mask)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_MAX: ");	\
		(typeof(_mask))((_mask) >> __bf_shf(_mask));		\
	})
/**
 * FIELD_FIT() - check if value fits in the field
 * @_mask: shifted mask defining the field's length and position
 * @_val:  value to test against the field
 *
 * Return: true if @_val can fit inside @_mask, false if @_val is too big.
 * (Shift @_val into position and see whether any bits escape the mask.)
 */
#define FIELD_FIT(_mask, _val)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: ");	\
		!((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
	})
/**
 * FIELD_PREP() - prepare a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_val:  value to put in the field
 *
 * FIELD_PREP() masks and shifts up @_val to fit into the field defined
 * by @_mask.  All checks are compile-time; @_mask must be constant.
 */
#define FIELD_PREP(_mask, _val)						\
	({								\
		__BF_FIELD_CHECK_REG(_mask, 0ULL, "FIELD_PREP: ");	\
		__FIELD_PREP(_mask, _val, "FIELD_PREP: ");		\
	})
/* Build-time power-of-two check usable in constant expressions. */
#define __BF_CHECK_POW2(n)	BUILD_BUG_ON_ZERO(((n) & ((n) - 1)) != 0)

/**
 * FIELD_PREP_CONST() - prepare a constant bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_val:  value to put in the field
 *
 * FIELD_PREP_CONST() masks and shifts up @_val to fit into the field
 * defined by @_mask.  Unlike FIELD_PREP(), it uses no statement
 * expression, so the result is an integer constant expression usable
 * in initializers and case labels — at the cost of weaker checks
 * (all three BUILD_BUG_ON_ZERO terms evaluate to 0 when they pass).
 */
#define FIELD_PREP_CONST(_mask, _val)					\
	(								\
		/* mask must be non-zero */				\
		BUILD_BUG_ON_ZERO((_mask) == 0) +			\
		/* value must not have bits outside the field */	\
		BUILD_BUG_ON_ZERO(~((_mask) >> __bf_shf(_mask)) & (_val)) + \
		/* mask must be one contiguous run of bits */		\
		__BF_CHECK_POW2((_mask) + (1ULL << __bf_shf(_mask))) +	\
		/* shift the value into position */			\
		(((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask))	\
	)
/**
 * FIELD_GET() - extract a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_reg:  value of the entire bitfield
 *
 * FIELD_GET() extracts the field specified by @_mask from @_reg,
 * shifted down so its LSB lands on bit 0.
 */
#define FIELD_GET(_mask, _reg)						\
	({								\
		__BF_FIELD_CHECK_REG(_mask, _reg, "FIELD_GET: ");	\
		__FIELD_GET(_mask, _reg, "FIELD_GET: ");		\
	})
/**
 * FIELD_MODIFY() - modify a bitfield element in place
 * @_mask:  shifted mask defining the field's length and position
 * @_reg_p: pointer to the memory that should be updated
 * @_val:   value to store in the bitfield
 *
 * FIELD_MODIFY() clears the field specified by @_mask in *@_reg_p and
 * stores @_val, masked and shifted into position, in its place.
 */
#define FIELD_MODIFY(_mask, _reg_p, _val)				\
	({								\
		typecheck_pointer(_reg_p);				\
		__BF_FIELD_CHECK(_mask, *(_reg_p), _val, "FIELD_MODIFY: "); \
		*(_reg_p) &= ~(_mask);					\
		*(_reg_p) |= (((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask)); \
	})
/*
 * Compile-time diagnostics for the typed {le,be,u}NN_*_bits() helpers
 * below: any surviving call to these undefined functions turns into a
 * descriptive build error.
 */
extern void __compiletime_error("value doesn't fit into mask")
__field_overflow(void);
extern void __compiletime_error("bad bitfield mask")
__bad_mask(void);
/*
 * field_multiplier() - value of @field's lowest set bit.
 *
 * Multiplying by the returned value is equivalent to shifting a field
 * value into position under @field.  @field must be a single contiguous
 * run of set bits; otherwise __bad_mask() fires a compile-time error.
 * (Filling the bits below a contiguous mask yields a value one less
 * than a power of two, so adding one must leave no overlap.)
 */
static __always_inline u64 field_multiplier(u64 field)
{
	u64 low_filled = field | (field - 1);

	if (low_filled & (low_filled + 1))
		__bad_mask();
	return field & -field;
}
/*
 * field_mask() - @field shifted down so its LSB is bit 0; division by
 * the lowest set bit is the shift, and also runs the contiguity check
 * inside field_multiplier().
 */
static __always_inline u64 field_mask(u64 field)
{
	return field / field_multiplier(field);
}

/* Maximum value representable by @field, in @field's own type. */
#define field_max(field)	((typeof(field))field_mask(field))
/*
 * ____MAKE_OP() - generate the typed bitfield helpers for one storage
 * type:
 *   <type>_encode_bits(v, field)       - shift @v under @field, in
 *                                        storage byte order (@to);
 *                                        constant values that overflow
 *                                        the field fail the build;
 *   <type>_replace_bits(old, v, field) - return @old with @field
 *                                        replaced by @v;
 *   <type>p_replace_bits(p, v, field)  - same, updating *@p in place;
 *   <type>_get_bits(v, field)          - extract @field from @v,
 *                                        converted to CPU order (@from)
 *                                        and shifted down to bit 0.
 * @to/@from are the cpu-to-storage and storage-to-cpu conversions
 * (empty for native-endian u<size> variants).
 */
#define ____MAKE_OP(type,base,to,from)					\
static __always_inline __##type __must_check type##_encode_bits(base v, base field) \
{									\
	if (__builtin_constant_p(v) && (v & ~field_mask(field)))	\
		__field_overflow();					\
	return to((v & field_mask(field)) * field_multiplier(field));	\
}									\
static __always_inline __##type __must_check type##_replace_bits(__##type old, \
					base val, base field)		\
{									\
	return (old & ~to(field)) | type##_encode_bits(val, field);	\
}									\
static __always_inline void type##p_replace_bits(__##type *p,		\
					base val, base field)		\
{									\
	*p = (*p & ~to(field)) | type##_encode_bits(val, field);	\
}									\
static __always_inline base __must_check type##_get_bits(__##type v, base field) \
{									\
	return (from(v) & field)/field_multiplier(field);		\
}

/* One little-endian, one big-endian and one native variant per size. */
#define __MAKE_OP(size)							\
	____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu)	\
	____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu)	\
	____MAKE_OP(u##size,u##size,,)
/* u8 has no endianness, so only the native variant is generated. */
____MAKE_OP(u8,u8,,)
__MAKE_OP(16)
__MAKE_OP(32)
__MAKE_OP(64)
#undef __MAKE_OP
#undef ____MAKE_OP
/*
 * __field_prep() - runtime (non-constant mask) variant of FIELD_PREP().
 * No compile-time checks are possible here; the shift is found at run
 * time with __ffs()/__ffs64(), chosen by the mask type's width.  The
 * mask and value are captured once into locals to avoid multiple
 * evaluation of the arguments.
 */
#define __field_prep(mask, val)						\
	({								\
		auto __mask = (mask);					\
		typeof(__mask) __val = (val);				\
		unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ?	\
				       __ffs(__mask) : __ffs64(__mask);	\
		(__val << __shift) & __mask;				\
	})
/*
 * __field_get() - runtime (non-constant mask) variant of FIELD_GET().
 * Mirrors __field_prep(): single evaluation of the arguments, shift
 * computed at run time with __ffs()/__ffs64() by mask width.
 */
#define __field_get(mask, reg)						\
	({								\
		auto __mask = (mask);					\
		typeof(__mask) __reg = (reg);				\
		unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ?	\
				       __ffs(__mask) : __ffs64(__mask);	\
		(__reg & __mask) >> __shift;				\
	})
/*
 * field_prep() / field_get() - bitfield prepare/extract that accept
 * either a constant or a runtime mask: constant masks go through the
 * fully build-time-checked __FIELD_PREP()/__FIELD_GET(), runtime masks
 * through the unchecked __field_prep()/__field_get().
 */
#define field_prep(mask, val)						\
	(__builtin_constant_p(mask) ? __FIELD_PREP(mask, val, "field_prep: ") \
				    : __field_prep(mask, val))

#define field_get(mask, reg)						\
	(__builtin_constant_p(mask) ? __FIELD_GET(mask, reg, "field_get: ") \
				    : __field_get(mask, reg))
#endif