xh
/*
 * Compose a full RAZWI initiator ID by OR-ing the low-coordinate field
 * built from (xl, yl) with the high-X field built from xh.
 * NOTE(review): RAZWI_INITIATOR_ID_X_Y_LOW()/RAZWI_INITIATOR_ID_X_HIGH()
 * are defined elsewhere; the exact bit layout is not visible here --
 * confirm against the ASIC register map before relying on it.
 */
#define RAZWI_INITIATOR_ID_X_Y(xl, yl, xh) \
(RAZWI_INITIATOR_ID_X_Y_LOW(xl, yl) | RAZWI_INITIATOR_ID_X_HIGH(xh))
u8 xh;
x = (((int)regs.point[i].xh & 0xf) << 8) + regs.point[i].xl;
event_type = (regs.point[i].xh >> 6) & 0x03;
struct fuse_ext_header *xh;
xh = extend_arg(ext, sg_len);
if (!xh)
xh->size = sg_len;
xh->type = FUSE_EXT_GROUPS;
sg = (struct fuse_supp_groups *) &xh[1];
struct ocfs2_xattr_header *xh = loc->xl_header;
int i, count = le16_to_cpu(xh->xh_count);
offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
struct ocfs2_xattr_header *xh = loc->xl_header;
int count = le16_to_cpu(xh->xh_count);
memmove((char *)xh + first_namevalue_offset + namevalue_size,
(char *)xh + first_namevalue_offset,
memset((char *)xh + first_namevalue_offset, 0, namevalue_size);
offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
le16_add_cpu(&xh->xh_entries[i].xe_name_offset,
struct ocfs2_xattr_header *xh = loc->xl_header;
int count = le16_to_cpu(xh->xh_count);
tmp_xe = &xh->xh_entries[tmp];
memmove(&xh->xh_entries[low + 1],
&xh->xh_entries[low],
le16_add_cpu(&xh->xh_count, 1);
loc->xl_entry = &xh->xh_entries[low];
struct ocfs2_xattr_header *xh = loc->xl_header;
xh->xh_free_start = cpu_to_le16(nameval_offset);
le16_add_cpu(&xh->xh_name_value_len, size);
struct ocfs2_xattr_header *xh = loc->xl_header;
count = le16_to_cpu(xh->xh_count) - 1;
index = ((char *)entry - (char *)&xh->xh_entries) /
memmove(&xh->xh_entries[index], &xh->xh_entries[index + 1],
memset(&xh->xh_entries[count], 0,
xh->xh_count = cpu_to_le16(count);
struct ocfs2_xattr_header *xh,
struct ocfs2_xattr_header *xh = bucket_xh(bucket);
for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
xe = &xh->xh_entries[i];
xh,
struct ocfs2_xattr_header *xh = NULL;
xh = bucket_xh(search);
high_bucket = le16_to_cpu(xh->xh_num_buckets) - 1;
xh = bucket_xh(search);
xe = &xh->xh_entries[0];
if (xh->xh_count)
xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
struct ocfs2_xattr_header *xh,
if (index < 0 || index >= le16_to_cpu(xh->xh_count))
name_offset = le16_to_cpu(xh->xh_entries[index].xe_name_offset);
struct ocfs2_xattr_header *xh = bucket_xh(bucket);
xh->xh_count = xb_xh->xh_count;
xh->xh_num_buckets = cpu_to_le16(1);
xh->xh_name_value_len = cpu_to_le16(size);
xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE - size);
le16_add_cpu(&xh->xh_entries[i].xe_name_offset, off_change);
struct ocfs2_xattr_header *xh;
xh = (struct ocfs2_xattr_header *)bucket_buf;
entries = (char *)xh->xh_entries;
xh_free_start = le16_to_cpu(xh->xh_free_start);
(unsigned long long)blkno, le16_to_cpu(xh->xh_count),
xh_free_start, le16_to_cpu(xh->xh_name_value_len));
sort(entries, le16_to_cpu(xh->xh_count),
xe = xh->xh_entries;
for (i = 0; i < le16_to_cpu(xh->xh_count); i++, xe++) {
xh->xh_free_start = cpu_to_le16(end);
sort(entries, le16_to_cpu(xh->xh_count),
static int ocfs2_xattr_find_divide_pos(struct ocfs2_xattr_header *xh)
struct ocfs2_xattr_entry *entries = xh->xh_entries;
int count = le16_to_cpu(xh->xh_count);
struct ocfs2_xattr_header *xh;
xh = bucket_xh(s_bucket);
count = le16_to_cpu(xh->xh_count);
start = ocfs2_xattr_find_divide_pos(xh);
xe = &xh->xh_entries[start-1];
xh = bucket_xh(t_bucket);
xh->xh_free_start = cpu_to_le16(blocksize);
xh->xh_entries[0].xe_name_hash = xe->xe_name_hash;
le32_add_cpu(&xh->xh_entries[0].xe_name_hash, 1);
xh = bucket_xh(t_bucket);
xe = &xh->xh_entries[i];
xe = &xh->xh_entries[start];
(int)((char *)xe - (char *)xh),
(int)((char *)xh->xh_entries - (char *)xh));
memmove((char *)xh->xh_entries, (char *)xe, len);
xe = &xh->xh_entries[count - start];
le16_add_cpu(&xh->xh_count, -start);
le16_add_cpu(&xh->xh_name_value_len, -name_value_len);
xh->xh_free_start = cpu_to_le16(OCFS2_XATTR_BUCKET_SIZE);
for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
xe = &xh->xh_entries[i];
le16_to_cpu(xh->xh_free_start))
xh->xh_free_start = xe->xe_name_offset;
xh->xh_num_buckets = cpu_to_le16(1);
xh->xh_num_buckets = 0;
*first_hash = le32_to_cpu(xh->xh_entries[0].xe_name_hash);
xh = bucket_xh(s_bucket);
memset(&xh->xh_entries[start], 0,
xh->xh_count = cpu_to_le16(start);
xh->xh_free_start = cpu_to_le16(name_offset);
xh->xh_name_value_len = cpu_to_le16(name_value_len);
struct ocfs2_xattr_header *xh = bucket_xh(bucket);
xe = &xh->xh_entries[xe_off];
struct ocfs2_xattr_header *xh = bucket_xh(bucket);
if (name_hash != le32_to_cpu(xh->xh_entries[0].xe_name_hash))
if (xh->xh_entries[le16_to_cpu(xh->xh_count) - 1].xe_name_hash ==
xh->xh_entries[0].xe_name_hash) {
le32_to_cpu(xh->xh_entries[0].xe_name_hash));
struct ocfs2_xattr_header *xh = bucket_xh(bucket);
for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
xe = &xh->xh_entries[i];
struct ocfs2_xattr_header *xh = bucket_xh(bucket);
struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset];
struct ocfs2_xattr_header *xh =
le16_to_cpu(xh->xh_count));
for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
xe = &xh->xh_entries[i];
struct ocfs2_xattr_header *xh,
struct ocfs2_xattr_header *xh,
for (i = 0; i < le16_to_cpu(xh->xh_count); i++) {
xe = &xh->xh_entries[i];
ret = func(sb, bh, xh, i, &xv, NULL, para);
struct ocfs2_xattr_header *xh,
struct ocfs2_xattr_entry *xe = &xh->xh_entries[offset];
*xv = (struct ocfs2_xattr_value_root *)((void *)xh +
struct ocfs2_xattr_header *xh,
ret = ocfs2_value_metas_in_xattr_header(osb->sb, NULL, xh,
struct ocfs2_xattr_header *xh,
le16_to_cpu(xh->xh_count));
for (i = 0, j = 0; i < le16_to_cpu(xh->xh_count); i++, j++) {
xe = &xh->xh_entries[i];
ret = func(sb, old_bh, xh, i, &xv, NULL, para);
struct ocfs2_xattr_header *xh = (struct ocfs2_xattr_header *)
ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh,
ret = ocfs2_reflink_xattr_header(handle, args, args->old_bh, xh,
struct ocfs2_xattr_header *xh = &xb->xb_attrs.xb_header;
ret = ocfs2_reflink_lock_xattr_allocators(osb, xh, args->ref_root_bh,
ret = ocfs2_reflink_xattr_header(handle, args, blk_bh, xh,
struct ocfs2_xattr_header *xh,
struct ocfs2_xattr_header *xh,
struct ocfs2_xattr_header *xh =
xh, &metas->num_metas,
struct ocfs2_xattr_header *xh;
xh = (struct ocfs2_xattr_header *)
for (i = 0; i < le16_to_cpu(xh->xh_count); i++)
if (!ocfs2_xattr_is_local(&xh->xh_entries[i]))
#define __FP_CLZ_2(R, xh, xl) \
if (xh) \
__FP_CLZ(R,xh); \
/*
 * Two-word add-immediate: (xh:xl) += i.
 * The low word is bumped first; a carry out of the low word is detected
 * by unsigned wrap-around (result < addend) and propagated into xh.
 * Note: i is evaluated twice, so callers must pass a side-effect-free
 * expression (as the original form also required).
 */
#define __FP_FRAC_ADDI_2(xh, xl, i) \
(xl += (i), xh += (xl < (i)))
/*
 * Two-word unsigned add: (rh:rl) = (xh:xl) + (yh:yl).
 * The low words are summed first; carry out of the low word is detected
 * by unsigned wrap-around (sum < one addend) and folded into the high
 * word.  Arguments must be side-effect-free lvalues/expressions.
 */
#define __FP_FRAC_ADD_2(rh, rl, xh, xl, yh, yl) \
(rl = xl + yl, rh = xh + yh + (rl < xl))
/*
 * Two-word unsigned subtract: (rh:rl) = (xh:xl) - (yh:yl).
 * The low words are subtracted first; a borrow is detected by unsigned
 * wrap-around (difference > minuend) and taken out of the high word.
 * Arguments must be side-effect-free lvalues/expressions.
 */
#define __FP_FRAC_SUB_2(rh, rl, xh, xl, yh, yl) \
(rl = xl - yl, rh = xh - yh - (rl > xl))
#define __FP_FRAC_DEC_2(xh, xl, yh, yl) \
xh -= yh + ((xl -= yl) > _t); \
#define __FP_FRAC_ADDI_2(xh, xl, i) add_ssaaaa(xh, xl, xh, xl, 0, i)
#define __FP_FRAC_DEC_2(xh, xl, yh, yl) sub_ddmmss(xh, xl, xh, xl, yh, yl)
#define umul_ppmm(xh, xl, m0, m1) \
(xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
(xh) += ((((SItype) __m0 >> 31) & __m1) \
#define umul_ppmm(xh, xl, m0, m1) \
(xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
(xh) += ((((signed int) __m0 >> 15) & __m1) \
#define umul_ppmm(xh, xl, m0, m1) \
: "=r" ((USItype)(xh)) \
#define umul_ppmm(xh, xl, a, b) \
: "=&r" (xh), \
#define umul_ppmm(xh, xl, a, b) \
: "=&r" (xh), \
#define umul_ppmm(xh, xl, m0, m1) \
(xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
(xh) += ((((SItype) __m0 >> 31) & __m1) \
#define smul_ppmm(xh, xl, m0, m1) \
(xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
#define umul_ppmm(xh, xl, a, b) \
: "=&d" ((USItype)(xh)), "=&d" ((USItype)(xl)), \
#define umul_ppmm(xh, xl, m0, m1) \
: "=r" ((USItype)(xh)), \
(xh) += ((((SItype) __m0 >> 31) & __m1) \
#define smul_ppmm(xh, xl, m0, m1) \
: "=r" ((SItype)(xh)), \
unsigned char *xh = (void *)ip6xh;
switch (xh[off]) {
optlen = xh[off + 1] + 2;
*pl = ntohl(*(__be32 *)(xh + off + 2));
optlen = xh[off + 1] + 2;