sib — occurrences of the identifier 'sib' across the tree: the x86 SIB (scale-index-base) addressing byte, DRBD's struct sib_info state broadcasts, InfiniBand struct sockaddr_ib addresses, sibling btree nodes (dm, nilfs2), paired "sibling" flow fields in the ice driver, bootconfig sibling links, and sibling threads in the seccomp selftests.
#define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6)
#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3)
#define X86_SIB_BASE(sib) ((sib) & 0x07)
return insn_offset_sib(insn) + insn->sib.nbytes;
struct insn_field sib;
u8 sib;
sib = insn_fetch(u8, ctxt);
index_reg |= (sib >> 3) & 7;
base_reg |= sib & 7;
scale = sib >> 6;
if (!insn->sib.nbytes)
addr32 = base32 + idx32 * (1 << X86_SIB_SCALE(insn->sib.value));
*eff_addr = base + indx * (1 << X86_SIB_SCALE(insn->sib.value));
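The three macros above carve the SIB byte into its 2-bit scale, 3-bit index and 3-bit base fields, and the two assignments compute the classic effective address, base + index * 2^scale. A minimal standalone sketch of that computation (hypothetical regs[] register file; the REX extension bits seen in the rex_x/rex_b lines further down, and the mod=0/base=5 disp32 special case, are deliberately omitted):

#include <stdint.h>

#define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6)
#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3)
#define X86_SIB_BASE(sib)  ((sib) & 0x07)

/* Sketch only: decode one SIB byte against a flat register file. */
static uint64_t sib_eff_addr(uint8_t sib, const uint64_t regs[8], int32_t disp)
{
	uint64_t base  = regs[X86_SIB_BASE(sib)];
	uint64_t index = regs[X86_SIB_INDEX(sib)];

	/* index field 4 (the %rsp slot) encodes "no index register" */
	if (X86_SIB_INDEX(sib) == 4)
		index = 0;

	return base + (index << X86_SIB_SCALE(sib)) + (uint64_t)(int64_t)disp;
}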
if (insn->sib.nbytes) {
u8 sib = 0, sib_scale, sib_index, sib_base;
if (insn->sib.nbytes) {
sib = insn->sib.bytes[0];
sib_scale = X86_SIB_SCALE(sib);
sib_index = X86_SIB_INDEX(sib) + 8*x3 + 16*x4;
sib_base = X86_SIB_BASE(sib) + 8*b3 + 16*b4;
sib = 1;	/* fields extracted above; reuse "sib" as a SIB-present flag */
if (sib && (sib_scale != 0 || sib_index != 4)) /* (%reg, %eiz, 1) */
regno = X86_SIB_INDEX(insn->sib.value);
regno = X86_SIB_BASE(insn->sib.value);
if (insn->sib.got)
insn_field_set(&insn->sib,
insn->sib.got = 1;
base = X86_SIB_BASE(insn->sib.value);
return insn->opcode.got && insn->modrm.got && insn->sib.got &&
address = sib(mod, fpu_eip);	/* math-emu: here sib() is a decoder function, not a byte */
dump_field(fp, "sib", "\t", &insn->sib);
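The decoder lines above all follow one memoisation pattern: each field of struct insn carries a 'got' flag that insn_get_sib() sets after parsing, so repeated calls are cheap and insn_complete() only has to AND the flags together. Roughly, the field type looks like this (layout paraphrased from the accesses in this listing, not copied from a specific header):

struct insn_field {
	union {
		insn_value_t value;	/* e.g. insn->sib.value, fed to the SIB macros */
		insn_byte_t bytes[4];	/* e.g. insn->sib.bytes[0] */
	};
	unsigned char got;	/* set once insn_get_sib() has run */
	unsigned char nbytes;	/* 0 when the instruction has no SIB byte */
};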
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
struct sib_info sib;
const struct sib_info *sib)
exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
sib.sib_reason = SIB_HELPER_PRE;
sib.helper_name = cmd;
drbd_bcast_event(device, &sib);
if (sib) {
switch(sib->sib_reason) {
if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
nla_put_u32(skb, T_new_state, sib->ns.i))
sib->helper_exit_code))
if (nla_put_string(skb, T_helper, sib->helper_name))
sib.sib_reason = SIB_HELPER_POST;
sib.helper_exit_code = ret;
drbd_bcast_event(device, &sib);
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
if (nla_put_status_info(msg, device, sib))
err, seq, sib->sib_reason);
struct sib_info sib;
sib.sib_reason = SIB_STATE_CHANGE;
sib.os = os;
sib.ns = ns;
drbd_bcast_event(device, &sib);
struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };
drbd_bcast_event(device, &sib);
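The DRBD lines above all follow one broadcast pattern: fill a stack struct sib_info (state-info broadcast), then hand it to drbd_bcast_event(), which multicasts a netlink status packet; nla_put_status_info() switches on sib_reason to decide which attributes to emit. A condensed sketch of the helper-notification variant, using only fields visible in this listing (the function name is hypothetical, and it assumes the drbd_nl.c context for the types and enums):

/* Sketch: bracket a userspace helper with PRE/POST broadcasts. */
static void example_notify_helper(struct drbd_device *device,
				  const char *cmd, int ret)
{
	struct sib_info sib;

	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(device, &sib);

	/* ... run the helper here; 'ret' is its exit code ... */

	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(device, &sib);
}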
struct sockaddr_ib *sib;
sib = (struct sockaddr_ib *) addr;
return htons((u16) (be64_to_cpu(sib->sib_sid) &
be64_to_cpu(sib->sib_sid_mask)));
struct sockaddr_ib *sib;
sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
struct sockaddr_ib *sib;
sib = (struct sockaddr_ib *) addr;
sid = be64_to_cpu(sib->sib_sid);
mask = be64_to_cpu(sib->sib_sid_mask);
sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
sib->sib_sid_mask = cpu_to_be64(~0ULL);
struct sockaddr_ib *sib;
sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
sid = be64_to_cpu(sib->sib_sid) & mask;
sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
be64_to_cpu(sib->sib_sid_mask));
static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
struct sockaddr_ib sib = {
cm_ib = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sib, RDMA_PS_IB);
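In the RDMA CM lines above, an AF_IB address has no separate port field: the port is folded into the low 16 bits of the 64-bit service ID (sib_sid), and sib_sid_mask says which service-ID bits are significant. Two hedged helpers restating that convention (hypothetical names; struct sockaddr_ib itself lives in include/rdma/ib.h):

/* Read the port back out: mask the service ID, keep the low 16 bits. */
static __be16 example_sib_port(const struct sockaddr_ib *sib)
{
	return htons((u16)(be64_to_cpu(sib->sib_sid) &
			   be64_to_cpu(sib->sib_sid_mask)));
}

/* Bind a port: keep the masked service bits, OR in the host-order port,
 * then mark every service-ID bit as significant. */
static void example_sib_set_port(struct sockaddr_ib *sib, __be16 port)
{
	u64 sid  = be64_to_cpu(sib->sib_sid);
	u64 mask = be64_to_cpu(sib->sib_sid_mask);

	sib->sib_sid = cpu_to_be64((sid & mask) | (u64)ntohs(port));
	sib->sib_sid_mask = cpu_to_be64(~0ULL);
}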
struct dm_block *sib;
r = shadow_child(s->info, vt, parent, parent_index - 1, &sib);
left = dm_block_data(sib);
s->nodes[1] = sib;
unlock_block(s->info, sib);
struct dm_block *sib;
r = shadow_child(s->info, vt, parent, parent_index + 1, &sib);
right = dm_block_data(sib);
unlock_block(s->info, sib);
s->nodes[1] = sib;
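The two dm-btree fragments above are mirror images: rebalancing shadows the block at parent_index - 1 (left neighbour) or parent_index + 1 (right neighbour), works on its node data, parks it in the shadow spine, and unlocks it again. A condensed sketch of that shape (not the exact kernel function; struct btree_node and the spine bookkeeping are taken on faith from the listing):

struct dm_block *sib;
struct btree_node *left;
int r;

/* shadow (copy-on-write) the left neighbour of this child */
r = shadow_child(s->info, vt, parent, parent_index - 1, &sib);
if (r)
	return r;

left = dm_block_data(sib);	/* the sibling's btree node payload */
/* ... redistribute entries between 'left' and the current node ... */

s->nodes[1] = sib;		/* park it in the shadow spine */
/* ... and unlock_block(s->info, sib) once rebalancing is done */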
u8 sib[204]; /* 0x30-0xfb */
enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
if (match & BIT(sib))
sib_mask = ice_flds_info[sib].mask;
sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
if (match & BIT(sib))
sib_mask = ice_flds_info[sib].mask;
sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
	ICE_FLOW_FIELD_IDX_ICMP_CODE : ICE_FLOW_FIELD_IDX_ICMP_TYPE;
if (sib == ICE_FLOW_FIELD_IDX_MAX ||
flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
flds[sib].xtrct.off != off) {
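In the ice lines above, 'sib' is a sibling flow field: IPv4 TTL and protocol (and their IPv6 and ICMP counterparts) share a single extraction word in hardware, so programming one field must fold in the other's mask when the rule matches on both. A condensed restatement, using only names that appear in this listing:

enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
u16 sib_mask = 0;

if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
	sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;	/* TTL shares a word with PROT */
else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
	sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;

/* only honour the sibling if this rule actually matches on it */
if (sib != ICE_FLOW_FIELD_IDX_MAX && (match & BIT(sib)))
	sib_mask = ice_flds_info[sib].mask;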
struct nilfs_btree_node *node, *parent, *sib;
sib = (struct nilfs_btree_node *)bh->b_data;
if (nilfs_btree_node_get_nchildren(sib) < ncblk) {
sib = (struct nilfs_btree_node *)bh->b_data;
if (nilfs_btree_node_get_nchildren(sib) < ncblk) {
sib = (struct nilfs_btree_node *)bh->b_data;
nilfs_btree_node_init(sib, 0, level, 0, ncblk, NULL, NULL);
struct nilfs_btree_node *node, *parent, *sib;
sib = (struct nilfs_btree_node *)bh->b_data;
if (nilfs_btree_node_get_nchildren(sib) > ncmin) {
sib = (struct nilfs_btree_node *)bh->b_data;
if (nilfs_btree_node_get_nchildren(sib) > ncmin) {
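The nilfs2 fragments above are the classic B-tree occupancy tests against a sibling node: on insert, a sibling holding fewer than ncblk children can absorb carried entries; on delete, a sibling holding more than ncmin can lend one, and otherwise the nodes merge. Condensed into one place (in the kernel these checks live in separate insert and delete paths):

sib = (struct nilfs_btree_node *)bh->b_data;

if (nilfs_btree_node_get_nchildren(sib) < ncblk) {
	/* insert path: the sibling has room, shift entries into it
	 * instead of splitting the overflowing node */
}

if (nilfs_btree_node_get_nchildren(sib) > ncmin) {
	/* delete path: the sibling is above minimum fill, borrow a
	 * child from it */
} else {
	/* delete path: neither node can stand alone, concatenate them */
}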
struct xbc_node *sib, *node = xbc_add_node(data, flag);
sib = xbc_last_sibling(xbc_nodes);
sib->next = xbc_node_index(node);
sib = xbc_node_get_child(last_parent);
sib = xbc_last_sibling(sib);
sib->next = xbc_node_index(node);
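In the bootconfig lines above, tree nodes link to each other by index rather than pointer: appending a sibling means walking the chain via xbc_last_sibling() and storing the new node's index in the last node's next field. A condensed sketch of that append (the real code also covers a parent with no children yet):

struct xbc_node *sib, *node = xbc_add_node(data, flag);

if (!last_parent) {
	/* top level: append to the root sibling chain */
	sib = xbc_last_sibling(xbc_nodes);
	sib->next = xbc_node_index(node);
} else {
	/* append after the parent's last existing child */
	sib = xbc_node_get_child(last_parent);
	sib = xbc_last_sibling(sib);
	sib->next = xbc_node_index(node);
}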
sib = 0, /* sib_scale = 0, */ sib_index = 0, sib_base = 0;
if (ins.sib.nbytes) {
sib = ins.sib.bytes[0];
sib_index = X86_SIB_INDEX(sib) + 8*rex_x;
sib_base = X86_SIB_BASE(sib) + 8*rex_b;
int sib = 0;
for ( ; sib < self->sibling_count; ++sib) {
struct tsync_sibling *s = &self->sibling[sib];
long ret, sib;
sib = 1;
sib = 0;
self->sibling[!sib].num_waits += 1;
PTHREAD_JOIN(self->sibling[sib].tid, &status);
while (!kill(self->sibling[sib].system_tid, 0))
sib = !sib;
if (self->sibling[sib].num_waits > 1)
self->sibling[sib].num_waits = 1;
PTHREAD_JOIN(self->sibling[sib].tid, &status);
while (!kill(self->sibling[sib].system_tid, 0))
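Finally, in the seccomp TSYNC selftests 'sib' is nothing architectural at all: it is just an index over the sibling threads the test spawns, either walked linearly or toggled with sib = !sib so the harness can drive one sibling and then the other. The loop shape, condensed from the lines above:

int sib = 0;

for ( ; sib < self->sibling_count; ++sib) {
	struct tsync_sibling *s = &self->sibling[sib];
	/* ... e.g. join s->tid, or poll s->system_tid with kill(, 0) ... */
}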