rb_entry_safe
return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
engine = rb_entry_safe(rb_next(&engine->uabi_node),
while ((node = rb_entry_safe(rb_first(&cache->rb_root),
entry = rb_entry_safe(parent, struct inode_defrag, rb_node);
return rb_entry_safe(node, struct btrfs_delayed_item, rb_node);
return rb_entry_safe(p, struct btrfs_delayed_item, rb_node);
return rb_entry_safe(p, struct btrfs_delayed_item, rb_node);
return rb_entry_safe(p, struct btrfs_delayed_item, rb_node);
return rb_entry_safe(exist, struct btrfs_delayed_ref_node, ref_node);
return rb_entry_safe(next, struct extent_state, rb_node);
return rb_entry_safe(next, struct extent_state, rb_node);
merge = rb_entry_safe(rb, struct extent_map, rb_node);
merge = rb_entry_safe(rb, struct extent_map, rb_node);
return rb_entry_safe(node, struct btrfs_qgroup, node);
return rb_entry_safe(node, struct block_entry, node);
return rb_entry_safe(node, struct block_entry, node);
return rb_entry_safe(node, struct root_entry, node);
return rb_entry_safe(node, struct ref_entry, node);
return rb_entry_safe(node, struct root_entry, node);
return rb_entry_safe(node, struct ulist_node, rb_node);
*next_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node);
*prev_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node);
*prev_entry = rb_entry_safe(tmp_node,
*next_entry = rb_entry_safe(tmp_node,
next_en = rb_entry_safe(node, struct extent_node,
ve = rb_entry_safe(node, struct victim_entry, rb_node);
*next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
*prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
*prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
*next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node);
next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
dc = rb_entry_safe(node, struct discard_cmd, rb_node);
dc = rb_entry_safe(node, struct discard_cmd, rb_node);
return rb_entry_safe(rb_first(&dir->subdir), struct proc_dir_entry,
return rb_entry_safe(rb_next(&dir->subdir_node), struct proc_dir_entry,
for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
(bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
(bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
/* Map an rb_node pointer back to its containing sk_buff via the rbnode
 * member; NOTE(review): the "_safe" variant presumably passes NULL through
 * unchanged (cf. the rb_first()/rb_next() call sites above) — confirm
 * against the rb_entry_safe definition in rbtree.h. */
#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
return rb_entry_safe(leftmost, struct timerqueue_node, node);
for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
event = rb_entry_safe(rb_next(&event->group_node), \
va = rb_entry_safe(node, struct vmap_area, rb_node);
return rb_entry_safe(n, struct vmap_area, rb_node);
for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \