rb_last
node = rb_last(&slots->gfn_tree);
rb_node = rb_last(&p->svms.objects.rb_root);
struct rb_node *iter = rb_last(&mm->free_trees[tree][i]);
return rbtree_get_free_block(rb_last(root));
iter = rb_last(root);
container_of_or_null(rb_last(root), type, member)
#define MTDSWAP_ECNT_MAX(rbroot) (rb_entry(rb_last(rbroot), struct swap_eb, \
last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) {
n = rb_last(root);
while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
n = rb_last(&fs_info->mapping_tree.rb_root);
struct rb_node *node = rb_last(root);
/*
 * tn_last(list): entry for the rightmost (last in tree order) node of the
 * rb-tree rooted at @list, as a struct jffs2_tmp_dnode_info (linked via .rb).
 * NOTE(review): rb_last() returns NULL for an empty tree, and rb_entry() on
 * NULL yields a bogus pointer — presumably callers guarantee the tree is
 * non-empty; verify at call sites.
 */
#define tn_last(list) rb_entry(rb_last(list), struct jffs2_tmp_dnode_info, rb)
n = rb_last(&wnd->count_tree);
e = rb_entry(rb_last(&wnd->count_tree), struct e_node,
while ((node = rb_last(root)) != NULL) {
while ((node = rb_last(&resmap->m_reservations)) != NULL) {
while ((node = rb_last(root)) != NULL) {
iter->node = rb_last(&slots->gfn_tree);
/*
 * skb_rb_last(root): the sk_buff at the rightmost (last in tree order) node
 * of the rb-tree @root.
 * NOTE(review): rb_last() returns NULL for an empty tree — confirm that
 * rb_to_skb() / callers tolerate a NULL node, or that @root is non-empty.
 */
#define skb_rb_last(root) rb_to_skb(rb_last(root))
struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);
struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);
WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root.rb_root))) - 1);
node = rb_last(&priv->root);
node = rb_last(&priv->root);
n = rb_last(&cl->cl_parent->vt_tree);
return rb_last(root);
/*
 * rb_last() - return the rightmost node of the rb-tree @root, i.e. the node
 * that sorts last in the tree's key order, or NULL if the tree is empty
 * (callers here rely on the NULL return, e.g. `while ((n = rb_last(...)) != NULL)`).
 * Does not modify the tree.
 */
extern struct rb_node *rb_last(const struct rb_root *);
nd = rb_last(root);
browser->curr_hot = rb_last(&browser->entries);
nd = rb_last(&browser->entries);
nd = rb_hierarchy_last(rb_last(browser->entries));
bool has_single_node = (rb_first(root) == rb_last(root));
bool has_single_node = (rb_first(root) == rb_last(root));
node = rb_last(&he->hroot_out.rb_root);
struct rb_node *nd = rb_last(&symbols->rb_root);
struct rb_node *n = rb_last(&symbols->rb_root);