readahead_pos
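
Every call site below reads the byte position of the current readahead window from the struct readahead_control that the VM passes to a filesystem's ->readahead() operation, and most pair it with readahead_length() for the window size. For reference, the two helpers live in include/linux/pagemap.h and are defined roughly as follows (paraphrased from recent kernels; verify against the tree you are building for):

static inline loff_t readahead_pos(struct readahead_control *rac)
{
	/* Byte offset into the file of the first folio in this request. */
	return (loff_t)rac->_index * PAGE_SIZE;
}

static inline size_t readahead_length(struct readahead_control *rac)
{
	/* Number of bytes covered by this readahead request. */
	return rac->_nr_pages * PAGE_SIZE;
}
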
locked_end = readahead_pos(bio_ctrl->ractl) + readahead_length(bio_ctrl->ractl) - 1;
const u64 start = readahead_pos(rac);
const u64 ra_pos = readahead_pos(ractl);
readahead_pos(rac), readahead_length(rac));
Z_EROFS_DEFINE_FRONTEND(f, realinode, sharedinode, readahead_pos(rac));
loff_t pos = readahead_pos(rac);
.pos = readahead_pos(rac),
unsigned long long start = readahead_pos(ractl);
trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
if (rreq->start != readahead_pos(ractl) ||
rreq->start = readahead_pos(ractl);
trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
if (readahead_pos(rac) >= i_size_read(inode))
loff_t new_start = readahead_pos(rac);
loff_t bytes_remaining = inode->i_size - readahead_pos(rac);
offset = readahead_pos(rac);
loff_t start = readahead_pos(ractl) & ~mask;
size_t len = readahead_length(ractl) + readahead_pos(ractl) - start;
if (readahead_pos(ractl) >= i_size_read(inode))
new_len += new_start - readahead_pos(ractl);
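
Taken together, these call sites follow a common pattern: take readahead_pos() as the starting file offset, readahead_length() as the window size, clamp the window against i_size, and optionally round it out to a filesystem block boundary before issuing I/O. A minimal sketch of that pattern, assuming a hypothetical filesystem: my_readahead(), my_read_range(), and the 4KiB block-size shift are illustrative names only, not taken from any of the filesystems above.

#include <linux/fs.h>
#include <linux/pagemap.h>

static void my_readahead(struct readahead_control *ractl)
{
	struct inode *inode = ractl->mapping->host;
	const unsigned int block_bits = 12;	/* hypothetical 4KiB blocks */
	loff_t mask = (1ULL << block_bits) - 1;
	loff_t pos = readahead_pos(ractl);
	size_t len = readahead_length(ractl);
	loff_t start, remaining;

	/* Nothing to do if the window starts at or beyond EOF. */
	if (pos >= i_size_read(inode))
		return;

	/* Do not read past the end of the file. */
	remaining = i_size_read(inode) - pos;
	if (len > remaining)
		len = remaining;

	/*
	 * Round the start down to a block boundary and grow the length so
	 * the window still ends where it originally did.
	 */
	start = pos & ~mask;
	len += pos - start;

	my_read_range(inode, start, len);	/* hypothetical I/O helper */
}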