// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem read subrequest result collection, assessment and
 * retrying.
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"
/* Notes made in the collector about the state of the request */
#define HIT_PENDING	0x01	/* A front op was still pending */
#define MADE_PROGRESS	0x04	/* Made progress cleaning up a stream or the folio set */
#define BUFFERED	0x08	/* The pagecache needs cleaning up */
#define NEED_RETRY	0x10	/* A front op requests retrying */
#define COPY_TO_CACHE	0x40	/* Need to copy the data to the cache */
#define ABANDON_SREQ	0x80	/* Need to abandon untransferred part of subrequest */
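
/*
 * Clear the unread part of an I/O request.
 */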
static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
{
	netfs_reset_iter(subreq);
	WARN_ON_ONCE(subreq->len - subreq->transferred != iov_iter_count(&subreq->io_iter));
	iov_iter_zero(iov_iter_count(&subreq->io_iter), &subreq->io_iter);
	if (subreq->start + subreq->transferred >= subreq->rreq->i_size)
		__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
}
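
/*
 * Flush, mark and unlock a folio that's now completely read.  If we want to
 * cache the folio, we set the group to NETFS_FOLIO_COPY_TO_CACHE, mark it
 * dirty and let writeback handle it.
 */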
static void netfs_unlock_read_folio(struct netfs_io_request *rreq,
				    struct folio_queue *folioq,
				    int slot)
{
	struct netfs_folio *finfo;
	struct folio *folio = folioq_folio(folioq, slot);

	if (unlikely(folio_pos(folio) < rreq->abandon_to)) {
		trace_netfs_folio(folio, netfs_folio_trace_abandon);
		goto just_unlock;
	}

	flush_dcache_folio(folio);
	folio_mark_uptodate(folio);

	if (!test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) {
		finfo = netfs_folio_info(folio);
		if (finfo) {
			trace_netfs_folio(folio, netfs_folio_trace_filled_gaps);
			if (finfo->netfs_group)
				folio_change_private(folio, finfo->netfs_group);
			else
				folio_detach_private(folio);
			kfree(finfo);
		}

		if (test_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags)) {
			if (!WARN_ON_ONCE(folio_get_private(folio) != NULL)) {
				trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
				folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE);
				folio_mark_dirty(folio);
			}
		} else {
			trace_netfs_folio(folio, netfs_folio_trace_read_done);
		}

		folioq_clear(folioq, slot);
	} else {
		/* The deprecated PG_private_2 copy-to-cache path. */
		if (test_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags))
			netfs_pgpriv2_copy_to_cache(rreq, folio);
	}

just_unlock:
	if (folio->index == rreq->no_unlock_folio &&
	    test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags)) {
		_debug("no unlock");
	} else {
		trace_netfs_folio(folio, netfs_folio_trace_read_unlock);
		folio_unlock(folio);
	}

	folioq_clear(folioq, slot);
}
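
/*
 * Unlock any folios we've finished reading into.
 */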
static void netfs_read_unlock_folios(struct netfs_io_request *rreq,
				     unsigned int *notes)
{
	struct folio_queue *folioq = rreq->buffer.tail;
	unsigned long long collected_to = rreq->collected_to;
	unsigned int slot = rreq->buffer.first_tail_slot;

	if (rreq->cleaned_to >= rreq->collected_to)
		return;

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = rolling_buffer_delete_spent(&rreq->buffer);
		if (!folioq) {
			rreq->front_folio_order = 0;
			return;
		}
		slot = 0;
	}

	for (;;) {
		struct folio *folio;
		unsigned long long fpos, fend;
		unsigned int order;
		size_t fsize;

		if (*notes & COPY_TO_CACHE)
			set_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);

		folio = folioq_folio(folioq, slot);
		if (WARN_ONCE(!folio_test_locked(folio),
			      "R=%08x: folio %lx is not locked\n",
			      rreq->debug_id, folio->index))
			trace_netfs_folio(folio, netfs_folio_trace_not_locked);

		order = folioq_folio_order(folioq, slot);
		rreq->front_folio_order = order;
		fsize = PAGE_SIZE << order;
		fpos = folio_pos(folio);
		fend = fpos + fsize;

		trace_netfs_collect_folio(rreq, folio, fend, collected_to);

		/* Unlock any folio we've transferred all of. */
		if (collected_to < fend)
			break;

		netfs_unlock_read_folio(rreq, folioq, slot);
		WRITE_ONCE(rreq->cleaned_to, fpos + fsize);
		*notes |= MADE_PROGRESS;

		clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);

		/* Clean up the head folioq.  If we clear an entire folioq, then
		 * we can get rid of it provided it's not also the tail folioq
		 * being filled by the issuer.
		 */
		folioq_clear(folioq, slot);
		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			folioq = rolling_buffer_delete_spent(&rreq->buffer);
			if (!folioq)
				goto done;
			slot = 0;
			trace_netfs_folioq(folioq, netfs_trace_folioq_read_progress);
		}

		if (fpos + fsize >= collected_to)
			break;
	}

	rreq->buffer.tail = folioq;
done:
	rreq->buffer.first_tail_slot = slot;
}
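
/*
 * Collect and assess the results of various read subrequests.  We may need to
 * retry some of the results.
 *
 * Note that we have a sequence of subrequests, which may be drawing on
 * different sources and may or may not be the same size or starting position
 * and may not even correspond in boundary alignment.
 */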
static void netfs_collect_read_results(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *front, *remove;
	struct netfs_io_stream *stream = &rreq->io_streams[0];
	unsigned int notes;

	_enter("%llx-%llx", rreq->start, rreq->start + rreq->len);
	trace_netfs_rreq(rreq, netfs_rreq_trace_collect);
	trace_netfs_collect(rreq);

reassess:
	if (rreq->origin == NETFS_READAHEAD ||
	    rreq->origin == NETFS_READPAGE ||
	    rreq->origin == NETFS_READ_FOR_WRITE)
		notes = BUFFERED;
	else
		notes = 0;

	/* Remove completed subrequests from the front of the stream and
	 * advance the completion point.  We stop when we hit something that's
	 * in progress.  The issuer thread may be adding stuff to the tail
	 * whilst we're doing this.
	 */
	front = list_first_entry_or_null(&stream->subrequests,
					 struct netfs_io_subrequest, rreq_link);
	while (front) {
		size_t transferred;

		trace_netfs_collect_sreq(rreq, front);
		_debug("sreq [%x] %llx %zx/%zx",
		       front->debug_index, front->start, front->transferred, front->len);

		if (stream->collected_to < front->start) {
			trace_netfs_collect_gap(rreq, stream, front->start, 'F');
			stream->collected_to = front->start;
		}

		if (netfs_check_subreq_in_progress(front))
			notes |= HIT_PENDING;
		smp_rmb(); /* Read counters after IN_PROGRESS flag. */
		transferred = READ_ONCE(front->transferred);

		/* If we can now collect the next folio, do so.  We don't want
		 * to defer this as we have to decide whether we need to copy
		 * to the cache or not, and that may differ between adjacent
		 * subreqs.
		 */
		if (notes & BUFFERED) {
			size_t fsize = PAGE_SIZE << rreq->front_folio_order;

			/* Clear the tail of a short read. */
			if (!(notes & HIT_PENDING) &&
			    front->error == 0 &&
			    transferred < front->len &&
			    (test_bit(NETFS_SREQ_HIT_EOF, &front->flags) ||
			     test_bit(NETFS_SREQ_CLEAR_TAIL, &front->flags))) {
				netfs_clear_unread(front);
				transferred = front->transferred = front->len;
				trace_netfs_sreq(front, netfs_sreq_trace_clear);
			}

			stream->collected_to = front->start + transferred;
			rreq->collected_to = stream->collected_to;

			if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &front->flags))
				notes |= COPY_TO_CACHE;

			if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
				rreq->abandon_to = front->start + front->len;
				front->transferred = front->len;
				transferred = front->len;
				trace_netfs_rreq(rreq, netfs_rreq_trace_set_abandon);
			}
			if (front->start + transferred >= rreq->cleaned_to + fsize ||
			    test_bit(NETFS_SREQ_HIT_EOF, &front->flags))
				netfs_read_unlock_folios(rreq, &notes);
		} else {
			stream->collected_to = front->start + transferred;
			rreq->collected_to = stream->collected_to;
		}

		/* Stall if the front is still undergoing I/O. */
		if (notes & HIT_PENDING)
			break;

		if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
			if (!stream->failed) {
				stream->error = front->error;
				rreq->error = front->error;
				set_bit(NETFS_RREQ_FAILED, &rreq->flags);
				stream->failed = true;
			}
			notes |= MADE_PROGRESS | ABANDON_SREQ;
		} else if (test_bit(NETFS_SREQ_NEED_RETRY, &front->flags)) {
			stream->need_retry = true;
			notes |= NEED_RETRY | MADE_PROGRESS;
			break;
		} else if (test_bit(NETFS_RREQ_SHORT_TRANSFER, &rreq->flags)) {
			notes |= MADE_PROGRESS;
		} else {
			if (!stream->failed) {
				stream->transferred += transferred;
				stream->transferred_valid = true;
			}
			if (front->transferred < front->len)
				set_bit(NETFS_RREQ_SHORT_TRANSFER, &rreq->flags);
			notes |= MADE_PROGRESS;
		}

		/* Remove if completely consumed. */
		stream->source = front->source;
		spin_lock(&rreq->lock);

		remove = front;
		trace_netfs_sreq(front,
				 notes & ABANDON_SREQ ?
				 netfs_sreq_trace_abandoned :
				 netfs_sreq_trace_consumed);
		list_del_init(&front->rreq_link);
		front = list_first_entry_or_null(&stream->subrequests,
						 struct netfs_io_subrequest, rreq_link);
		spin_unlock(&rreq->lock);
		netfs_put_subrequest(remove,
				     notes & ABANDON_SREQ ?
				     netfs_sreq_trace_put_abandon :
				     netfs_sreq_trace_put_done);
	}

	trace_netfs_collect_stream(rreq, stream);
	trace_netfs_collect_state(rreq, rreq->collected_to, notes);

	if (!(notes & BUFFERED))
		rreq->cleaned_to = rreq->collected_to;

	if (notes & NEED_RETRY)
		goto need_retry;
	if (notes & MADE_PROGRESS) {
		netfs_wake_rreq_flag(rreq, NETFS_RREQ_PAUSE, netfs_rreq_trace_unpause);
		goto reassess;
	}

out:
	_leave(" = %x", notes);
	return;

need_retry:
	/* Okay...  We're going to have to retry parts of the stream.  Note
	 * that any partially completed op will have had any wholly transferred
	 * folios removed from it.
	 */
	_debug("retry");
	netfs_retry_reads(rreq);
	goto out;
}
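
/*
 * Do page flushing and suchlike after DIO.
 */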
static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
{
	unsigned int i;

	if (rreq->origin == NETFS_UNBUFFERED_READ ||
	    rreq->origin == NETFS_DIO_READ) {
		for (i = 0; i < rreq->direct_bv_count; i++) {
			flush_dcache_page(rreq->direct_bv[i].bv_page);
			set_page_dirty(rreq->direct_bv[i].bv_page);
		}
	}

	if (rreq->iocb) {
		rreq->iocb->ki_pos += rreq->transferred;
		if (rreq->iocb->ki_complete) {
			trace_netfs_rreq(rreq, netfs_rreq_trace_ki_complete);
			rreq->iocb->ki_complete(
				rreq->iocb, rreq->error ? rreq->error : rreq->transferred);
		}
	}
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
	if (rreq->origin == NETFS_UNBUFFERED_READ ||
	    rreq->origin == NETFS_DIO_READ)
		inode_dio_end(rreq->inode);
}
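
/*
 * Do processing after reading a monolithic single object.
 */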
static void netfs_rreq_assess_single(struct netfs_io_request *rreq)
{
	struct netfs_io_stream *stream = &rreq->io_streams[0];

	if (!rreq->error && stream->source == NETFS_DOWNLOAD_FROM_SERVER &&
	    fscache_resources_valid(&rreq->cache_resources)) {
		trace_netfs_rreq(rreq, netfs_rreq_trace_dirty);
		netfs_single_mark_inode_dirty(rreq->inode);
	}

	if (rreq->iocb) {
		rreq->iocb->ki_pos += rreq->transferred;
		if (rreq->iocb->ki_complete) {
			trace_netfs_rreq(rreq, netfs_rreq_trace_ki_complete);
			rreq->iocb->ki_complete(
				rreq->iocb, rreq->error ? rreq->error : rreq->transferred);
		}
	}
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
}
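
/*
 * Perform the collection of subrequests and folios.
 *
 * Note that we're in normal kernel thread context at this point, possibly
 * running on a workqueue.  Returns true if the request is now complete.
 */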
bool netfs_read_collection(struct netfs_io_request *rreq)
{
	struct netfs_io_stream *stream = &rreq->io_streams[0];

	netfs_collect_read_results(rreq);

	/* We're done when the app thread has finished posting subreqs and the
	 * queue is empty.
	 */
	if (!test_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags))
		return false;
	smp_rmb(); /* Read ALL_QUEUED before subreq lists. */

	if (!list_empty(&stream->subrequests))
		return false;

	/* Okay, declare that all I/O is complete. */
	rreq->transferred = stream->transferred;
	trace_netfs_rreq(rreq, netfs_rreq_trace_complete);

	switch (rreq->origin) {
	case NETFS_UNBUFFERED_READ:
	case NETFS_DIO_READ:
	case NETFS_READ_GAPS:
		netfs_rreq_assess_dio(rreq);
		break;
	case NETFS_READ_SINGLE:
		netfs_rreq_assess_single(rreq);
		break;
	default:
		break;
	}
	task_io_account_read(rreq->transferred);

	netfs_wake_rreq_flag(rreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);

	trace_netfs_rreq(rreq, netfs_rreq_trace_done);
	netfs_clear_subrequests(rreq);
	netfs_unlock_abandoned_read_pages(rreq);
	if (unlikely(rreq->copy_to_cache))
		netfs_pgpriv2_end_copy_to_cache(rreq);
	return true;
}
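
/*
 * Workqueue handler that drives result collection for a read request.
 */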
void netfs_read_collection_worker(struct work_struct *work)
{
	struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work);

	netfs_see_request(rreq, netfs_rreq_trace_see_work);
	if (netfs_check_rreq_in_progress(rreq)) {
		if (netfs_read_collection(rreq))
			/* Drop the ref from the IN_PROGRESS flag. */
			netfs_put_request(rreq, netfs_rreq_trace_put_work_ip);
		else
			netfs_see_request(rreq, netfs_rreq_trace_see_work_complete);
	}
}
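
/**
 * netfs_read_subreq_progress - Note progress of a read operation
 * @subreq: The read request that has made progress
 *
 * This tells the read side of the netfs library that a contributory I/O
 * operation has made some progress and that it may be possible to unlock some
 * folios.  Before calling, the filesystem should have updated
 * subreq->transferred to reflect the amount of data copied into the output
 * buffer so far.
 */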
void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct netfs_io_stream *stream = &rreq->io_streams[0];
	size_t fsize = PAGE_SIZE << rreq->front_folio_order;

	trace_netfs_sreq(subreq, netfs_sreq_trace_progress);

	/* If this buffered subreq is at the head of the queue and has crossed
	 * a folio boundary beyond the cleaned-up point, wake up the collector
	 * so that it can unlock any completely read folios.
	 */
	if (subreq->start + subreq->transferred > rreq->cleaned_to + fsize &&
	    (rreq->origin == NETFS_READAHEAD ||
	     rreq->origin == NETFS_READPAGE ||
	     rreq->origin == NETFS_READ_FOR_WRITE) &&
	    list_is_first(&subreq->rreq_link, &stream->subrequests)
	    ) {
		__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
		netfs_wake_collector(rreq);
	}
}
EXPORT_SYMBOL(netfs_read_subreq_progress);
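
/**
 * netfs_read_subreq_terminated - Note the termination of an I/O operation
 * @subreq: The I/O request that has terminated
 *
 * This tells the read helper that a contributory I/O operation has
 * terminated, one way or another, and that it should finish collecting the
 * results.  Before calling, the filesystem should have updated
 * subreq->transferred to reflect the amount of data copied into the output
 * buffer and set subreq->error to zero or a negative errno as appropriate.
 */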
void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;

	switch (subreq->source) {
	case NETFS_READ_FROM_CACHE:
		netfs_stat(&netfs_n_rh_read_done);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_stat(&netfs_n_rh_download_done);
		break;
	default:
		break;
	}

	/* Deal with retry requests, short reads and errors.  If we retry
	 * but don't make progress, we abandon the attempt.
	 */
	if (!subreq->error && subreq->transferred < subreq->len) {
		if (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags)) {
			trace_netfs_sreq(subreq, netfs_sreq_trace_hit_eof);
		} else if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
			trace_netfs_sreq(subreq, netfs_sreq_trace_need_clear);
		} else if (test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
			trace_netfs_sreq(subreq, netfs_sreq_trace_need_retry);
		} else if (test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags)) {
			__set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
			trace_netfs_sreq(subreq, netfs_sreq_trace_partial_read);
		} else {
			__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
			subreq->error = -ENODATA;
			trace_netfs_sreq(subreq, netfs_sreq_trace_short);
		}
	}

	if (test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
		trace_netfs_rreq(rreq, netfs_rreq_trace_set_pause);
		set_bit(NETFS_RREQ_PAUSE, &rreq->flags);
		goto skip_error_checks;
	}

	if (unlikely(subreq->error < 0)) {
		trace_netfs_failure(rreq, subreq, subreq->error, netfs_fail_read);
		if (subreq->source == NETFS_READ_FROM_CACHE) {
			/* A failed cache read can be retried from the server. */
			netfs_stat(&netfs_n_rh_read_failed);
			__set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
		} else {
			netfs_stat(&netfs_n_rh_download_failed);
			__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
		}
		trace_netfs_rreq(rreq, netfs_rreq_trace_set_pause);
		set_bit(NETFS_RREQ_PAUSE, &rreq->flags);
	}

skip_error_checks:
	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
	netfs_subreq_clear_in_progress(subreq);
	netfs_put_subrequest(subreq, netfs_sreq_trace_put_terminated);
}
EXPORT_SYMBOL(netfs_read_subreq_terminated);
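
/*
 * Handle termination of a read from the cache.  @transferred_or_error carries
 * either a byte count or a negative errno.
 */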
void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error)
{
	struct netfs_io_subrequest *subreq = priv;

	if (transferred_or_error > 0) {
		/* A positive value is a byte count; note the progress made. */
		subreq->error = 0;
		subreq->transferred += transferred_or_error;
		__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
	} else {
		/* Zero or a negative errno is recorded as the result. */
		subreq->error = transferred_or_error;
	}
	netfs_read_subreq_terminated(subreq);
}